Consistent formatting for Dawn/Tint.

This CL updates the clang-format files to have a single shared format
between Dawn and Tint. The major changes are that tabs are 4 spaces,
lines are 100 columns, and namespaces are not indented.

Bug: dawn:1339
Change-Id: I4208742c95643998d9fd14e77a9cc558071ded39
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/87603
Commit-Queue: Dan Sinclair <dsinclair@chromium.org>
Reviewed-by: Corentin Wallez <cwallez@chromium.org>
Kokoro: Kokoro <noreply+kokoro@google.com>
diff --git a/src/dawn/CPPLINT.cfg b/src/dawn/CPPLINT.cfg
deleted file mode 100644
index f5c9c6d..0000000
--- a/src/dawn/CPPLINT.cfg
+++ /dev/null
@@ -1 +0,0 @@
-filter=-runtime/indentation_namespace
diff --git a/src/dawn/common/Assert.h b/src/dawn/common/Assert.h
index ee9eeb4..ba4a429 100644
--- a/src/dawn/common/Assert.h
+++ b/src/dawn/common/Assert.h
@@ -32,32 +32,32 @@
 // MSVC triggers a warning in /W4 for do {} while(0). SDL worked around this by using (0,0) and
 // points out that it looks like an owl face.
 #if defined(DAWN_COMPILER_MSVC)
-#    define DAWN_ASSERT_LOOP_CONDITION (0, 0)
+#define DAWN_ASSERT_LOOP_CONDITION (0, 0)
 #else
-#    define DAWN_ASSERT_LOOP_CONDITION (0)
+#define DAWN_ASSERT_LOOP_CONDITION (0)
 #endif
 
 // DAWN_ASSERT_CALLSITE_HELPER generates the actual assert code. In Debug it does what you would
 // expect of an assert and in release it tries to give hints to make the compiler generate better
 // code.
 #if defined(DAWN_ENABLE_ASSERTS)
-#    define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition)  \
-        do {                                                          \
-            if (!(condition)) {                                       \
-                HandleAssertionFailure(file, func, line, #condition); \
-            }                                                         \
-        } while (DAWN_ASSERT_LOOP_CONDITION)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition)  \
+    do {                                                          \
+        if (!(condition)) {                                       \
+            HandleAssertionFailure(file, func, line, #condition); \
+        }                                                         \
+    } while (DAWN_ASSERT_LOOP_CONDITION)
 #else
-#    if defined(DAWN_COMPILER_MSVC)
-#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
-#    elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
-#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
-#    else
-#        define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
-            do {                                                         \
-                DAWN_UNUSED(sizeof(condition));                          \
-            } while (DAWN_ASSERT_LOOP_CONDITION)
-#    endif
+#if defined(DAWN_COMPILER_MSVC)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __assume(condition)
+#elif defined(DAWN_COMPILER_CLANG) && defined(__builtin_assume)
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) __builtin_assume(condition)
+#else
+#define DAWN_ASSERT_CALLSITE_HELPER(file, func, line, condition) \
+    do {                                                         \
+        DAWN_UNUSED(sizeof(condition));                          \
+    } while (DAWN_ASSERT_LOOP_CONDITION)
+#endif
 #endif
 
 #define DAWN_ASSERT(condition) DAWN_ASSERT_CALLSITE_HELPER(__FILE__, __func__, __LINE__, condition)
@@ -68,8 +68,8 @@
     } while (DAWN_ASSERT_LOOP_CONDITION)
 
 #if !defined(DAWN_SKIP_ASSERT_SHORTHANDS)
-#    define ASSERT DAWN_ASSERT
-#    define UNREACHABLE DAWN_UNREACHABLE
+#define ASSERT DAWN_ASSERT
+#define UNREACHABLE DAWN_UNREACHABLE
 #endif
 
 void HandleAssertionFailure(const char* file,
diff --git a/src/dawn/common/BitSetIterator.h b/src/dawn/common/BitSetIterator.h
index 0f1997c..a011249 100644
--- a/src/dawn/common/BitSetIterator.h
+++ b/src/dawn/common/BitSetIterator.h
@@ -62,24 +62,18 @@
         uint32_t mOffset;
     };
 
-    Iterator begin() const {
-        return Iterator(mBits);
-    }
-    Iterator end() const {
-        return Iterator(std::bitset<N>(0));
-    }
+    Iterator begin() const { return Iterator(mBits); }
+    Iterator end() const { return Iterator(std::bitset<N>(0)); }
 
   private:
     const std::bitset<N> mBits;
 };
 
 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {
-}
+BitSetIterator<N, T>::BitSetIterator(const std::bitset<N>& bitset) : mBits(bitset) {}
 
 template <size_t N, typename T>
-BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {
-}
+BitSetIterator<N, T>::BitSetIterator(const BitSetIterator& other) : mBits(other.mBits) {}
 
 template <size_t N, typename T>
 BitSetIterator<N, T>& BitSetIterator<N, T>::operator=(const BitSetIterator& other) {
diff --git a/src/dawn/common/Compiler.h b/src/dawn/common/Compiler.h
index db75933..fc29c40 100644
--- a/src/dawn/common/Compiler.h
+++ b/src/dawn/common/Compiler.h
@@ -29,50 +29,50 @@
 
 // Clang and GCC, check for __clang__ too to catch clang-cl masquarading as MSVC
 #if defined(__GNUC__) || defined(__clang__)
-#    if defined(__clang__)
-#        define DAWN_COMPILER_CLANG
-#    else
-#        define DAWN_COMPILER_GCC
-#    endif
+#if defined(__clang__)
+#define DAWN_COMPILER_CLANG
+#else
+#define DAWN_COMPILER_GCC
+#endif
 
-#    if defined(__i386__) || defined(__x86_64__)
-#        define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
-#    else
+#if defined(__i386__) || defined(__x86_64__)
+#define DAWN_BREAKPOINT() __asm__ __volatile__("int $3\n\t")
+#else
 // TODO(cwallez@chromium.org): Implement breakpoint on all supported architectures
-#        define DAWN_BREAKPOINT()
-#    endif
+#define DAWN_BREAKPOINT()
+#endif
 
-#    define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
-#    define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
-#    define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
+#define DAWN_BUILTIN_UNREACHABLE() __builtin_unreachable()
+#define DAWN_LIKELY(x) __builtin_expect(!!(x), 1)
+#define DAWN_UNLIKELY(x) __builtin_expect(!!(x), 0)
 
-#    if !defined(__has_cpp_attribute)
-#        define __has_cpp_attribute(name) 0
-#    endif
+#if !defined(__has_cpp_attribute)
+#define __has_cpp_attribute(name) 0
+#endif
 
-#    define DAWN_DECLARE_UNUSED __attribute__((unused))
-#    if defined(NDEBUG)
-#        define DAWN_FORCE_INLINE inline __attribute__((always_inline))
-#    endif
-#    define DAWN_NOINLINE __attribute__((noinline))
+#define DAWN_DECLARE_UNUSED __attribute__((unused))
+#if defined(NDEBUG)
+#define DAWN_FORCE_INLINE inline __attribute__((always_inline))
+#endif
+#define DAWN_NOINLINE __attribute__((noinline))
 
 // MSVC
 #elif defined(_MSC_VER)
-#    define DAWN_COMPILER_MSVC
+#define DAWN_COMPILER_MSVC
 
 extern void __cdecl __debugbreak(void);
-#    define DAWN_BREAKPOINT() __debugbreak()
+#define DAWN_BREAKPOINT() __debugbreak()
 
-#    define DAWN_BUILTIN_UNREACHABLE() __assume(false)
+#define DAWN_BUILTIN_UNREACHABLE() __assume(false)
 
-#    define DAWN_DECLARE_UNUSED
-#    if defined(NDEBUG)
-#        define DAWN_FORCE_INLINE __forceinline
-#    endif
-#    define DAWN_NOINLINE __declspec(noinline)
+#define DAWN_DECLARE_UNUSED
+#if defined(NDEBUG)
+#define DAWN_FORCE_INLINE __forceinline
+#endif
+#define DAWN_NOINLINE __declspec(noinline)
 
 #else
-#    error "Unsupported compiler"
+#error "Unsupported compiler"
 #endif
 
 // It seems that (void) EXPR works on all compilers to silence the unused variable warning.
@@ -82,16 +82,16 @@
 
 // Add noop replacements for macros for features that aren't supported by the compiler.
 #if !defined(DAWN_LIKELY)
-#    define DAWN_LIKELY(X) X
+#define DAWN_LIKELY(X) X
 #endif
 #if !defined(DAWN_UNLIKELY)
-#    define DAWN_UNLIKELY(X) X
+#define DAWN_UNLIKELY(X) X
 #endif
 #if !defined(DAWN_FORCE_INLINE)
-#    define DAWN_FORCE_INLINE inline
+#define DAWN_FORCE_INLINE inline
 #endif
 #if !defined(DAWN_NOINLINE)
-#    define DAWN_NOINLINE
+#define DAWN_NOINLINE
 #endif
 
 #endif  // SRC_DAWN_COMMON_COMPILER_H_
diff --git a/src/dawn/common/CoreFoundationRef.h b/src/dawn/common/CoreFoundationRef.h
index d790c4d..3a4724e 100644
--- a/src/dawn/common/CoreFoundationRef.h
+++ b/src/dawn/common/CoreFoundationRef.h
@@ -22,12 +22,8 @@
 template <typename T>
 struct CoreFoundationRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        CFRetain(value);
-    }
-    static void Release(T value) {
-        CFRelease(value);
-    }
+    static void Reference(T value) { CFRetain(value); }
+    static void Release(T value) { CFRelease(value); }
 };
 
 template <typename T>
diff --git a/src/dawn/common/DynamicLib.cpp b/src/dawn/common/DynamicLib.cpp
index 182673e..8767ec4 100644
--- a/src/dawn/common/DynamicLib.cpp
+++ b/src/dawn/common/DynamicLib.cpp
@@ -19,14 +19,14 @@
 #include "dawn/common/Platform.h"
 
 #if DAWN_PLATFORM_WINDOWS
-#    include "dawn/common/windows_with_undefs.h"
-#    if DAWN_PLATFORM_WINUWP
-#        include "dawn/common/WindowsUtils.h"
-#    endif
+#include "dawn/common/windows_with_undefs.h"
+#if DAWN_PLATFORM_WINUWP
+#include "dawn/common/WindowsUtils.h"
+#endif
 #elif DAWN_PLATFORM_POSIX
-#    include <dlfcn.h>
+#include <dlfcn.h>
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
 DynamicLib::~DynamicLib() {
@@ -48,11 +48,11 @@
 
 bool DynamicLib::Open(const std::string& filename, std::string* error) {
 #if DAWN_PLATFORM_WINDOWS
-#    if DAWN_PLATFORM_WINUWP
+#if DAWN_PLATFORM_WINUWP
     mHandle = LoadPackagedLibrary(UTF8ToWStr(filename.c_str()).c_str(), 0);
-#    else
+#else
     mHandle = LoadLibraryA(filename.c_str());
-#    endif
+#endif
     if (mHandle == nullptr && error != nullptr) {
         *error = "Windows Error: " + std::to_string(GetLastError());
     }
@@ -63,7 +63,7 @@
         *error = dlerror();
     }
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
     return mHandle != nullptr;
@@ -79,7 +79,7 @@
 #elif DAWN_PLATFORM_POSIX
     dlclose(mHandle);
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
     mHandle = nullptr;
@@ -101,7 +101,7 @@
         *error = dlerror();
     }
 #else
-#    error "Unsupported platform for DynamicLib"
+#error "Unsupported platform for DynamicLib"
 #endif
 
     return proc;
diff --git a/src/dawn/common/GPUInfo.cpp b/src/dawn/common/GPUInfo.cpp
index 5b73517..014297d 100644
--- a/src/dawn/common/GPUInfo.cpp
+++ b/src/dawn/common/GPUInfo.cpp
@@ -20,89 +20,89 @@
 #include "dawn/common/Assert.h"
 
 namespace gpu_info {
-    namespace {
-        // Intel
-        // Referenced from the following Mesa source code:
-        // https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
-        // gen9
-        const std::array<uint32_t, 25> Skylake = {
-            {0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913, 0x1915, 0x1916,
-             0x1917, 0x191A, 0x191B, 0x191D, 0x191E, 0x1921, 0x1923, 0x1926, 0x1927,
-             0x192A, 0x192B, 0x192D, 0x1932, 0x193A, 0x193B, 0x193D}};
-        // gen9p5
-        const std::array<uint32_t, 20> Kabylake = {
-            {0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E, 0x591E, 0x5912, 0x5917,
-             0x5902, 0x591B, 0x593B, 0x590B, 0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
-        const std::array<uint32_t, 17> Coffeelake = {
-            {0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91, 0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B,
-             0x3E94, 0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
-        const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
-        const std::array<uint32_t, 21> Cometlake = {
-            {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
-             0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
+namespace {
+// Intel
+// Referenced from the following Mesa source code:
+// https://github.com/mesa3d/mesa/blob/master/include/pci_ids/i965_pci_ids.h
+// gen9
+const std::array<uint32_t, 25> Skylake = {{0x1902, 0x1906, 0x190A, 0x190B, 0x190E, 0x1912, 0x1913,
+                                           0x1915, 0x1916, 0x1917, 0x191A, 0x191B, 0x191D, 0x191E,
+                                           0x1921, 0x1923, 0x1926, 0x1927, 0x192A, 0x192B, 0x192D,
+                                           0x1932, 0x193A, 0x193B, 0x193D}};
+// gen9p5
+const std::array<uint32_t, 20> Kabylake = {{0x5916, 0x5913, 0x5906, 0x5926, 0x5921, 0x5915, 0x590E,
+                                            0x591E, 0x5912, 0x5917, 0x5902, 0x591B, 0x593B, 0x590B,
+                                            0x591A, 0x590A, 0x591D, 0x5908, 0x5923, 0x5927}};
+const std::array<uint32_t, 17> Coffeelake = {{0x87CA, 0x3E90, 0x3E93, 0x3E99, 0x3E9C, 0x3E91,
+                                              0x3E92, 0x3E96, 0x3E98, 0x3E9A, 0x3E9B, 0x3E94,
+                                              0x3EA9, 0x3EA5, 0x3EA6, 0x3EA7, 0x3EA8}};
+const std::array<uint32_t, 5> Whiskylake = {{0x3EA1, 0x3EA4, 0x3EA0, 0x3EA3, 0x3EA2}};
+const std::array<uint32_t, 21> Cometlake = {
+    {0x9B21, 0x9BA0, 0x9BA2, 0x9BA4, 0x9BA5, 0x9BA8, 0x9BAA, 0x9BAB, 0x9BAC, 0x9B41, 0x9BC0,
+     0x9BC2, 0x9BC4, 0x9BC5, 0x9BC6, 0x9BC8, 0x9BCA, 0x9BCB, 0x9BCC, 0x9BE6, 0x9BF6}};
 
-        // According to Intel graphics driver version schema, build number is generated from the
-        // last two fields.
-        // See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
-        // more details.
-        uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
-            return driverVersion[2] * 10000 + driverVersion[3];
-        }
+// According to Intel graphics driver version schema, build number is generated from the
+// last two fields.
+// See https://www.intel.com/content/www/us/en/support/articles/000005654/graphics.html for
+// more details.
+uint32_t GetIntelD3DDriverBuildNumber(const D3DDriverVersion& driverVersion) {
+    return driverVersion[2] * 10000 + driverVersion[3];
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    bool IsAMD(PCIVendorID vendorId) {
-        return vendorId == kVendorID_AMD;
-    }
-    bool IsARM(PCIVendorID vendorId) {
-        return vendorId == kVendorID_ARM;
-    }
-    bool IsImgTec(PCIVendorID vendorId) {
-        return vendorId == kVendorID_ImgTec;
-    }
-    bool IsIntel(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Intel;
-    }
-    bool IsMesa(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Mesa;
-    }
-    bool IsNvidia(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Nvidia;
-    }
-    bool IsQualcomm(PCIVendorID vendorId) {
-        return vendorId == kVendorID_Qualcomm;
-    }
-    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
-        return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
-    }
-    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
-        return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
+bool IsAMD(PCIVendorID vendorId) {
+    return vendorId == kVendorID_AMD;
+}
+bool IsARM(PCIVendorID vendorId) {
+    return vendorId == kVendorID_ARM;
+}
+bool IsImgTec(PCIVendorID vendorId) {
+    return vendorId == kVendorID_ImgTec;
+}
+bool IsIntel(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Intel;
+}
+bool IsMesa(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Mesa;
+}
+bool IsNvidia(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Nvidia;
+}
+bool IsQualcomm(PCIVendorID vendorId) {
+    return vendorId == kVendorID_Qualcomm;
+}
+bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) {
+    return vendorId == kVendorID_Google && deviceId == kDeviceID_Swiftshader;
+}
+bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId) {
+    return vendorId == kVendorID_Microsoft && deviceId == kDeviceID_WARP;
+}
+
+int CompareD3DDriverVersion(PCIVendorID vendorId,
+                            const D3DDriverVersion& version1,
+                            const D3DDriverVersion& version2) {
+    if (IsIntel(vendorId)) {
+        uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
+        uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
+        return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
     }
 
-    int CompareD3DDriverVersion(PCIVendorID vendorId,
-                                const D3DDriverVersion& version1,
-                                const D3DDriverVersion& version2) {
-        if (IsIntel(vendorId)) {
-            uint32_t buildNumber1 = GetIntelD3DDriverBuildNumber(version1);
-            uint32_t buildNumber2 = GetIntelD3DDriverBuildNumber(version2);
-            return buildNumber1 < buildNumber2 ? -1 : (buildNumber1 == buildNumber2 ? 0 : 1);
-        }
+    // TODO(crbug.com/dawn/823): support other GPU vendors
+    UNREACHABLE();
+    return 0;
+}
 
-        // TODO(crbug.com/dawn/823): support other GPU vendors
-        UNREACHABLE();
-        return 0;
-    }
-
-    // Intel GPUs
-    bool IsSkylake(PCIDeviceID deviceId) {
-        return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
-    }
-    bool IsKabylake(PCIDeviceID deviceId) {
-        return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
-    }
-    bool IsCoffeelake(PCIDeviceID deviceId) {
-        return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
-               (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
-               (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
-    }
+// Intel GPUs
+bool IsSkylake(PCIDeviceID deviceId) {
+    return std::find(Skylake.cbegin(), Skylake.cend(), deviceId) != Skylake.cend();
+}
+bool IsKabylake(PCIDeviceID deviceId) {
+    return std::find(Kabylake.cbegin(), Kabylake.cend(), deviceId) != Kabylake.cend();
+}
+bool IsCoffeelake(PCIDeviceID deviceId) {
+    return (std::find(Coffeelake.cbegin(), Coffeelake.cend(), deviceId) != Coffeelake.cend()) ||
+           (std::find(Whiskylake.cbegin(), Whiskylake.cend(), deviceId) != Whiskylake.cend()) ||
+           (std::find(Cometlake.cbegin(), Cometlake.cend(), deviceId) != Cometlake.cend());
+}
 }  // namespace gpu_info
diff --git a/src/dawn/common/GPUInfo.h b/src/dawn/common/GPUInfo.h
index 9a03677..9b7f4c0 100644
--- a/src/dawn/common/GPUInfo.h
+++ b/src/dawn/common/GPUInfo.h
@@ -23,44 +23,44 @@
 
 namespace gpu_info {
 
-    static constexpr PCIVendorID kVendorID_AMD = 0x1002;
-    static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
-    static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
-    static constexpr PCIVendorID kVendorID_Intel = 0x8086;
-    static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
-    static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
-    static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
-    static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
-    static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
+static constexpr PCIVendorID kVendorID_AMD = 0x1002;
+static constexpr PCIVendorID kVendorID_ARM = 0x13B5;
+static constexpr PCIVendorID kVendorID_ImgTec = 0x1010;
+static constexpr PCIVendorID kVendorID_Intel = 0x8086;
+static constexpr PCIVendorID kVendorID_Mesa = 0x10005;
+static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE;
+static constexpr PCIVendorID kVendorID_Qualcomm = 0x5143;
+static constexpr PCIVendorID kVendorID_Google = 0x1AE0;
+static constexpr PCIVendorID kVendorID_Microsoft = 0x1414;
 
-    static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
-    static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
+static constexpr PCIDeviceID kDeviceID_Swiftshader = 0xC0DE;
+static constexpr PCIDeviceID kDeviceID_WARP = 0x8c;
 
-    bool IsAMD(PCIVendorID vendorId);
-    bool IsARM(PCIVendorID vendorId);
-    bool IsImgTec(PCIVendorID vendorId);
-    bool IsIntel(PCIVendorID vendorId);
-    bool IsMesa(PCIVendorID vendorId);
-    bool IsNvidia(PCIVendorID vendorId);
-    bool IsQualcomm(PCIVendorID vendorId);
-    bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
-    bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
+bool IsAMD(PCIVendorID vendorId);
+bool IsARM(PCIVendorID vendorId);
+bool IsImgTec(PCIVendorID vendorId);
+bool IsIntel(PCIVendorID vendorId);
+bool IsMesa(PCIVendorID vendorId);
+bool IsNvidia(PCIVendorID vendorId);
+bool IsQualcomm(PCIVendorID vendorId);
+bool IsSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId);
+bool IsWARP(PCIVendorID vendorId, PCIDeviceID deviceId);
 
-    using D3DDriverVersion = std::array<uint16_t, 4>;
+using D3DDriverVersion = std::array<uint16_t, 4>;
 
-    // Do comparison between two driver versions. Currently we only support the comparison between
-    // Intel D3D driver versions.
-    // - Return -1 if build number of version1 is smaller
-    // - Return 1 if build number of version1 is bigger
-    // - Return 0 if version1 and version2 represent same driver version
-    int CompareD3DDriverVersion(PCIVendorID vendorId,
-                                const D3DDriverVersion& version1,
-                                const D3DDriverVersion& version2);
+// Do comparison between two driver versions. Currently we only support the comparison between
+// Intel D3D driver versions.
+// - Return -1 if build number of version1 is smaller
+// - Return 1 if build number of version1 is bigger
+// - Return 0 if version1 and version2 represent same driver version
+int CompareD3DDriverVersion(PCIVendorID vendorId,
+                            const D3DDriverVersion& version1,
+                            const D3DDriverVersion& version2);
 
-    // Intel architectures
-    bool IsSkylake(PCIDeviceID deviceId);
-    bool IsKabylake(PCIDeviceID deviceId);
-    bool IsCoffeelake(PCIDeviceID deviceId);
+// Intel architectures
+bool IsSkylake(PCIDeviceID deviceId);
+bool IsKabylake(PCIDeviceID deviceId);
+bool IsCoffeelake(PCIDeviceID deviceId);
 
 }  // namespace gpu_info
 #endif  // SRC_DAWN_COMMON_GPUINFO_H_
diff --git a/src/dawn/common/HashUtils.h b/src/dawn/common/HashUtils.h
index 1fa421c..342c9b6 100644
--- a/src/dawn/common/HashUtils.h
+++ b/src/dawn/common/HashUtils.h
@@ -50,7 +50,7 @@
 #elif defined(DAWN_PLATFORM_32_BIT)
     const size_t offset = 0x9e3779b9;
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
     *hash ^= Hash(value) + offset + (*hash << 6) + (*hash >> 2);
 }
@@ -89,13 +89,13 @@
 #endif
 
 namespace std {
-    template <typename Index, size_t N>
-    struct hash<ityp::bitset<Index, N>> {
-      public:
-        size_t operator()(const ityp::bitset<Index, N>& value) const {
-            return Hash(static_cast<const std::bitset<N>&>(value));
-        }
-    };
+template <typename Index, size_t N>
+struct hash<ityp::bitset<Index, N>> {
+  public:
+    size_t operator()(const ityp::bitset<Index, N>& value) const {
+        return Hash(static_cast<const std::bitset<N>&>(value));
+    }
+};
 }  // namespace std
 
 #endif  // SRC_DAWN_COMMON_HASHUTILS_H_
diff --git a/src/dawn/common/IOKitRef.h b/src/dawn/common/IOKitRef.h
index d8fe960..33367b1 100644
--- a/src/dawn/common/IOKitRef.h
+++ b/src/dawn/common/IOKitRef.h
@@ -22,12 +22,8 @@
 template <typename T>
 struct IOKitRefTraits {
     static constexpr T kNullValue = IO_OBJECT_NULL;
-    static void Reference(T value) {
-        IOObjectRetain(value);
-    }
-    static void Release(T value) {
-        IOObjectRelease(value);
-    }
+    static void Reference(T value) { IOObjectRetain(value); }
+    static void Release(T value) { IOObjectRelease(value); }
 };
 
 template <typename T>
diff --git a/src/dawn/common/LinkedList.h b/src/dawn/common/LinkedList.h
index 5227041..b9503ee 100644
--- a/src/dawn/common/LinkedList.h
+++ b/src/dawn/common/LinkedList.h
@@ -99,10 +99,8 @@
 template <typename T>
 class LinkNode {
   public:
-    LinkNode() : previous_(nullptr), next_(nullptr) {
-    }
-    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {
-    }
+    LinkNode() : previous_(nullptr), next_(nullptr) {}
+    LinkNode(LinkNode<T>* previous, LinkNode<T>* next) : previous_(previous), next_(next) {}
 
     LinkNode(LinkNode<T>&& rhs) {
         next_ = rhs.next_;
@@ -154,22 +152,14 @@
         return true;
     }
 
-    LinkNode<T>* previous() const {
-        return previous_;
-    }
+    LinkNode<T>* previous() const { return previous_; }
 
-    LinkNode<T>* next() const {
-        return next_;
-    }
+    LinkNode<T>* next() const { return next_; }
 
     // Cast from the node-type to the value type.
-    const T* value() const {
-        return static_cast<const T*>(this);
-    }
+    const T* value() const { return static_cast<const T*>(this); }
 
-    T* value() {
-        return static_cast<T*>(this);
-    }
+    T* value() { return static_cast<T*>(this); }
 
   private:
     friend class LinkedList<T>;
@@ -183,8 +173,7 @@
     // The "root" node is self-referential, and forms the basis of a circular
     // list (root_.next() will point back to the start of the list,
     // and root_->previous() wraps around to the end of the list).
-    LinkedList() : root_(&root_, &root_) {
-    }
+    LinkedList() : root_(&root_, &root_) {}
 
     ~LinkedList() {
         // If any LinkNodes still exist in the LinkedList, there will be outstanding references to
@@ -194,9 +183,7 @@
     }
 
     // Appends |e| to the end of the linked list.
-    void Append(LinkNode<T>* e) {
-        e->InsertBefore(&root_);
-    }
+    void Append(LinkNode<T>* e) { e->InsertBefore(&root_); }
 
     // Moves all elements (in order) of the list and appends them into |l| leaving the list empty.
     void MoveInto(LinkedList<T>* l) {
@@ -212,21 +199,13 @@
         root_.previous_ = &root_;
     }
 
-    LinkNode<T>* head() const {
-        return root_.next();
-    }
+    LinkNode<T>* head() const { return root_.next(); }
 
-    LinkNode<T>* tail() const {
-        return root_.previous();
-    }
+    LinkNode<T>* tail() const { return root_.previous(); }
 
-    const LinkNode<T>* end() const {
-        return &root_;
-    }
+    const LinkNode<T>* end() const { return &root_; }
 
-    bool empty() const {
-        return head() == end();
-    }
+    bool empty() const { return head() == end(); }
 
   private:
     LinkNode<T> root_;
@@ -235,8 +214,7 @@
 template <typename T>
 class LinkedListIterator {
   public:
-    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {
-    }
+    explicit LinkedListIterator(LinkNode<T>* node) : current_(node), next_(node->next()) {}
 
     // We keep an early reference to the next node in the list so that even if the current element
     // is modified or removed from the list, we have a valid next node.
@@ -246,13 +224,9 @@
         return *this;
     }
 
-    bool operator!=(const LinkedListIterator<T>& other) const {
-        return current_ != other.current_;
-    }
+    bool operator!=(const LinkedListIterator<T>& other) const { return current_ != other.current_; }
 
-    LinkNode<T>* operator*() const {
-        return current_;
-    }
+    LinkNode<T>* operator*() const { return current_; }
 
   private:
     LinkNode<T>* current_;
diff --git a/src/dawn/common/Log.cpp b/src/dawn/common/Log.cpp
index 18d4af2..ab1eb07 100644
--- a/src/dawn/common/Log.cpp
+++ b/src/dawn/common/Log.cpp
@@ -21,97 +21,96 @@
 #include "dawn/common/Platform.h"
 
 #if defined(DAWN_PLATFORM_ANDROID)
-#    include <android/log.h>
+#include <android/log.h>
 #endif
 
 namespace dawn {
 
-    namespace {
+namespace {
 
-        const char* SeverityName(LogSeverity severity) {
-            switch (severity) {
-                case LogSeverity::Debug:
-                    return "Debug";
-                case LogSeverity::Info:
-                    return "Info";
-                case LogSeverity::Warning:
-                    return "Warning";
-                case LogSeverity::Error:
-                    return "Error";
-                default:
-                    UNREACHABLE();
-                    return "";
-            }
-        }
+const char* SeverityName(LogSeverity severity) {
+    switch (severity) {
+        case LogSeverity::Debug:
+            return "Debug";
+        case LogSeverity::Info:
+            return "Info";
+        case LogSeverity::Warning:
+            return "Warning";
+        case LogSeverity::Error:
+            return "Error";
+        default:
+            UNREACHABLE();
+            return "";
+    }
+}
 
 #if defined(DAWN_PLATFORM_ANDROID)
-        android_LogPriority AndroidLogPriority(LogSeverity severity) {
-            switch (severity) {
-                case LogSeverity::Debug:
-                    return ANDROID_LOG_INFO;
-                case LogSeverity::Info:
-                    return ANDROID_LOG_INFO;
-                case LogSeverity::Warning:
-                    return ANDROID_LOG_WARN;
-                case LogSeverity::Error:
-                    return ANDROID_LOG_ERROR;
-                default:
-                    UNREACHABLE();
-                    return ANDROID_LOG_ERROR;
-            }
-        }
+android_LogPriority AndroidLogPriority(LogSeverity severity) {
+    switch (severity) {
+        case LogSeverity::Debug:
+            return ANDROID_LOG_INFO;
+        case LogSeverity::Info:
+            return ANDROID_LOG_INFO;
+        case LogSeverity::Warning:
+            return ANDROID_LOG_WARN;
+        case LogSeverity::Error:
+            return ANDROID_LOG_ERROR;
+        default:
+            UNREACHABLE();
+            return ANDROID_LOG_ERROR;
+    }
+}
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {
+LogMessage::LogMessage(LogSeverity severity) : mSeverity(severity) {}
+
+LogMessage::~LogMessage() {
+    std::string fullMessage = mStream.str();
+
+    // If this message has been moved, its stream is empty.
+    if (fullMessage.empty()) {
+        return;
     }
 
-    LogMessage::~LogMessage() {
-        std::string fullMessage = mStream.str();
-
-        // If this message has been moved, its stream is empty.
-        if (fullMessage.empty()) {
-            return;
-        }
-
-        const char* severityName = SeverityName(mSeverity);
+    const char* severityName = SeverityName(mSeverity);
 
 #if defined(DAWN_PLATFORM_ANDROID)
-        android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
-        __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
+    android_LogPriority androidPriority = AndroidLogPriority(mSeverity);
+    __android_log_print(androidPriority, "Dawn", "%s: %s\n", severityName, fullMessage.c_str());
 #else   // defined(DAWN_PLATFORM_ANDROID)
-        FILE* outputStream = stdout;
-        if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
-            outputStream = stderr;
-        }
+    FILE* outputStream = stdout;
+    if (mSeverity == LogSeverity::Warning || mSeverity == LogSeverity::Error) {
+        outputStream = stderr;
+    }
 
-        // Note: we use fprintf because <iostream> includes static initializers.
-        fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
-        fflush(outputStream);
+    // Note: we use fprintf because <iostream> includes static initializers.
+    fprintf(outputStream, "%s: %s\n", severityName, fullMessage.c_str());
+    fflush(outputStream);
 #endif  // defined(DAWN_PLATFORM_ANDROID)
-    }
+}
 
-    LogMessage DebugLog() {
-        return LogMessage(LogSeverity::Debug);
-    }
+LogMessage DebugLog() {
+    return LogMessage(LogSeverity::Debug);
+}
 
-    LogMessage InfoLog() {
-        return LogMessage(LogSeverity::Info);
-    }
+LogMessage InfoLog() {
+    return LogMessage(LogSeverity::Info);
+}
 
-    LogMessage WarningLog() {
-        return LogMessage(LogSeverity::Warning);
-    }
+LogMessage WarningLog() {
+    return LogMessage(LogSeverity::Warning);
+}
 
-    LogMessage ErrorLog() {
-        return LogMessage(LogSeverity::Error);
-    }
+LogMessage ErrorLog() {
+    return LogMessage(LogSeverity::Error);
+}
 
-    LogMessage DebugLog(const char* file, const char* function, int line) {
-        LogMessage message = DebugLog();
-        message << file << ":" << line << "(" << function << ")";
-        return message;
-    }
+LogMessage DebugLog(const char* file, const char* function, int line) {
+    LogMessage message = DebugLog();
+    message << file << ":" << line << "(" << function << ")";
+    return message;
+}
 
 }  // namespace dawn
diff --git a/src/dawn/common/Log.h b/src/dawn/common/Log.h
index fc7481a..3b338b3 100644
--- a/src/dawn/common/Log.h
+++ b/src/dawn/common/Log.h
@@ -47,47 +47,47 @@
 
 namespace dawn {
 
-    // Log levels mostly used to signal intent where the log message is produced and used to route
-    // the message to the correct output.
-    enum class LogSeverity {
-        Debug,
-        Info,
-        Warning,
-        Error,
-    };
+// Log levels mostly used to signal intent where the log message is produced and used to route
+// the message to the correct output.
+enum class LogSeverity {
+    Debug,
+    Info,
+    Warning,
+    Error,
+};
 
-    // Essentially an ostringstream that will print itself in its destructor.
-    class LogMessage {
-      public:
-        explicit LogMessage(LogSeverity severity);
-        ~LogMessage();
+// Essentially an ostringstream that will print itself in its destructor.
+class LogMessage {
+  public:
+    explicit LogMessage(LogSeverity severity);
+    ~LogMessage();
 
-        LogMessage(LogMessage&& other) = default;
-        LogMessage& operator=(LogMessage&& other) = default;
+    LogMessage(LogMessage&& other) = default;
+    LogMessage& operator=(LogMessage&& other) = default;
 
-        template <typename T>
-        LogMessage& operator<<(T&& value) {
-            mStream << value;
-            return *this;
-        }
+    template <typename T>
+    LogMessage& operator<<(T&& value) {
+        mStream << value;
+        return *this;
+    }
 
-      private:
-        LogMessage(const LogMessage& other) = delete;
-        LogMessage& operator=(const LogMessage& other) = delete;
+  private:
+    LogMessage(const LogMessage& other) = delete;
+    LogMessage& operator=(const LogMessage& other) = delete;
 
-        LogSeverity mSeverity;
-        std::ostringstream mStream;
-    };
+    LogSeverity mSeverity;
+    std::ostringstream mStream;
+};
 
-    // Short-hands to create a LogMessage with the respective severity.
-    LogMessage DebugLog();
-    LogMessage InfoLog();
-    LogMessage WarningLog();
-    LogMessage ErrorLog();
+// Short-hands to create a LogMessage with the respective severity.
+LogMessage DebugLog();
+LogMessage InfoLog();
+LogMessage WarningLog();
+LogMessage ErrorLog();
 
-    // DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
-    // information
-    LogMessage DebugLog(const char* file, const char* function, int line);
+// DAWN_DEBUG is a helper macro that creates a DebugLog and outputs file/line/function
+// information
+LogMessage DebugLog(const char* file, const char* function, int line);
 #define DAWN_DEBUG() ::dawn::DebugLog(__FILE__, __func__, __LINE__)
 
 }  // namespace dawn
diff --git a/src/dawn/common/Math.cpp b/src/dawn/common/Math.cpp
index f0dd0a1..3ee1ba3 100644
--- a/src/dawn/common/Math.cpp
+++ b/src/dawn/common/Math.cpp
@@ -22,7 +22,7 @@
 #include "dawn/common/Platform.h"
 
 #if defined(DAWN_COMPILER_MSVC)
-#    include <intrin.h>
+#include <intrin.h>
 #endif
 
 uint32_t ScanForward(uint32_t bits) {
@@ -54,13 +54,13 @@
 uint32_t Log2(uint64_t value) {
     ASSERT(value != 0);
 #if defined(DAWN_COMPILER_MSVC)
-#    if defined(DAWN_PLATFORM_64_BIT)
+#if defined(DAWN_PLATFORM_64_BIT)
     // NOLINTNEXTLINE(runtime/int)
     unsigned long firstBitIndex = 0ul;
     unsigned char ret = _BitScanReverse64(&firstBitIndex, value);
     ASSERT(ret != 0);
     return firstBitIndex;
-#    else   // defined(DAWN_PLATFORM_64_BIT)
+#else   // defined(DAWN_PLATFORM_64_BIT)
     // NOLINTNEXTLINE(runtime/int)
     unsigned long firstBitIndex = 0ul;
     if (_BitScanReverse(&firstBitIndex, value >> 32)) {
@@ -69,10 +69,10 @@
     unsigned char ret = _BitScanReverse(&firstBitIndex, value & 0xFFFFFFFF);
     ASSERT(ret != 0);
     return firstBitIndex;
-#    endif  // defined(DAWN_PLATFORM_64_BIT)
-#else       // defined(DAWN_COMPILER_MSVC)
+#endif  // defined(DAWN_PLATFORM_64_BIT)
+#else   // defined(DAWN_COMPILER_MSVC)
     return 63 - static_cast<uint32_t>(__builtin_clzll(value));
-#endif      // defined(DAWN_COMPILER_MSVC)
+#endif  // defined(DAWN_COMPILER_MSVC)
 }
 
 uint64_t NextPowerOfTwo(uint64_t n) {
diff --git a/src/dawn/common/NSRef.h b/src/dawn/common/NSRef.h
index ddec95e..4afb5e0 100644
--- a/src/dawn/common/NSRef.h
+++ b/src/dawn/common/NSRef.h
@@ -20,7 +20,7 @@
 #import <Foundation/NSObject.h>
 
 #if !defined(__OBJC__)
-#    error "NSRef can only be used in Objective C/C++ code."
+#error "NSRef can only be used in Objective C/C++ code."
 #endif
 
 // This file contains smart pointers that automatically reference and release Objective C objects
@@ -67,12 +67,8 @@
 template <typename T>
 struct NSRefTraits {
     static constexpr T kNullValue = nullptr;
-    static void Reference(T value) {
-        [value retain];
-    }
-    static void Release(T value) {
-        [value release];
-    }
+    static void Reference(T value) { [value retain]; }
+    static void Release(T value) { [value release]; }
 };
 
 template <typename T>
@@ -80,13 +76,9 @@
   public:
     using RefBase<T*, NSRefTraits<T*>>::RefBase;
 
-    const T* operator*() const {
-        return this->Get();
-    }
+    const T* operator*() const { return this->Get(); }
 
-    T* operator*() {
-        return this->Get();
-    }
+    T* operator*() { return this->Get(); }
 };
 
 template <typename T>
@@ -104,13 +96,9 @@
   public:
     using RefBase<T, NSRefTraits<T>>::RefBase;
 
-    const T operator*() const {
-        return this->Get();
-    }
+    const T operator*() const { return this->Get(); }
 
-    T operator*() {
-        return this->Get();
-    }
+    T operator*() { return this->Get(); }
 };
 
 template <typename T>
diff --git a/src/dawn/common/Numeric.h b/src/dawn/common/Numeric.h
index a9a4521..50f6d40 100644
--- a/src/dawn/common/Numeric.h
+++ b/src/dawn/common/Numeric.h
@@ -22,17 +22,17 @@
 
 namespace detail {
 
-    template <typename T>
-    inline constexpr uint32_t u32_sizeof() {
-        static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max());
-        return uint32_t(sizeof(T));
-    }
+template <typename T>
+inline constexpr uint32_t u32_sizeof() {
+    static_assert(sizeof(T) <= std::numeric_limits<uint32_t>::max());
+    return uint32_t(sizeof(T));
+}
 
-    template <typename T>
-    inline constexpr uint32_t u32_alignof() {
-        static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max());
-        return uint32_t(alignof(T));
-    }
+template <typename T>
+inline constexpr uint32_t u32_alignof() {
+    static_assert(alignof(T) <= std::numeric_limits<uint32_t>::max());
+    return uint32_t(alignof(T));
+}
 
 }  // namespace detail
 
diff --git a/src/dawn/common/Platform.h b/src/dawn/common/Platform.h
index 5e4f9d7..8e81b48 100644
--- a/src/dawn/common/Platform.h
+++ b/src/dawn/common/Platform.h
@@ -16,67 +16,67 @@
 #define SRC_DAWN_COMMON_PLATFORM_H_
 
 #if defined(_WIN32) || defined(_WIN64)
-#    include <winapifamily.h>
-#    define DAWN_PLATFORM_WINDOWS 1
-#    if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
-#        define DAWN_PLATFORM_WIN32 1
-#    elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
-#        define DAWN_PLATFORM_WINUWP 1
-#    else
-#        error "Unsupported Windows platform."
-#    endif
+#include <winapifamily.h>
+#define DAWN_PLATFORM_WINDOWS 1
+#if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP
+#define DAWN_PLATFORM_WIN32 1
+#elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP
+#define DAWN_PLATFORM_WINUWP 1
+#else
+#error "Unsupported Windows platform."
+#endif
 
 #elif defined(__linux__)
-#    define DAWN_PLATFORM_LINUX 1
-#    define DAWN_PLATFORM_POSIX 1
-#    if defined(__ANDROID__)
-#        define DAWN_PLATFORM_ANDROID 1
-#    endif
+#define DAWN_PLATFORM_LINUX 1
+#define DAWN_PLATFORM_POSIX 1
+#if defined(__ANDROID__)
+#define DAWN_PLATFORM_ANDROID 1
+#endif
 
 #elif defined(__APPLE__)
-#    define DAWN_PLATFORM_APPLE 1
-#    define DAWN_PLATFORM_POSIX 1
-#    include <TargetConditionals.h>
-#    if TARGET_OS_IPHONE
-#        define DAWN_PLATFORM_IOS
-#    elif TARGET_OS_MAC
-#        define DAWN_PLATFORM_MACOS
-#    else
-#        error "Unsupported Apple platform."
-#    endif
+#define DAWN_PLATFORM_APPLE 1
+#define DAWN_PLATFORM_POSIX 1
+#include <TargetConditionals.h>
+#if TARGET_OS_IPHONE
+#define DAWN_PLATFORM_IOS
+#elif TARGET_OS_MAC
+#define DAWN_PLATFORM_MACOS
+#else
+#error "Unsupported Apple platform."
+#endif
 
 #elif defined(__Fuchsia__)
-#    define DAWN_PLATFORM_FUCHSIA 1
-#    define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_FUCHSIA 1
+#define DAWN_PLATFORM_POSIX 1
 
 #elif defined(__EMSCRIPTEN__)
-#    define DAWN_PLATFORM_EMSCRIPTEN 1
-#    define DAWN_PLATFORM_POSIX 1
+#define DAWN_PLATFORM_EMSCRIPTEN 1
+#define DAWN_PLATFORM_POSIX 1
 
 #else
-#    error "Unsupported platform."
+#error "Unsupported platform."
 #endif
 
 // Distinguish mips32.
 #if defined(__mips__) && (_MIPS_SIM == _ABIO32) && !defined(__mips32__)
-#    define __mips32__
+#define __mips32__
 #endif
 
 // Distinguish mips64.
 #if defined(__mips__) && (_MIPS_SIM == _ABI64) && !defined(__mips64__)
-#    define __mips64__
+#define __mips64__
 #endif
 
 #if defined(_WIN64) || defined(__aarch64__) || defined(__x86_64__) || defined(__mips64__) || \
     defined(__s390x__) || defined(__PPC64__)
-#    define DAWN_PLATFORM_64_BIT 1
+#define DAWN_PLATFORM_64_BIT 1
 static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8");
 #elif defined(_WIN32) || defined(__arm__) || defined(__i386__) || defined(__mips32__) || \
     defined(__s390__) || defined(__EMSCRIPTEN__)
-#    define DAWN_PLATFORM_32_BIT 1
+#define DAWN_PLATFORM_32_BIT 1
 static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4");
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
 
 #endif  // SRC_DAWN_COMMON_PLATFORM_H_
diff --git a/src/dawn/common/RefBase.h b/src/dawn/common/RefBase.h
index 5613916..8f06f19 100644
--- a/src/dawn/common/RefBase.h
+++ b/src/dawn/common/RefBase.h
@@ -36,17 +36,13 @@
 class RefBase {
   public:
     // Default constructor and destructor.
-    RefBase() : mValue(Traits::kNullValue) {
-    }
+    RefBase() : mValue(Traits::kNullValue) {}
 
-    ~RefBase() {
-        Release(mValue);
-    }
+    ~RefBase() { Release(mValue); }
 
     // Constructors from nullptr.
     // NOLINTNEXTLINE(runtime/explicit)
-    constexpr RefBase(std::nullptr_t) : RefBase() {
-    }
+    constexpr RefBase(std::nullptr_t) : RefBase() {}
 
     RefBase<T, Traits>& operator=(std::nullptr_t) {
         Set(Traits::kNullValue);
@@ -55,9 +51,7 @@
 
     // Constructors from a value T.
     // NOLINTNEXTLINE(runtime/explicit)
-    RefBase(T value) : mValue(value) {
-        Reference(value);
-    }
+    RefBase(T value) : mValue(value) { Reference(value); }
 
     RefBase<T, Traits>& operator=(const T& value) {
         Set(value);
@@ -65,18 +59,14 @@
     }
 
     // Constructors from a RefBase<T>
-    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) {
-        Reference(other.mValue);
-    }
+    RefBase(const RefBase<T, Traits>& other) : mValue(other.mValue) { Reference(other.mValue); }
 
     RefBase<T, Traits>& operator=(const RefBase<T, Traits>& other) {
         Set(other.mValue);
         return *this;
     }
 
-    RefBase(RefBase<T, Traits>&& other) {
-        mValue = other.Detach();
-    }
+    RefBase(RefBase<T, Traits>&& other) { mValue = other.Detach(); }
 
     RefBase<T, Traits>& operator=(RefBase<T, Traits>&& other) {
         if (&other != this) {
@@ -113,28 +103,16 @@
     }
 
     // Comparison operators.
-    bool operator==(const T& other) const {
-        return mValue == other;
-    }
+    bool operator==(const T& other) const { return mValue == other; }
 
-    bool operator!=(const T& other) const {
-        return mValue != other;
-    }
+    bool operator!=(const T& other) const { return mValue != other; }
 
-    const T operator->() const {
-        return mValue;
-    }
-    T operator->() {
-        return mValue;
-    }
+    const T operator->() const { return mValue; }
+    T operator->() { return mValue; }
 
     // Smart pointer methods.
-    const T& Get() const {
-        return mValue;
-    }
-    T& Get() {
-        return mValue;
-    }
+    const T& Get() const { return mValue; }
+    T& Get() { return mValue; }
 
     [[nodiscard]] T Detach() {
         T value{std::move(mValue)};
diff --git a/src/dawn/common/RefCounted.h b/src/dawn/common/RefCounted.h
index 0593544..1ee413d 100644
--- a/src/dawn/common/RefCounted.h
+++ b/src/dawn/common/RefCounted.h
@@ -45,12 +45,8 @@
 template <typename T>
 struct RefCountedTraits {
     static constexpr T* kNullValue = nullptr;
-    static void Reference(T* value) {
-        value->Reference();
-    }
-    static void Release(T* value) {
-        value->Release();
-    }
+    static void Reference(T* value) { value->Reference(); }
+    static void Release(T* value) { value->Release(); }
 };
 
 template <typename T>
diff --git a/src/dawn/common/Result.cpp b/src/dawn/common/Result.cpp
index 2101e47..c009df6 100644
--- a/src/dawn/common/Result.cpp
+++ b/src/dawn/common/Result.cpp
@@ -17,14 +17,14 @@
 // Implementation details of the tagged pointer Results
 namespace detail {
 
-    intptr_t MakePayload(const void* pointer, PayloadType type) {
-        intptr_t payload = reinterpret_cast<intptr_t>(pointer);
-        ASSERT((payload & 3) == 0);
-        return payload | type;
-    }
+intptr_t MakePayload(const void* pointer, PayloadType type) {
+    intptr_t payload = reinterpret_cast<intptr_t>(pointer);
+    ASSERT((payload & 3) == 0);
+    return payload | type;
+}
 
-    PayloadType GetPayloadType(intptr_t payload) {
-        return static_cast<PayloadType>(payload & 3);
-    }
+PayloadType GetPayloadType(intptr_t payload) {
+    return static_cast<PayloadType>(payload & 3);
+}
 
 }  // namespace detail
diff --git a/src/dawn/common/Result.h b/src/dawn/common/Result.h
index 82ac894..849cd30 100644
--- a/src/dawn/common/Result.h
+++ b/src/dawn/common/Result.h
@@ -63,7 +63,7 @@
     Result();
     Result(std::unique_ptr<E> error);
 
-    Result(Result<void, E> && other);
+    Result(Result<void, E>&& other);
     Result<void, E>& operator=(Result<void, E>&& other);
 
     ~Result();
@@ -89,23 +89,23 @@
 // tagged pointer. The tag for Success is 0 so that returning the value is fastest.
 
 namespace detail {
-    // Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
-    // but we really want them inlined so we keep them in the headers
-    enum PayloadType {
-        Success = 0,
-        Error = 1,
-        Empty = 2,
-    };
+// Utility functions to manipulate the tagged pointer. Some of them don't need to be templated
+// but we really want them inlined so we keep them in the headers
+enum PayloadType {
+    Success = 0,
+    Error = 1,
+    Empty = 2,
+};
 
-    intptr_t MakePayload(const void* pointer, PayloadType type);
-    PayloadType GetPayloadType(intptr_t payload);
+intptr_t MakePayload(const void* pointer, PayloadType type);
+PayloadType GetPayloadType(intptr_t payload);
 
-    template <typename T>
-    static T* GetSuccessFromPayload(intptr_t payload);
-    template <typename E>
-    static E* GetErrorFromPayload(intptr_t payload);
+template <typename T>
+static T* GetSuccessFromPayload(intptr_t payload);
+template <typename E>
+static E* GetErrorFromPayload(intptr_t payload);
 
-    constexpr static intptr_t kEmptyPayload = Empty;
+constexpr static intptr_t kEmptyPayload = Empty;
 }  // namespace detail
 
 template <typename T, typename E>
@@ -116,12 +116,12 @@
     static_assert(alignof_if_defined_else_default<E, 4> >= 4,
                   "Result<T*, E*> reserves two bits for tagging pointers");
 
-    Result(T * success);
+    Result(T* success);
     Result(std::unique_ptr<E> error);
 
     // Support returning a Result<T*, E*> from a Result<TChild*, E*>
     template <typename TChild>
-    Result(Result<TChild*, E> && other);
+    Result(Result<TChild*, E>&& other);
     template <typename TChild>
     Result<T*, E>& operator=(Result<TChild*, E>&& other);
 
@@ -151,7 +151,7 @@
     Result(const T* success);
     Result(std::unique_ptr<E> error);
 
-    Result(Result<const T*, E> && other);
+    Result(Result<const T*, E>&& other);
     Result<const T*, E>& operator=(Result<const T*, E>&& other);
 
     ~Result();
@@ -178,13 +178,13 @@
                   "Result<Ref<T>, E> reserves two bits for tagging pointers");
 
     template <typename U>
-    Result(Ref<U> && success);
+    Result(Ref<U>&& success);
     template <typename U>
     Result(const Ref<U>& success);
     Result(std::unique_ptr<E> error);
 
     template <typename U>
-    Result(Result<Ref<U>, E> && other);
+    Result(Result<Ref<U>, E>&& other);
     template <typename U>
     Result<Ref<U>, E>& operator=(Result<Ref<U>, E>&& other);
 
@@ -209,10 +209,10 @@
 template <typename T, typename E>
 class [[nodiscard]] Result {
   public:
-    Result(T && success);
+    Result(T&& success);
     Result(std::unique_ptr<E> error);
 
-    Result(Result<T, E> && other);
+    Result(Result<T, E>&& other);
     Result<T, E>& operator=(Result<T, E>&& other);
 
     ~Result();
@@ -237,16 +237,13 @@
 
 // Implementation of Result<void, E>
 template <typename E>
-Result<void, E>::Result() {
-}
+Result<void, E>::Result() {}
 
 template <typename E>
-Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {
-}
+Result<void, E>::Result(std::unique_ptr<E> error) : mError(std::move(error)) {}
 
 template <typename E>
-Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {
-}
+Result<void, E>::Result(Result<void, E>&& other) : mError(std::move(other.mError)) {}
 
 template <typename E>
 Result<void, E>& Result<void, E>::operator=(Result<void, E>&& other) {
@@ -271,8 +268,7 @@
 }
 
 template <typename E>
-void Result<void, E>::AcquireSuccess() {
-}
+void Result<void, E>::AcquireSuccess() {}
 
 template <typename E>
 std::unique_ptr<E> Result<void, E>::AcquireError() {
@@ -282,29 +278,27 @@
 // Implementation details of the tagged pointer Results
 namespace detail {
 
-    template <typename T>
-    T* GetSuccessFromPayload(intptr_t payload) {
-        ASSERT(GetPayloadType(payload) == Success);
-        return reinterpret_cast<T*>(payload);
-    }
+template <typename T>
+T* GetSuccessFromPayload(intptr_t payload) {
+    ASSERT(GetPayloadType(payload) == Success);
+    return reinterpret_cast<T*>(payload);
+}
 
-    template <typename E>
-    E* GetErrorFromPayload(intptr_t payload) {
-        ASSERT(GetPayloadType(payload) == Error);
-        return reinterpret_cast<E*>(payload ^ 1);
-    }
+template <typename E>
+E* GetErrorFromPayload(intptr_t payload) {
+    ASSERT(GetPayloadType(payload) == Error);
+    return reinterpret_cast<E*>(payload ^ 1);
+}
 
 }  // namespace detail
 
 // Implementation of Result<T*, E>
 template <typename T, typename E>
-Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+Result<T*, E>::Result(T* success) : mPayload(detail::MakePayload(success, detail::Success)) {}
 
 template <typename T, typename E>
 Result<T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 template <typename TChild>
@@ -355,13 +349,11 @@
 // Implementation of Result<const T*, E*>
 template <typename T, typename E>
 Result<const T*, E>::Result(const T* success)
-    : mPayload(detail::MakePayload(success, detail::Success)) {
-}
+    : mPayload(detail::MakePayload(success, detail::Success)) {}
 
 template <typename T, typename E>
 Result<const T*, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 Result<const T*, E>::Result(Result<const T*, E>&& other) : mPayload(other.mPayload) {
@@ -415,13 +407,11 @@
 
 template <typename T, typename E>
 template <typename U>
-Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {
-}
+Result<Ref<T>, E>::Result(const Ref<U>& success) : Result(Ref<U>(success)) {}
 
 template <typename T, typename E>
 Result<Ref<T>, E>::Result(std::unique_ptr<E> error)
-    : mPayload(detail::MakePayload(error.release(), detail::Error)) {
-}
+    : mPayload(detail::MakePayload(error.release(), detail::Error)) {}
 
 template <typename T, typename E>
 template <typename U>
@@ -473,12 +463,10 @@
 
 // Implementation of Result<T, E>
 template <typename T, typename E>
-Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {
-}
+Result<T, E>::Result(T&& success) : mType(Success), mSuccess(std::move(success)) {}
 
 template <typename T, typename E>
-Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {
-}
+Result<T, E>::Result(std::unique_ptr<E> error) : mType(Error), mError(std::move(error)) {}
 
 template <typename T, typename E>
 Result<T, E>::~Result() {
diff --git a/src/dawn/common/SerialStorage.h b/src/dawn/common/SerialStorage.h
index 2eae0ad..0d4c8b5 100644
--- a/src/dawn/common/SerialStorage.h
+++ b/src/dawn/common/SerialStorage.h
@@ -193,8 +193,7 @@
 template <typename Derived>
 SerialStorage<Derived>::BeginEnd::BeginEnd(typename SerialStorage<Derived>::StorageIterator start,
                                            typename SerialStorage<Derived>::StorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::Iterator SerialStorage<Derived>::BeginEnd::begin() const {
@@ -210,8 +209,7 @@
 
 template <typename Derived>
 SerialStorage<Derived>::Iterator::Iterator(typename SerialStorage<Derived>::StorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::Iterator& SerialStorage<Derived>::Iterator::operator++() {
@@ -257,8 +255,7 @@
 SerialStorage<Derived>::ConstBeginEnd::ConstBeginEnd(
     typename SerialStorage<Derived>::ConstStorageIterator start,
     typename SerialStorage<Derived>::ConstStorageIterator end)
-    : mStartIt(start), mEndIt(end) {
-}
+    : mStartIt(start), mEndIt(end) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator SerialStorage<Derived>::ConstBeginEnd::begin()
@@ -276,8 +273,7 @@
 template <typename Derived>
 SerialStorage<Derived>::ConstIterator::ConstIterator(
     typename SerialStorage<Derived>::ConstStorageIterator start)
-    : mStorageIterator(start), mSerialIterator(nullptr) {
-}
+    : mStorageIterator(start), mSerialIterator(nullptr) {}
 
 template <typename Derived>
 typename SerialStorage<Derived>::ConstIterator&
diff --git a/src/dawn/common/SlabAllocator.cpp b/src/dawn/common/SlabAllocator.cpp
index 23540f5..b4d1827 100644
--- a/src/dawn/common/SlabAllocator.cpp
+++ b/src/dawn/common/SlabAllocator.cpp
@@ -25,19 +25,16 @@
 // IndexLinkNode
 
 SlabAllocatorImpl::IndexLinkNode::IndexLinkNode(Index index, Index nextIndex)
-    : index(index), nextIndex(nextIndex) {
-}
+    : index(index), nextIndex(nextIndex) {}
 
 // Slab
 
 SlabAllocatorImpl::Slab::Slab(char allocation[], IndexLinkNode* head)
-    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {
-}
+    : allocation(allocation), freeList(head), prev(nullptr), next(nullptr), blocksInUse(0) {}
 
 SlabAllocatorImpl::Slab::Slab(Slab&& rhs) = default;
 
-SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {
-}
+SlabAllocatorImpl::SentinelSlab::SentinelSlab() : Slab(nullptr, nullptr) {}
 
 SlabAllocatorImpl::SentinelSlab::SentinelSlab(SentinelSlab&& rhs) = default;
 
@@ -83,8 +80,7 @@
       mTotalAllocationSize(rhs.mTotalAllocationSize),
       mAvailableSlabs(std::move(rhs.mAvailableSlabs)),
       mFullSlabs(std::move(rhs.mFullSlabs)),
-      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {
-}
+      mRecycledSlabs(std::move(rhs.mRecycledSlabs)) {}
 
 SlabAllocatorImpl::~SlabAllocatorImpl() = default;
 
diff --git a/src/dawn/common/SlabAllocator.h b/src/dawn/common/SlabAllocator.h
index c94bc25..e828dea 100644
--- a/src/dawn/common/SlabAllocator.h
+++ b/src/dawn/common/SlabAllocator.h
@@ -168,8 +168,7 @@
     SlabAllocator(size_t totalObjectBytes,
                   uint32_t objectSize = u32_sizeof<T>,
                   uint32_t objectAlignment = u32_alignof<T>)
-        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {
-    }
+        : SlabAllocatorImpl(totalObjectBytes / objectSize, objectSize, objectAlignment) {}
 
     template <typename... Args>
     T* Allocate(Args&&... args) {
@@ -177,9 +176,7 @@
         return new (ptr) T(std::forward<Args>(args)...);
     }
 
-    void Deallocate(T* object) {
-        SlabAllocatorImpl::Deallocate(object);
-    }
+    void Deallocate(T* object) { SlabAllocatorImpl::Deallocate(object); }
 };
 
 #endif  // SRC_DAWN_COMMON_SLABALLOCATOR_H_
diff --git a/src/dawn/common/StackContainer.h b/src/dawn/common/StackContainer.h
index f531261..ba3bfae 100644
--- a/src/dawn/common/StackContainer.h
+++ b/src/dawn/common/StackContainer.h
@@ -41,16 +41,11 @@
     // maintaining this for as long as any containers using this allocator are
     // live.
     struct Source {
-        Source() : used_stack_buffer_(false) {
-        }
+        Source() : used_stack_buffer_(false) {}
 
         // Casts the buffer in its right type.
-        T* stack_buffer() {
-            return reinterpret_cast<T*>(stack_buffer_);
-        }
-        const T* stack_buffer() const {
-            return reinterpret_cast<const T*>(&stack_buffer_);
-        }
+        T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+        const T* stack_buffer() const { return reinterpret_cast<const T*>(&stack_buffer_); }
 
         // The buffer itself. It is not of type T because we don't want the
         // constructors and destructors to be automatically called. Define a POD
@@ -73,8 +68,7 @@
 
     // For the straight up copy c-tor, we can share storage.
     StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
-        : std::allocator<T>(), source_(rhs.source_) {
-    }
+        : std::allocator<T>(), source_(rhs.source_) {}
 
     // ISO C++ requires the following constructor to be defined,
     // and std::vector in VC++2008SP1 Release fails with an error
@@ -84,18 +78,15 @@
     // no guarantee that the Source buffer of Ts is large enough
     // for Us.
     template <typename U, size_t other_capacity>
-    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {
-    }
+    StackAllocator(const StackAllocator<U, other_capacity>& other) : source_(nullptr) {}
 
     // This constructor must exist. It creates a default allocator that doesn't
     // actually have a stack buffer. glibc's std::string() will compare the
     // current allocator against the default-constructed allocator, so this
     // should be fast.
-    StackAllocator() : source_(nullptr) {
-    }
+    StackAllocator() : source_(nullptr) {}
 
-    explicit StackAllocator(Source* source) : source_(source) {
-    }
+    explicit StackAllocator(Source* source) : source_(source) {}
 
     // Actually do the allocation. Use the stack buffer if nobody has used it yet
     // and the size requested fits. Otherwise, fall through to the standard
@@ -154,28 +145,18 @@
     // shorter lifetimes than the source. The copy will share the same allocator
     // and therefore the same stack buffer as the original. Use std::copy to
     // copy into a "real" container for longer-lived objects.
-    ContainerType& container() {
-        return container_;
-    }
-    const ContainerType& container() const {
-        return container_;
-    }
+    ContainerType& container() { return container_; }
+    const ContainerType& container() const { return container_; }
 
     // Support operator-> to get to the container. This allows nicer syntax like:
     //   StackContainer<...> foo;
     //   std::sort(foo->begin(), foo->end());
-    ContainerType* operator->() {
-        return &container_;
-    }
-    const ContainerType* operator->() const {
-        return &container_;
-    }
+    ContainerType* operator->() { return &container_; }
+    const ContainerType* operator->() const { return &container_; }
 
     // Retrieves the stack source so that that unit tests can verify that the
     // buffer is being used properly.
-    const typename Allocator::Source& stack_data() const {
-        return stack_data_;
-    }
+    const typename Allocator::Source& stack_data() const { return stack_data_; }
 
   protected:
     typename Allocator::Source stack_data_;
@@ -225,8 +206,7 @@
     : public StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity> {
   public:
     StackVector()
-        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {
-    }
+        : StackContainer<std::vector<T, StackAllocator<T, stack_capacity>>, stack_capacity>() {}
 
     // We need to put this in STL containers sometimes, which requires a copy
     // constructor. We can't call the regular copy constructor because that will
@@ -244,12 +224,8 @@
 
     // Vectors are commonly indexed, which isn't very convenient even with
     // operator-> (using "->at()" does exception stuff we don't want).
-    T& operator[](size_t i) {
-        return this->container().operator[](i);
-    }
-    const T& operator[](size_t i) const {
-        return this->container().operator[](i);
-    }
+    T& operator[](size_t i) { return this->container().operator[](i); }
+    const T& operator[](size_t i) const { return this->container().operator[](i); }
 
   private:
     // StackVector(const StackVector& rhs) = delete;
diff --git a/src/dawn/common/SystemUtils.cpp b/src/dawn/common/SystemUtils.cpp
index c8df8af..2d47fd1 100644
--- a/src/dawn/common/SystemUtils.cpp
+++ b/src/dawn/common/SystemUtils.cpp
@@ -18,17 +18,17 @@
 #include "dawn/common/Log.h"
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include <Windows.h>
-#    include <vector>
+#include <Windows.h>
+#include <vector>
 #elif defined(DAWN_PLATFORM_LINUX)
-#    include <dlfcn.h>
-#    include <limits.h>
-#    include <unistd.h>
-#    include <cstdlib>
+#include <dlfcn.h>
+#include <limits.h>
+#include <unistd.h>
+#include <cstdlib>
 #elif defined(DAWN_PLATFORM_MACOS) || defined(DAWN_PLATFORM_IOS)
-#    include <dlfcn.h>
-#    include <mach-o/dyld.h>
-#    include <vector>
+#include <dlfcn.h>
+#include <mach-o/dyld.h>
+#include <vector>
 #endif
 
 #include <array>
@@ -84,7 +84,7 @@
     return setenv(variableName, value, 1) == 0;
 }
 #else
-#    error "Implement Get/SetEnvironmentVar for your platform."
+#error "Implement Get/SetEnvironmentVar for your platform."
 #endif
 
 #if defined(DAWN_PLATFORM_WINDOWS)
@@ -134,7 +134,7 @@
     return {};
 }
 #else
-#    error "Implement GetExecutablePath for your platform."
+#error "Implement GetExecutablePath for your platform."
 #endif
 
 std::optional<std::string> GetExecutableDirectory() {
@@ -168,15 +168,15 @@
     static int placeholderSymbol = 0;
     HMODULE module = nullptr;
 // GetModuleHandleEx is unavailable on UWP
-#    if defined(DAWN_IS_WINUWP)
+#if defined(DAWN_IS_WINUWP)
     return {};
-#    else
+#else
     if (!GetModuleHandleExA(
             GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
             reinterpret_cast<LPCSTR>(&placeholderSymbol), &module)) {
         return {};
     }
-#    endif
+#endif
     return GetHModulePath(module);
 }
 #elif defined(DAWN_PLATFORM_FUCHSIA)
@@ -188,7 +188,7 @@
     return {};
 }
 #else
-#    error "Implement GetModulePath for your platform."
+#error "Implement GetModulePath for your platform."
 #endif
 
 std::optional<std::string> GetModuleDirectory() {
@@ -208,8 +208,7 @@
 ScopedEnvironmentVar::ScopedEnvironmentVar(const char* variableName, const char* value)
     : mName(variableName),
       mOriginalValue(GetEnvironmentVar(variableName)),
-      mIsSet(SetEnvironmentVar(variableName, value)) {
-}
+      mIsSet(SetEnvironmentVar(variableName, value)) {}
 
 ScopedEnvironmentVar::~ScopedEnvironmentVar() {
     if (mIsSet) {
diff --git a/src/dawn/common/TypedInteger.h b/src/dawn/common/TypedInteger.h
index 4844419..b9d43a4 100644
--- a/src/dawn/common/TypedInteger.h
+++ b/src/dawn/common/TypedInteger.h
@@ -50,8 +50,8 @@
 //     uint32_t aValue = static_cast<uint32_t>(a);
 //
 namespace detail {
-    template <typename Tag, typename T>
-    class TypedIntegerImpl;
+template <typename Tag, typename T>
+class TypedIntegerImpl;
 }  // namespace detail
 
 template <typename Tag, typename T, typename = std::enable_if_t<std::is_integral<T>::value>>
@@ -62,200 +62,198 @@
 #endif
 
 namespace detail {
-    template <typename Tag, typename T>
-    class alignas(T) TypedIntegerImpl {
-        static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
-        T mValue;
+template <typename Tag, typename T>
+class alignas(T) TypedIntegerImpl {
+    static_assert(std::is_integral<T>::value, "TypedInteger must be integral");
+    T mValue;
 
-      public:
-        constexpr TypedIntegerImpl() : mValue(0) {
-            static_assert(alignof(TypedIntegerImpl) == alignof(T));
-            static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
-        }
+  public:
+    constexpr TypedIntegerImpl() : mValue(0) {
+        static_assert(alignof(TypedIntegerImpl) == alignof(T));
+        static_assert(sizeof(TypedIntegerImpl) == sizeof(T));
+    }
 
-        // Construction from non-narrowing integral types.
-        template <typename I,
-                  typename = std::enable_if_t<
-                      std::is_integral<I>::value &&
-                      std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
-                      std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
-        explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {
-        }
+    // Construction from non-narrowing integral types.
+    template <typename I,
+              typename =
+                  std::enable_if_t<std::is_integral<I>::value &&
+                                   std::numeric_limits<I>::max() <= std::numeric_limits<T>::max() &&
+                                   std::numeric_limits<I>::min() >= std::numeric_limits<T>::min()>>
+    explicit constexpr TypedIntegerImpl(I rhs) : mValue(static_cast<T>(rhs)) {}
 
-        // Allow explicit casts only to the underlying type. If you're casting out of an
-        // TypedInteger, you should know what what you're doing, and exactly what type you
-        // expect.
-        explicit constexpr operator T() const {
-            return static_cast<T>(this->mValue);
-        }
+    // Allow explicit casts only to the underlying type. If you're casting out of an
+    // TypedInteger, you should know what you're doing, and exactly what type you
+    // expect.
+    explicit constexpr operator T() const { return static_cast<T>(this->mValue); }
 
 // Same-tag TypedInteger comparison operators
-#define TYPED_COMPARISON(op)                                        \
-    constexpr bool operator op(const TypedIntegerImpl& rhs) const { \
-        return mValue op rhs.mValue;                                \
-    }
-        TYPED_COMPARISON(<)
-        TYPED_COMPARISON(<=)
-        TYPED_COMPARISON(>)
-        TYPED_COMPARISON(>=)
-        TYPED_COMPARISON(==)
-        TYPED_COMPARISON(!=)
+#define TYPED_COMPARISON(op) \
+    constexpr bool operator op(const TypedIntegerImpl& rhs) const { return mValue op rhs.mValue; }
+    TYPED_COMPARISON(<)
+    TYPED_COMPARISON(<=)
+    TYPED_COMPARISON(>)
+    TYPED_COMPARISON(>=)
+    TYPED_COMPARISON(==)
+    TYPED_COMPARISON(!=)
 #undef TYPED_COMPARISON
 
-        // Increment / decrement operators for for-loop iteration
-        constexpr TypedIntegerImpl& operator++() {
-            ASSERT(this->mValue < std::numeric_limits<T>::max());
-            ++this->mValue;
-            return *this;
+    // Increment / decrement operators for for-loop iteration
+    constexpr TypedIntegerImpl& operator++() {
+        ASSERT(this->mValue < std::numeric_limits<T>::max());
+        ++this->mValue;
+        return *this;
+    }
+
+    constexpr TypedIntegerImpl operator++(int) {
+        TypedIntegerImpl ret = *this;
+
+        ASSERT(this->mValue < std::numeric_limits<T>::max());
+        ++this->mValue;
+        return ret;
+    }
+
+    constexpr TypedIntegerImpl& operator--() {
+        ASSERT(this->mValue > std::numeric_limits<T>::min());
+        --this->mValue;
+        return *this;
+    }
+
+    constexpr TypedIntegerImpl operator--(int) {
+        TypedIntegerImpl ret = *this;
+
+        ASSERT(this->mValue > std::numeric_limits<T>::min());
+        --this->mValue;
+        return ret;
+    }
+
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
+
+        // Overflow would wrap around
+        ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
+        return lhs.mValue + rhs.mValue;
+    }
+
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))> AddImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
+
+        if (lhs.mValue > 0) {
+            // rhs is positive: |rhs| is at most the distance between max and |lhs|.
+            // rhs is negative: (positive + negative) won't overflow
+            ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
+        } else {
+            // rhs is positive: (negative + positive) won't underflow
+            // rhs is negative: |rhs| isn't less than the (negative) distance between min
+            // and |lhs|
+            ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
         }
+        return lhs.mValue + rhs.mValue;
+    }
 
-        constexpr TypedIntegerImpl operator++(int) {
-            TypedIntegerImpl ret = *this;
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))> SubImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
 
-            ASSERT(this->mValue < std::numeric_limits<T>::max());
-            ++this->mValue;
-            return ret;
+        // Overflow would wrap around
+        ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
+        return lhs.mValue - rhs.mValue;
+    }
+
+    template <typename T2 = T>
+    static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
+        TypedIntegerImpl<Tag, T> lhs,
+        TypedIntegerImpl<Tag, T2> rhs) {
+        static_assert(std::is_same<T, T2>::value);
+
+        if (lhs.mValue > 0) {
+            // rhs is positive: positive minus positive won't overflow
+            // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
+            // and max.
+            ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
+        } else {
+            // rhs is positive: |rhs| is at most the distance between min and |lhs|
+            // rhs is negative: negative minus negative won't overflow
+            ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
         }
+        return lhs.mValue - rhs.mValue;
+    }
 
-        constexpr TypedIntegerImpl& operator--() {
-            ASSERT(this->mValue > std::numeric_limits<T>::min());
-            --this->mValue;
-            return *this;
-        }
+    template <typename T2 = T>
+    constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
+        static_assert(std::is_same<T, T2>::value);
+        // The negation of the most negative value cannot be represented.
+        ASSERT(this->mValue != std::numeric_limits<T>::min());
+        return TypedIntegerImpl(-this->mValue);
+    }
 
-        constexpr TypedIntegerImpl operator--(int) {
-            TypedIntegerImpl ret = *this;
+    constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
+        auto result = AddImpl(*this, rhs);
+        static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
+        return TypedIntegerImpl(result);
+    }
 
-            ASSERT(this->mValue > std::numeric_limits<T>::min());
-            --this->mValue;
-            return ret;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_unsigned<T2>::value, decltype(T(0) + T2(0))>
-        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            // Overflow would wrap around
-            ASSERT(lhs.mValue + rhs.mValue >= lhs.mValue);
-            return lhs.mValue + rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_signed<T2>::value, decltype(T(0) + T2(0))>
-        AddImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            if (lhs.mValue > 0) {
-                // rhs is positive: |rhs| is at most the distance between max and |lhs|.
-                // rhs is negative: (positive + negative) won't overflow
-                ASSERT(rhs.mValue <= std::numeric_limits<T>::max() - lhs.mValue);
-            } else {
-                // rhs is postive: (negative + positive) won't underflow
-                // rhs is negative: |rhs| isn't less than the (negative) distance between min
-                // and |lhs|
-                ASSERT(rhs.mValue >= std::numeric_limits<T>::min() - lhs.mValue);
-            }
-            return lhs.mValue + rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_unsigned<T>::value, decltype(T(0) - T2(0))>
-        SubImpl(TypedIntegerImpl<Tag, T> lhs, TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            // Overflow would wrap around
-            ASSERT(lhs.mValue - rhs.mValue <= lhs.mValue);
-            return lhs.mValue - rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        static constexpr std::enable_if_t<std::is_signed<T>::value, decltype(T(0) - T2(0))> SubImpl(
-            TypedIntegerImpl<Tag, T> lhs,
-            TypedIntegerImpl<Tag, T2> rhs) {
-            static_assert(std::is_same<T, T2>::value);
-
-            if (lhs.mValue > 0) {
-                // rhs is positive: positive minus positive won't overflow
-                // rhs is negative: |rhs| isn't less than the (negative) distance between |lhs|
-                // and max.
-                ASSERT(rhs.mValue >= lhs.mValue - std::numeric_limits<T>::max());
-            } else {
-                // rhs is positive: |rhs| is at most the distance between min and |lhs|
-                // rhs is negative: negative minus negative won't overflow
-                ASSERT(rhs.mValue <= lhs.mValue - std::numeric_limits<T>::min());
-            }
-            return lhs.mValue - rhs.mValue;
-        }
-
-        template <typename T2 = T>
-        constexpr std::enable_if_t<std::is_signed<T2>::value, TypedIntegerImpl> operator-() const {
-            static_assert(std::is_same<T, T2>::value);
-            // The negation of the most negative value cannot be represented.
-            ASSERT(this->mValue != std::numeric_limits<T>::min());
-            return TypedIntegerImpl(-this->mValue);
-        }
-
-        constexpr TypedIntegerImpl operator+(TypedIntegerImpl rhs) const {
-            auto result = AddImpl(*this, rhs);
-            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Add instead.");
-            return TypedIntegerImpl(result);
-        }
-
-        constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
-            auto result = SubImpl(*this, rhs);
-            static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
-            return TypedIntegerImpl(result);
-        }
-    };
+    constexpr TypedIntegerImpl operator-(TypedIntegerImpl rhs) const {
+        auto result = SubImpl(*this, rhs);
+        static_assert(std::is_same<T, decltype(result)>::value, "Use ityp::Sub instead.");
+        return TypedIntegerImpl(result);
+    }
+};
 
 }  // namespace detail
 
 namespace std {
 
-    template <typename Tag, typename T>
-    class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
-      public:
-        static detail::TypedIntegerImpl<Tag, T> max() noexcept {
-            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
-        }
-        static detail::TypedIntegerImpl<Tag, T> min() noexcept {
-            return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
-        }
-    };
+template <typename Tag, typename T>
+class numeric_limits<detail::TypedIntegerImpl<Tag, T>> : public numeric_limits<T> {
+  public:
+    static detail::TypedIntegerImpl<Tag, T> max() noexcept {
+        return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::max());
+    }
+    static detail::TypedIntegerImpl<Tag, T> min() noexcept {
+        return detail::TypedIntegerImpl<Tag, T>(std::numeric_limits<T>::min());
+    }
+};
 
 }  // namespace std
 
 namespace ityp {
 
-    // These helpers below are provided since the default arithmetic operators for small integer
-    // types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
-    // casting or conditional code between Release/Debug. Callsites should use ityp::Add(a, b) and
-    // ityp::Sub(a, b) instead.
+// These helpers below are provided since the default arithmetic operators for small integer
+// types like uint8_t and uint16_t return integers, not their same type. To avoid lots of
+// casting or conditional code between Release/Debug. Callsites should use ityp::Add(a, b) and
+// ityp::Sub(a, b) instead.
 
-    template <typename Tag, typename T>
-    constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
-                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
-        return ::detail::TypedIntegerImpl<Tag, T>(
-            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
-    }
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Add(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                 ::detail::TypedIntegerImpl<Tag, T> rhs) {
+    return ::detail::TypedIntegerImpl<Tag, T>(
+        static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::AddImpl(lhs, rhs)));
+}
 
-    template <typename Tag, typename T>
-    constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
-                                                     ::detail::TypedIntegerImpl<Tag, T> rhs) {
-        return ::detail::TypedIntegerImpl<Tag, T>(
-            static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
-    }
+template <typename Tag, typename T>
+constexpr ::detail::TypedIntegerImpl<Tag, T> Sub(::detail::TypedIntegerImpl<Tag, T> lhs,
+                                                 ::detail::TypedIntegerImpl<Tag, T> rhs) {
+    return ::detail::TypedIntegerImpl<Tag, T>(
+        static_cast<T>(::detail::TypedIntegerImpl<Tag, T>::SubImpl(lhs, rhs)));
+}
 
-    template <typename T>
-    constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
-        return static_cast<T>(lhs + rhs);
-    }
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Add(T lhs, T rhs) {
+    return static_cast<T>(lhs + rhs);
+}
 
-    template <typename T>
-    constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
-        return static_cast<T>(lhs - rhs);
-    }
+template <typename T>
+constexpr std::enable_if_t<std::is_integral<T>::value, T> Sub(T lhs, T rhs) {
+    return static_cast<T>(lhs - rhs);
+}
 
 }  // namespace ityp
 
diff --git a/src/dawn/common/UnderlyingType.h b/src/dawn/common/UnderlyingType.h
index 5b499da..b4ff8ea 100644
--- a/src/dawn/common/UnderlyingType.h
+++ b/src/dawn/common/UnderlyingType.h
@@ -22,27 +22,27 @@
 // template parameter. It includes a specialization for detail::TypedIntegerImpl which yields
 // the wrapped integer type.
 namespace detail {
-    template <typename T, typename Enable = void>
-    struct UnderlyingTypeImpl;
+template <typename T, typename Enable = void>
+struct UnderlyingTypeImpl;
 
-    template <typename I>
-    struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
-        using type = I;
-    };
+template <typename I>
+struct UnderlyingTypeImpl<I, typename std::enable_if_t<std::is_integral<I>::value>> {
+    using type = I;
+};
 
-    template <typename E>
-    struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
-        using type = std::underlying_type_t<E>;
-    };
+template <typename E>
+struct UnderlyingTypeImpl<E, typename std::enable_if_t<std::is_enum<E>::value>> {
+    using type = std::underlying_type_t<E>;
+};
 
-    // Forward declare the TypedInteger impl.
-    template <typename Tag, typename T>
-    class TypedIntegerImpl;
+// Forward declare the TypedInteger impl.
+template <typename Tag, typename T>
+class TypedIntegerImpl;
 
-    template <typename Tag, typename I>
-    struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
-        using type = typename UnderlyingTypeImpl<I>::type;
-    };
+template <typename Tag, typename I>
+struct UnderlyingTypeImpl<TypedIntegerImpl<Tag, I>> {
+    using type = typename UnderlyingTypeImpl<I>::type;
+};
 }  // namespace detail
 
 template <typename T>
diff --git a/src/dawn/common/ityp_array.h b/src/dawn/common/ityp_array.h
index d84db7c..a410302 100644
--- a/src/dawn/common/ityp_array.h
+++ b/src/dawn/common/ityp_array.h
@@ -26,75 +26,64 @@
 
 namespace ityp {
 
-    // ityp::array is a helper class that wraps std::array with the restriction that
-    // indices must be a particular type |Index|. Dawn uses multiple flat maps of
-    // index-->data, and this class helps ensure an indices cannot be passed interchangably
-    // to a flat map of a different type.
-    template <typename Index, typename Value, size_t Size>
-    class array : private std::array<Value, Size> {
-        using I = UnderlyingType<Index>;
-        using Base = std::array<Value, Size>;
+// ityp::array is a helper class that wraps std::array with the restriction that
+// indices must be a particular type |Index|. Dawn uses multiple flat maps of
+// index-->data, and this class helps ensure indices cannot be passed interchangeably
+// to a flat map of a different type.
+template <typename Index, typename Value, size_t Size>
+class array : private std::array<Value, Size> {
+    using I = UnderlyingType<Index>;
+    using Base = std::array<Value, Size>;
 
-        static_assert(Size <= std::numeric_limits<I>::max());
+    static_assert(Size <= std::numeric_limits<I>::max());
 
-      public:
-        constexpr array() = default;
+  public:
+    constexpr array() = default;
 
-        template <typename... Values>
-        // NOLINTNEXTLINE(runtime/explicit)
-        constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {
-        }
+    template <typename... Values>
+    // NOLINTNEXTLINE(runtime/explicit)
+    constexpr array(Values&&... values) : Base{std::forward<Values>(values)...} {}
 
-        Value& operator[](Index i) {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::operator[](index);
-        }
+    Value& operator[](Index i) {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::operator[](index);
+    }
 
-        constexpr const Value& operator[](Index i) const {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::operator[](index);
-        }
+    constexpr const Value& operator[](Index i) const {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::operator[](index);
+    }
 
-        Value& at(Index i) {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::at(index);
-        }
+    Value& at(Index i) {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::at(index);
+    }
 
-        constexpr const Value& at(Index i) const {
-            I index = static_cast<I>(i);
-            ASSERT(index >= 0 && index < I(Size));
-            return Base::at(index);
-        }
+    constexpr const Value& at(Index i) const {
+        I index = static_cast<I>(i);
+        ASSERT(index >= 0 && index < I(Size));
+        return Base::at(index);
+    }
 
-        typename Base::iterator begin() noexcept {
-            return Base::begin();
-        }
+    typename Base::iterator begin() noexcept { return Base::begin(); }
 
-        typename Base::const_iterator begin() const noexcept {
-            return Base::begin();
-        }
+    typename Base::const_iterator begin() const noexcept { return Base::begin(); }
 
-        typename Base::iterator end() noexcept {
-            return Base::end();
-        }
+    typename Base::iterator end() noexcept { return Base::end(); }
 
-        typename Base::const_iterator end() const noexcept {
-            return Base::end();
-        }
+    typename Base::const_iterator end() const noexcept { return Base::end(); }
 
-        constexpr Index size() const {
-            return Index(I(Size));
-        }
+    constexpr Index size() const { return Index(I(Size)); }
 
-        using Base::back;
-        using Base::data;
-        using Base::empty;
-        using Base::fill;
-        using Base::front;
-    };
+    using Base::back;
+    using Base::data;
+    using Base::empty;
+    using Base::fill;
+    using Base::front;
+};
 
 }  // namespace ityp
 
diff --git a/src/dawn/common/ityp_bitset.h b/src/dawn/common/ityp_bitset.h
index 43ca81a..e9cfa05 100644
--- a/src/dawn/common/ityp_bitset.h
+++ b/src/dawn/common/ityp_bitset.h
@@ -21,116 +21,95 @@
 
 namespace ityp {
 
-    // ityp::bitset is a helper class that wraps std::bitset with the restriction that
-    // indices must be a particular type |Index|.
-    template <typename Index, size_t N>
-    class bitset : private std::bitset<N> {
-        using I = UnderlyingType<Index>;
-        using Base = std::bitset<N>;
+// ityp::bitset is a helper class that wraps std::bitset with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, size_t N>
+class bitset : private std::bitset<N> {
+    using I = UnderlyingType<Index>;
+    using Base = std::bitset<N>;
 
-        static_assert(sizeof(I) <= sizeof(size_t));
+    static_assert(sizeof(I) <= sizeof(size_t));
 
-        explicit constexpr bitset(const Base& rhs) : Base(rhs) {
-        }
+    explicit constexpr bitset(const Base& rhs) : Base(rhs) {}
 
-      public:
-        using reference = typename Base::reference;
+  public:
+    using reference = typename Base::reference;
 
-        constexpr bitset() noexcept : Base() {
-        }
+    constexpr bitset() noexcept : Base() {}
 
-        // NOLINTNEXTLINE(runtime/explicit)
-        constexpr bitset(uint64_t value) noexcept : Base(value) {
-        }
+    // NOLINTNEXTLINE(runtime/explicit)
+    constexpr bitset(uint64_t value) noexcept : Base(value) {}
 
-        constexpr bool operator[](Index i) const {
-            return Base::operator[](static_cast<I>(i));
-        }
+    constexpr bool operator[](Index i) const { return Base::operator[](static_cast<I>(i)); }
 
-        typename Base::reference operator[](Index i) {
-            return Base::operator[](static_cast<I>(i));
-        }
+    typename Base::reference operator[](Index i) { return Base::operator[](static_cast<I>(i)); }
 
-        bool test(Index i) const {
-            return Base::test(static_cast<I>(i));
-        }
+    bool test(Index i) const { return Base::test(static_cast<I>(i)); }
 
-        using Base::all;
-        using Base::any;
-        using Base::count;
-        using Base::none;
-        using Base::size;
+    using Base::all;
+    using Base::any;
+    using Base::count;
+    using Base::none;
+    using Base::size;
 
-        bool operator==(const bitset& other) const noexcept {
-            return Base::operator==(static_cast<const Base&>(other));
-        }
+    bool operator==(const bitset& other) const noexcept {
+        return Base::operator==(static_cast<const Base&>(other));
+    }
 
-        bool operator!=(const bitset& other) const noexcept {
-            return Base::operator!=(static_cast<const Base&>(other));
-        }
+    bool operator!=(const bitset& other) const noexcept {
+        return Base::operator!=(static_cast<const Base&>(other));
+    }
 
-        bitset& operator&=(const bitset& other) noexcept {
-            return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
-        }
+    bitset& operator&=(const bitset& other) noexcept {
+        return static_cast<bitset&>(Base::operator&=(static_cast<const Base&>(other)));
+    }
 
-        bitset& operator|=(const bitset& other) noexcept {
-            return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
-        }
+    bitset& operator|=(const bitset& other) noexcept {
+        return static_cast<bitset&>(Base::operator|=(static_cast<const Base&>(other)));
+    }
 
-        bitset& operator^=(const bitset& other) noexcept {
-            return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
-        }
+    bitset& operator^=(const bitset& other) noexcept {
+        return static_cast<bitset&>(Base::operator^=(static_cast<const Base&>(other)));
+    }
 
-        bitset operator~() const noexcept {
-            return bitset(*this).flip();
-        }
+    bitset operator~() const noexcept { return bitset(*this).flip(); }
 
-        bitset& set() noexcept {
-            return static_cast<bitset&>(Base::set());
-        }
+    bitset& set() noexcept { return static_cast<bitset&>(Base::set()); }
 
-        bitset& set(Index i, bool value = true) {
-            return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
-        }
+    bitset& set(Index i, bool value = true) {
+        return static_cast<bitset&>(Base::set(static_cast<I>(i), value));
+    }
 
-        bitset& reset() noexcept {
-            return static_cast<bitset&>(Base::reset());
-        }
+    bitset& reset() noexcept { return static_cast<bitset&>(Base::reset()); }
 
-        bitset& reset(Index i) {
-            return static_cast<bitset&>(Base::reset(static_cast<I>(i)));
-        }
+    bitset& reset(Index i) { return static_cast<bitset&>(Base::reset(static_cast<I>(i))); }
 
-        bitset& flip() noexcept {
-            return static_cast<bitset&>(Base::flip());
-        }
+    bitset& flip() noexcept { return static_cast<bitset&>(Base::flip()); }
 
-        bitset& flip(Index i) {
-            return static_cast<bitset&>(Base::flip(static_cast<I>(i)));
-        }
+    bitset& flip(Index i) { return static_cast<bitset&>(Base::flip(static_cast<I>(i))); }
 
-        using Base::to_string;
-        using Base::to_ullong;
-        using Base::to_ulong;
+    using Base::to_string;
+    using Base::to_ullong;
+    using Base::to_ulong;
 
-        friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
-            return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
-        }
+    friend bitset operator&(const bitset& lhs, const bitset& rhs) noexcept {
+        return bitset(static_cast<const Base&>(lhs) & static_cast<const Base&>(rhs));
+    }
 
-        friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
-            return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
-        }
+    friend bitset operator|(const bitset& lhs, const bitset& rhs) noexcept {
+        return bitset(static_cast<const Base&>(lhs) | static_cast<const Base&>(rhs));
+    }
 
-        friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
-            return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
-        }
+    friend bitset operator^(const bitset& lhs, const bitset& rhs) noexcept {
+        return bitset(static_cast<const Base&>(lhs) ^ static_cast<const Base&>(rhs));
+    }
 
-        friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
-            return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
-        }
+    friend BitSetIterator<N, Index> IterateBitSet(const bitset& bitset) {
+        return BitSetIterator<N, Index>(static_cast<const Base&>(bitset));
+    }
 
-        friend struct std::hash<bitset>;
-    };
+    friend struct std::hash<bitset>;
+};
 
 }  // namespace ityp
 
@@ -147,7 +126,7 @@
     using I = UnderlyingType<Index>;
 #if defined(DAWN_COMPILER_MSVC)
     if constexpr (N > 32) {
-#    if defined(DAWN_PLATFORM_64_BIT)
+#if defined(DAWN_PLATFORM_64_BIT)
         // NOLINTNEXTLINE(runtime/int)
         unsigned long firstBitIndex = 0ul;
         unsigned char ret = _BitScanReverse64(&firstBitIndex, bitset.to_ullong());
@@ -155,7 +134,7 @@
             return Index(static_cast<I>(0));
         }
         return Index(static_cast<I>(firstBitIndex + 1));
-#    else   // defined(DAWN_PLATFORM_64_BIT)
+#else   // defined(DAWN_PLATFORM_64_BIT)
         if (bitset.none()) {
             return Index(static_cast<I>(0));
         }
@@ -165,7 +144,7 @@
             }
         }
         UNREACHABLE();
-#    endif  // defined(DAWN_PLATFORM_64_BIT)
+#endif  // defined(DAWN_PLATFORM_64_BIT)
     } else {
         // NOLINTNEXTLINE(runtime/int)
         unsigned long firstBitIndex = 0ul;
diff --git a/src/dawn/common/ityp_span.h b/src/dawn/common/ityp_span.h
index 7b0bb2b..4f76b57 100644
--- a/src/dawn/common/ityp_span.h
+++ b/src/dawn/common/ityp_span.h
@@ -22,81 +22,65 @@
 
 namespace ityp {
 
-    // ityp::span is a helper class that wraps an unowned packed array of type |Value|.
-    // It stores the size and pointer to first element. It has the restriction that
-    // indices must be a particular type |Index|. This provides a type-safe way to index
-    // raw pointers.
-    template <typename Index, typename Value>
-    class span {
-        using I = UnderlyingType<Index>;
+// ityp::span is a helper class that wraps an unowned packed array of type |Value|.
+// It stores the size and pointer to first element. It has the restriction that
+// indices must be a particular type |Index|. This provides a type-safe way to index
+// raw pointers.
+template <typename Index, typename Value>
+class span {
+    using I = UnderlyingType<Index>;
 
-      public:
-        constexpr span() : mData(nullptr), mSize(0) {
-        }
-        constexpr span(Value* data, Index size) : mData(data), mSize(size) {
-        }
+  public:
+    constexpr span() : mData(nullptr), mSize(0) {}
+    constexpr span(Value* data, Index size) : mData(data), mSize(size) {}
 
-        constexpr Value& operator[](Index i) const {
-            ASSERT(i < mSize);
-            return mData[static_cast<I>(i)];
-        }
+    constexpr Value& operator[](Index i) const {
+        ASSERT(i < mSize);
+        return mData[static_cast<I>(i)];
+    }
 
-        Value* data() noexcept {
-            return mData;
-        }
+    Value* data() noexcept { return mData; }
 
-        const Value* data() const noexcept {
-            return mData;
-        }
+    const Value* data() const noexcept { return mData; }
 
-        Value* begin() noexcept {
-            return mData;
-        }
+    Value* begin() noexcept { return mData; }
 
-        const Value* begin() const noexcept {
-            return mData;
-        }
+    const Value* begin() const noexcept { return mData; }
 
-        Value* end() noexcept {
-            return mData + static_cast<I>(mSize);
-        }
+    Value* end() noexcept { return mData + static_cast<I>(mSize); }
 
-        const Value* end() const noexcept {
-            return mData + static_cast<I>(mSize);
-        }
+    const Value* end() const noexcept { return mData + static_cast<I>(mSize); }
 
-        Value& front() {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *mData;
-        }
+    Value& front() {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *mData;
+    }
 
-        const Value& front() const {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *mData;
-        }
+    const Value& front() const {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *mData;
+    }
 
-        Value& back() {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *(mData + static_cast<I>(mSize) - 1);
-        }
+    Value& back() {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *(mData + static_cast<I>(mSize) - 1);
+    }
 
-        const Value& back() const {
-            ASSERT(mData != nullptr);
-            ASSERT(static_cast<I>(mSize) >= 0);
-            return *(mData + static_cast<I>(mSize) - 1);
-        }
+    const Value& back() const {
+        ASSERT(mData != nullptr);
+        ASSERT(static_cast<I>(mSize) >= 0);
+        return *(mData + static_cast<I>(mSize) - 1);
+    }
 
-        Index size() const {
-            return mSize;
-        }
+    Index size() const { return mSize; }
 
-      private:
-        Value* mData;
-        Index mSize;
-    };
+  private:
+    Value* mData;
+    Index mSize;
+};
 
 }  // namespace ityp
 
diff --git a/src/dawn/common/ityp_stack_vec.h b/src/dawn/common/ityp_stack_vec.h
index fb3fcf7..d35adf6 100644
--- a/src/dawn/common/ityp_stack_vec.h
+++ b/src/dawn/common/ityp_stack_vec.h
@@ -24,82 +24,53 @@
 
 namespace ityp {
 
-    template <typename Index, typename Value, size_t StaticCapacity>
-    class stack_vec : private StackVector<Value, StaticCapacity> {
-        using I = UnderlyingType<Index>;
-        using Base = StackVector<Value, StaticCapacity>;
-        using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
-        static_assert(StaticCapacity <= std::numeric_limits<I>::max());
+template <typename Index, typename Value, size_t StaticCapacity>
+class stack_vec : private StackVector<Value, StaticCapacity> {
+    using I = UnderlyingType<Index>;
+    using Base = StackVector<Value, StaticCapacity>;
+    using VectorBase = std::vector<Value, StackAllocator<Value, StaticCapacity>>;
+    static_assert(StaticCapacity <= std::numeric_limits<I>::max());
 
-      public:
-        stack_vec() : Base() {
-        }
-        explicit stack_vec(Index size) : Base() {
-            this->container().resize(static_cast<I>(size));
-        }
+  public:
+    stack_vec() : Base() {}
+    explicit stack_vec(Index size) : Base() { this->container().resize(static_cast<I>(size)); }
 
-        Value& operator[](Index i) {
-            ASSERT(i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    Value& operator[](Index i) {
+        ASSERT(i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        constexpr const Value& operator[](Index i) const {
-            ASSERT(i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    constexpr const Value& operator[](Index i) const {
+        ASSERT(i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        void resize(Index size) {
-            this->container().resize(static_cast<I>(size));
-        }
+    void resize(Index size) { this->container().resize(static_cast<I>(size)); }
 
-        void reserve(Index size) {
-            this->container().reserve(static_cast<I>(size));
-        }
+    void reserve(Index size) { this->container().reserve(static_cast<I>(size)); }
 
-        Value* data() {
-            return this->container().data();
-        }
+    Value* data() { return this->container().data(); }
 
-        const Value* data() const {
-            return this->container().data();
-        }
+    const Value* data() const { return this->container().data(); }
 
-        typename VectorBase::iterator begin() noexcept {
-            return this->container().begin();
-        }
+    typename VectorBase::iterator begin() noexcept { return this->container().begin(); }
 
-        typename VectorBase::const_iterator begin() const noexcept {
-            return this->container().begin();
-        }
+    typename VectorBase::const_iterator begin() const noexcept { return this->container().begin(); }
 
-        typename VectorBase::iterator end() noexcept {
-            return this->container().end();
-        }
+    typename VectorBase::iterator end() noexcept { return this->container().end(); }
 
-        typename VectorBase::const_iterator end() const noexcept {
-            return this->container().end();
-        }
+    typename VectorBase::const_iterator end() const noexcept { return this->container().end(); }
 
-        typename VectorBase::reference front() {
-            return this->container().front();
-        }
+    typename VectorBase::reference front() { return this->container().front(); }
 
-        typename VectorBase::const_reference front() const {
-            return this->container().front();
-        }
+    typename VectorBase::const_reference front() const { return this->container().front(); }
 
-        typename VectorBase::reference back() {
-            return this->container().back();
-        }
+    typename VectorBase::reference back() { return this->container().back(); }
 
-        typename VectorBase::const_reference back() const {
-            return this->container().back();
-        }
+    typename VectorBase::const_reference back() const { return this->container().back(); }
 
-        Index size() const {
-            return Index(static_cast<I>(this->container().size()));
-        }
-    };
+    Index size() const { return Index(static_cast<I>(this->container().size())); }
+};
 
 }  // namespace ityp
 
diff --git a/src/dawn/common/ityp_vector.h b/src/dawn/common/ityp_vector.h
index 2088c4f..3d402cf 100644
--- a/src/dawn/common/ityp_vector.h
+++ b/src/dawn/common/ityp_vector.h
@@ -24,85 +24,75 @@
 
 namespace ityp {
 
-    // ityp::vector is a helper class that wraps std::vector with the restriction that
-    // indices must be a particular type |Index|.
-    template <typename Index, typename Value>
-    class vector : public std::vector<Value> {
-        using I = UnderlyingType<Index>;
-        using Base = std::vector<Value>;
+// ityp::vector is a helper class that wraps std::vector with the restriction that
+// indices must be a particular type |Index|.
+template <typename Index, typename Value>
+class vector : public std::vector<Value> {
+    using I = UnderlyingType<Index>;
+    using Base = std::vector<Value>;
 
-      private:
-        // Disallow access to base constructors and untyped index/size-related operators.
-        using Base::Base;
-        using Base::operator=;
-        using Base::operator[];
-        using Base::at;
-        using Base::reserve;
-        using Base::resize;
-        using Base::size;
+  private:
+    // Disallow access to base constructors and untyped index/size-related operators.
+    using Base::Base;
+    using Base::operator=;
+    using Base::operator[];
+    using Base::at;
+    using Base::reserve;
+    using Base::resize;
+    using Base::size;
 
-      public:
-        vector() : Base() {
-        }
+  public:
+    vector() : Base() {}
 
-        explicit vector(Index size) : Base(static_cast<I>(size)) {
-        }
+    explicit vector(Index size) : Base(static_cast<I>(size)) {}
 
-        vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {
-        }
+    vector(Index size, const Value& init) : Base(static_cast<I>(size), init) {}
 
-        vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {
-        }
+    vector(const vector& rhs) : Base(static_cast<const Base&>(rhs)) {}
 
-        vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {
-        }
+    vector(vector&& rhs) : Base(static_cast<Base&&>(rhs)) {}
 
-        vector(std::initializer_list<Value> init) : Base(init) {
-        }
+    vector(std::initializer_list<Value> init) : Base(init) {}
 
-        vector& operator=(const vector& rhs) {
-            Base::operator=(static_cast<const Base&>(rhs));
-            return *this;
-        }
+    vector& operator=(const vector& rhs) {
+        Base::operator=(static_cast<const Base&>(rhs));
+        return *this;
+    }
 
-        vector& operator=(vector&& rhs) noexcept {
-            Base::operator=(static_cast<Base&&>(rhs));
-            return *this;
-        }
+    vector& operator=(vector&& rhs) noexcept {
+        Base::operator=(static_cast<Base&&>(rhs));
+        return *this;
+    }
 
-        Value& operator[](Index i) {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    Value& operator[](Index i) {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        constexpr const Value& operator[](Index i) const {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::operator[](static_cast<I>(i));
-        }
+    constexpr const Value& operator[](Index i) const {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::operator[](static_cast<I>(i));
+    }
 
-        Value& at(Index i) {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::at(static_cast<I>(i));
-        }
+    Value& at(Index i) {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::at(static_cast<I>(i));
+    }
 
-        constexpr const Value& at(Index i) const {
-            ASSERT(i >= Index(0) && i < size());
-            return Base::at(static_cast<I>(i));
-        }
+    constexpr const Value& at(Index i) const {
+        ASSERT(i >= Index(0) && i < size());
+        return Base::at(static_cast<I>(i));
+    }
 
-        constexpr Index size() const {
-            ASSERT(std::numeric_limits<I>::max() >= Base::size());
-            return Index(static_cast<I>(Base::size()));
-        }
+    constexpr Index size() const {
+        ASSERT(std::numeric_limits<I>::max() >= Base::size());
+        return Index(static_cast<I>(Base::size()));
+    }
 
-        void resize(Index size) {
-            Base::resize(static_cast<I>(size));
-        }
+    void resize(Index size) { Base::resize(static_cast<I>(size)); }
 
-        void reserve(Index size) {
-            Base::reserve(static_cast<I>(size));
-        }
-    };
+    void reserve(Index size) { Base::reserve(static_cast<I>(size)); }
+};
 
 }  // namespace ityp
 
diff --git a/src/dawn/common/vulkan_platform.h b/src/dawn/common/vulkan_platform.h
index 17d275f..ef90910 100644
--- a/src/dawn/common/vulkan_platform.h
+++ b/src/dawn/common/vulkan_platform.h
@@ -16,10 +16,10 @@
 #define SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
 
 #if !defined(DAWN_ENABLE_BACKEND_VULKAN)
-#    error "vulkan_platform.h included without the Vulkan backend enabled"
+#error "vulkan_platform.h included without the Vulkan backend enabled"
 #endif
 #if defined(VULKAN_CORE_H_)
-#    error "vulkan.h included before vulkan_platform.h"
+#error "vulkan.h included before vulkan_platform.h"
 #endif
 
 #include <cstddef>
@@ -36,7 +36,7 @@
 // (like vulkan.h on 64 bit) but makes sure the types are different on 32 bit architectures.
 
 #if defined(DAWN_PLATFORM_64_BIT)
-#    define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = struct object##_T*;
 // This function is needed because MSVC doesn't accept reinterpret_cast from uint64_t from uint64_t
 // TODO(cwallez@chromium.org): Remove this once we rework vulkan_platform.h
 template <typename T>
@@ -44,13 +44,13 @@
     return reinterpret_cast<T>(u64);
 }
 #elif defined(DAWN_PLATFORM_32_BIT)
-#    define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
+#define DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object) using object = uint64_t;
 template <typename T>
 T NativeNonDispatachableHandleFromU64(uint64_t u64) {
     return u64;
 }
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
 
 // Define a placeholder Vulkan handle for use before we include vulkan.h
@@ -67,89 +67,73 @@
 
 namespace dawn::native::vulkan {
 
-    namespace detail {
-        template <typename T>
-        struct WrapperStruct {
-            T member;
-        };
+namespace detail {
+template <typename T>
+struct WrapperStruct {
+    T member;
+};
 
-        template <typename T>
-        static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
+template <typename T>
+static constexpr size_t AlignOfInStruct = alignof(WrapperStruct<T>);
 
-        static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
-        static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
+static constexpr size_t kNativeVkHandleAlignment = AlignOfInStruct<VkSomeHandle>;
+static constexpr size_t kUint64Alignment = AlignOfInStruct<uint64_t>;
 
-        // Simple handle types that supports "nullptr_t" as a 0 value.
-        template <typename Tag, typename HandleType>
-        class alignas(detail::kNativeVkHandleAlignment) VkHandle {
-          public:
-            // Default constructor and assigning of VK_NULL_HANDLE
-            VkHandle() = default;
-            VkHandle(std::nullptr_t) {
-            }
+// Simple handle types that support "nullptr_t" as a 0 value.
+template <typename Tag, typename HandleType>
+class alignas(detail::kNativeVkHandleAlignment) VkHandle {
+  public:
+    // Default constructor and assigning of VK_NULL_HANDLE
+    VkHandle() = default;
+    VkHandle(std::nullptr_t) {}
 
-            // Use default copy constructor/assignment
-            VkHandle(const VkHandle<Tag, HandleType>& other) = default;
-            VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
+    // Use default copy constructor/assignment
+    VkHandle(const VkHandle<Tag, HandleType>& other) = default;
+    VkHandle& operator=(const VkHandle<Tag, HandleType>&) = default;
 
-            // Comparisons between handles
-            bool operator==(VkHandle<Tag, HandleType> other) const {
-                return mHandle == other.mHandle;
-            }
-            bool operator!=(VkHandle<Tag, HandleType> other) const {
-                return mHandle != other.mHandle;
-            }
+    // Comparisons between handles
+    bool operator==(VkHandle<Tag, HandleType> other) const { return mHandle == other.mHandle; }
+    bool operator!=(VkHandle<Tag, HandleType> other) const { return mHandle != other.mHandle; }
 
-            // Comparisons between handles and VK_NULL_HANDLE
-            bool operator==(std::nullptr_t) const {
-                return mHandle == 0;
-            }
-            bool operator!=(std::nullptr_t) const {
-                return mHandle != 0;
-            }
+    // Comparisons between handles and VK_NULL_HANDLE
+    bool operator==(std::nullptr_t) const { return mHandle == 0; }
+    bool operator!=(std::nullptr_t) const { return mHandle != 0; }
 
-            // Implicit conversion to real Vulkan types.
-            operator HandleType() const {
-                return GetHandle();
-            }
+    // Implicit conversion to real Vulkan types.
+    operator HandleType() const { return GetHandle(); }
 
-            HandleType GetHandle() const {
-                return mHandle;
-            }
+    HandleType GetHandle() const { return mHandle; }
 
-            HandleType& operator*() {
-                return mHandle;
-            }
+    HandleType& operator*() { return mHandle; }
 
-            static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
-                return VkHandle{handle};
-            }
-
-          private:
-            explicit VkHandle(HandleType handle) : mHandle(handle) {
-            }
-
-            HandleType mHandle = 0;
-        };
-    }  // namespace detail
-
-    static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
-
-    template <typename Tag, typename HandleType>
-    HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
-        return reinterpret_cast<HandleType*>(handle);
+    static VkHandle<Tag, HandleType> CreateFromHandle(HandleType handle) {
+        return VkHandle{handle};
     }
 
+  private:
+    explicit VkHandle(HandleType handle) : mHandle(handle) {}
+
+    HandleType mHandle = 0;
+};
+}  // namespace detail
+
+static constexpr std::nullptr_t VK_NULL_HANDLE = nullptr;
+
+template <typename Tag, typename HandleType>
+HandleType* AsVkArray(detail::VkHandle<Tag, HandleType>* handle) {
+    return reinterpret_cast<HandleType*>(handle);
+}
+
 }  // namespace dawn::native::vulkan
 
-#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object)                           \
-    DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object)                      \
-    namespace dawn::native::vulkan {                                        \
-        using object = detail::VkHandle<struct VkTag##object, ::object>;    \
-        static_assert(sizeof(object) == sizeof(uint64_t));                  \
-        static_assert(alignof(object) == detail::kUint64Alignment);         \
-        static_assert(sizeof(object) == sizeof(::object));                  \
-        static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
+#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object)                       \
+    DAWN_DEFINE_NATIVE_NON_DISPATCHABLE_HANDLE(object)                  \
+    namespace dawn::native::vulkan {                                    \
+    using object = detail::VkHandle<struct VkTag##object, ::object>;    \
+    static_assert(sizeof(object) == sizeof(uint64_t));                  \
+    static_assert(alignof(object) == detail::kUint64Alignment);         \
+    static_assert(sizeof(object) == sizeof(::object));                  \
+    static_assert(alignof(object) == detail::kNativeVkHandleAlignment); \
     }  // namespace dawn::native::vulkan
 
 // Import additional parts of Vulkan that are supported on our architecture and preemptively include
@@ -157,36 +141,36 @@
 // defines are defined already in the Vulkan-Header BUILD.gn, but are needed when building with
 // CMake, hence they cannot be removed at the moment.
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    ifndef VK_USE_PLATFORM_WIN32_KHR
-#        define VK_USE_PLATFORM_WIN32_KHR
-#    endif
-#    include "dawn/common/windows_with_undefs.h"
+#ifndef VK_USE_PLATFORM_WIN32_KHR
+#define VK_USE_PLATFORM_WIN32_KHR
+#endif
+#include "dawn/common/windows_with_undefs.h"
 #endif  // DAWN_PLATFORM_WINDOWS
 
 #if defined(DAWN_USE_X11)
-#    define VK_USE_PLATFORM_XLIB_KHR
-#    ifndef VK_USE_PLATFORM_XCB_KHR
-#        define VK_USE_PLATFORM_XCB_KHR
-#    endif
-#    include "dawn/common/xlib_with_undefs.h"
+#define VK_USE_PLATFORM_XLIB_KHR
+#ifndef VK_USE_PLATFORM_XCB_KHR
+#define VK_USE_PLATFORM_XCB_KHR
+#endif
+#include "dawn/common/xlib_with_undefs.h"
 #endif  // defined(DAWN_USE_X11)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-#    ifndef VK_USE_PLATFORM_METAL_EXT
-#        define VK_USE_PLATFORM_METAL_EXT
-#    endif
+#ifndef VK_USE_PLATFORM_METAL_EXT
+#define VK_USE_PLATFORM_METAL_EXT
+#endif
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_PLATFORM_ANDROID)
-#    ifndef VK_USE_PLATFORM_ANDROID_KHR
-#        define VK_USE_PLATFORM_ANDROID_KHR
-#    endif
+#ifndef VK_USE_PLATFORM_ANDROID_KHR
+#define VK_USE_PLATFORM_ANDROID_KHR
+#endif
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
 #if defined(DAWN_PLATFORM_FUCHSIA)
-#    ifndef VK_USE_PLATFORM_FUCHSIA
-#        define VK_USE_PLATFORM_FUCHSIA
-#    endif
+#ifndef VK_USE_PLATFORM_FUCHSIA
+#define VK_USE_PLATFORM_FUCHSIA
+#endif
 #endif  // defined(DAWN_PLATFORM_FUCHSIA)
 
 // The actual inclusion of vulkan.h!
@@ -200,7 +184,7 @@
 #elif defined(DAWN_PLATFORM_32_BIT)
 static constexpr uint64_t VK_NULL_HANDLE = 0;
 #else
-#    error "Unsupported platform"
+#error "Unsupported platform"
 #endif
 
 #endif  // SRC_DAWN_COMMON_VULKAN_PLATFORM_H_
diff --git a/src/dawn/common/windows_with_undefs.h b/src/dawn/common/windows_with_undefs.h
index 337ed60..63c27db 100644
--- a/src/dawn/common/windows_with_undefs.h
+++ b/src/dawn/common/windows_with_undefs.h
@@ -18,7 +18,7 @@
 #include "dawn/common/Platform.h"
 
 #if !defined(DAWN_PLATFORM_WINDOWS)
-#    error "windows_with_undefs.h included on non-Windows"
+#error "windows_with_undefs.h included on non-Windows"
 #endif
 
 // This header includes <windows.h> but removes all the extra defines that conflict with identifiers
diff --git a/src/dawn/common/xlib_with_undefs.h b/src/dawn/common/xlib_with_undefs.h
index 8073aa2..f9db481 100644
--- a/src/dawn/common/xlib_with_undefs.h
+++ b/src/dawn/common/xlib_with_undefs.h
@@ -18,7 +18,7 @@
 #include "dawn/common/Platform.h"
 
 #if !defined(DAWN_PLATFORM_LINUX)
-#    error "xlib_with_undefs.h included on non-Linux"
+#error "xlib_with_undefs.h included on non-Linux"
 #endif
 
 // This header includes <X11/Xlib.h> but removes all the extra defines that conflict with
diff --git a/src/dawn/fuzzers/DawnWireServerFuzzer.cpp b/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
index 5250d69..1d325d4 100644
--- a/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
+++ b/src/dawn/fuzzers/DawnWireServerFuzzer.cpp
@@ -29,39 +29,37 @@
 
 namespace {
 
-    class DevNull : public dawn::wire::CommandSerializer {
-      public:
-        size_t GetMaximumAllocationSize() const override {
-            // Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that.
-            return 1024 * 1024 * 1024;
-        }
-        void* GetCmdSpace(size_t size) override {
-            if (size > buf.size()) {
-                buf.resize(size);
-            }
-            return buf.data();
-        }
-        bool Flush() override {
-            return true;
-        }
-
-      private:
-        std::vector<char> buf;
-    };
-
-    std::unique_ptr<dawn::native::Instance> sInstance;
-    WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
-
-    bool sCommandsComplete = false;
-
-    WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
-                                             WGPUSurface surface,
-                                             const WGPUSwapChainDescriptor*) {
-        WGPUSwapChainDescriptor desc = {};
-        // A 0 implementation will trigger a swapchain creation error.
-        desc.implementation = 0;
-        return sOriginalDeviceCreateSwapChain(device, surface, &desc);
+class DevNull : public dawn::wire::CommandSerializer {
+  public:
+    size_t GetMaximumAllocationSize() const override {
+        // Some fuzzer bots have a 2GB allocation limit. Pick a value reasonably below that.
+        return 1024 * 1024 * 1024;
     }
+    void* GetCmdSpace(size_t size) override {
+        if (size > buf.size()) {
+            buf.resize(size);
+        }
+        return buf.data();
+    }
+    bool Flush() override { return true; }
+
+  private:
+    std::vector<char> buf;
+};
+
+std::unique_ptr<dawn::native::Instance> sInstance;
+WGPUProcDeviceCreateSwapChain sOriginalDeviceCreateSwapChain = nullptr;
+
+bool sCommandsComplete = false;
+
+WGPUSwapChain ErrorDeviceCreateSwapChain(WGPUDevice device,
+                                         WGPUSurface surface,
+                                         const WGPUSwapChainDescriptor*) {
+    WGPUSwapChainDescriptor desc = {};
+    // A 0 implementation will trigger a swapchain creation error.
+    desc.implementation = 0;
+    return sOriginalDeviceCreateSwapChain(device, surface, &desc);
+}
 
 }  // namespace
 
diff --git a/src/dawn/fuzzers/DawnWireServerFuzzer.h b/src/dawn/fuzzers/DawnWireServerFuzzer.h
index d6349ce..5ebc6b3 100644
--- a/src/dawn/fuzzers/DawnWireServerFuzzer.h
+++ b/src/dawn/fuzzers/DawnWireServerFuzzer.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class Instance;
+class Instance;
 
 }  // namespace dawn::native
 
 namespace DawnWireServerFuzzer {
 
-    using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;
+using MakeDeviceFn = std::function<wgpu::Device(dawn::native::Instance*)>;
 
-    int Initialize(int* argc, char*** argv);
+int Initialize(int* argc, char*** argv);
 
-    int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);
+int Run(const uint8_t* data, size_t size, MakeDeviceFn MakeDevice, bool supportsErrorInjection);
 
 }  // namespace DawnWireServerFuzzer
 
diff --git a/src/dawn/native/Adapter.cpp b/src/dawn/native/Adapter.cpp
index c6b1039..b24e920 100644
--- a/src/dawn/native/Adapter.cpp
+++ b/src/dawn/native/Adapter.cpp
@@ -24,207 +24,206 @@
 
 namespace dawn::native {
 
-    AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
-        : mInstance(instance), mBackend(backend) {
-        mSupportedFeatures.EnableFeature(Feature::DawnNative);
-        mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
+AdapterBase::AdapterBase(InstanceBase* instance, wgpu::BackendType backend)
+    : mInstance(instance), mBackend(backend) {
+    mSupportedFeatures.EnableFeature(Feature::DawnNative);
+    mSupportedFeatures.EnableFeature(Feature::DawnInternalUsages);
+}
+
+MaybeError AdapterBase::Initialize() {
+    DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
+    DAWN_TRY_CONTEXT(
+        InitializeSupportedFeaturesImpl(),
+        "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+        "backend=%s type=%s)",
+        mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+    DAWN_TRY_CONTEXT(
+        InitializeSupportedLimitsImpl(&mLimits),
+        "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
+        "backend=%s type=%s)",
+        mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
+
+    // Enforce internal Dawn constants.
+    mLimits.v1.maxVertexBufferArrayStride =
+        std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
+    mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
+    mLimits.v1.maxVertexAttributes =
+        std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
+    mLimits.v1.maxVertexBuffers =
+        std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
+    mLimits.v1.maxInterStageShaderComponents =
+        std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
+    mLimits.v1.maxSampledTexturesPerShaderStage =
+        std::min(mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
+    mLimits.v1.maxSamplersPerShaderStage =
+        std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
+    mLimits.v1.maxStorageBuffersPerShaderStage =
+        std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
+    mLimits.v1.maxStorageTexturesPerShaderStage =
+        std::min(mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
+    mLimits.v1.maxUniformBuffersPerShaderStage =
+        std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
+    mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
+        std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
+                 kMaxDynamicUniformBuffersPerPipelineLayout);
+    mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
+        std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
+                 kMaxDynamicStorageBuffersPerPipelineLayout);
+
+    return {};
+}
+
+bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
+    return GetLimits(limits);
+}
+
+void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
+    properties->vendorID = mVendorId;
+    properties->deviceID = mDeviceId;
+    properties->name = mName.c_str();
+    properties->driverDescription = mDriverDescription.c_str();
+    properties->adapterType = mAdapterType;
+    properties->backendType = mBackend;
+}
+
+bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
+    return mSupportedFeatures.IsEnabled(feature);
+}
+
+size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+    return mSupportedFeatures.EnumerateFeatures(features);
+}
+
+DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
+    DeviceDescriptor defaultDesc = {};
+    if (descriptor == nullptr) {
+        descriptor = &defaultDesc;
     }
-
-    MaybeError AdapterBase::Initialize() {
-        DAWN_TRY_CONTEXT(InitializeImpl(), "initializing adapter (backend=%s)", mBackend);
-        DAWN_TRY_CONTEXT(
-            InitializeSupportedFeaturesImpl(),
-            "gathering supported features for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
-            "backend=%s type=%s)",
-            mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
-        DAWN_TRY_CONTEXT(
-            InitializeSupportedLimitsImpl(&mLimits),
-            "gathering supported limits for \"%s\" - \"%s\" (vendorId=%#06x deviceId=%#06x "
-            "backend=%s type=%s)",
-            mName, mDriverDescription, mVendorId, mDeviceId, mBackend, mAdapterType);
-
-        // Enforce internal Dawn constants.
-        mLimits.v1.maxVertexBufferArrayStride =
-            std::min(mLimits.v1.maxVertexBufferArrayStride, kMaxVertexBufferArrayStride);
-        mLimits.v1.maxBindGroups = std::min(mLimits.v1.maxBindGroups, kMaxBindGroups);
-        mLimits.v1.maxVertexAttributes =
-            std::min(mLimits.v1.maxVertexAttributes, uint32_t(kMaxVertexAttributes));
-        mLimits.v1.maxVertexBuffers =
-            std::min(mLimits.v1.maxVertexBuffers, uint32_t(kMaxVertexBuffers));
-        mLimits.v1.maxInterStageShaderComponents =
-            std::min(mLimits.v1.maxInterStageShaderComponents, kMaxInterStageShaderComponents);
-        mLimits.v1.maxSampledTexturesPerShaderStage = std::min(
-            mLimits.v1.maxSampledTexturesPerShaderStage, kMaxSampledTexturesPerShaderStage);
-        mLimits.v1.maxSamplersPerShaderStage =
-            std::min(mLimits.v1.maxSamplersPerShaderStage, kMaxSamplersPerShaderStage);
-        mLimits.v1.maxStorageBuffersPerShaderStage =
-            std::min(mLimits.v1.maxStorageBuffersPerShaderStage, kMaxStorageBuffersPerShaderStage);
-        mLimits.v1.maxStorageTexturesPerShaderStage = std::min(
-            mLimits.v1.maxStorageTexturesPerShaderStage, kMaxStorageTexturesPerShaderStage);
-        mLimits.v1.maxUniformBuffersPerShaderStage =
-            std::min(mLimits.v1.maxUniformBuffersPerShaderStage, kMaxUniformBuffersPerShaderStage);
-        mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout =
-            std::min(mLimits.v1.maxDynamicUniformBuffersPerPipelineLayout,
-                     kMaxDynamicUniformBuffersPerPipelineLayout);
-        mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout =
-            std::min(mLimits.v1.maxDynamicStorageBuffersPerPipelineLayout,
-                     kMaxDynamicStorageBuffersPerPipelineLayout);
-
-        return {};
+    auto result = CreateDeviceInternal(descriptor);
+    if (result.IsError()) {
+        mInstance->ConsumedError(result.AcquireError());
+        return nullptr;
     }
+    return result.AcquireSuccess().Detach();
+}
 
-    bool AdapterBase::APIGetLimits(SupportedLimits* limits) const {
-        return GetLimits(limits);
+void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
+                                   WGPURequestDeviceCallback callback,
+                                   void* userdata) {
+    static constexpr DeviceDescriptor kDefaultDescriptor = {};
+    if (descriptor == nullptr) {
+        descriptor = &kDefaultDescriptor;
     }
+    auto result = CreateDeviceInternal(descriptor);
 
-    void AdapterBase::APIGetProperties(AdapterProperties* properties) const {
-        properties->vendorID = mVendorId;
-        properties->deviceID = mDeviceId;
-        properties->name = mName.c_str();
-        properties->driverDescription = mDriverDescription.c_str();
-        properties->adapterType = mAdapterType;
-        properties->backendType = mBackend;
-    }
-
-    bool AdapterBase::APIHasFeature(wgpu::FeatureName feature) const {
-        return mSupportedFeatures.IsEnabled(feature);
-    }
-
-    size_t AdapterBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
-        return mSupportedFeatures.EnumerateFeatures(features);
-    }
-
-    DeviceBase* AdapterBase::APICreateDevice(const DeviceDescriptor* descriptor) {
-        DeviceDescriptor defaultDesc = {};
-        if (descriptor == nullptr) {
-            descriptor = &defaultDesc;
-        }
-        auto result = CreateDeviceInternal(descriptor);
-        if (result.IsError()) {
-            mInstance->ConsumedError(result.AcquireError());
-            return nullptr;
-        }
-        return result.AcquireSuccess().Detach();
-    }
-
-    void AdapterBase::APIRequestDevice(const DeviceDescriptor* descriptor,
-                                       WGPURequestDeviceCallback callback,
-                                       void* userdata) {
-        static constexpr DeviceDescriptor kDefaultDescriptor = {};
-        if (descriptor == nullptr) {
-            descriptor = &kDefaultDescriptor;
-        }
-        auto result = CreateDeviceInternal(descriptor);
-
-        if (result.IsError()) {
-            std::unique_ptr<ErrorData> errorData = result.AcquireError();
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPURequestDeviceStatus_Error, nullptr,
-                     errorData->GetFormattedMessage().c_str(), userdata);
-            return;
-        }
-
-        Ref<DeviceBase> device = result.AcquireSuccess();
-
-        WGPURequestDeviceStatus status =
-            device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+    if (result.IsError()) {
+        std::unique_ptr<ErrorData> errorData = result.AcquireError();
         // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        callback(status, ToAPI(device.Detach()), nullptr, userdata);
+        callback(WGPURequestDeviceStatus_Error, nullptr, errorData->GetFormattedMessage().c_str(),
+                 userdata);
+        return;
     }
 
-    uint32_t AdapterBase::GetVendorId() const {
-        return mVendorId;
-    }
+    Ref<DeviceBase> device = result.AcquireSuccess();
 
-    uint32_t AdapterBase::GetDeviceId() const {
-        return mDeviceId;
-    }
+    WGPURequestDeviceStatus status =
+        device == nullptr ? WGPURequestDeviceStatus_Unknown : WGPURequestDeviceStatus_Success;
+    // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+    callback(status, ToAPI(device.Detach()), nullptr, userdata);
+}
 
-    wgpu::BackendType AdapterBase::GetBackendType() const {
-        return mBackend;
-    }
+uint32_t AdapterBase::GetVendorId() const {
+    return mVendorId;
+}
 
-    InstanceBase* AdapterBase::GetInstance() const {
-        return mInstance;
-    }
+uint32_t AdapterBase::GetDeviceId() const {
+    return mDeviceId;
+}
 
-    FeaturesSet AdapterBase::GetSupportedFeatures() const {
-        return mSupportedFeatures;
-    }
+wgpu::BackendType AdapterBase::GetBackendType() const {
+    return mBackend;
+}
 
-    bool AdapterBase::SupportsAllRequiredFeatures(
-        const ityp::span<size_t, const wgpu::FeatureName>& features) const {
-        for (wgpu::FeatureName f : features) {
-            if (!mSupportedFeatures.IsEnabled(f)) {
-                return false;
-            }
-        }
-        return true;
-    }
+InstanceBase* AdapterBase::GetInstance() const {
+    return mInstance;
+}
 
-    WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
-        WGPUDeviceProperties adapterProperties = {};
-        adapterProperties.deviceID = mDeviceId;
-        adapterProperties.vendorID = mVendorId;
-        adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
+FeaturesSet AdapterBase::GetSupportedFeatures() const {
+    return mSupportedFeatures;
+}
 
-        mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
-        // This is OK for now because there are no limit feature structs.
-        // If we add additional structs, the caller will need to provide memory
-        // to store them (ex. by calling GetLimits directly instead). Currently,
-        // we keep this function as it's only used internally in Chromium to
-        // send the adapter properties across the wire.
-        GetLimits(FromAPI(&adapterProperties.limits));
-        return adapterProperties;
-    }
-
-    bool AdapterBase::GetLimits(SupportedLimits* limits) const {
-        ASSERT(limits != nullptr);
-        if (limits->nextInChain != nullptr) {
+bool AdapterBase::SupportsAllRequiredFeatures(
+    const ityp::span<size_t, const wgpu::FeatureName>& features) const {
+    for (wgpu::FeatureName f : features) {
+        if (!mSupportedFeatures.IsEnabled(f)) {
             return false;
         }
-        if (mUseTieredLimits) {
-            limits->limits = ApplyLimitTiers(mLimits.v1);
-        } else {
-            limits->limits = mLimits.v1;
-        }
-        return true;
+    }
+    return true;
+}
+
+WGPUDeviceProperties AdapterBase::GetAdapterProperties() const {
+    WGPUDeviceProperties adapterProperties = {};
+    adapterProperties.deviceID = mDeviceId;
+    adapterProperties.vendorID = mVendorId;
+    adapterProperties.adapterType = static_cast<WGPUAdapterType>(mAdapterType);
+
+    mSupportedFeatures.InitializeDeviceProperties(&adapterProperties);
+    // This is OK for now because there are no limit feature structs.
+    // If we add additional structs, the caller will need to provide memory
+    // to store them (ex. by calling GetLimits directly instead). Currently,
+    // we keep this function as it's only used internally in Chromium to
+    // send the adapter properties across the wire.
+    GetLimits(FromAPI(&adapterProperties.limits));
+    return adapterProperties;
+}
+
+bool AdapterBase::GetLimits(SupportedLimits* limits) const {
+    ASSERT(limits != nullptr);
+    if (limits->nextInChain != nullptr) {
+        return false;
+    }
+    if (mUseTieredLimits) {
+        limits->limits = ApplyLimitTiers(mLimits.v1);
+    } else {
+        limits->limits = mLimits.v1;
+    }
+    return true;
+}
+
+ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
+    const DeviceDescriptor* descriptor) {
+    ASSERT(descriptor != nullptr);
+
+    for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
+        wgpu::FeatureName f = descriptor->requiredFeatures[i];
+        DAWN_TRY(ValidateFeatureName(f));
+        DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f), "Requested feature %s is not supported.",
+                        f);
     }
 
-    ResultOrError<Ref<DeviceBase>> AdapterBase::CreateDeviceInternal(
-        const DeviceDescriptor* descriptor) {
-        ASSERT(descriptor != nullptr);
+    if (descriptor->requiredLimits != nullptr) {
+        DAWN_TRY_CONTEXT(ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
+                                        descriptor->requiredLimits->limits),
+                         "validating required limits");
 
-        for (uint32_t i = 0; i < descriptor->requiredFeaturesCount; ++i) {
-            wgpu::FeatureName f = descriptor->requiredFeatures[i];
-            DAWN_TRY(ValidateFeatureName(f));
-            DAWN_INVALID_IF(!mSupportedFeatures.IsEnabled(f),
-                            "Requested feature %s is not supported.", f);
-        }
-
-        if (descriptor->requiredLimits != nullptr) {
-            DAWN_TRY_CONTEXT(
-                ValidateLimits(mUseTieredLimits ? ApplyLimitTiers(mLimits.v1) : mLimits.v1,
-                               descriptor->requiredLimits->limits),
-                "validating required limits");
-
-            DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
-                            "nextInChain is not nullptr.");
-        }
-        return CreateDeviceImpl(descriptor);
+        DAWN_INVALID_IF(descriptor->requiredLimits->nextInChain != nullptr,
+                        "nextInChain is not nullptr.");
     }
+    return CreateDeviceImpl(descriptor);
+}
 
-    void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
-        mUseTieredLimits = useTieredLimits;
-    }
+void AdapterBase::SetUseTieredLimits(bool useTieredLimits) {
+    mUseTieredLimits = useTieredLimits;
+}
 
-    void AdapterBase::ResetInternalDeviceForTesting() {
-        mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
-    }
+void AdapterBase::ResetInternalDeviceForTesting() {
+    mInstance->ConsumedError(ResetInternalDeviceForTestingImpl());
+}
 
-    MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
-        return DAWN_INTERNAL_ERROR(
-            "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
-    }
+MaybeError AdapterBase::ResetInternalDeviceForTestingImpl() {
+    return DAWN_INTERNAL_ERROR(
+        "ResetInternalDeviceForTesting should only be used with the D3D12 backend.");
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Adapter.h b/src/dawn/native/Adapter.h
index d0f7cf1..9a1b24f 100644
--- a/src/dawn/native/Adapter.h
+++ b/src/dawn/native/Adapter.h
@@ -28,71 +28,70 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    class AdapterBase : public RefCounted {
-      public:
-        AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
-        virtual ~AdapterBase() = default;
+class AdapterBase : public RefCounted {
+  public:
+    AdapterBase(InstanceBase* instance, wgpu::BackendType backend);
+    virtual ~AdapterBase() = default;
 
-        MaybeError Initialize();
+    MaybeError Initialize();
 
-        // WebGPU API
-        bool APIGetLimits(SupportedLimits* limits) const;
-        void APIGetProperties(AdapterProperties* properties) const;
-        bool APIHasFeature(wgpu::FeatureName feature) const;
-        size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
-        void APIRequestDevice(const DeviceDescriptor* descriptor,
-                              WGPURequestDeviceCallback callback,
-                              void* userdata);
-        DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
+    // WebGPU API
+    bool APIGetLimits(SupportedLimits* limits) const;
+    void APIGetProperties(AdapterProperties* properties) const;
+    bool APIHasFeature(wgpu::FeatureName feature) const;
+    size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+    void APIRequestDevice(const DeviceDescriptor* descriptor,
+                          WGPURequestDeviceCallback callback,
+                          void* userdata);
+    DeviceBase* APICreateDevice(const DeviceDescriptor* descriptor = nullptr);
 
-        uint32_t GetVendorId() const;
-        uint32_t GetDeviceId() const;
-        wgpu::BackendType GetBackendType() const;
-        InstanceBase* GetInstance() const;
+    uint32_t GetVendorId() const;
+    uint32_t GetDeviceId() const;
+    wgpu::BackendType GetBackendType() const;
+    InstanceBase* GetInstance() const;
 
-        void ResetInternalDeviceForTesting();
+    void ResetInternalDeviceForTesting();
 
-        FeaturesSet GetSupportedFeatures() const;
-        bool SupportsAllRequiredFeatures(
-            const ityp::span<size_t, const wgpu::FeatureName>& features) const;
-        WGPUDeviceProperties GetAdapterProperties() const;
+    FeaturesSet GetSupportedFeatures() const;
+    bool SupportsAllRequiredFeatures(
+        const ityp::span<size_t, const wgpu::FeatureName>& features) const;
+    WGPUDeviceProperties GetAdapterProperties() const;
 
-        bool GetLimits(SupportedLimits* limits) const;
+    bool GetLimits(SupportedLimits* limits) const;
 
-        void SetUseTieredLimits(bool useTieredLimits);
+    void SetUseTieredLimits(bool useTieredLimits);
 
-        virtual bool SupportsExternalImages() const = 0;
+    virtual bool SupportsExternalImages() const = 0;
 
-      protected:
-        uint32_t mVendorId = 0xFFFFFFFF;
-        uint32_t mDeviceId = 0xFFFFFFFF;
-        std::string mName;
-        wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
-        std::string mDriverDescription;
-        FeaturesSet mSupportedFeatures;
+  protected:
+    uint32_t mVendorId = 0xFFFFFFFF;
+    uint32_t mDeviceId = 0xFFFFFFFF;
+    std::string mName;
+    wgpu::AdapterType mAdapterType = wgpu::AdapterType::Unknown;
+    std::string mDriverDescription;
+    FeaturesSet mSupportedFeatures;
 
-      private:
-        virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-            const DeviceDescriptor* descriptor) = 0;
+  private:
+    virtual ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) = 0;
 
-        virtual MaybeError InitializeImpl() = 0;
+    virtual MaybeError InitializeImpl() = 0;
 
-        // Check base WebGPU features and discover supported featurees.
-        virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
+    // Check base WebGPU features and discover supported features.
+    virtual MaybeError InitializeSupportedFeaturesImpl() = 0;
 
-        // Check base WebGPU limits and populate supported limits.
-        virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
+    // Check base WebGPU limits and populate supported limits.
+    virtual MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) = 0;
 
-        ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
+    ResultOrError<Ref<DeviceBase>> CreateDeviceInternal(const DeviceDescriptor* descriptor);
 
-        virtual MaybeError ResetInternalDeviceForTestingImpl();
-        InstanceBase* mInstance = nullptr;
-        wgpu::BackendType mBackend;
-        CombinedLimits mLimits;
-        bool mUseTieredLimits = false;
-    };
+    virtual MaybeError ResetInternalDeviceForTestingImpl();
+    InstanceBase* mInstance = nullptr;
+    wgpu::BackendType mBackend;
+    CombinedLimits mLimits;
+    bool mUseTieredLimits = false;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/AsyncTask.cpp b/src/dawn/native/AsyncTask.cpp
index b1af966..00e6a64 100644
--- a/src/dawn/native/AsyncTask.cpp
+++ b/src/dawn/native/AsyncTask.cpp
@@ -20,62 +20,61 @@
 
 namespace dawn::native {
 
-    AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
-        : mWorkerTaskPool(workerTaskPool) {
-    }
+AsyncTaskManager::AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool)
+    : mWorkerTaskPool(workerTaskPool) {}
 
-    void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
-        // If these allocations becomes expensive, we can slab-allocate tasks.
-        Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
-        waitableTask->taskManager = this;
-        waitableTask->asyncTask = std::move(asyncTask);
+void AsyncTaskManager::PostTask(AsyncTask asyncTask) {
+    // If these allocations become expensive, we can slab-allocate tasks.
+    Ref<WaitableTask> waitableTask = AcquireRef(new WaitableTask());
+    waitableTask->taskManager = this;
+    waitableTask->asyncTask = std::move(asyncTask);
 
-        {
-            // We insert new waitableTask objects into mPendingTasks in main thread (PostTask()),
-            // and we may remove waitableTask objects from mPendingTasks in either main thread
-            // (WaitAllPendingTasks()) or sub-thread (TaskCompleted), so mPendingTasks should be
-            // protected by a mutex.
-            std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-            mPendingTasks.emplace(waitableTask.Get(), waitableTask);
-        }
-
-        // Ref the task since it is accessed inside the worker function.
-        // The worker function will acquire and release the task upon completion.
-        waitableTask->Reference();
-        waitableTask->waitableEvent =
-            mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
-    }
-
-    void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+    {
+        // We insert new waitableTask objects into mPendingTasks on the main thread (PostTask()),
+        // and we may remove waitableTask objects from mPendingTasks on either the main thread
+        // (WaitAllPendingTasks()) or a worker thread (HandleTaskCompletion()), so mPendingTasks
+        // must be protected by a mutex.
         std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-        auto iter = mPendingTasks.find(task);
-        if (iter != mPendingTasks.end()) {
-            mPendingTasks.erase(iter);
-        }
+        mPendingTasks.emplace(waitableTask.Get(), waitableTask);
     }
 
-    void AsyncTaskManager::WaitAllPendingTasks() {
-        std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
+    // Ref the task since it is accessed inside the worker function.
+    // The worker function will acquire and release the task upon completion.
+    waitableTask->Reference();
+    waitableTask->waitableEvent =
+        mWorkerTaskPool->PostWorkerTask(DoWaitableTask, waitableTask.Get());
+}
 
-        {
-            std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-            allPendingTasks.swap(mPendingTasks);
-        }
-
-        for (auto& [_, task] : allPendingTasks) {
-            task->waitableEvent->Wait();
-        }
+void AsyncTaskManager::HandleTaskCompletion(WaitableTask* task) {
+    std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+    auto iter = mPendingTasks.find(task);
+    if (iter != mPendingTasks.end()) {
+        mPendingTasks.erase(iter);
     }
+}
 
-    bool AsyncTaskManager::HasPendingTasks() {
+void AsyncTaskManager::WaitAllPendingTasks() {
+    std::unordered_map<WaitableTask*, Ref<WaitableTask>> allPendingTasks;
+
+    {
         std::lock_guard<std::mutex> lock(mPendingTasksMutex);
-        return !mPendingTasks.empty();
+        allPendingTasks.swap(mPendingTasks);
     }
 
-    void AsyncTaskManager::DoWaitableTask(void* task) {
-        Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
-        waitableTask->asyncTask();
-        waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+    for (auto& [_, task] : allPendingTasks) {
+        task->waitableEvent->Wait();
     }
+}
+
+bool AsyncTaskManager::HasPendingTasks() {
+    std::lock_guard<std::mutex> lock(mPendingTasksMutex);
+    return !mPendingTasks.empty();
+}
+
+void AsyncTaskManager::DoWaitableTask(void* task) {
+    Ref<WaitableTask> waitableTask = AcquireRef(static_cast<WaitableTask*>(task));
+    waitableTask->asyncTask();
+    waitableTask->taskManager->HandleTaskCompletion(waitableTask.Get());
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/AsyncTask.h b/src/dawn/native/AsyncTask.h
index b71c80e..d2c28fa 100644
--- a/src/dawn/native/AsyncTask.h
+++ b/src/dawn/native/AsyncTask.h
@@ -23,43 +23,43 @@
 #include "dawn/common/RefCounted.h"
 
 namespace dawn::platform {
-    class WaitableEvent;
-    class WorkerTaskPool;
+class WaitableEvent;
+class WorkerTaskPool;
 }  // namespace dawn::platform
 
 namespace dawn::native {
 
-    // TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
-    // Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
-    // shutting down the device. RunNow() could be used for more advanced scenarios, for example
-    // always doing ShaderModule initial compilation asynchronously, but being able to steal the
-    // task if we need it for synchronous pipeline compilation.
-    using AsyncTask = std::function<void()>;
+// TODO(crbug.com/dawn/826): we'll add additional things to AsyncTask in the future, like
+// Cancel() and RunNow(). Cancelling helps avoid running the task's body when we are just
+// shutting down the device. RunNow() could be used for more advanced scenarios, for example
+// always doing ShaderModule initial compilation asynchronously, but being able to steal the
+// task if we need it for synchronous pipeline compilation.
+using AsyncTask = std::function<void()>;
 
-    class AsyncTaskManager {
+class AsyncTaskManager {
+  public:
+    explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
+
+    void PostTask(AsyncTask asyncTask);
+    void WaitAllPendingTasks();
+    bool HasPendingTasks();
+
+  private:
+    class WaitableTask : public RefCounted {
       public:
-        explicit AsyncTaskManager(dawn::platform::WorkerTaskPool* workerTaskPool);
-
-        void PostTask(AsyncTask asyncTask);
-        void WaitAllPendingTasks();
-        bool HasPendingTasks();
-
-      private:
-        class WaitableTask : public RefCounted {
-          public:
-            AsyncTask asyncTask;
-            AsyncTaskManager* taskManager;
-            std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
-        };
-
-        static void DoWaitableTask(void* task);
-        void HandleTaskCompletion(WaitableTask* task);
-
-        std::mutex mPendingTasksMutex;
-        std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
-        dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+        AsyncTask asyncTask;
+        AsyncTaskManager* taskManager;
+        std::unique_ptr<dawn::platform::WaitableEvent> waitableEvent;
     };
 
+    static void DoWaitableTask(void* task);
+    void HandleTaskCompletion(WaitableTask* task);
+
+    std::mutex mPendingTasksMutex;
+    std::unordered_map<WaitableTask*, Ref<WaitableTask>> mPendingTasks;
+    dawn::platform::WorkerTaskPool* mWorkerTaskPool;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_ASYNCTASK_H_
diff --git a/src/dawn/native/AttachmentState.cpp b/src/dawn/native/AttachmentState.cpp
index 1e38d9d..bbb8ecd 100644
--- a/src/dawn/native/AttachmentState.cpp
+++ b/src/dawn/native/AttachmentState.cpp
@@ -21,155 +21,148 @@
 
 namespace dawn::native {
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(
-        const RenderBundleEncoderDescriptor* descriptor)
-        : mSampleCount(descriptor->sampleCount) {
-        ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor)
+    : mSampleCount(descriptor->sampleCount) {
+    ASSERT(descriptor->colorFormatsCount <= kMaxColorAttachments);
+    for (ColorAttachmentIndex i(uint8_t(0));
+         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
+        wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
+        if (format != wgpu::TextureFormat::Undefined) {
+            mColorAttachmentsSet.set(i);
+            mColorFormats[i] = format;
+        }
+    }
+    mDepthStencilFormat = descriptor->depthStencilFormat;
+}
+
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
+    : mSampleCount(descriptor->multisample.count) {
+    if (descriptor->fragment != nullptr) {
+        ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
         for (ColorAttachmentIndex i(uint8_t(0));
-             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorFormatsCount)); ++i) {
-            wgpu::TextureFormat format = descriptor->colorFormats[static_cast<uint8_t>(i)];
+             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
+             ++i) {
+            wgpu::TextureFormat format =
+                descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
             if (format != wgpu::TextureFormat::Undefined) {
                 mColorAttachmentsSet.set(i);
                 mColorFormats[i] = format;
             }
         }
-        mDepthStencilFormat = descriptor->depthStencilFormat;
     }
+    if (descriptor->depthStencil != nullptr) {
+        mDepthStencilFormat = descriptor->depthStencil->format;
+    }
+}
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor)
-        : mSampleCount(descriptor->multisample.count) {
-        if (descriptor->fragment != nullptr) {
-            ASSERT(descriptor->fragment->targetCount <= kMaxColorAttachments);
-            for (ColorAttachmentIndex i(uint8_t(0));
-                 i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->fragment->targetCount));
-                 ++i) {
-                wgpu::TextureFormat format =
-                    descriptor->fragment->targets[static_cast<uint8_t>(i)].format;
-                if (format != wgpu::TextureFormat::Undefined) {
-                    mColorAttachmentsSet.set(i);
-                    mColorFormats[i] = format;
-                }
-            }
+AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
+    for (ColorAttachmentIndex i(uint8_t(0));
+         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount)); ++i) {
+        TextureViewBase* attachment = descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
+        if (attachment == nullptr) {
+            continue;
         }
-        if (descriptor->depthStencil != nullptr) {
-            mDepthStencilFormat = descriptor->depthStencil->format;
+        mColorAttachmentsSet.set(i);
+        mColorFormats[i] = attachment->GetFormat().format;
+        if (mSampleCount == 0) {
+            mSampleCount = attachment->GetTexture()->GetSampleCount();
+        } else {
+            ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
         }
     }
+    if (descriptor->depthStencilAttachment != nullptr) {
+        TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
+        mDepthStencilFormat = attachment->GetFormat().format;
+        if (mSampleCount == 0) {
+            mSampleCount = attachment->GetTexture()->GetSampleCount();
+        } else {
+            ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
+        }
+    }
+    ASSERT(mSampleCount > 0);
+}
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(const RenderPassDescriptor* descriptor) {
-        for (ColorAttachmentIndex i(uint8_t(0));
-             i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->colorAttachmentCount));
-             ++i) {
-            TextureViewBase* attachment =
-                descriptor->colorAttachments[static_cast<uint8_t>(i)].view;
-            if (attachment == nullptr) {
-                continue;
-            }
-            mColorAttachmentsSet.set(i);
-            mColorFormats[i] = attachment->GetFormat().format;
-            if (mSampleCount == 0) {
-                mSampleCount = attachment->GetTexture()->GetSampleCount();
-            } else {
-                ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
-            }
-        }
-        if (descriptor->depthStencilAttachment != nullptr) {
-            TextureViewBase* attachment = descriptor->depthStencilAttachment->view;
-            mDepthStencilFormat = attachment->GetFormat().format;
-            if (mSampleCount == 0) {
-                mSampleCount = attachment->GetTexture()->GetSampleCount();
-            } else {
-                ASSERT(mSampleCount == attachment->GetTexture()->GetSampleCount());
-            }
-        }
-        ASSERT(mSampleCount > 0);
+AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) = default;
+
+size_t AttachmentStateBlueprint::HashFunc::operator()(
+    const AttachmentStateBlueprint* attachmentState) const {
+    size_t hash = 0;
+
+    // Hash color formats
+    HashCombine(&hash, attachmentState->mColorAttachmentsSet);
+    for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
+        HashCombine(&hash, attachmentState->mColorFormats[i]);
     }
 
-    AttachmentStateBlueprint::AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs) =
-        default;
+    // Hash depth stencil attachment
+    HashCombine(&hash, attachmentState->mDepthStencilFormat);
 
-    size_t AttachmentStateBlueprint::HashFunc::operator()(
-        const AttachmentStateBlueprint* attachmentState) const {
-        size_t hash = 0;
+    // Hash sample count
+    HashCombine(&hash, attachmentState->mSampleCount);
 
-        // Hash color formats
-        HashCombine(&hash, attachmentState->mColorAttachmentsSet);
-        for (ColorAttachmentIndex i : IterateBitSet(attachmentState->mColorAttachmentsSet)) {
-            HashCombine(&hash, attachmentState->mColorFormats[i]);
-        }
+    return hash;
+}
 
-        // Hash depth stencil attachment
-        HashCombine(&hash, attachmentState->mDepthStencilFormat);
-
-        // Hash sample count
-        HashCombine(&hash, attachmentState->mSampleCount);
-
-        return hash;
+bool AttachmentStateBlueprint::EqualityFunc::operator()(const AttachmentStateBlueprint* a,
+                                                        const AttachmentStateBlueprint* b) const {
+    // Check set attachments
+    if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
+        return false;
     }
 
-    bool AttachmentStateBlueprint::EqualityFunc::operator()(
-        const AttachmentStateBlueprint* a,
-        const AttachmentStateBlueprint* b) const {
-        // Check set attachments
-        if (a->mColorAttachmentsSet != b->mColorAttachmentsSet) {
+    // Check color formats
+    for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
+        if (a->mColorFormats[i] != b->mColorFormats[i]) {
             return false;
         }
-
-        // Check color formats
-        for (ColorAttachmentIndex i : IterateBitSet(a->mColorAttachmentsSet)) {
-            if (a->mColorFormats[i] != b->mColorFormats[i]) {
-                return false;
-            }
-        }
-
-        // Check depth stencil format
-        if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
-            return false;
-        }
-
-        // Check sample count
-        if (a->mSampleCount != b->mSampleCount) {
-            return false;
-        }
-
-        return true;
     }
 
-    AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
-        : AttachmentStateBlueprint(blueprint), ObjectBase(device) {
+    // Check depth stencil format
+    if (a->mDepthStencilFormat != b->mDepthStencilFormat) {
+        return false;
     }
 
-    AttachmentState::~AttachmentState() {
-        GetDevice()->UncacheAttachmentState(this);
+    // Check sample count
+    if (a->mSampleCount != b->mSampleCount) {
+        return false;
     }
 
-    size_t AttachmentState::ComputeContentHash() {
-        // TODO(dawn:549): skip this traversal and reuse the blueprint.
-        return AttachmentStateBlueprint::HashFunc()(this);
-    }
+    return true;
+}
 
-    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
-    AttachmentState::GetColorAttachmentsMask() const {
-        return mColorAttachmentsSet;
-    }
+AttachmentState::AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint)
+    : AttachmentStateBlueprint(blueprint), ObjectBase(device) {}
 
-    wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(
-        ColorAttachmentIndex index) const {
-        ASSERT(mColorAttachmentsSet[index]);
-        return mColorFormats[index];
-    }
+AttachmentState::~AttachmentState() {
+    GetDevice()->UncacheAttachmentState(this);
+}
 
-    bool AttachmentState::HasDepthStencilAttachment() const {
-        return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
-    }
+size_t AttachmentState::ComputeContentHash() {
+    // TODO(dawn:549): skip this traversal and reuse the blueprint.
+    return AttachmentStateBlueprint::HashFunc()(this);
+}
 
-    wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
-        ASSERT(HasDepthStencilAttachment());
-        return mDepthStencilFormat;
-    }
+ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> AttachmentState::GetColorAttachmentsMask()
+    const {
+    return mColorAttachmentsSet;
+}
 
-    uint32_t AttachmentState::GetSampleCount() const {
-        return mSampleCount;
-    }
+wgpu::TextureFormat AttachmentState::GetColorAttachmentFormat(ColorAttachmentIndex index) const {
+    ASSERT(mColorAttachmentsSet[index]);
+    return mColorFormats[index];
+}
+
+bool AttachmentState::HasDepthStencilAttachment() const {
+    return mDepthStencilFormat != wgpu::TextureFormat::Undefined;
+}
+
+wgpu::TextureFormat AttachmentState::GetDepthStencilFormat() const {
+    ASSERT(HasDepthStencilAttachment());
+    return mDepthStencilFormat;
+}
+
+uint32_t AttachmentState::GetSampleCount() const {
+    return mSampleCount;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/AttachmentState.h b/src/dawn/native/AttachmentState.h
index c1e6445..815ce29 100644
--- a/src/dawn/native/AttachmentState.h
+++ b/src/dawn/native/AttachmentState.h
@@ -29,54 +29,53 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    // AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
-    // can be constructed by copying the blueprint state instead of traversing descriptors.
-    // Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
-    class AttachmentStateBlueprint {
-      public:
-        // Note: Descriptors must be validated before the AttachmentState is constructed.
-        explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
-        explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
-        explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
+// AttachmentStateBlueprint and AttachmentState are separated so the AttachmentState
+// can be constructed by copying the blueprint state instead of traversing descriptors.
+// Also, AttachmentStateBlueprint does not need a refcount like AttachmentState.
+class AttachmentStateBlueprint {
+  public:
+    // Note: Descriptors must be validated before the AttachmentState is constructed.
+    explicit AttachmentStateBlueprint(const RenderBundleEncoderDescriptor* descriptor);
+    explicit AttachmentStateBlueprint(const RenderPipelineDescriptor* descriptor);
+    explicit AttachmentStateBlueprint(const RenderPassDescriptor* descriptor);
 
-        AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
+    AttachmentStateBlueprint(const AttachmentStateBlueprint& rhs);
 
-        // Functors necessary for the unordered_set<AttachmentState*>-based cache.
-        struct HashFunc {
-            size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
-        };
-        struct EqualityFunc {
-            bool operator()(const AttachmentStateBlueprint* a,
-                            const AttachmentStateBlueprint* b) const;
-        };
-
-      protected:
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
-        ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
-        // Default (texture format Undefined) indicates there is no depth stencil attachment.
-        wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
-        uint32_t mSampleCount = 0;
+    // Functors necessary for the unordered_set<AttachmentState*>-based cache.
+    struct HashFunc {
+        size_t operator()(const AttachmentStateBlueprint* attachmentState) const;
+    };
+    struct EqualityFunc {
+        bool operator()(const AttachmentStateBlueprint* a, const AttachmentStateBlueprint* b) const;
     };
 
-    class AttachmentState final : public AttachmentStateBlueprint,
-                                  public ObjectBase,
-                                  public CachedObject {
-      public:
-        AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
+  protected:
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> mColorAttachmentsSet;
+    ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> mColorFormats;
+    // Default (texture format Undefined) indicates there is no depth stencil attachment.
+    wgpu::TextureFormat mDepthStencilFormat = wgpu::TextureFormat::Undefined;
+    uint32_t mSampleCount = 0;
+};
 
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
-        wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
-        bool HasDepthStencilAttachment() const;
-        wgpu::TextureFormat GetDepthStencilFormat() const;
-        uint32_t GetSampleCount() const;
+class AttachmentState final : public AttachmentStateBlueprint,
+                              public ObjectBase,
+                              public CachedObject {
+  public:
+    AttachmentState(DeviceBase* device, const AttachmentStateBlueprint& blueprint);
 
-        size_t ComputeContentHash() override;
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+    wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex index) const;
+    bool HasDepthStencilAttachment() const;
+    wgpu::TextureFormat GetDepthStencilFormat() const;
+    uint32_t GetSampleCount() const;
 
-      private:
-        ~AttachmentState() override;
-    };
+    size_t ComputeContentHash() override;
+
+  private:
+    ~AttachmentState() override;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BackendConnection.cpp b/src/dawn/native/BackendConnection.cpp
index abcc271..0c54731 100644
--- a/src/dawn/native/BackendConnection.cpp
+++ b/src/dawn/native/BackendConnection.cpp
@@ -16,21 +16,20 @@
 
 namespace dawn::native {
 
-    BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
-        : mInstance(instance), mType(type) {
-    }
+BackendConnection::BackendConnection(InstanceBase* instance, wgpu::BackendType type)
+    : mInstance(instance), mType(type) {}
 
-    wgpu::BackendType BackendConnection::GetType() const {
-        return mType;
-    }
+wgpu::BackendType BackendConnection::GetType() const {
+    return mType;
+}
 
-    InstanceBase* BackendConnection::GetInstance() const {
-        return mInstance;
-    }
+InstanceBase* BackendConnection::GetInstance() const {
+    return mInstance;
+}
 
-    ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
-        const AdapterDiscoveryOptionsBase* options) {
-        return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
-    }
+ResultOrError<std::vector<Ref<AdapterBase>>> BackendConnection::DiscoverAdapters(
+    const AdapterDiscoveryOptionsBase* options) {
+    return DAWN_FORMAT_VALIDATION_ERROR("DiscoverAdapters not implemented for this backend.");
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BackendConnection.h b/src/dawn/native/BackendConnection.h
index d9f5dac..04fe35b 100644
--- a/src/dawn/native/BackendConnection.h
+++ b/src/dawn/native/BackendConnection.h
@@ -23,28 +23,28 @@
 
 namespace dawn::native {
 
-    // An common interface for all backends. Mostly used to create adapters for a particular
-    // backend.
-    class BackendConnection {
-      public:
-        BackendConnection(InstanceBase* instance, wgpu::BackendType type);
-        virtual ~BackendConnection() = default;
+// An common interface for all backends. Mostly used to create adapters for a particular
+// backend.
+class BackendConnection {
+  public:
+    BackendConnection(InstanceBase* instance, wgpu::BackendType type);
+    virtual ~BackendConnection() = default;
 
-        wgpu::BackendType GetType() const;
-        InstanceBase* GetInstance() const;
+    wgpu::BackendType GetType() const;
+    InstanceBase* GetInstance() const;
 
-        // Returns all the adapters for the system that can be created by the backend, without extra
-        // options (such as debug adapters, custom driver libraries, etc.)
-        virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
+    // Returns all the adapters for the system that can be created by the backend, without extra
+    // options (such as debug adapters, custom driver libraries, etc.)
+    virtual std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() = 0;
 
-        // Returns new adapters created with the backend-specific options.
-        virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
-            const AdapterDiscoveryOptionsBase* options);
+    // Returns new adapters created with the backend-specific options.
+    virtual ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* options);
 
-      private:
-        InstanceBase* mInstance = nullptr;
-        wgpu::BackendType mType;
-    };
+  private:
+    InstanceBase* mInstance = nullptr;
+    wgpu::BackendType mType;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BindGroup.cpp b/src/dawn/native/BindGroup.cpp
index df91fe7..802213c 100644
--- a/src/dawn/native/BindGroup.cpp
+++ b/src/dawn/native/BindGroup.cpp
@@ -29,517 +29,498 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        // Helper functions to perform binding-type specific validation
+// Helper functions to perform binding-type specific validation
 
-        MaybeError ValidateBufferBinding(const DeviceBase* device,
-                                         const BindGroupEntry& entry,
-                                         const BindingInfo& bindingInfo) {
-            DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
+MaybeError ValidateBufferBinding(const DeviceBase* device,
+                                 const BindGroupEntry& entry,
+                                 const BindingInfo& bindingInfo) {
+    DAWN_INVALID_IF(entry.buffer == nullptr, "Binding entry buffer not set.");
 
-            DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
-                            "Expected only buffer to be set for binding entry.");
+    DAWN_INVALID_IF(entry.sampler != nullptr || entry.textureView != nullptr,
+                    "Expected only buffer to be set for binding entry.");
 
-            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
 
-            DAWN_TRY(device->ValidateObject(entry.buffer));
+    DAWN_TRY(device->ValidateObject(entry.buffer));
 
-            ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+    ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
 
-            wgpu::BufferUsage requiredUsage;
-            uint64_t maxBindingSize;
-            uint64_t requiredBindingAlignment;
-            switch (bindingInfo.buffer.type) {
-                case wgpu::BufferBindingType::Uniform:
-                    requiredUsage = wgpu::BufferUsage::Uniform;
-                    maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
-                    requiredBindingAlignment =
-                        device->GetLimits().v1.minUniformBufferOffsetAlignment;
-                    break;
-                case wgpu::BufferBindingType::Storage:
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                    requiredUsage = wgpu::BufferUsage::Storage;
-                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
-                    requiredBindingAlignment =
-                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
-                    break;
-                case kInternalStorageBufferBinding:
-                    requiredUsage = kInternalStorageBuffer;
-                    maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
-                    requiredBindingAlignment =
-                        device->GetLimits().v1.minStorageBufferOffsetAlignment;
-                    break;
-                case wgpu::BufferBindingType::Undefined:
-                    UNREACHABLE();
-            }
+    wgpu::BufferUsage requiredUsage;
+    uint64_t maxBindingSize;
+    uint64_t requiredBindingAlignment;
+    switch (bindingInfo.buffer.type) {
+        case wgpu::BufferBindingType::Uniform:
+            requiredUsage = wgpu::BufferUsage::Uniform;
+            maxBindingSize = device->GetLimits().v1.maxUniformBufferBindingSize;
+            requiredBindingAlignment = device->GetLimits().v1.minUniformBufferOffsetAlignment;
+            break;
+        case wgpu::BufferBindingType::Storage:
+        case wgpu::BufferBindingType::ReadOnlyStorage:
+            requiredUsage = wgpu::BufferUsage::Storage;
+            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
+            break;
+        case kInternalStorageBufferBinding:
+            requiredUsage = kInternalStorageBuffer;
+            maxBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+            requiredBindingAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
+            break;
+        case wgpu::BufferBindingType::Undefined:
+            UNREACHABLE();
+    }
 
-            uint64_t bufferSize = entry.buffer->GetSize();
+    uint64_t bufferSize = entry.buffer->GetSize();
 
-            // Handle wgpu::WholeSize, avoiding overflows.
-            DAWN_INVALID_IF(entry.offset > bufferSize,
-                            "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
-                            bufferSize, entry.buffer);
+    // Handle wgpu::WholeSize, avoiding overflows.
+    DAWN_INVALID_IF(entry.offset > bufferSize,
+                    "Binding offset (%u) is larger than the size (%u) of %s.", entry.offset,
+                    bufferSize, entry.buffer);
 
-            uint64_t bindingSize =
-                (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
+    uint64_t bindingSize =
+        (entry.size == wgpu::kWholeSize) ? bufferSize - entry.offset : entry.size;
 
-            DAWN_INVALID_IF(bindingSize > bufferSize,
-                            "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
-                            bufferSize, entry.buffer);
+    DAWN_INVALID_IF(bindingSize > bufferSize,
+                    "Binding size (%u) is larger than the size (%u) of %s.", bindingSize,
+                    bufferSize, entry.buffer);
 
-            DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
+    DAWN_INVALID_IF(bindingSize == 0, "Binding size is zero");
 
-            // Note that no overflow can happen because we already checked that
-            // bufferSize >= bindingSize
-            DAWN_INVALID_IF(
-                entry.offset > bufferSize - bindingSize,
-                "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
-                entry.offset, bufferSize, bindingSize, entry.buffer);
+    // Note that no overflow can happen because we already checked that
+    // bufferSize >= bindingSize
+    DAWN_INVALID_IF(entry.offset > bufferSize - bindingSize,
+                    "Binding range (offset: %u, size: %u) doesn't fit in the size (%u) of %s.",
+                    entry.offset, bufferSize, bindingSize, entry.buffer);
 
-            DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
-                            "Offset (%u) does not satisfy the minimum %s alignment (%u).",
-                            entry.offset, bindingInfo.buffer.type, requiredBindingAlignment);
+    DAWN_INVALID_IF(!IsAligned(entry.offset, requiredBindingAlignment),
+                    "Offset (%u) does not satisfy the minimum %s alignment (%u).", entry.offset,
+                    bindingInfo.buffer.type, requiredBindingAlignment);
 
-            DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
-                            "Binding usage (%s) of %s doesn't match expected usage (%s).",
-                            entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);
+    DAWN_INVALID_IF(!(entry.buffer->GetUsage() & requiredUsage),
+                    "Binding usage (%s) of %s doesn't match expected usage (%s).",
+                    entry.buffer->GetUsageExternalOnly(), entry.buffer, requiredUsage);
 
-            DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
-                            "Binding size (%u) is smaller than the minimum binding size (%u).",
-                            bindingSize, bindingInfo.buffer.minBindingSize);
+    DAWN_INVALID_IF(bindingSize < bindingInfo.buffer.minBindingSize,
+                    "Binding size (%u) is smaller than the minimum binding size (%u).", bindingSize,
+                    bindingInfo.buffer.minBindingSize);
 
-            DAWN_INVALID_IF(bindingSize > maxBindingSize,
-                            "Binding size (%u) is larger than the maximum binding size (%u).",
-                            bindingSize, maxBindingSize);
+    DAWN_INVALID_IF(bindingSize > maxBindingSize,
+                    "Binding size (%u) is larger than the maximum binding size (%u).", bindingSize,
+                    maxBindingSize);
 
-            return {};
-        }
+    return {};
+}
 
-        MaybeError ValidateTextureBinding(DeviceBase* device,
-                                          const BindGroupEntry& entry,
-                                          const BindingInfo& bindingInfo) {
-            DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
+MaybeError ValidateTextureBinding(DeviceBase* device,
+                                  const BindGroupEntry& entry,
+                                  const BindingInfo& bindingInfo) {
+    DAWN_INVALID_IF(entry.textureView == nullptr, "Binding entry textureView not set.");
 
-            DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
-                            "Expected only textureView to be set for binding entry.");
+    DAWN_INVALID_IF(entry.sampler != nullptr || entry.buffer != nullptr,
+                    "Expected only textureView to be set for binding entry.");
 
-            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
 
-            DAWN_TRY(device->ValidateObject(entry.textureView));
+    DAWN_TRY(device->ValidateObject(entry.textureView));
 
-            TextureViewBase* view = entry.textureView;
+    TextureViewBase* view = entry.textureView;
 
-            Aspect aspect = view->GetAspects();
-            DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect,
-                            view);
+    Aspect aspect = view->GetAspects();
+    DAWN_INVALID_IF(!HasOneBit(aspect), "Multiple aspects (%s) selected in %s.", aspect, view);
 
-            TextureBase* texture = view->GetTexture();
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Texture: {
-                    SampleTypeBit supportedTypes =
-                        texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
-                    SampleTypeBit requiredType =
-                        SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
+    TextureBase* texture = view->GetTexture();
+    switch (bindingInfo.bindingType) {
+        case BindingInfoType::Texture: {
+            SampleTypeBit supportedTypes =
+                texture->GetFormat().GetAspectInfo(aspect).supportedSampleTypes;
+            SampleTypeBit requiredType = SampleTypeToSampleTypeBit(bindingInfo.texture.sampleType);
 
-                    DAWN_INVALID_IF(
-                        !(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
-                        "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
-                        texture->GetUsage(), texture);
+            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::TextureBinding),
+                            "Usage (%s) of %s doesn't include TextureUsage::TextureBinding.",
+                            texture->GetUsage(), texture);
 
-                    DAWN_INVALID_IF(
-                        texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
-                        "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
-                        texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
-
-                    DAWN_INVALID_IF(
-                        (supportedTypes & requiredType) == 0,
-                        "None of the supported sample types (%s) of %s match the expected sample "
-                        "types (%s).",
-                        supportedTypes, texture, requiredType);
-
-                    DAWN_INVALID_IF(
-                        entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
-                        "Dimension (%s) of %s doesn't match the expected dimension (%s).",
-                        entry.textureView->GetDimension(), entry.textureView,
-                        bindingInfo.texture.viewDimension);
-                    break;
-                }
-                case BindingInfoType::StorageTexture: {
-                    DAWN_INVALID_IF(
-                        !(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
-                        "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
-                        texture->GetUsage(), texture);
-
-                    ASSERT(!texture->IsMultisampledTexture());
-
-                    DAWN_INVALID_IF(
-                        texture->GetFormat().format != bindingInfo.storageTexture.format,
-                        "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
-                        texture, bindingInfo.storageTexture.format);
-
-                    DAWN_INVALID_IF(
-                        entry.textureView->GetDimension() !=
-                            bindingInfo.storageTexture.viewDimension,
-                        "Dimension (%s) of %s doesn't match the expected dimension (%s).",
-                        entry.textureView->GetDimension(), entry.textureView,
-                        bindingInfo.storageTexture.viewDimension);
-
-                    DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
-                                    "mipLevelCount (%u) of %s expected to be 1.",
-                                    entry.textureView->GetLevelCount(), entry.textureView);
-                    break;
-                }
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateSamplerBinding(const DeviceBase* device,
-                                          const BindGroupEntry& entry,
-                                          const BindingInfo& bindingInfo) {
-            DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
-
-            DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
-                            "Expected only sampler to be set for binding entry.");
-
-            DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
-
-            DAWN_TRY(device->ValidateObject(entry.sampler));
-
-            ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
-
-            switch (bindingInfo.sampler.type) {
-                case wgpu::SamplerBindingType::NonFiltering:
-                    DAWN_INVALID_IF(
-                        entry.sampler->IsFiltering(),
-                        "Filtering sampler %s is incompatible with non-filtering sampler "
-                        "binding.",
-                        entry.sampler);
-                    [[fallthrough]];
-                case wgpu::SamplerBindingType::Filtering:
-                    DAWN_INVALID_IF(
-                        entry.sampler->IsComparison(),
-                        "Comparison sampler %s is incompatible with non-comparison sampler "
-                        "binding.",
-                        entry.sampler);
-                    break;
-                case wgpu::SamplerBindingType::Comparison:
-                    DAWN_INVALID_IF(
-                        !entry.sampler->IsComparison(),
-                        "Non-comparison sampler %s is imcompatible with comparison sampler "
-                        "binding.",
-                        entry.sampler);
-                    break;
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateExternalTextureBinding(
-            const DeviceBase* device,
-            const BindGroupEntry& entry,
-            const ExternalTextureBindingEntry* externalTextureBindingEntry,
-            const ExternalTextureBindingExpansionMap& expansions) {
-            DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
-                            "Binding entry external texture not set.");
+            DAWN_INVALID_IF(texture->IsMultisampledTexture() != bindingInfo.texture.multisampled,
+                            "Sample count (%u) of %s doesn't match expectation (multisampled: %d).",
+                            texture->GetSampleCount(), texture, bindingInfo.texture.multisampled);
 
             DAWN_INVALID_IF(
-                entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
-                "Expected only external texture to be set for binding entry.");
+                (supportedTypes & requiredType) == 0,
+                "None of the supported sample types (%s) of %s match the expected sample "
+                "types (%s).",
+                supportedTypes, texture, requiredType);
+
+            DAWN_INVALID_IF(entry.textureView->GetDimension() != bindingInfo.texture.viewDimension,
+                            "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+                            entry.textureView->GetDimension(), entry.textureView,
+                            bindingInfo.texture.viewDimension);
+            break;
+        }
+        case BindingInfoType::StorageTexture: {
+            DAWN_INVALID_IF(!(texture->GetUsage() & wgpu::TextureUsage::StorageBinding),
+                            "Usage (%s) of %s doesn't include TextureUsage::StorageBinding.",
+                            texture->GetUsage(), texture);
+
+            ASSERT(!texture->IsMultisampledTexture());
+
+            DAWN_INVALID_IF(texture->GetFormat().format != bindingInfo.storageTexture.format,
+                            "Format (%s) of %s expected to be (%s).", texture->GetFormat().format,
+                            texture, bindingInfo.storageTexture.format);
 
             DAWN_INVALID_IF(
-                expansions.find(BindingNumber(entry.binding)) == expansions.end(),
-                "External texture binding entry %u is not present in the bind group layout.",
-                entry.binding);
+                entry.textureView->GetDimension() != bindingInfo.storageTexture.viewDimension,
+                "Dimension (%s) of %s doesn't match the expected dimension (%s).",
+                entry.textureView->GetDimension(), entry.textureView,
+                bindingInfo.storageTexture.viewDimension);
 
-            DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
-                                         wgpu::SType::ExternalTextureBindingEntry));
+            DAWN_INVALID_IF(entry.textureView->GetLevelCount() != 1,
+                            "mipLevelCount (%u) of %s expected to be 1.",
+                            entry.textureView->GetLevelCount(), entry.textureView);
+            break;
+        }
+        default:
+            UNREACHABLE();
+            break;
+    }
 
-            DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+    return {};
+}
 
-            return {};
+MaybeError ValidateSamplerBinding(const DeviceBase* device,
+                                  const BindGroupEntry& entry,
+                                  const BindingInfo& bindingInfo) {
+    DAWN_INVALID_IF(entry.sampler == nullptr, "Binding entry sampler not set.");
+
+    DAWN_INVALID_IF(entry.textureView != nullptr || entry.buffer != nullptr,
+                    "Expected only sampler to be set for binding entry.");
+
+    DAWN_INVALID_IF(entry.nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    DAWN_TRY(device->ValidateObject(entry.sampler));
+
+    ASSERT(bindingInfo.bindingType == BindingInfoType::Sampler);
+
+    switch (bindingInfo.sampler.type) {
+        case wgpu::SamplerBindingType::NonFiltering:
+            DAWN_INVALID_IF(entry.sampler->IsFiltering(),
+                            "Filtering sampler %s is incompatible with non-filtering sampler "
+                            "binding.",
+                            entry.sampler);
+            [[fallthrough]];
+        case wgpu::SamplerBindingType::Filtering:
+            DAWN_INVALID_IF(entry.sampler->IsComparison(),
+                            "Comparison sampler %s is incompatible with non-comparison sampler "
+                            "binding.",
+                            entry.sampler);
+            break;
+        case wgpu::SamplerBindingType::Comparison:
+            DAWN_INVALID_IF(!entry.sampler->IsComparison(),
+                            "Non-comparison sampler %s is imcompatible with comparison sampler "
+                            "binding.",
+                            entry.sampler);
+            break;
+        default:
+            UNREACHABLE();
+            break;
+    }
+
+    return {};
+}
+
+MaybeError ValidateExternalTextureBinding(
+    const DeviceBase* device,
+    const BindGroupEntry& entry,
+    const ExternalTextureBindingEntry* externalTextureBindingEntry,
+    const ExternalTextureBindingExpansionMap& expansions) {
+    DAWN_INVALID_IF(externalTextureBindingEntry == nullptr,
+                    "Binding entry external texture not set.");
+
+    DAWN_INVALID_IF(
+        entry.sampler != nullptr || entry.textureView != nullptr || entry.buffer != nullptr,
+        "Expected only external texture to be set for binding entry.");
+
+    DAWN_INVALID_IF(expansions.find(BindingNumber(entry.binding)) == expansions.end(),
+                    "External texture binding entry %u is not present in the bind group layout.",
+                    entry.binding);
+
+    DAWN_TRY(ValidateSingleSType(externalTextureBindingEntry->nextInChain,
+                                 wgpu::SType::ExternalTextureBindingEntry));
+
+    DAWN_TRY(device->ValidateObject(externalTextureBindingEntry->externalTexture));
+
+    return {};
+}
+
+}  // anonymous namespace
+
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    DAWN_TRY(device->ValidateObject(descriptor->layout));
+
+    DAWN_INVALID_IF(
+        descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
+        "Number of entries (%u) did not match the number of entries (%u) specified in %s."
+        "\nExpected layout: %s",
+        descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
+        descriptor->layout, descriptor->layout->EntriesToString());
+
+    const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+    ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
+
+    ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
+    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+        const BindGroupEntry& entry = descriptor->entries[i];
+
+        const auto& it = bindingMap.find(BindingNumber(entry.binding));
+        DAWN_INVALID_IF(it == bindingMap.end(),
+                        "In entries[%u], binding index %u not present in the bind group layout."
+                        "\nExpected layout: %s",
+                        i, entry.binding, descriptor->layout->EntriesToString());
+
+        BindingIndex bindingIndex = it->second;
+        ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+
+        DAWN_INVALID_IF(bindingsSet[bindingIndex],
+                        "In entries[%u], binding index %u already used by a previous entry", i,
+                        entry.binding);
+
+        bindingsSet.set(bindingIndex);
+
+        // Below this block we validate entries based on the bind group layout, in which
+        // external textures have been expanded into their underlying contents. For this reason
+        // we must identify external texture binding entries by checking the bind group entry
+        // itself.
+        // TODO(dawn:1293): Store external textures in
+        // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
+        // be moved in the switch below.
+        const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+        FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+        if (externalTextureBindingEntry != nullptr) {
+            DAWN_TRY(ValidateExternalTextureBinding(
+                device, entry, externalTextureBindingEntry,
+                descriptor->layout->GetExternalTextureBindingExpansionMap()));
+            continue;
         }
 
-    }  // anonymous namespace
+        const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
 
-    MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
-                                           const BindGroupDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+        // Perform binding-type specific validation.
+        switch (bindingInfo.bindingType) {
+            case BindingInfoType::Buffer:
+                DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
+                                 "validating entries[%u] as a Buffer."
+                                 "\nExpected entry layout: %s",
+                                 i, bindingInfo);
+                break;
+            case BindingInfoType::Texture:
+            case BindingInfoType::StorageTexture:
+                DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
+                                 "validating entries[%u] as a Texture."
+                                 "\nExpected entry layout: %s",
+                                 i, bindingInfo);
+                break;
+            case BindingInfoType::Sampler:
+                DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
+                                 "validating entries[%u] as a Sampler."
+                                 "\nExpected entry layout: %s",
+                                 i, bindingInfo);
+                break;
+            case BindingInfoType::ExternalTexture:
+                UNREACHABLE();
+                break;
+        }
+    }
 
-        DAWN_TRY(device->ValidateObject(descriptor->layout));
+    // This should always be true because
+    //  - numBindings has to match between the bind group and its layout.
+    //  - Each binding must be set at most once
+    //
+    // We don't validate the equality because it wouldn't be possible to cover it with a test.
+    ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
 
-        DAWN_INVALID_IF(
-            descriptor->entryCount != descriptor->layout->GetUnexpandedBindingCount(),
-            "Number of entries (%u) did not match the number of entries (%u) specified in %s."
-            "\nExpected layout: %s",
-            descriptor->entryCount, static_cast<uint32_t>(descriptor->layout->GetBindingCount()),
-            descriptor->layout, descriptor->layout->EntriesToString());
+    return {};
+}  // anonymous namespace
 
-        const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
-        ASSERT(bindingMap.size() <= kMaxBindingsPerPipelineLayout);
+// BindGroup
 
-        ityp::bitset<BindingIndex, kMaxBindingsPerPipelineLayout> bindingsSet;
-        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
-            const BindGroupEntry& entry = descriptor->entries[i];
+BindGroupBase::BindGroupBase(DeviceBase* device,
+                             const BindGroupDescriptor* descriptor,
+                             void* bindingDataStart)
+    : ApiObjectBase(device, descriptor->label),
+      mLayout(descriptor->layout),
+      mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+    for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
+        // TODO(enga): Shouldn't be needed when bindings are tightly packed.
+        // This is to fill Ref<ObjectBase> holes with nullptrs.
+        new (&mBindingData.bindings[i]) Ref<ObjectBase>();
+    }
 
-            const auto& it = bindingMap.find(BindingNumber(entry.binding));
-            DAWN_INVALID_IF(it == bindingMap.end(),
-                            "In entries[%u], binding index %u not present in the bind group layout."
-                            "\nExpected layout: %s",
-                            i, entry.binding, descriptor->layout->EntriesToString());
+    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+        const BindGroupEntry& entry = descriptor->entries[i];
 
-            BindingIndex bindingIndex = it->second;
-            ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+        BindingIndex bindingIndex =
+            descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
+        ASSERT(bindingIndex < mLayout->GetBindingCount());
 
-            DAWN_INVALID_IF(bindingsSet[bindingIndex],
-                            "In entries[%u], binding index %u already used by a previous entry", i,
-                            entry.binding);
+        // Only a single binding type should be set, so once we found it we can skip to the
+        // next loop iteration.
 
-            bindingsSet.set(bindingIndex);
-
-            // Below this block we validate entries based on the bind group layout, in which
-            // external textures have been expanded into their underlying contents. For this reason
-            // we must identify external texture binding entries by checking the bind group entry
-            // itself.
-            // TODO(dawn:1293): Store external textures in
-            // BindGroupLayoutBase::BindingDataPointers::bindings so checking external textures can
-            // be moved in the switch below.
-            const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
-            FindInChain(entry.nextInChain, &externalTextureBindingEntry);
-            if (externalTextureBindingEntry != nullptr) {
-                DAWN_TRY(ValidateExternalTextureBinding(
-                    device, entry, externalTextureBindingEntry,
-                    descriptor->layout->GetExternalTextureBindingExpansionMap()));
-                continue;
-            }
-
-            const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
-
-            // Perform binding-type specific validation.
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Buffer:
-                    DAWN_TRY_CONTEXT(ValidateBufferBinding(device, entry, bindingInfo),
-                                     "validating entries[%u] as a Buffer."
-                                     "\nExpected entry layout: %s",
-                                     i, bindingInfo);
-                    break;
-                case BindingInfoType::Texture:
-                case BindingInfoType::StorageTexture:
-                    DAWN_TRY_CONTEXT(ValidateTextureBinding(device, entry, bindingInfo),
-                                     "validating entries[%u] as a Texture."
-                                     "\nExpected entry layout: %s",
-                                     i, bindingInfo);
-                    break;
-                case BindingInfoType::Sampler:
-                    DAWN_TRY_CONTEXT(ValidateSamplerBinding(device, entry, bindingInfo),
-                                     "validating entries[%u] as a Sampler."
-                                     "\nExpected entry layout: %s",
-                                     i, bindingInfo);
-                    break;
-                case BindingInfoType::ExternalTexture:
-                    UNREACHABLE();
-                    break;
-            }
+        if (entry.buffer != nullptr) {
+            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+            mBindingData.bindings[bindingIndex] = entry.buffer;
+            mBindingData.bufferData[bindingIndex].offset = entry.offset;
+            uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
+                                      ? entry.buffer->GetSize() - entry.offset
+                                      : entry.size;
+            mBindingData.bufferData[bindingIndex].size = bufferSize;
+            continue;
         }
 
-        // This should always be true because
-        //  - numBindings has to match between the bind group and its layout.
-        //  - Each binding must be set at most once
-        //
-        // We don't validate the equality because it wouldn't be possible to cover it with a test.
-        ASSERT(bindingsSet.count() == descriptor->layout->GetUnexpandedBindingCount());
+        if (entry.textureView != nullptr) {
+            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+            mBindingData.bindings[bindingIndex] = entry.textureView;
+            continue;
+        }
 
-        return {};
-    }  // anonymous namespace
+        if (entry.sampler != nullptr) {
+            ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
+            mBindingData.bindings[bindingIndex] = entry.sampler;
+            continue;
+        }
 
-    // BindGroup
+        // Here we unpack external texture bindings into multiple additional bindings for the
+        // external texture's contents. New binding locations previously determined in the bind
+        // group layout are created in this bind group and filled with the external texture's
+        // underlying resources.
+        const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
+        FindInChain(entry.nextInChain, &externalTextureBindingEntry);
+        if (externalTextureBindingEntry != nullptr) {
+            mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
 
-    BindGroupBase::BindGroupBase(DeviceBase* device,
-                                 const BindGroupDescriptor* descriptor,
-                                 void* bindingDataStart)
-        : ApiObjectBase(device, descriptor->label),
-          mLayout(descriptor->layout),
-          mBindingData(mLayout->ComputeBindingDataPointers(bindingDataStart)) {
+            ExternalTextureBindingExpansionMap expansions =
+                mLayout->GetExternalTextureBindingExpansionMap();
+            ExternalTextureBindingExpansionMap::iterator it =
+                expansions.find(BindingNumber(entry.binding));
+
+            ASSERT(it != expansions.end());
+
+            BindingIndex plane0BindingIndex =
+                descriptor->layout->GetBindingIndex(it->second.plane0);
+            BindingIndex plane1BindingIndex =
+                descriptor->layout->GetBindingIndex(it->second.plane1);
+            BindingIndex paramsBindingIndex =
+                descriptor->layout->GetBindingIndex(it->second.params);
+
+            ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
+
+            mBindingData.bindings[plane0BindingIndex] =
+                externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
+
+            ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
+            mBindingData.bindings[plane1BindingIndex] =
+                externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
+
+            ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
+            mBindingData.bindings[paramsBindingIndex] =
+                externalTextureBindingEntry->externalTexture->GetParamsBuffer();
+            mBindingData.bufferData[paramsBindingIndex].offset = 0;
+            mBindingData.bufferData[paramsBindingIndex].size =
+                sizeof(dawn_native::ExternalTextureParams);
+
+            continue;
+        }
+    }
+
+    uint32_t packedIdx = 0;
+    for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
+         ++bindingIndex) {
+        if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
+            mBindingData.unverifiedBufferSizes[packedIdx] =
+                mBindingData.bufferData[bindingIndex].size;
+            ++packedIdx;
+        }
+    }
+
+    TrackInDevice();
+}
+
+BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+BindGroupBase::~BindGroupBase() = default;
+
+void BindGroupBase::DestroyImpl() {
+    if (mLayout != nullptr) {
+        ASSERT(!IsError());
         for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
-            // TODO(enga): Shouldn't be needed when bindings are tightly packed.
-            // This is to fill Ref<ObjectBase> holes with nullptrs.
-            new (&mBindingData.bindings[i]) Ref<ObjectBase>();
-        }
-
-        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
-            const BindGroupEntry& entry = descriptor->entries[i];
-
-            BindingIndex bindingIndex =
-                descriptor->layout->GetBindingIndex(BindingNumber(entry.binding));
-            ASSERT(bindingIndex < mLayout->GetBindingCount());
-
-            // Only a single binding type should be set, so once we found it we can skip to the
-            // next loop iteration.
-
-            if (entry.buffer != nullptr) {
-                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
-                mBindingData.bindings[bindingIndex] = entry.buffer;
-                mBindingData.bufferData[bindingIndex].offset = entry.offset;
-                uint64_t bufferSize = (entry.size == wgpu::kWholeSize)
-                                          ? entry.buffer->GetSize() - entry.offset
-                                          : entry.size;
-                mBindingData.bufferData[bindingIndex].size = bufferSize;
-                continue;
-            }
-
-            if (entry.textureView != nullptr) {
-                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
-                mBindingData.bindings[bindingIndex] = entry.textureView;
-                continue;
-            }
-
-            if (entry.sampler != nullptr) {
-                ASSERT(mBindingData.bindings[bindingIndex] == nullptr);
-                mBindingData.bindings[bindingIndex] = entry.sampler;
-                continue;
-            }
-
-            // Here we unpack external texture bindings into multiple additional bindings for the
-            // external texture's contents. New binding locations previously determined in the bind
-            // group layout are created in this bind group and filled with the external texture's
-            // underlying resources.
-            const ExternalTextureBindingEntry* externalTextureBindingEntry = nullptr;
-            FindInChain(entry.nextInChain, &externalTextureBindingEntry);
-            if (externalTextureBindingEntry != nullptr) {
-                mBoundExternalTextures.push_back(externalTextureBindingEntry->externalTexture);
-
-                ExternalTextureBindingExpansionMap expansions =
-                    mLayout->GetExternalTextureBindingExpansionMap();
-                ExternalTextureBindingExpansionMap::iterator it =
-                    expansions.find(BindingNumber(entry.binding));
-
-                ASSERT(it != expansions.end());
-
-                BindingIndex plane0BindingIndex =
-                    descriptor->layout->GetBindingIndex(it->second.plane0);
-                BindingIndex plane1BindingIndex =
-                    descriptor->layout->GetBindingIndex(it->second.plane1);
-                BindingIndex paramsBindingIndex =
-                    descriptor->layout->GetBindingIndex(it->second.params);
-
-                ASSERT(mBindingData.bindings[plane0BindingIndex] == nullptr);
-
-                mBindingData.bindings[plane0BindingIndex] =
-                    externalTextureBindingEntry->externalTexture->GetTextureViews()[0];
-
-                ASSERT(mBindingData.bindings[plane1BindingIndex] == nullptr);
-                mBindingData.bindings[plane1BindingIndex] =
-                    externalTextureBindingEntry->externalTexture->GetTextureViews()[1];
-
-                ASSERT(mBindingData.bindings[paramsBindingIndex] == nullptr);
-                mBindingData.bindings[paramsBindingIndex] =
-                    externalTextureBindingEntry->externalTexture->GetParamsBuffer();
-                mBindingData.bufferData[paramsBindingIndex].offset = 0;
-                mBindingData.bufferData[paramsBindingIndex].size =
-                    sizeof(dawn_native::ExternalTextureParams);
-
-                continue;
-            }
-        }
-
-        uint32_t packedIdx = 0;
-        for (BindingIndex bindingIndex{0}; bindingIndex < descriptor->layout->GetBufferCount();
-             ++bindingIndex) {
-            if (descriptor->layout->GetBindingInfo(bindingIndex).buffer.minBindingSize == 0) {
-                mBindingData.unverifiedBufferSizes[packedIdx] =
-                    mBindingData.bufferData[bindingIndex].size;
-                ++packedIdx;
-            }
-        }
-
-        TrackInDevice();
-    }
-
-    BindGroupBase::BindGroupBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
-
-    BindGroupBase::~BindGroupBase() = default;
-
-    void BindGroupBase::DestroyImpl() {
-        if (mLayout != nullptr) {
-            ASSERT(!IsError());
-            for (BindingIndex i{0}; i < mLayout->GetBindingCount(); ++i) {
-                mBindingData.bindings[i].~Ref<ObjectBase>();
-            }
+            mBindingData.bindings[i].~Ref<ObjectBase>();
         }
     }
+}
 
-    void BindGroupBase::DeleteThis() {
-        // Add another ref to the layout so that if this is the last ref, the layout
-        // is destroyed after the bind group. The bind group is slab-allocated inside
-        // memory owned by the layout (except for the null backend).
-        Ref<BindGroupLayoutBase> layout = mLayout;
-        ApiObjectBase::DeleteThis();
-    }
+void BindGroupBase::DeleteThis() {
+    // Add another ref to the layout so that if this is the last ref, the layout
+    // is destroyed after the bind group. The bind group is slab-allocated inside
+    // memory owned by the layout (except for the null backend).
+    Ref<BindGroupLayoutBase> layout = mLayout;
+    ApiObjectBase::DeleteThis();
+}
 
-    BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag), mBindingData() {
-    }
+BindGroupBase::BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag), mBindingData() {}
 
-    // static
-    BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
-        return new BindGroupBase(device, ObjectBase::kError);
-    }
+// static
+BindGroupBase* BindGroupBase::MakeError(DeviceBase* device) {
+    return new BindGroupBase(device, ObjectBase::kError);
+}
 
-    ObjectType BindGroupBase::GetType() const {
-        return ObjectType::BindGroup;
-    }
+ObjectType BindGroupBase::GetType() const {
+    return ObjectType::BindGroup;
+}
 
-    BindGroupLayoutBase* BindGroupBase::GetLayout() {
-        ASSERT(!IsError());
-        return mLayout.Get();
-    }
+BindGroupLayoutBase* BindGroupBase::GetLayout() {
+    ASSERT(!IsError());
+    return mLayout.Get();
+}
 
-    const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
-        ASSERT(!IsError());
-        return mLayout.Get();
-    }
+const BindGroupLayoutBase* BindGroupBase::GetLayout() const {
+    ASSERT(!IsError());
+    return mLayout.Get();
+}
 
-    const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
-        ASSERT(!IsError());
-        return mBindingData.unverifiedBufferSizes;
-    }
+const ityp::span<uint32_t, uint64_t>& BindGroupBase::GetUnverifiedBufferSizes() const {
+    ASSERT(!IsError());
+    return mBindingData.unverifiedBufferSizes;
+}
 
-    BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
-        ASSERT(!IsError());
-        ASSERT(bindingIndex < mLayout->GetBindingCount());
-        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
-        BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
-        return {buffer, mBindingData.bufferData[bindingIndex].offset,
-                mBindingData.bufferData[bindingIndex].size};
-    }
+BufferBinding BindGroupBase::GetBindingAsBufferBinding(BindingIndex bindingIndex) {
+    ASSERT(!IsError());
+    ASSERT(bindingIndex < mLayout->GetBindingCount());
+    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Buffer);
+    BufferBase* buffer = static_cast<BufferBase*>(mBindingData.bindings[bindingIndex].Get());
+    return {buffer, mBindingData.bufferData[bindingIndex].offset,
+            mBindingData.bufferData[bindingIndex].size};
+}
 
-    SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
-        ASSERT(!IsError());
-        ASSERT(bindingIndex < mLayout->GetBindingCount());
-        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
-        return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
-    }
+SamplerBase* BindGroupBase::GetBindingAsSampler(BindingIndex bindingIndex) const {
+    ASSERT(!IsError());
+    ASSERT(bindingIndex < mLayout->GetBindingCount());
+    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Sampler);
+    return static_cast<SamplerBase*>(mBindingData.bindings[bindingIndex].Get());
+}
 
-    TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
-        ASSERT(!IsError());
-        ASSERT(bindingIndex < mLayout->GetBindingCount());
-        ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
-               mLayout->GetBindingInfo(bindingIndex).bindingType ==
-                   BindingInfoType::StorageTexture);
-        return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
-    }
+TextureViewBase* BindGroupBase::GetBindingAsTextureView(BindingIndex bindingIndex) {
+    ASSERT(!IsError());
+    ASSERT(bindingIndex < mLayout->GetBindingCount());
+    ASSERT(mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::Texture ||
+           mLayout->GetBindingInfo(bindingIndex).bindingType == BindingInfoType::StorageTexture);
+    return static_cast<TextureViewBase*>(mBindingData.bindings[bindingIndex].Get());
+}
 
-    const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
-        return mBoundExternalTextures;
-    }
+const std::vector<Ref<ExternalTextureBase>>& BindGroupBase::GetBoundExternalTextures() const {
+    return mBoundExternalTextures;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BindGroup.h b/src/dawn/native/BindGroup.h
index 4826136..236e4fb 100644
--- a/src/dawn/native/BindGroup.h
+++ b/src/dawn/native/BindGroup.h
@@ -29,68 +29,67 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    MaybeError ValidateBindGroupDescriptor(DeviceBase* device,
-                                           const BindGroupDescriptor* descriptor);
+MaybeError ValidateBindGroupDescriptor(DeviceBase* device, const BindGroupDescriptor* descriptor);
 
-    struct BufferBinding {
-        BufferBase* buffer;
-        uint64_t offset;
-        uint64_t size;
-    };
+struct BufferBinding {
+    BufferBase* buffer;
+    uint64_t offset;
+    uint64_t size;
+};
 
-    class BindGroupBase : public ApiObjectBase {
-      public:
-        static BindGroupBase* MakeError(DeviceBase* device);
+class BindGroupBase : public ApiObjectBase {
+  public:
+    static BindGroupBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        BindGroupLayoutBase* GetLayout();
-        const BindGroupLayoutBase* GetLayout() const;
-        BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
-        SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
-        TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
-        const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
-        const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
+    BindGroupLayoutBase* GetLayout();
+    const BindGroupLayoutBase* GetLayout() const;
+    BufferBinding GetBindingAsBufferBinding(BindingIndex bindingIndex);
+    SamplerBase* GetBindingAsSampler(BindingIndex bindingIndex) const;
+    TextureViewBase* GetBindingAsTextureView(BindingIndex bindingIndex);
+    const ityp::span<uint32_t, uint64_t>& GetUnverifiedBufferSizes() const;
+    const std::vector<Ref<ExternalTextureBase>>& GetBoundExternalTextures() const;
 
-      protected:
-        // To save memory, the size of a bind group is dynamically determined and the bind group is
-        // placement-allocated into memory big enough to hold the bind group with its
-        // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
-        // binding data should be passed as |bindingDataStart|.
-        BindGroupBase(DeviceBase* device,
-                      const BindGroupDescriptor* descriptor,
-                      void* bindingDataStart);
+  protected:
+    // To save memory, the size of a bind group is dynamically determined and the bind group is
+    // placement-allocated into memory big enough to hold the bind group with its
+    // dynamically-sized bindings after it. The pointer of the memory of the beginning of the
+    // binding data should be passed as |bindingDataStart|.
+    BindGroupBase(DeviceBase* device,
+                  const BindGroupDescriptor* descriptor,
+                  void* bindingDataStart);
 
-        // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
-        // be first in the allocation. The binding data is stored after the Derived class.
-        template <typename Derived>
-        BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
-            : BindGroupBase(device,
-                            descriptor,
-                            AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
-                                     descriptor->layout->GetBindingDataAlignment())) {
-            static_assert(std::is_base_of<BindGroupBase, Derived>::value);
-        }
+    // Helper to instantiate BindGroupBase. We pass in |derived| because BindGroupBase may not
+    // be first in the allocation. The binding data is stored after the Derived class.
+    template <typename Derived>
+    BindGroupBase(Derived* derived, DeviceBase* device, const BindGroupDescriptor* descriptor)
+        : BindGroupBase(device,
+                        descriptor,
+                        AlignPtr(reinterpret_cast<char*>(derived) + sizeof(Derived),
+                                 descriptor->layout->GetBindingDataAlignment())) {
+        static_assert(std::is_base_of<BindGroupBase, Derived>::value);
+    }
 
-        // Constructor used only for mocking and testing.
-        explicit BindGroupBase(DeviceBase* device);
-        void DestroyImpl() override;
+    // Constructor used only for mocking and testing.
+    explicit BindGroupBase(DeviceBase* device);
+    void DestroyImpl() override;
 
-        ~BindGroupBase() override;
+    ~BindGroupBase() override;
 
-      private:
-        BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-        void DeleteThis() override;
+  private:
+    BindGroupBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    void DeleteThis() override;
 
-        Ref<BindGroupLayoutBase> mLayout;
-        BindGroupLayoutBase::BindingDataPointers mBindingData;
+    Ref<BindGroupLayoutBase> mLayout;
+    BindGroupLayoutBase::BindingDataPointers mBindingData;
 
-        // TODO(dawn:1293): Store external textures in
-        // BindGroupLayoutBase::BindingDataPointers::bindings
-        std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
-    };
+    // TODO(dawn:1293): Store external textures in
+    // BindGroupLayoutBase::BindingDataPointers::bindings
+    std::vector<Ref<ExternalTextureBase>> mBoundExternalTextures;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BindGroupLayout.cpp b/src/dawn/native/BindGroupLayout.cpp
index f2c4e72..b57cd69 100644
--- a/src/dawn/native/BindGroupLayout.cpp
+++ b/src/dawn/native/BindGroupLayout.cpp
@@ -31,648 +31,639 @@
 
 namespace dawn::native {
 
-    namespace {
-        MaybeError ValidateStorageTextureFormat(DeviceBase* device,
-                                                wgpu::TextureFormat storageTextureFormat) {
-            const Format* format = nullptr;
-            DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
+namespace {
+MaybeError ValidateStorageTextureFormat(DeviceBase* device,
+                                        wgpu::TextureFormat storageTextureFormat) {
+    const Format* format = nullptr;
+    DAWN_TRY_ASSIGN(format, device->GetInternalFormat(storageTextureFormat));
 
-            ASSERT(format != nullptr);
-            DAWN_INVALID_IF(!format->supportsStorageUsage,
-                            "Texture format (%s) does not support storage textures.",
-                            storageTextureFormat);
+    ASSERT(format != nullptr);
+    DAWN_INVALID_IF(!format->supportsStorageUsage,
+                    "Texture format (%s) does not support storage textures.", storageTextureFormat);
 
+    return {};
+}
+
+MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
+    switch (dimension) {
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+            return DAWN_FORMAT_VALIDATION_ERROR(
+                "%s texture views cannot be used as storage textures.", dimension);
+
+        case wgpu::TextureViewDimension::e1D:
+        case wgpu::TextureViewDimension::e2D:
+        case wgpu::TextureViewDimension::e2DArray:
+        case wgpu::TextureViewDimension::e3D:
             return {};
+
+        case wgpu::TextureViewDimension::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
+                                        const BindGroupLayoutEntry& entry,
+                                        bool allowInternalBinding) {
+    DAWN_TRY(ValidateShaderStage(entry.visibility));
+
+    int bindingMemberCount = 0;
+    BindingInfoType bindingType;
+    wgpu::ShaderStage allowedStages = kAllStages;
+
+    if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::Buffer;
+        const BufferBindingLayout& buffer = entry.buffer;
+
+        // The kInternalStorageBufferBinding is used internally and not a value
+        // in wgpu::BufferBindingType.
+        if (buffer.type == kInternalStorageBufferBinding) {
+            DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
+        } else {
+            DAWN_TRY(ValidateBufferBindingType(buffer.type));
         }
 
-        MaybeError ValidateStorageTextureViewDimension(wgpu::TextureViewDimension dimension) {
-            switch (dimension) {
-                case wgpu::TextureViewDimension::Cube:
-                case wgpu::TextureViewDimension::CubeArray:
-                    return DAWN_FORMAT_VALIDATION_ERROR(
-                        "%s texture views cannot be used as storage textures.", dimension);
-
-                case wgpu::TextureViewDimension::e1D:
-                case wgpu::TextureViewDimension::e2D:
-                case wgpu::TextureViewDimension::e2DArray:
-                case wgpu::TextureViewDimension::e3D:
-                    return {};
-
-                case wgpu::TextureViewDimension::Undefined:
-                    break;
-            }
-            UNREACHABLE();
+        if (buffer.type == wgpu::BufferBindingType::Storage ||
+            buffer.type == kInternalStorageBufferBinding) {
+            allowedStages &= ~wgpu::ShaderStage::Vertex;
         }
-
-        MaybeError ValidateBindGroupLayoutEntry(DeviceBase* device,
-                                                const BindGroupLayoutEntry& entry,
-                                                bool allowInternalBinding) {
-            DAWN_TRY(ValidateShaderStage(entry.visibility));
-
-            int bindingMemberCount = 0;
-            BindingInfoType bindingType;
-            wgpu::ShaderStage allowedStages = kAllStages;
-
-            if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::Buffer;
-                const BufferBindingLayout& buffer = entry.buffer;
-
-                // The kInternalStorageBufferBinding is used internally and not a value
-                // in wgpu::BufferBindingType.
-                if (buffer.type == kInternalStorageBufferBinding) {
-                    DAWN_INVALID_IF(!allowInternalBinding, "Internal binding types are disallowed");
-                } else {
-                    DAWN_TRY(ValidateBufferBindingType(buffer.type));
-                }
-
-                if (buffer.type == wgpu::BufferBindingType::Storage ||
-                    buffer.type == kInternalStorageBufferBinding) {
-                    allowedStages &= ~wgpu::ShaderStage::Vertex;
-                }
-            }
-
-            if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::Sampler;
-                DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
-            }
-
-            if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::Texture;
-                const TextureBindingLayout& texture = entry.texture;
-                DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
-
-                // viewDimension defaults to 2D if left undefined, needs validation otherwise.
-                wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
-                if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
-                    DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
-                    viewDimension = texture.viewDimension;
-                }
-
-                DAWN_INVALID_IF(
-                    texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
-                    "View dimension (%s) for a multisampled texture bindings was not %s.",
-                    viewDimension, wgpu::TextureViewDimension::e2D);
-            }
-
-            if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::StorageTexture;
-                const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
-                DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
-                DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
-
-                // viewDimension defaults to 2D if left undefined, needs validation otherwise.
-                if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
-                    DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
-                    DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
-                }
-
-                if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
-                    allowedStages &= ~wgpu::ShaderStage::Vertex;
-                }
-            }
-
-            const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
-            FindInChain(entry.nextInChain, &externalTextureBindingLayout);
-            if (externalTextureBindingLayout != nullptr) {
-                bindingMemberCount++;
-                bindingType = BindingInfoType::ExternalTexture;
-            }
-
-            DAWN_INVALID_IF(bindingMemberCount == 0,
-                            "BindGroupLayoutEntry had none of buffer, sampler, texture, "
-                            "storageTexture, or externalTexture set");
-
-            DAWN_INVALID_IF(bindingMemberCount != 1,
-                            "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
-                            "storageTexture, or externalTexture set");
-
-            DAWN_INVALID_IF(
-                !IsSubset(entry.visibility, allowedStages),
-                "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
-                bindingType, entry.visibility, allowedStages);
-
-            return {};
-        }
-
-        BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(
-            uint32_t binding,
-            wgpu::ShaderStage visibility) {
-            BindGroupLayoutEntry entry;
-            entry.binding = binding;
-            entry.visibility = visibility;
-            entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
-            entry.texture.multisampled = false;
-            entry.texture.sampleType = wgpu::TextureSampleType::Float;
-            return entry;
-        }
-
-        BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
-                                                                    wgpu::ShaderStage visibility) {
-            BindGroupLayoutEntry entry;
-            entry.binding = binding;
-            entry.visibility = visibility;
-            entry.buffer.hasDynamicOffset = false;
-            entry.buffer.type = wgpu::BufferBindingType::Uniform;
-            return entry;
-        }
-
-        std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
-            const BindGroupLayoutDescriptor* descriptor,
-            BindingCounts* bindingCounts,
-            ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
-            std::vector<BindGroupLayoutEntry> expandedOutput;
-
-            // When new bgl entries are created, we use binding numbers larger than
-            // kMaxBindingNumber to ensure there are no collisions.
-            uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
-            for (uint32_t i = 0; i < descriptor->entryCount; i++) {
-                const BindGroupLayoutEntry& entry = descriptor->entries[i];
-                const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
-                FindInChain(entry.nextInChain, &externalTextureBindingLayout);
-                // External textures are expanded from a texture_external into two sampled texture
-                // bindings and one uniform buffer binding. The original binding number is used
-                // for the first sampled texture.
-                if (externalTextureBindingLayout != nullptr) {
-                    for (SingleShaderStage stage : IterateStages(entry.visibility)) {
-                        // External textures are not fully implemented, which means that expanding
-                        // the external texture at this time will not occupy the same number of
-                        // binding slots as defined in the WebGPU specification. Here we prematurely
-                        // increment the binding counts for an additional sampled textures and a
-                        // sampler so that an external texture will occupy the correct number of
-                        // slots for correct validation of shader binding limits.
-                        // TODO(dawn:1082): Consider removing this and instead making a change to
-                        // the validation.
-                        constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
-                        constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
-                        bindingCounts->perStage[stage].sampledTextureCount +=
-                            kUnimplementedSampledTexturesPerExternalTexture;
-                        bindingCounts->perStage[stage].samplerCount +=
-                            kUnimplementedSamplersPerExternalTexture;
-                    }
-
-                    dawn_native::ExternalTextureBindingExpansion bindingExpansion;
-
-                    BindGroupLayoutEntry plane0Entry =
-                        CreateSampledTextureBindingForExternalTexture(entry.binding,
-                                                                      entry.visibility);
-                    bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
-                    expandedOutput.push_back(plane0Entry);
-
-                    BindGroupLayoutEntry plane1Entry =
-                        CreateSampledTextureBindingForExternalTexture(
-                            nextOpenBindingNumberForNewEntry++, entry.visibility);
-                    bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
-                    expandedOutput.push_back(plane1Entry);
-
-                    BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
-                        nextOpenBindingNumberForNewEntry++, entry.visibility);
-                    bindingExpansion.params = BindingNumber(paramsEntry.binding);
-                    expandedOutput.push_back(paramsEntry);
-
-                    externalTextureBindingExpansions->insert(
-                        {BindingNumber(entry.binding), bindingExpansion});
-                } else {
-                    expandedOutput.push_back(entry);
-                }
-            }
-
-            return expandedOutput;
-        }
-    }  // anonymous namespace
-
-    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
-                                                 const BindGroupLayoutDescriptor* descriptor,
-                                                 bool allowInternalBinding) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-
-        std::set<BindingNumber> bindingsSet;
-        BindingCounts bindingCounts = {};
-
-        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
-            const BindGroupLayoutEntry& entry = descriptor->entries[i];
-            BindingNumber bindingNumber = BindingNumber(entry.binding);
-
-            DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
-                            "Binding number (%u) exceeds the maximum binding number (%u).",
-                            uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
-            DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
-                            "On entries[%u]: binding index (%u) was specified by a previous entry.",
-                            i, entry.binding);
-
-            DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
-                             "validating entries[%u]", i);
-
-            IncrementBindingCounts(&bindingCounts, entry);
-
-            bindingsSet.insert(bindingNumber);
-        }
-
-        DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
-
-        return {};
     }
 
-    namespace {
+    if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::Sampler;
+        DAWN_TRY(ValidateSamplerBindingType(entry.sampler.type));
+    }
 
-        bool operator!=(const BindingInfo& a, const BindingInfo& b) {
-            if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
-                return true;
-            }
+    if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::Texture;
+        const TextureBindingLayout& texture = entry.texture;
+        DAWN_TRY(ValidateTextureSampleType(texture.sampleType));
 
-            switch (a.bindingType) {
-                case BindingInfoType::Buffer:
-                    return a.buffer.type != b.buffer.type ||
-                           a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
-                           a.buffer.minBindingSize != b.buffer.minBindingSize;
-                case BindingInfoType::Sampler:
-                    return a.sampler.type != b.sampler.type;
-                case BindingInfoType::Texture:
-                    return a.texture.sampleType != b.texture.sampleType ||
-                           a.texture.viewDimension != b.texture.viewDimension ||
-                           a.texture.multisampled != b.texture.multisampled;
-                case BindingInfoType::StorageTexture:
-                    return a.storageTexture.access != b.storageTexture.access ||
-                           a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
-                           a.storageTexture.format != b.storageTexture.format;
-                case BindingInfoType::ExternalTexture:
-                    return false;
-            }
-            UNREACHABLE();
+        // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+        wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D;
+        if (texture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+            DAWN_TRY(ValidateTextureViewDimension(texture.viewDimension));
+            viewDimension = texture.viewDimension;
         }
 
-        bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
-            return binding.buffer.type != wgpu::BufferBindingType::Undefined;
+        DAWN_INVALID_IF(texture.multisampled && viewDimension != wgpu::TextureViewDimension::e2D,
+                        "View dimension (%s) for a multisampled texture bindings was not %s.",
+                        viewDimension, wgpu::TextureViewDimension::e2D);
+    }
+
+    if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::StorageTexture;
+        const StorageTextureBindingLayout& storageTexture = entry.storageTexture;
+        DAWN_TRY(ValidateStorageTextureAccess(storageTexture.access));
+        DAWN_TRY(ValidateStorageTextureFormat(device, storageTexture.format));
+
+        // viewDimension defaults to 2D if left undefined, needs validation otherwise.
+        if (storageTexture.viewDimension != wgpu::TextureViewDimension::Undefined) {
+            DAWN_TRY(ValidateTextureViewDimension(storageTexture.viewDimension));
+            DAWN_TRY(ValidateStorageTextureViewDimension(storageTexture.viewDimension));
         }
 
-        bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
-            if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
-                return binding.buffer.hasDynamicOffset;
+        if (storageTexture.access == wgpu::StorageTextureAccess::WriteOnly) {
+            allowedStages &= ~wgpu::ShaderStage::Vertex;
+        }
+    }
+
+    const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+    FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+    if (externalTextureBindingLayout != nullptr) {
+        bindingMemberCount++;
+        bindingType = BindingInfoType::ExternalTexture;
+    }
+
+    DAWN_INVALID_IF(bindingMemberCount == 0,
+                    "BindGroupLayoutEntry had none of buffer, sampler, texture, "
+                    "storageTexture, or externalTexture set");
+
+    DAWN_INVALID_IF(bindingMemberCount != 1,
+                    "BindGroupLayoutEntry had more than one of buffer, sampler, texture, "
+                    "storageTexture, or externalTexture set");
+
+    DAWN_INVALID_IF(!IsSubset(entry.visibility, allowedStages),
+                    "%s bindings cannot be used with a visibility of %s. Only %s are allowed.",
+                    bindingType, entry.visibility, allowedStages);
+
+    return {};
+}
+
+BindGroupLayoutEntry CreateSampledTextureBindingForExternalTexture(uint32_t binding,
+                                                                   wgpu::ShaderStage visibility) {
+    BindGroupLayoutEntry entry;
+    entry.binding = binding;
+    entry.visibility = visibility;
+    entry.texture.viewDimension = wgpu::TextureViewDimension::e2D;
+    entry.texture.multisampled = false;
+    entry.texture.sampleType = wgpu::TextureSampleType::Float;
+    return entry;
+}
+
+BindGroupLayoutEntry CreateUniformBindingForExternalTexture(uint32_t binding,
+                                                            wgpu::ShaderStage visibility) {
+    BindGroupLayoutEntry entry;
+    entry.binding = binding;
+    entry.visibility = visibility;
+    entry.buffer.hasDynamicOffset = false;
+    entry.buffer.type = wgpu::BufferBindingType::Uniform;
+    return entry;
+}
+
+std::vector<BindGroupLayoutEntry> ExtractAndExpandBglEntries(
+    const BindGroupLayoutDescriptor* descriptor,
+    BindingCounts* bindingCounts,
+    ExternalTextureBindingExpansionMap* externalTextureBindingExpansions) {
+    std::vector<BindGroupLayoutEntry> expandedOutput;
+
+    // When new bgl entries are created, we use binding numbers larger than
+    // kMaxBindingNumber to ensure there are no collisions.
+    uint32_t nextOpenBindingNumberForNewEntry = kMaxBindingNumber + 1;
+    for (uint32_t i = 0; i < descriptor->entryCount; i++) {
+        const BindGroupLayoutEntry& entry = descriptor->entries[i];
+        const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+        FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+        // External textures are expanded from a texture_external into two sampled texture
+        // bindings and one uniform buffer binding. The original binding number is used
+        // for the first sampled texture.
+        if (externalTextureBindingLayout != nullptr) {
+            for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+                // External textures are not fully implemented, which means that expanding
+                // the external texture at this time will not occupy the same number of
+                // binding slots as defined in the WebGPU specification. Here we prematurely
+                // increment the binding counts for an additional sampled textures and a
+                // sampler so that an external texture will occupy the correct number of
+                // slots for correct validation of shader binding limits.
+                // TODO(dawn:1082): Consider removing this and instead making a change to
+                // the validation.
+                constexpr uint32_t kUnimplementedSampledTexturesPerExternalTexture = 2;
+                constexpr uint32_t kUnimplementedSamplersPerExternalTexture = 1;
+                bindingCounts->perStage[stage].sampledTextureCount +=
+                    kUnimplementedSampledTexturesPerExternalTexture;
+                bindingCounts->perStage[stage].samplerCount +=
+                    kUnimplementedSamplersPerExternalTexture;
             }
+
+            dawn_native::ExternalTextureBindingExpansion bindingExpansion;
+
+            BindGroupLayoutEntry plane0Entry =
+                CreateSampledTextureBindingForExternalTexture(entry.binding, entry.visibility);
+            bindingExpansion.plane0 = BindingNumber(plane0Entry.binding);
+            expandedOutput.push_back(plane0Entry);
+
+            BindGroupLayoutEntry plane1Entry = CreateSampledTextureBindingForExternalTexture(
+                nextOpenBindingNumberForNewEntry++, entry.visibility);
+            bindingExpansion.plane1 = BindingNumber(plane1Entry.binding);
+            expandedOutput.push_back(plane1Entry);
+
+            BindGroupLayoutEntry paramsEntry = CreateUniformBindingForExternalTexture(
+                nextOpenBindingNumberForNewEntry++, entry.visibility);
+            bindingExpansion.params = BindingNumber(paramsEntry.binding);
+            expandedOutput.push_back(paramsEntry);
+
+            externalTextureBindingExpansions->insert(
+                {BindingNumber(entry.binding), bindingExpansion});
+        } else {
+            expandedOutput.push_back(entry);
+        }
+    }
+
+    return expandedOutput;
+}
+}  // anonymous namespace
+
+MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+                                             const BindGroupLayoutDescriptor* descriptor,
+                                             bool allowInternalBinding) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+    std::set<BindingNumber> bindingsSet;
+    BindingCounts bindingCounts = {};
+
+    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+        const BindGroupLayoutEntry& entry = descriptor->entries[i];
+        BindingNumber bindingNumber = BindingNumber(entry.binding);
+
+        DAWN_INVALID_IF(bindingNumber > kMaxBindingNumberTyped,
+                        "Binding number (%u) exceeds the maximum binding number (%u).",
+                        uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped));
+        DAWN_INVALID_IF(bindingsSet.count(bindingNumber) != 0,
+                        "On entries[%u]: binding index (%u) was specified by a previous entry.", i,
+                        entry.binding);
+
+        DAWN_TRY_CONTEXT(ValidateBindGroupLayoutEntry(device, entry, allowInternalBinding),
+                         "validating entries[%u]", i);
+
+        IncrementBindingCounts(&bindingCounts, entry);
+
+        bindingsSet.insert(bindingNumber);
+    }
+
+    DAWN_TRY_CONTEXT(ValidateBindingCounts(bindingCounts), "validating binding counts");
+
+    return {};
+}
+
+namespace {
+
+bool operator!=(const BindingInfo& a, const BindingInfo& b) {
+    if (a.visibility != b.visibility || a.bindingType != b.bindingType) {
+        return true;
+    }
+
+    switch (a.bindingType) {
+        case BindingInfoType::Buffer:
+            return a.buffer.type != b.buffer.type ||
+                   a.buffer.hasDynamicOffset != b.buffer.hasDynamicOffset ||
+                   a.buffer.minBindingSize != b.buffer.minBindingSize;
+        case BindingInfoType::Sampler:
+            return a.sampler.type != b.sampler.type;
+        case BindingInfoType::Texture:
+            return a.texture.sampleType != b.texture.sampleType ||
+                   a.texture.viewDimension != b.texture.viewDimension ||
+                   a.texture.multisampled != b.texture.multisampled;
+        case BindingInfoType::StorageTexture:
+            return a.storageTexture.access != b.storageTexture.access ||
+                   a.storageTexture.viewDimension != b.storageTexture.viewDimension ||
+                   a.storageTexture.format != b.storageTexture.format;
+        case BindingInfoType::ExternalTexture:
             return false;
+    }
+    UNREACHABLE();
+}
+
+bool IsBufferBinding(const BindGroupLayoutEntry& binding) {
+    return binding.buffer.type != wgpu::BufferBindingType::Undefined;
+}
+
+bool BindingHasDynamicOffset(const BindGroupLayoutEntry& binding) {
+    if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+        return binding.buffer.hasDynamicOffset;
+    }
+    return false;
+}
+
+BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
+    BindingInfo bindingInfo;
+    bindingInfo.binding = BindingNumber(binding.binding);
+    bindingInfo.visibility = binding.visibility;
+
+    if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::Buffer;
+        bindingInfo.buffer = binding.buffer;
+    } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::Sampler;
+        bindingInfo.sampler = binding.sampler;
+    } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::Texture;
+        bindingInfo.texture = binding.texture;
+
+        if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+            bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
         }
+    } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+        bindingInfo.bindingType = BindingInfoType::StorageTexture;
+        bindingInfo.storageTexture = binding.storageTexture;
 
-        BindingInfo CreateBindGroupLayoutInfo(const BindGroupLayoutEntry& binding) {
-            BindingInfo bindingInfo;
-            bindingInfo.binding = BindingNumber(binding.binding);
-            bindingInfo.visibility = binding.visibility;
-
-            if (binding.buffer.type != wgpu::BufferBindingType::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::Buffer;
-                bindingInfo.buffer = binding.buffer;
-            } else if (binding.sampler.type != wgpu::SamplerBindingType::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::Sampler;
-                bindingInfo.sampler = binding.sampler;
-            } else if (binding.texture.sampleType != wgpu::TextureSampleType::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::Texture;
-                bindingInfo.texture = binding.texture;
-
-                if (binding.texture.viewDimension == wgpu::TextureViewDimension::Undefined) {
-                    bindingInfo.texture.viewDimension = wgpu::TextureViewDimension::e2D;
-                }
-            } else if (binding.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
-                bindingInfo.bindingType = BindingInfoType::StorageTexture;
-                bindingInfo.storageTexture = binding.storageTexture;
-
-                if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
-                    bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
-                }
-            } else {
-                const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
-                FindInChain(binding.nextInChain, &externalTextureBindingLayout);
-                if (externalTextureBindingLayout != nullptr) {
-                    bindingInfo.bindingType = BindingInfoType::ExternalTexture;
-                }
-            }
-
-            return bindingInfo;
+        if (binding.storageTexture.viewDimension == wgpu::TextureViewDimension::Undefined) {
+            bindingInfo.storageTexture.viewDimension = wgpu::TextureViewDimension::e2D;
         }
+    } else {
+        const ExternalTextureBindingLayout* externalTextureBindingLayout = nullptr;
+        FindInChain(binding.nextInChain, &externalTextureBindingLayout);
+        if (externalTextureBindingLayout != nullptr) {
+            bindingInfo.bindingType = BindingInfoType::ExternalTexture;
+        }
+    }
 
-        bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
-            const bool aIsBuffer = IsBufferBinding(a);
-            const bool bIsBuffer = IsBufferBinding(b);
-            if (aIsBuffer != bIsBuffer) {
-                // Always place buffers first.
-                return aIsBuffer;
-            }
+    return bindingInfo;
+}
 
-            if (aIsBuffer) {
-                bool aHasDynamicOffset = BindingHasDynamicOffset(a);
-                bool bHasDynamicOffset = BindingHasDynamicOffset(b);
-                ASSERT(bIsBuffer);
-                if (aHasDynamicOffset != bHasDynamicOffset) {
-                    // Buffers with dynamic offsets should come before those without.
-                    // This makes it easy to iterate over the dynamic buffer bindings
-                    // [0, dynamicBufferCount) during validation.
-                    return aHasDynamicOffset;
-                }
-                if (aHasDynamicOffset) {
-                    ASSERT(bHasDynamicOffset);
-                    ASSERT(a.binding != b.binding);
-                    // Above, we ensured that dynamic buffers are first. Now, ensure that
-                    // dynamic buffer bindings are in increasing order. This is because dynamic
-                    // buffer offsets are applied in increasing order of binding number.
-                    return a.binding < b.binding;
-                }
-            }
+bool SortBindingsCompare(const BindGroupLayoutEntry& a, const BindGroupLayoutEntry& b) {
+    const bool aIsBuffer = IsBufferBinding(a);
+    const bool bIsBuffer = IsBufferBinding(b);
+    if (aIsBuffer != bIsBuffer) {
+        // Always place buffers first.
+        return aIsBuffer;
+    }
 
-            // This applies some defaults and gives us a single value to check for the binding type.
-            BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
-            BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
-
-            // Sort by type.
-            if (aInfo.bindingType != bInfo.bindingType) {
-                return aInfo.bindingType < bInfo.bindingType;
-            }
-
-            if (a.visibility != b.visibility) {
-                return a.visibility < b.visibility;
-            }
-
-            switch (aInfo.bindingType) {
-                case BindingInfoType::Buffer:
-                    if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
-                        return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
-                    }
-                    break;
-                case BindingInfoType::Sampler:
-                    if (aInfo.sampler.type != bInfo.sampler.type) {
-                        return aInfo.sampler.type < bInfo.sampler.type;
-                    }
-                    break;
-                case BindingInfoType::Texture:
-                    if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
-                        return aInfo.texture.multisampled < bInfo.texture.multisampled;
-                    }
-                    if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
-                        return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
-                    }
-                    if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
-                        return aInfo.texture.sampleType < bInfo.texture.sampleType;
-                    }
-                    break;
-                case BindingInfoType::StorageTexture:
-                    if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
-                        return aInfo.storageTexture.access < bInfo.storageTexture.access;
-                    }
-                    if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
-                        return aInfo.storageTexture.viewDimension <
-                               bInfo.storageTexture.viewDimension;
-                    }
-                    if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
-                        return aInfo.storageTexture.format < bInfo.storageTexture.format;
-                    }
-                    break;
-                case BindingInfoType::ExternalTexture:
-                    break;
-            }
+    if (aIsBuffer) {
+        bool aHasDynamicOffset = BindingHasDynamicOffset(a);
+        bool bHasDynamicOffset = BindingHasDynamicOffset(b);
+        ASSERT(bIsBuffer);
+        if (aHasDynamicOffset != bHasDynamicOffset) {
+            // Buffers with dynamic offsets should come before those without.
+            // This makes it easy to iterate over the dynamic buffer bindings
+            // [0, dynamicBufferCount) during validation.
+            return aHasDynamicOffset;
+        }
+        if (aHasDynamicOffset) {
+            ASSERT(bHasDynamicOffset);
+            ASSERT(a.binding != b.binding);
+            // Above, we ensured that dynamic buffers are first. Now, ensure that
+            // dynamic buffer bindings are in increasing order. This is because dynamic
+            // buffer offsets are applied in increasing order of binding number.
             return a.binding < b.binding;
         }
+    }
 
-        // This is a utility function to help ASSERT that the BGL-binding comparator places buffers
-        // first.
-        bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
-            BindingIndex lastBufferIndex{0};
-            BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
-            for (BindingIndex i{0}; i < bindings.size(); ++i) {
-                if (bindings[i].bindingType == BindingInfoType::Buffer) {
-                    lastBufferIndex = std::max(i, lastBufferIndex);
-                } else {
-                    firstNonBufferIndex = std::min(i, firstNonBufferIndex);
-                }
+    // This applies some defaults and gives us a single value to check for the binding type.
+    BindingInfo aInfo = CreateBindGroupLayoutInfo(a);
+    BindingInfo bInfo = CreateBindGroupLayoutInfo(b);
+
+    // Sort by type.
+    if (aInfo.bindingType != bInfo.bindingType) {
+        return aInfo.bindingType < bInfo.bindingType;
+    }
+
+    if (a.visibility != b.visibility) {
+        return a.visibility < b.visibility;
+    }
+
+    switch (aInfo.bindingType) {
+        case BindingInfoType::Buffer:
+            if (aInfo.buffer.minBindingSize != bInfo.buffer.minBindingSize) {
+                return aInfo.buffer.minBindingSize < bInfo.buffer.minBindingSize;
             }
-
-            // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
-            // |firstNonBufferIndex| gets set to 0.
-            return firstNonBufferIndex >= lastBufferIndex;
-        }
-
-    }  // namespace
-
-    // BindGroupLayoutBase
-
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
-                                             const BindGroupLayoutDescriptor* descriptor,
-                                             PipelineCompatibilityToken pipelineCompatibilityToken,
-                                             ApiObjectBase::UntrackedByDeviceTag tag)
-        : ApiObjectBase(device, descriptor->label),
-          mPipelineCompatibilityToken(pipelineCompatibilityToken),
-          mUnexpandedBindingCount(descriptor->entryCount) {
-        std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
-            descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
-
-        std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
-
-        for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
-            const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
-
-            mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
-
-            if (IsBufferBinding(binding)) {
-                // Buffers must be contiguously packed at the start of the binding info.
-                ASSERT(GetBufferCount() == BindingIndex(i));
+            break;
+        case BindingInfoType::Sampler:
+            if (aInfo.sampler.type != bInfo.sampler.type) {
+                return aInfo.sampler.type < bInfo.sampler.type;
             }
-            IncrementBindingCounts(&mBindingCounts, binding);
-
-            const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
-            ASSERT(inserted);
-        }
-        ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
-        ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+            break;
+        case BindingInfoType::Texture:
+            if (aInfo.texture.multisampled != bInfo.texture.multisampled) {
+                return aInfo.texture.multisampled < bInfo.texture.multisampled;
+            }
+            if (aInfo.texture.viewDimension != bInfo.texture.viewDimension) {
+                return aInfo.texture.viewDimension < bInfo.texture.viewDimension;
+            }
+            if (aInfo.texture.sampleType != bInfo.texture.sampleType) {
+                return aInfo.texture.sampleType < bInfo.texture.sampleType;
+            }
+            break;
+        case BindingInfoType::StorageTexture:
+            if (aInfo.storageTexture.access != bInfo.storageTexture.access) {
+                return aInfo.storageTexture.access < bInfo.storageTexture.access;
+            }
+            if (aInfo.storageTexture.viewDimension != bInfo.storageTexture.viewDimension) {
+                return aInfo.storageTexture.viewDimension < bInfo.storageTexture.viewDimension;
+            }
+            if (aInfo.storageTexture.format != bInfo.storageTexture.format) {
+                return aInfo.storageTexture.format < bInfo.storageTexture.format;
+            }
+            break;
+        case BindingInfoType::ExternalTexture:
+            break;
     }
+    return a.binding < b.binding;
+}
 
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
-                                             const BindGroupLayoutDescriptor* descriptor,
-                                             PipelineCompatibilityToken pipelineCompatibilityToken)
-        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
-        TrackInDevice();
-    }
-
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
-
-    BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
-        : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
-
-    BindGroupLayoutBase::~BindGroupLayoutBase() = default;
-
-    void BindGroupLayoutBase::DestroyImpl() {
-        if (IsCachedReference()) {
-            // Do not uncache the actual cached object if we are a blueprint.
-            GetDevice()->UncacheBindGroupLayout(this);
+// This is a utility function to help ASSERT that the BGL-binding comparator places buffers
+// first.
+bool CheckBufferBindingsFirst(ityp::span<BindingIndex, const BindingInfo> bindings) {
+    BindingIndex lastBufferIndex{0};
+    BindingIndex firstNonBufferIndex = std::numeric_limits<BindingIndex>::max();
+    for (BindingIndex i{0}; i < bindings.size(); ++i) {
+        if (bindings[i].bindingType == BindingInfoType::Buffer) {
+            lastBufferIndex = std::max(i, lastBufferIndex);
+        } else {
+            firstNonBufferIndex = std::min(i, firstNonBufferIndex);
         }
     }
 
-    // static
-    BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
-        return new BindGroupLayoutBase(device, ObjectBase::kError);
-    }
+    // If there are no buffers, then |lastBufferIndex| is initialized to 0 and
+    // |firstNonBufferIndex| gets set to 0.
+    return firstNonBufferIndex >= lastBufferIndex;
+}
 
-    ObjectType BindGroupLayoutBase::GetType() const {
-        return ObjectType::BindGroupLayout;
-    }
+}  // namespace
 
-    const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
-        ASSERT(!IsError());
-        return mBindingMap;
-    }
+// BindGroupLayoutBase
 
-    bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
-        return mBindingMap.count(bindingNumber) != 0;
-    }
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+                                         const BindGroupLayoutDescriptor* descriptor,
+                                         PipelineCompatibilityToken pipelineCompatibilityToken,
+                                         ApiObjectBase::UntrackedByDeviceTag tag)
+    : ApiObjectBase(device, descriptor->label),
+      mPipelineCompatibilityToken(pipelineCompatibilityToken),
+      mUnexpandedBindingCount(descriptor->entryCount) {
+    std::vector<BindGroupLayoutEntry> sortedBindings = ExtractAndExpandBglEntries(
+        descriptor, &mBindingCounts, &mExternalTextureBindingExpansionMap);
 
-    BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
-        ASSERT(!IsError());
-        const auto& it = mBindingMap.find(bindingNumber);
-        ASSERT(it != mBindingMap.end());
-        return it->second;
-    }
+    std::sort(sortedBindings.begin(), sortedBindings.end(), SortBindingsCompare);
 
-    size_t BindGroupLayoutBase::ComputeContentHash() {
-        ObjectContentHasher recorder;
-        recorder.Record(mPipelineCompatibilityToken);
+    for (uint32_t i = 0; i < sortedBindings.size(); ++i) {
+        const BindGroupLayoutEntry& binding = sortedBindings[static_cast<uint32_t>(i)];
 
-        // std::map is sorted by key, so two BGLs constructed in different orders
-        // will still record the same.
-        for (const auto [id, index] : mBindingMap) {
-            recorder.Record(id, index);
+        mBindingInfo.push_back(CreateBindGroupLayoutInfo(binding));
 
-            const BindingInfo& info = mBindingInfo[index];
-            recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
-                            info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
-                            info.texture.sampleType, info.texture.viewDimension,
-                            info.texture.multisampled, info.storageTexture.access,
-                            info.storageTexture.format, info.storageTexture.viewDimension);
+        if (IsBufferBinding(binding)) {
+            // Buffers must be contiguously packed at the start of the binding info.
+            ASSERT(GetBufferCount() == BindingIndex(i));
         }
+        IncrementBindingCounts(&mBindingCounts, binding);
 
-        return recorder.GetContentHash();
+        const auto& [_, inserted] = mBindingMap.emplace(BindingNumber(binding.binding), i);
+        ASSERT(inserted);
+    }
+    ASSERT(CheckBufferBindingsFirst({mBindingInfo.data(), GetBindingCount()}));
+    ASSERT(mBindingInfo.size() <= kMaxBindingsPerPipelineLayoutTyped);
+}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device,
+                                         const BindGroupLayoutDescriptor* descriptor,
+                                         PipelineCompatibilityToken pipelineCompatibilityToken)
+    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken, kUntrackedByDevice) {
+    TrackInDevice();
+}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+BindGroupLayoutBase::BindGroupLayoutBase(DeviceBase* device)
+    : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+BindGroupLayoutBase::~BindGroupLayoutBase() = default;
+
+void BindGroupLayoutBase::DestroyImpl() {
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheBindGroupLayout(this);
+    }
+}
+
+// static
+BindGroupLayoutBase* BindGroupLayoutBase::MakeError(DeviceBase* device) {
+    return new BindGroupLayoutBase(device, ObjectBase::kError);
+}
+
+ObjectType BindGroupLayoutBase::GetType() const {
+    return ObjectType::BindGroupLayout;
+}
+
+const BindGroupLayoutBase::BindingMap& BindGroupLayoutBase::GetBindingMap() const {
+    ASSERT(!IsError());
+    return mBindingMap;
+}
+
+bool BindGroupLayoutBase::HasBinding(BindingNumber bindingNumber) const {
+    return mBindingMap.count(bindingNumber) != 0;
+}
+
+BindingIndex BindGroupLayoutBase::GetBindingIndex(BindingNumber bindingNumber) const {
+    ASSERT(!IsError());
+    const auto& it = mBindingMap.find(bindingNumber);
+    ASSERT(it != mBindingMap.end());
+    return it->second;
+}
+
+size_t BindGroupLayoutBase::ComputeContentHash() {
+    ObjectContentHasher recorder;
+    recorder.Record(mPipelineCompatibilityToken);
+
+    // std::map is sorted by key, so two BGLs constructed in different orders
+    // will still record the same.
+    for (const auto [id, index] : mBindingMap) {
+        recorder.Record(id, index);
+
+        const BindingInfo& info = mBindingInfo[index];
+        recorder.Record(info.buffer.hasDynamicOffset, info.visibility, info.bindingType,
+                        info.buffer.type, info.buffer.minBindingSize, info.sampler.type,
+                        info.texture.sampleType, info.texture.viewDimension,
+                        info.texture.multisampled, info.storageTexture.access,
+                        info.storageTexture.format, info.storageTexture.viewDimension);
     }
 
-    bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
-                                                       const BindGroupLayoutBase* b) const {
-        return a->IsLayoutEqual(b);
-    }
+    return recorder.GetContentHash();
+}
 
-    BindingIndex BindGroupLayoutBase::GetBindingCount() const {
-        return mBindingInfo.size();
-    }
+bool BindGroupLayoutBase::EqualityFunc::operator()(const BindGroupLayoutBase* a,
+                                                   const BindGroupLayoutBase* b) const {
+    return a->IsLayoutEqual(b);
+}
 
-    BindingIndex BindGroupLayoutBase::GetBufferCount() const {
-        return BindingIndex(mBindingCounts.bufferCount);
-    }
+BindingIndex BindGroupLayoutBase::GetBindingCount() const {
+    return mBindingInfo.size();
+}
 
-    BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
-        // This is a binding index because dynamic buffers are packed at the front of the binding
-        // info.
-        return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
-                                         mBindingCounts.dynamicUniformBufferCount);
-    }
+BindingIndex BindGroupLayoutBase::GetBufferCount() const {
+    return BindingIndex(mBindingCounts.bufferCount);
+}
 
-    uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
-        return mBindingCounts.unverifiedBufferCount;
-    }
+BindingIndex BindGroupLayoutBase::GetDynamicBufferCount() const {
+    // This is a binding index because dynamic buffers are packed at the front of the binding
+    // info.
+    return static_cast<BindingIndex>(mBindingCounts.dynamicStorageBufferCount +
+                                     mBindingCounts.dynamicUniformBufferCount);
+}
 
-    uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
-        return mExternalTextureBindingExpansionMap.size();
-    }
+uint32_t BindGroupLayoutBase::GetUnverifiedBufferCount() const {
+    return mBindingCounts.unverifiedBufferCount;
+}
 
-    const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
-        return mBindingCounts;
-    }
+uint32_t BindGroupLayoutBase::GetExternalTextureBindingCount() const {
+    return mExternalTextureBindingExpansionMap.size();
+}
 
-    const ExternalTextureBindingExpansionMap&
-    BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
-        return mExternalTextureBindingExpansionMap;
-    }
+const BindingCounts& BindGroupLayoutBase::GetBindingCountInfo() const {
+    return mBindingCounts;
+}
 
-    uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
-        return mUnexpandedBindingCount;
-    }
+const ExternalTextureBindingExpansionMap&
+BindGroupLayoutBase::GetExternalTextureBindingExpansionMap() const {
+    return mExternalTextureBindingExpansionMap;
+}
 
-    bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
-                                            bool excludePipelineCompatibiltyToken) const {
-        if (!excludePipelineCompatibiltyToken &&
-            GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+uint32_t BindGroupLayoutBase::GetUnexpandedBindingCount() const {
+    return mUnexpandedBindingCount;
+}
+
+bool BindGroupLayoutBase::IsLayoutEqual(const BindGroupLayoutBase* other,
+                                        bool excludePipelineCompatibiltyToken) const {
+    if (!excludePipelineCompatibiltyToken &&
+        GetPipelineCompatibilityToken() != other->GetPipelineCompatibilityToken()) {
+        return false;
+    }
+    if (GetBindingCount() != other->GetBindingCount()) {
+        return false;
+    }
+    for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
+        if (mBindingInfo[i] != other->mBindingInfo[i]) {
             return false;
         }
-        if (GetBindingCount() != other->GetBindingCount()) {
+    }
+    return mBindingMap == other->mBindingMap;
+}
+
+PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
+    return mPipelineCompatibilityToken;
+}
+
+size_t BindGroupLayoutBase::GetBindingDataSize() const {
+    // | ------ buffer-specific ----------| ------------ object pointers -------------|
+    // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
+    // Followed by:
+    // |---------buffer size array--------|
+    // |-uint64_t[mUnverifiedBufferCount]-|
+    size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
+    ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
+    size_t bufferSizeArrayStart = Align(
+        objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>), sizeof(uint64_t));
+    ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
+    return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
+}
+
+BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
+    void* dataStart) const {
+    BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
+    auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
+    uint64_t* unverifiedBufferSizes = AlignPtr(
+        reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
+
+    ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
+    ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
+    ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
+
+    return {{bufferData, GetBufferCount()},
+            {bindings, GetBindingCount()},
+            {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
+}
+
+bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
+    ASSERT(bindingIndex < GetBufferCount());
+    switch (GetBindingInfo(bindingIndex).buffer.type) {
+        case wgpu::BufferBindingType::Uniform:
             return false;
-        }
-        for (BindingIndex i{0}; i < GetBindingCount(); ++i) {
-            if (mBindingInfo[i] != other->mBindingInfo[i]) {
-                return false;
-            }
-        }
-        return mBindingMap == other->mBindingMap;
+        case kInternalStorageBufferBinding:
+        case wgpu::BufferBindingType::Storage:
+        case wgpu::BufferBindingType::ReadOnlyStorage:
+            return true;
+        case wgpu::BufferBindingType::Undefined:
+            break;
     }
+    UNREACHABLE();
+}
 
-    PipelineCompatibilityToken BindGroupLayoutBase::GetPipelineCompatibilityToken() const {
-        return mPipelineCompatibilityToken;
+std::string BindGroupLayoutBase::EntriesToString() const {
+    std::string entries = "[";
+    std::string sep = "";
+    const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
+    for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+        const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+        entries += absl::StrFormat("%s%s", sep, bindingInfo);
+        sep = ", ";
     }
-
-    size_t BindGroupLayoutBase::GetBindingDataSize() const {
-        // | ------ buffer-specific ----------| ------------ object pointers -------------|
-        // | --- offsets + sizes -------------| --------------- Ref<ObjectBase> ----------|
-        // Followed by:
-        // |---------buffer size array--------|
-        // |-uint64_t[mUnverifiedBufferCount]-|
-        size_t objectPointerStart = mBindingCounts.bufferCount * sizeof(BufferBindingData);
-        ASSERT(IsAligned(objectPointerStart, alignof(Ref<ObjectBase>)));
-        size_t bufferSizeArrayStart =
-            Align(objectPointerStart + mBindingCounts.totalCount * sizeof(Ref<ObjectBase>),
-                  sizeof(uint64_t));
-        ASSERT(IsAligned(bufferSizeArrayStart, alignof(uint64_t)));
-        return bufferSizeArrayStart + mBindingCounts.unverifiedBufferCount * sizeof(uint64_t);
-    }
-
-    BindGroupLayoutBase::BindingDataPointers BindGroupLayoutBase::ComputeBindingDataPointers(
-        void* dataStart) const {
-        BufferBindingData* bufferData = reinterpret_cast<BufferBindingData*>(dataStart);
-        auto bindings = reinterpret_cast<Ref<ObjectBase>*>(bufferData + mBindingCounts.bufferCount);
-        uint64_t* unverifiedBufferSizes = AlignPtr(
-            reinterpret_cast<uint64_t*>(bindings + mBindingCounts.totalCount), sizeof(uint64_t));
-
-        ASSERT(IsPtrAligned(bufferData, alignof(BufferBindingData)));
-        ASSERT(IsPtrAligned(bindings, alignof(Ref<ObjectBase>)));
-        ASSERT(IsPtrAligned(unverifiedBufferSizes, alignof(uint64_t)));
-
-        return {{bufferData, GetBufferCount()},
-                {bindings, GetBindingCount()},
-                {unverifiedBufferSizes, mBindingCounts.unverifiedBufferCount}};
-    }
-
-    bool BindGroupLayoutBase::IsStorageBufferBinding(BindingIndex bindingIndex) const {
-        ASSERT(bindingIndex < GetBufferCount());
-        switch (GetBindingInfo(bindingIndex).buffer.type) {
-            case wgpu::BufferBindingType::Uniform:
-                return false;
-            case kInternalStorageBufferBinding:
-            case wgpu::BufferBindingType::Storage:
-            case wgpu::BufferBindingType::ReadOnlyStorage:
-                return true;
-            case wgpu::BufferBindingType::Undefined:
-                break;
-        }
-        UNREACHABLE();
-    }
-
-    std::string BindGroupLayoutBase::EntriesToString() const {
-        std::string entries = "[";
-        std::string sep = "";
-        const BindGroupLayoutBase::BindingMap& bindingMap = GetBindingMap();
-        for (const auto [bindingNumber, bindingIndex] : bindingMap) {
-            const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
-            entries += absl::StrFormat("%s%s", sep, bindingInfo);
-            sep = ", ";
-        }
-        entries += "]";
-        return entries;
-    }
+    entries += "]";
+    return entries;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BindGroupLayout.h b/src/dawn/native/BindGroupLayout.h
index 3b909d1..a218877 100644
--- a/src/dawn/native/BindGroupLayout.h
+++ b/src/dawn/native/BindGroupLayout.h
@@ -34,139 +34,137 @@
 #include "dawn/native/dawn_platform.h"
 
 namespace dawn::native {
-    // TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
-    struct ExternalTextureBindingExpansion {
-        BindingNumber plane0;
-        BindingNumber plane1;
-        BindingNumber params;
+// TODO(dawn:1082): Minor optimization to use BindingIndex instead of BindingNumber
+struct ExternalTextureBindingExpansion {
+    BindingNumber plane0;
+    BindingNumber plane1;
+    BindingNumber params;
+};
+
+using ExternalTextureBindingExpansionMap = std::map<BindingNumber, ExternalTextureBindingExpansion>;
+
+MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
+                                             const BindGroupLayoutDescriptor* descriptor,
+                                             bool allowInternalBinding = false);
+
+// Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
+// These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
+// into a packed range of |BindingIndex| integers.
+class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
+  public:
+    BindGroupLayoutBase(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken,
+                        ApiObjectBase::UntrackedByDeviceTag tag);
+    BindGroupLayoutBase(DeviceBase* device,
+                        const BindGroupLayoutDescriptor* descriptor,
+                        PipelineCompatibilityToken pipelineCompatibilityToken);
+    ~BindGroupLayoutBase() override;
+
+    static BindGroupLayoutBase* MakeError(DeviceBase* device);
+
+    ObjectType GetType() const override;
+
+    // A map from the BindingNumber to its packed BindingIndex.
+    using BindingMap = std::map<BindingNumber, BindingIndex>;
+
+    const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
+        ASSERT(!IsError());
+        ASSERT(bindingIndex < mBindingInfo.size());
+        return mBindingInfo[bindingIndex];
+    }
+    const BindingMap& GetBindingMap() const;
+    bool HasBinding(BindingNumber bindingNumber) const;
+    BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
+
+    // Functions necessary for the unordered_set<BGLBase*>-based cache.
+    size_t ComputeContentHash() override;
+
+    struct EqualityFunc {
+        bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
     };
 
-    using ExternalTextureBindingExpansionMap =
-        std::map<BindingNumber, ExternalTextureBindingExpansion>;
+    BindingIndex GetBindingCount() const;
+    // Returns |BindingIndex| because buffers are packed at the front.
+    BindingIndex GetBufferCount() const;
+    // Returns |BindingIndex| because dynamic buffers are packed at the front.
+    BindingIndex GetDynamicBufferCount() const;
+    uint32_t GetUnverifiedBufferCount() const;
 
-    MaybeError ValidateBindGroupLayoutDescriptor(DeviceBase* device,
-                                                 const BindGroupLayoutDescriptor* descriptor,
-                                                 bool allowInternalBinding = false);
+    // Used to get counts and validate them in pipeline layout creation. Other getters
+    // should be used to get typed integer counts.
+    const BindingCounts& GetBindingCountInfo() const;
 
-    // Bindings are specified as a |BindingNumber| in the BindGroupLayoutDescriptor.
-    // These numbers may be arbitrary and sparse. Internally, Dawn packs these numbers
-    // into a packed range of |BindingIndex| integers.
-    class BindGroupLayoutBase : public ApiObjectBase, public CachedObject {
-      public:
-        BindGroupLayoutBase(DeviceBase* device,
-                            const BindGroupLayoutDescriptor* descriptor,
-                            PipelineCompatibilityToken pipelineCompatibilityToken,
-                            ApiObjectBase::UntrackedByDeviceTag tag);
-        BindGroupLayoutBase(DeviceBase* device,
-                            const BindGroupLayoutDescriptor* descriptor,
-                            PipelineCompatibilityToken pipelineCompatibilityToken);
-        ~BindGroupLayoutBase() override;
+    uint32_t GetExternalTextureBindingCount() const;
 
-        static BindGroupLayoutBase* MakeError(DeviceBase* device);
+    // Used to specify unpacked external texture binding slots when transforming shader modules.
+    const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
 
-        ObjectType GetType() const override;
+    uint32_t GetUnexpandedBindingCount() const;
 
-        // A map from the BindingNumber to its packed BindingIndex.
-        using BindingMap = std::map<BindingNumber, BindingIndex>;
+    // Tests that the BindingInfo of two bind groups are equal,
+    // ignoring their compatibility groups.
+    bool IsLayoutEqual(const BindGroupLayoutBase* other,
+                       bool excludePipelineCompatibiltyToken = false) const;
+    PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
 
-        const BindingInfo& GetBindingInfo(BindingIndex bindingIndex) const {
-            ASSERT(!IsError());
-            ASSERT(bindingIndex < mBindingInfo.size());
-            return mBindingInfo[bindingIndex];
-        }
-        const BindingMap& GetBindingMap() const;
-        bool HasBinding(BindingNumber bindingNumber) const;
-        BindingIndex GetBindingIndex(BindingNumber bindingNumber) const;
-
-        // Functions necessary for the unordered_set<BGLBase*>-based cache.
-        size_t ComputeContentHash() override;
-
-        struct EqualityFunc {
-            bool operator()(const BindGroupLayoutBase* a, const BindGroupLayoutBase* b) const;
-        };
-
-        BindingIndex GetBindingCount() const;
-        // Returns |BindingIndex| because buffers are packed at the front.
-        BindingIndex GetBufferCount() const;
-        // Returns |BindingIndex| because dynamic buffers are packed at the front.
-        BindingIndex GetDynamicBufferCount() const;
-        uint32_t GetUnverifiedBufferCount() const;
-
-        // Used to get counts and validate them in pipeline layout creation. Other getters
-        // should be used to get typed integer counts.
-        const BindingCounts& GetBindingCountInfo() const;
-
-        uint32_t GetExternalTextureBindingCount() const;
-
-        // Used to specify unpacked external texture binding slots when transforming shader modules.
-        const ExternalTextureBindingExpansionMap& GetExternalTextureBindingExpansionMap() const;
-
-        uint32_t GetUnexpandedBindingCount() const;
-
-        // Tests that the BindingInfo of two bind groups are equal,
-        // ignoring their compatibility groups.
-        bool IsLayoutEqual(const BindGroupLayoutBase* other,
-                           bool excludePipelineCompatibiltyToken = false) const;
-        PipelineCompatibilityToken GetPipelineCompatibilityToken() const;
-
-        struct BufferBindingData {
-            uint64_t offset;
-            uint64_t size;
-        };
-
-        struct BindingDataPointers {
-            ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
-            ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
-            ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
-        };
-
-        // Compute the amount of space / alignment required to store bindings for a bind group of
-        // this layout.
-        size_t GetBindingDataSize() const;
-        static constexpr size_t GetBindingDataAlignment() {
-            static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
-            return alignof(BufferBindingData);
-        }
-
-        BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
-
-        bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
-
-        // Returns a detailed string representation of the layout entries for use in error messages.
-        std::string EntriesToString() const;
-
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit BindGroupLayoutBase(DeviceBase* device);
-        void DestroyImpl() override;
-
-        template <typename BindGroup>
-        SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
-            return SlabAllocator<BindGroup>(
-                size,  // bytes
-                Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(),  // size
-                std::max(alignof(BindGroup), GetBindingDataAlignment())  // alignment
-            );
-        }
-
-      private:
-        BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
-        BindingCounts mBindingCounts = {};
-        ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
-
-        // Map from BindGroupLayoutEntry.binding to packed indices.
-        BindingMap mBindingMap;
-
-        ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
-
-        // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
-        const PipelineCompatibilityToken mPipelineCompatibilityToken =
-            PipelineCompatibilityToken(0);
-
-        uint32_t mUnexpandedBindingCount;
+    struct BufferBindingData {
+        uint64_t offset;
+        uint64_t size;
     };
 
+    struct BindingDataPointers {
+        ityp::span<BindingIndex, BufferBindingData> const bufferData = {};
+        ityp::span<BindingIndex, Ref<ObjectBase>> const bindings = {};
+        ityp::span<uint32_t, uint64_t> const unverifiedBufferSizes = {};
+    };
+
+    // Compute the amount of space / alignment required to store bindings for a bind group of
+    // this layout.
+    size_t GetBindingDataSize() const;
+    static constexpr size_t GetBindingDataAlignment() {
+        static_assert(alignof(Ref<ObjectBase>) <= alignof(BufferBindingData));
+        return alignof(BufferBindingData);
+    }
+
+    BindingDataPointers ComputeBindingDataPointers(void* dataStart) const;
+
+    bool IsStorageBufferBinding(BindingIndex bindingIndex) const;
+
+    // Returns a detailed string representation of the layout entries for use in error messages.
+    std::string EntriesToString() const;
+
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit BindGroupLayoutBase(DeviceBase* device);
+    void DestroyImpl() override;
+
+    template <typename BindGroup>
+    SlabAllocator<BindGroup> MakeFrontendBindGroupAllocator(size_t size) {
+        return SlabAllocator<BindGroup>(
+            size,                                                                        // bytes
+            Align(sizeof(BindGroup), GetBindingDataAlignment()) + GetBindingDataSize(),  // size
+            std::max(alignof(BindGroup), GetBindingDataAlignment())  // alignment
+        );
+    }
+
+  private:
+    BindGroupLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+    BindingCounts mBindingCounts = {};
+    ityp::vector<BindingIndex, BindingInfo> mBindingInfo;
+
+    // Map from BindGroupLayoutEntry.binding to packed indices.
+    BindingMap mBindingMap;
+
+    ExternalTextureBindingExpansionMap mExternalTextureBindingExpansionMap;
+
+    // Non-0 if this BindGroupLayout was created as part of a default PipelineLayout.
+    const PipelineCompatibilityToken mPipelineCompatibilityToken = PipelineCompatibilityToken(0);
+
+    uint32_t mUnexpandedBindingCount;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_BINDGROUPLAYOUT_H_
diff --git a/src/dawn/native/BindGroupTracker.h b/src/dawn/native/BindGroupTracker.h
index ce081e9..cd8254c 100644
--- a/src/dawn/native/BindGroupTracker.h
+++ b/src/dawn/native/BindGroupTracker.h
@@ -25,119 +25,117 @@
 
 namespace dawn::native {
 
-    // Keeps track of the dirty bind groups so they can be lazily applied when we know the
-    // pipeline state or it changes.
-    // |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
-    // in other backends.
-    template <bool CanInheritBindGroups, typename DynamicOffset>
-    class BindGroupTrackerBase {
-      public:
-        void OnSetBindGroup(BindGroupIndex index,
-                            BindGroupBase* bindGroup,
-                            uint32_t dynamicOffsetCount,
-                            uint32_t* dynamicOffsets) {
-            ASSERT(index < kMaxBindGroupsTyped);
+// Keeps track of the dirty bind groups so they can be lazily applied when we know the
+// pipeline state or it changes.
+// |DynamicOffset| is a template parameter because offsets in Vulkan are uint32_t but uint64_t
+// in other backends.
+template <bool CanInheritBindGroups, typename DynamicOffset>
+class BindGroupTrackerBase {
+  public:
+    void OnSetBindGroup(BindGroupIndex index,
+                        BindGroupBase* bindGroup,
+                        uint32_t dynamicOffsetCount,
+                        uint32_t* dynamicOffsets) {
+        ASSERT(index < kMaxBindGroupsTyped);
 
-            if (mBindGroupLayoutsMask[index]) {
-                // It is okay to only dirty bind groups that are used by the current pipeline
-                // layout. If the pipeline layout changes, then the bind groups it uses will
-                // become dirty.
+        if (mBindGroupLayoutsMask[index]) {
+            // It is okay to only dirty bind groups that are used by the current pipeline
+            // layout. If the pipeline layout changes, then the bind groups it uses will
+            // become dirty.
 
-                if (mBindGroups[index] != bindGroup) {
-                    mDirtyBindGroups.set(index);
-                    mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
-                }
-
-                if (dynamicOffsetCount > 0) {
-                    mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
-                }
+            if (mBindGroups[index] != bindGroup) {
+                mDirtyBindGroups.set(index);
+                mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
             }
 
-            mBindGroups[index] = bindGroup;
-            mDynamicOffsetCounts[index] = dynamicOffsetCount;
-            SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
-        }
-
-        void OnSetPipeline(PipelineBase* pipeline) {
-            mPipelineLayout = pipeline->GetLayout();
-        }
-
-      protected:
-        // The Derived class should call this before it applies bind groups.
-        void BeforeApply() {
-            if (mLastAppliedPipelineLayout == mPipelineLayout) {
-                return;
-            }
-
-            // Use the bind group layout mask to avoid marking unused bind groups as dirty.
-            mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
-
-            // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
-            // the first |k| matching bind groups may be inherited.
-            if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
-                // Dirty bind groups that cannot be inherited.
-                BindGroupLayoutMask dirtiedGroups =
-                    ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
-
-                mDirtyBindGroups |= dirtiedGroups;
-                mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
-
-                // Clear any bind groups not in the mask.
-                mDirtyBindGroups &= mBindGroupLayoutsMask;
-                mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
-            } else {
-                mDirtyBindGroups = mBindGroupLayoutsMask;
-                mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
-            }
-        }
-
-        // The Derived class should call this after it applies bind groups.
-        void AfterApply() {
-            // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
-            // will be dirtied again by the next pipeline change.
-            mDirtyBindGroups.reset();
-            mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
-            // Keep track of the last applied pipeline layout. This allows us to avoid computing
-            // the intersection of the dirty bind groups and bind group layout mask in next Draw
-            // or Dispatch (which is very hot code) until the layout is changed again.
-            mLastAppliedPipelineLayout = mPipelineLayout;
-        }
-
-        BindGroupLayoutMask mDirtyBindGroups = 0;
-        BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
-        BindGroupLayoutMask mBindGroupLayoutsMask = 0;
-        ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
-        ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
-        ityp::array<BindGroupIndex,
-                    std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
-                    kMaxBindGroups>
-            mDynamicOffsets = {};
-
-        // |mPipelineLayout| is the current pipeline layout set on the command buffer.
-        // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
-        // to the bind group bindings.
-        PipelineLayoutBase* mPipelineLayout = nullptr;
-        PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
-
-      private:
-        // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
-        // in other backends.
-        static void SetDynamicOffsets(uint64_t* data,
-                                      uint32_t dynamicOffsetCount,
-                                      uint32_t* dynamicOffsets) {
-            for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
-                data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
-            }
-        }
-
-        static void SetDynamicOffsets(uint32_t* data,
-                                      uint32_t dynamicOffsetCount,
-                                      uint32_t* dynamicOffsets) {
             if (dynamicOffsetCount > 0) {
-                memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
+                mDirtyBindGroupsObjectChangedOrIsDynamic.set(index);
             }
         }
-    };
+
+        mBindGroups[index] = bindGroup;
+        mDynamicOffsetCounts[index] = dynamicOffsetCount;
+        SetDynamicOffsets(mDynamicOffsets[index].data(), dynamicOffsetCount, dynamicOffsets);
+    }
+
+    void OnSetPipeline(PipelineBase* pipeline) { mPipelineLayout = pipeline->GetLayout(); }
+
+  protected:
+    // The Derived class should call this before it applies bind groups.
+    void BeforeApply() {
+        if (mLastAppliedPipelineLayout == mPipelineLayout) {
+            return;
+        }
+
+        // Use the bind group layout mask to avoid marking unused bind groups as dirty.
+        mBindGroupLayoutsMask = mPipelineLayout->GetBindGroupLayoutsMask();
+
+        // Changing the pipeline layout sets bind groups as dirty. If CanInheritBindGroups,
+        // the first |k| matching bind groups may be inherited.
+        if (CanInheritBindGroups && mLastAppliedPipelineLayout != nullptr) {
+            // Dirty bind groups that cannot be inherited.
+            BindGroupLayoutMask dirtiedGroups =
+                ~mPipelineLayout->InheritedGroupsMask(mLastAppliedPipelineLayout);
+
+            mDirtyBindGroups |= dirtiedGroups;
+            mDirtyBindGroupsObjectChangedOrIsDynamic |= dirtiedGroups;
+
+            // Clear any bind groups not in the mask.
+            mDirtyBindGroups &= mBindGroupLayoutsMask;
+            mDirtyBindGroupsObjectChangedOrIsDynamic &= mBindGroupLayoutsMask;
+        } else {
+            mDirtyBindGroups = mBindGroupLayoutsMask;
+            mDirtyBindGroupsObjectChangedOrIsDynamic = mBindGroupLayoutsMask;
+        }
+    }
+
+    // The Derived class should call this after it applies bind groups.
+    void AfterApply() {
+        // Reset all dirty bind groups. Dirty bind groups not in the bind group layout mask
+        // will be dirtied again by the next pipeline change.
+        mDirtyBindGroups.reset();
+        mDirtyBindGroupsObjectChangedOrIsDynamic.reset();
+        // Keep track of the last applied pipeline layout. This allows us to avoid computing
+        // the intersection of the dirty bind groups and bind group layout mask in next Draw
+        // or Dispatch (which is very hot code) until the layout is changed again.
+        mLastAppliedPipelineLayout = mPipelineLayout;
+    }
+
+    BindGroupLayoutMask mDirtyBindGroups = 0;
+    BindGroupLayoutMask mDirtyBindGroupsObjectChangedOrIsDynamic = 0;
+    BindGroupLayoutMask mBindGroupLayoutsMask = 0;
+    ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindGroups = {};
+    ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mDynamicOffsetCounts = {};
+    ityp::array<BindGroupIndex,
+                std::array<DynamicOffset, kMaxDynamicBuffersPerPipelineLayout>,
+                kMaxBindGroups>
+        mDynamicOffsets = {};
+
+    // |mPipelineLayout| is the current pipeline layout set on the command buffer.
+    // |mLastAppliedPipelineLayout| is the last pipeline layout for which we applied changes
+    // to the bind group bindings.
+    PipelineLayoutBase* mPipelineLayout = nullptr;
+    PipelineLayoutBase* mLastAppliedPipelineLayout = nullptr;
+
+  private:
+    // We have two overloads here because offsets in Vulkan are uint32_t but uint64_t
+    // in other backends.
+    static void SetDynamicOffsets(uint64_t* data,
+                                  uint32_t dynamicOffsetCount,
+                                  uint32_t* dynamicOffsets) {
+        for (uint32_t i = 0; i < dynamicOffsetCount; ++i) {
+            data[i] = static_cast<uint64_t>(dynamicOffsets[i]);
+        }
+    }
+
+    static void SetDynamicOffsets(uint32_t* data,
+                                  uint32_t dynamicOffsetCount,
+                                  uint32_t* dynamicOffsets) {
+        if (dynamicOffsetCount > 0) {
+            memcpy(data, dynamicOffsets, sizeof(uint32_t) * dynamicOffsetCount);
+        }
+    }
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BindingInfo.cpp b/src/dawn/native/BindingInfo.cpp
index 009735c..1d4b60d 100644
--- a/src/dawn/native/BindingInfo.cpp
+++ b/src/dawn/native/BindingInfo.cpp
@@ -18,178 +18,172 @@
 
 namespace dawn::native {
 
-    void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
-        bindingCounts->totalCount += 1;
+void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry) {
+    bindingCounts->totalCount += 1;
 
-        uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
+    uint32_t PerStageBindingCounts::*perStageBindingCountMember = nullptr;
 
-        if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
-            ++bindingCounts->bufferCount;
-            const BufferBindingLayout& buffer = entry.buffer;
+    if (entry.buffer.type != wgpu::BufferBindingType::Undefined) {
+        ++bindingCounts->bufferCount;
+        const BufferBindingLayout& buffer = entry.buffer;
 
-            if (buffer.minBindingSize == 0) {
-                ++bindingCounts->unverifiedBufferCount;
-            }
-
-            switch (buffer.type) {
-                case wgpu::BufferBindingType::Uniform:
-                    if (buffer.hasDynamicOffset) {
-                        ++bindingCounts->dynamicUniformBufferCount;
-                    }
-                    perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
-                    break;
-
-                case wgpu::BufferBindingType::Storage:
-                case kInternalStorageBufferBinding:
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                    if (buffer.hasDynamicOffset) {
-                        ++bindingCounts->dynamicStorageBufferCount;
-                    }
-                    perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
-                    break;
-
-                case wgpu::BufferBindingType::Undefined:
-                    // Can't get here due to the enclosing if statement.
-                    UNREACHABLE();
-                    break;
-            }
-        } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
-            perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
-        } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
-            perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
-        } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
-            perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
-        } else {
-            const ExternalTextureBindingLayout* externalTextureBindingLayout;
-            FindInChain(entry.nextInChain, &externalTextureBindingLayout);
-            if (externalTextureBindingLayout != nullptr) {
-                perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
-            }
+        if (buffer.minBindingSize == 0) {
+            ++bindingCounts->unverifiedBufferCount;
         }
 
-        ASSERT(perStageBindingCountMember != nullptr);
-        for (SingleShaderStage stage : IterateStages(entry.visibility)) {
-            ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
+        switch (buffer.type) {
+            case wgpu::BufferBindingType::Uniform:
+                if (buffer.hasDynamicOffset) {
+                    ++bindingCounts->dynamicUniformBufferCount;
+                }
+                perStageBindingCountMember = &PerStageBindingCounts::uniformBufferCount;
+                break;
+
+            case wgpu::BufferBindingType::Storage:
+            case kInternalStorageBufferBinding:
+            case wgpu::BufferBindingType::ReadOnlyStorage:
+                if (buffer.hasDynamicOffset) {
+                    ++bindingCounts->dynamicStorageBufferCount;
+                }
+                perStageBindingCountMember = &PerStageBindingCounts::storageBufferCount;
+                break;
+
+            case wgpu::BufferBindingType::Undefined:
+                // Can't get here due to the enclosing if statement.
+                UNREACHABLE();
+                break;
+        }
+    } else if (entry.sampler.type != wgpu::SamplerBindingType::Undefined) {
+        perStageBindingCountMember = &PerStageBindingCounts::samplerCount;
+    } else if (entry.texture.sampleType != wgpu::TextureSampleType::Undefined) {
+        perStageBindingCountMember = &PerStageBindingCounts::sampledTextureCount;
+    } else if (entry.storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+        perStageBindingCountMember = &PerStageBindingCounts::storageTextureCount;
+    } else {
+        const ExternalTextureBindingLayout* externalTextureBindingLayout;
+        FindInChain(entry.nextInChain, &externalTextureBindingLayout);
+        if (externalTextureBindingLayout != nullptr) {
+            perStageBindingCountMember = &PerStageBindingCounts::externalTextureCount;
         }
     }
 
-    void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
-        bindingCounts->totalCount += rhs.totalCount;
-        bindingCounts->bufferCount += rhs.bufferCount;
-        bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
-        bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
-        bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
-
-        for (SingleShaderStage stage : IterateStages(kAllStages)) {
-            bindingCounts->perStage[stage].sampledTextureCount +=
-                rhs.perStage[stage].sampledTextureCount;
-            bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
-            bindingCounts->perStage[stage].storageBufferCount +=
-                rhs.perStage[stage].storageBufferCount;
-            bindingCounts->perStage[stage].storageTextureCount +=
-                rhs.perStage[stage].storageTextureCount;
-            bindingCounts->perStage[stage].uniformBufferCount +=
-                rhs.perStage[stage].uniformBufferCount;
-            bindingCounts->perStage[stage].externalTextureCount +=
-                rhs.perStage[stage].externalTextureCount;
-        }
+    ASSERT(perStageBindingCountMember != nullptr);
+    for (SingleShaderStage stage : IterateStages(entry.visibility)) {
+        ++(bindingCounts->perStage[stage].*perStageBindingCountMember);
     }
+}
 
-    MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs) {
+    bindingCounts->totalCount += rhs.totalCount;
+    bindingCounts->bufferCount += rhs.bufferCount;
+    bindingCounts->unverifiedBufferCount += rhs.unverifiedBufferCount;
+    bindingCounts->dynamicUniformBufferCount += rhs.dynamicUniformBufferCount;
+    bindingCounts->dynamicStorageBufferCount += rhs.dynamicStorageBufferCount;
+
+    for (SingleShaderStage stage : IterateStages(kAllStages)) {
+        bindingCounts->perStage[stage].sampledTextureCount +=
+            rhs.perStage[stage].sampledTextureCount;
+        bindingCounts->perStage[stage].samplerCount += rhs.perStage[stage].samplerCount;
+        bindingCounts->perStage[stage].storageBufferCount += rhs.perStage[stage].storageBufferCount;
+        bindingCounts->perStage[stage].storageTextureCount +=
+            rhs.perStage[stage].storageTextureCount;
+        bindingCounts->perStage[stage].uniformBufferCount += rhs.perStage[stage].uniformBufferCount;
+        bindingCounts->perStage[stage].externalTextureCount +=
+            rhs.perStage[stage].externalTextureCount;
+    }
+}
+
+MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts) {
+    DAWN_INVALID_IF(
+        bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
+        "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
+        "limit (%u).",
+        bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+
+    DAWN_INVALID_IF(
+        bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
+        "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+        "limit (%u).",
+        bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
+
+    for (SingleShaderStage stage : IterateStages(kAllStages)) {
         DAWN_INVALID_IF(
-            bindingCounts.dynamicUniformBufferCount > kMaxDynamicUniformBuffersPerPipelineLayout,
-            "The number of dynamic uniform buffers (%u) exceeds the maximum per-pipeline-layout "
-            "limit (%u).",
-            bindingCounts.dynamicUniformBufferCount, kMaxDynamicUniformBuffersPerPipelineLayout);
+            bindingCounts.perStage[stage].sampledTextureCount > kMaxSampledTexturesPerShaderStage,
+            "The number of sampled textures (%u) in the %s stage exceeds the maximum "
+            "per-stage limit (%u).",
+            bindingCounts.perStage[stage].sampledTextureCount, stage,
+            kMaxSampledTexturesPerShaderStage);
+
+        // The per-stage number of external textures is bound by the maximum sampled textures
+        // per stage.
+        DAWN_INVALID_IF(bindingCounts.perStage[stage].externalTextureCount >
+                            kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
+                        "The number of external textures (%u) in the %s stage exceeds the maximum "
+                        "per-stage limit (%u).",
+                        bindingCounts.perStage[stage].externalTextureCount, stage,
+                        kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
 
         DAWN_INVALID_IF(
-            bindingCounts.dynamicStorageBufferCount > kMaxDynamicStorageBuffersPerPipelineLayout,
-            "The number of dynamic storage buffers (%u) exceeds the maximum per-pipeline-layout "
+            bindingCounts.perStage[stage].sampledTextureCount +
+                    (bindingCounts.perStage[stage].externalTextureCount *
+                     kSampledTexturesPerExternalTexture) >
+                kMaxSampledTexturesPerShaderStage,
+            "The combination of sampled textures (%u) and external textures (%u) in the %s "
+            "stage exceeds the maximum per-stage limit (%u).",
+            bindingCounts.perStage[stage].sampledTextureCount,
+            bindingCounts.perStage[stage].externalTextureCount, stage,
+            kMaxSampledTexturesPerShaderStage);
+
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
+            "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
+            "(%u).",
+            bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
+
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].samplerCount +
+                    (bindingCounts.perStage[stage].externalTextureCount *
+                     kSamplersPerExternalTexture) >
+                kMaxSamplersPerShaderStage,
+            "The combination of samplers (%u) and external textures (%u) in the %s stage "
+            "exceeds the maximum per-stage limit (%u).",
+            bindingCounts.perStage[stage].samplerCount,
+            bindingCounts.perStage[stage].externalTextureCount, stage, kMaxSamplersPerShaderStage);
+
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
+            "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
             "limit (%u).",
-            bindingCounts.dynamicStorageBufferCount, kMaxDynamicStorageBuffersPerPipelineLayout);
+            bindingCounts.perStage[stage].storageBufferCount, stage,
+            kMaxStorageBuffersPerShaderStage);
 
-        for (SingleShaderStage stage : IterateStages(kAllStages)) {
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].sampledTextureCount >
-                    kMaxSampledTexturesPerShaderStage,
-                "The number of sampled textures (%u) in the %s stage exceeds the maximum "
-                "per-stage limit (%u).",
-                bindingCounts.perStage[stage].sampledTextureCount, stage,
-                kMaxSampledTexturesPerShaderStage);
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].storageTextureCount > kMaxStorageTexturesPerShaderStage,
+            "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
+            "limit (%u).",
+            bindingCounts.perStage[stage].storageTextureCount, stage,
+            kMaxStorageTexturesPerShaderStage);
 
-            // The per-stage number of external textures is bound by the maximum sampled textures
-            // per stage.
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].externalTextureCount >
-                    kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture,
-                "The number of external textures (%u) in the %s stage exceeds the maximum "
-                "per-stage limit (%u).",
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxSampledTexturesPerShaderStage / kSampledTexturesPerExternalTexture);
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
+            "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
+            "limit (%u).",
+            bindingCounts.perStage[stage].uniformBufferCount, stage,
+            kMaxUniformBuffersPerShaderStage);
 
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].sampledTextureCount +
-                        (bindingCounts.perStage[stage].externalTextureCount *
-                         kSampledTexturesPerExternalTexture) >
-                    kMaxSampledTexturesPerShaderStage,
-                "The combination of sampled textures (%u) and external textures (%u) in the %s "
-                "stage exceeds the maximum per-stage limit (%u).",
-                bindingCounts.perStage[stage].sampledTextureCount,
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxSampledTexturesPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].samplerCount > kMaxSamplersPerShaderStage,
-                "The number of samplers (%u) in the %s stage exceeds the maximum per-stage limit "
-                "(%u).",
-                bindingCounts.perStage[stage].samplerCount, stage, kMaxSamplersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].samplerCount +
-                        (bindingCounts.perStage[stage].externalTextureCount *
-                         kSamplersPerExternalTexture) >
-                    kMaxSamplersPerShaderStage,
-                "The combination of samplers (%u) and external textures (%u) in the %s stage "
-                "exceeds the maximum per-stage limit (%u).",
-                bindingCounts.perStage[stage].samplerCount,
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxSamplersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].storageBufferCount > kMaxStorageBuffersPerShaderStage,
-                "The number of storage buffers (%u) in the %s stage exceeds the maximum per-stage "
-                "limit (%u).",
-                bindingCounts.perStage[stage].storageBufferCount, stage,
-                kMaxStorageBuffersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].storageTextureCount >
-                    kMaxStorageTexturesPerShaderStage,
-                "The number of storage textures (%u) in the %s stage exceeds the maximum per-stage "
-                "limit (%u).",
-                bindingCounts.perStage[stage].storageTextureCount, stage,
-                kMaxStorageTexturesPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].uniformBufferCount > kMaxUniformBuffersPerShaderStage,
-                "The number of uniform buffers (%u) in the %s stage exceeds the maximum per-stage "
-                "limit (%u).",
-                bindingCounts.perStage[stage].uniformBufferCount, stage,
-                kMaxUniformBuffersPerShaderStage);
-
-            DAWN_INVALID_IF(
-                bindingCounts.perStage[stage].uniformBufferCount +
-                        (bindingCounts.perStage[stage].externalTextureCount *
-                         kUniformsPerExternalTexture) >
-                    kMaxUniformBuffersPerShaderStage,
-                "The combination of uniform buffers (%u) and external textures (%u) in the %s "
-                "stage exceeds the maximum per-stage limit (%u).",
-                bindingCounts.perStage[stage].uniformBufferCount,
-                bindingCounts.perStage[stage].externalTextureCount, stage,
-                kMaxUniformBuffersPerShaderStage);
-        }
-
-        return {};
+        DAWN_INVALID_IF(
+            bindingCounts.perStage[stage].uniformBufferCount +
+                    (bindingCounts.perStage[stage].externalTextureCount *
+                     kUniformsPerExternalTexture) >
+                kMaxUniformBuffersPerShaderStage,
+            "The combination of uniform buffers (%u) and external textures (%u) in the %s "
+            "stage exceeds the maximum per-stage limit (%u).",
+            bindingCounts.perStage[stage].uniformBufferCount,
+            bindingCounts.perStage[stage].externalTextureCount, stage,
+            kMaxUniformBuffersPerShaderStage);
     }
 
+    return {};
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/BindingInfo.h b/src/dawn/native/BindingInfo.h
index 1798eb6..9d32b05 100644
--- a/src/dawn/native/BindingInfo.h
+++ b/src/dawn/native/BindingInfo.h
@@ -29,70 +29,70 @@
 
 namespace dawn::native {
 
-    // Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
-    static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
-        kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
+// Not a real WebGPU limit, but the sum of the two limits is useful for internal optimizations.
+static constexpr uint32_t kMaxDynamicBuffersPerPipelineLayout =
+    kMaxDynamicUniformBuffersPerPipelineLayout + kMaxDynamicStorageBuffersPerPipelineLayout;
 
-    static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
-        BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
+static constexpr BindingIndex kMaxDynamicBuffersPerPipelineLayoutTyped =
+    BindingIndex(kMaxDynamicBuffersPerPipelineLayout);
 
-    // Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
-    // API. There should never be more bindings than the max per stage, for each stage.
-    static constexpr uint32_t kMaxBindingsPerPipelineLayout =
-        3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
-             kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
-             kMaxUniformBuffersPerShaderStage);
+// Not a real WebGPU limit, but used to optimize parts of Dawn which expect valid usage of the
+// API. There should never be more bindings than the max per stage, for each stage.
+static constexpr uint32_t kMaxBindingsPerPipelineLayout =
+    3 * (kMaxSampledTexturesPerShaderStage + kMaxSamplersPerShaderStage +
+         kMaxStorageBuffersPerShaderStage + kMaxStorageTexturesPerShaderStage +
+         kMaxUniformBuffersPerShaderStage);
 
-    static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
-        BindingIndex(kMaxBindingsPerPipelineLayout);
+static constexpr BindingIndex kMaxBindingsPerPipelineLayoutTyped =
+    BindingIndex(kMaxBindingsPerPipelineLayout);
 
-    // TODO(enga): Figure out a good number for this.
-    static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
+// TODO(enga): Figure out a good number for this.
+static constexpr uint32_t kMaxOptimalBindingsPerGroup = 32;
 
-    enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
+enum class BindingInfoType { Buffer, Sampler, Texture, StorageTexture, ExternalTexture };
 
-    struct BindingInfo {
-        BindingNumber binding;
-        wgpu::ShaderStage visibility;
+struct BindingInfo {
+    BindingNumber binding;
+    wgpu::ShaderStage visibility;
 
-        BindingInfoType bindingType;
+    BindingInfoType bindingType;
 
-        // TODO(dawn:527): These four values could be made into a union.
-        BufferBindingLayout buffer;
-        SamplerBindingLayout sampler;
-        TextureBindingLayout texture;
-        StorageTextureBindingLayout storageTexture;
-    };
+    // TODO(dawn:527): These four values could be made into a union.
+    BufferBindingLayout buffer;
+    SamplerBindingLayout sampler;
+    TextureBindingLayout texture;
+    StorageTextureBindingLayout storageTexture;
+};
 
-    struct BindingSlot {
-        BindGroupIndex group;
-        BindingNumber binding;
-    };
+struct BindingSlot {
+    BindGroupIndex group;
+    BindingNumber binding;
+};
 
-    struct PerStageBindingCounts {
-        uint32_t sampledTextureCount;
-        uint32_t samplerCount;
-        uint32_t storageBufferCount;
-        uint32_t storageTextureCount;
-        uint32_t uniformBufferCount;
-        uint32_t externalTextureCount;
-    };
+struct PerStageBindingCounts {
+    uint32_t sampledTextureCount;
+    uint32_t samplerCount;
+    uint32_t storageBufferCount;
+    uint32_t storageTextureCount;
+    uint32_t uniformBufferCount;
+    uint32_t externalTextureCount;
+};
 
-    struct BindingCounts {
-        uint32_t totalCount;
-        uint32_t bufferCount;
-        uint32_t unverifiedBufferCount;  // Buffers with minimum buffer size unspecified
-        uint32_t dynamicUniformBufferCount;
-        uint32_t dynamicStorageBufferCount;
-        PerStage<PerStageBindingCounts> perStage;
-    };
+struct BindingCounts {
+    uint32_t totalCount;
+    uint32_t bufferCount;
+    uint32_t unverifiedBufferCount;  // Buffers with minimum buffer size unspecified
+    uint32_t dynamicUniformBufferCount;
+    uint32_t dynamicStorageBufferCount;
+    PerStage<PerStageBindingCounts> perStage;
+};
 
-    void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
-    void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
-    MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
+void IncrementBindingCounts(BindingCounts* bindingCounts, const BindGroupLayoutEntry& entry);
+void AccumulateBindingCounts(BindingCounts* bindingCounts, const BindingCounts& rhs);
+MaybeError ValidateBindingCounts(const BindingCounts& bindingCounts);
 
-    // For buffer size validation
-    using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
+// For buffer size validation
+using RequiredBufferSizes = ityp::array<BindGroupIndex, std::vector<uint64_t>, kMaxBindGroups>;
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BlobCache.cpp b/src/dawn/native/BlobCache.cpp
index c566695..9e27d2d 100644
--- a/src/dawn/native/BlobCache.cpp
+++ b/src/dawn/native/BlobCache.cpp
@@ -21,73 +21,72 @@
 
 namespace dawn::native {
 
-    CachedBlob::CachedBlob(size_t size) {
-        if (size != 0) {
-            Reset(size);
-        }
+CachedBlob::CachedBlob(size_t size) {
+    if (size != 0) {
+        Reset(size);
     }
+}
 
-    bool CachedBlob::Empty() const {
-        return mSize == 0;
-    }
+bool CachedBlob::Empty() const {
+    return mSize == 0;
+}
 
-    const uint8_t* CachedBlob::Data() const {
-        return mData.get();
-    }
+const uint8_t* CachedBlob::Data() const {
+    return mData.get();
+}
 
-    uint8_t* CachedBlob::Data() {
-        return mData.get();
-    }
+uint8_t* CachedBlob::Data() {
+    return mData.get();
+}
 
-    size_t CachedBlob::Size() const {
-        return mSize;
-    }
+size_t CachedBlob::Size() const {
+    return mSize;
+}
 
-    void CachedBlob::Reset(size_t size) {
-        mSize = size;
-        mData = std::make_unique<uint8_t[]>(size);
-    }
+void CachedBlob::Reset(size_t size) {
+    mSize = size;
+    mData = std::make_unique<uint8_t[]>(size);
+}
 
-    BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
-        : mCache(cachingInterface) {
-    }
+BlobCache::BlobCache(dawn::platform::CachingInterface* cachingInterface)
+    : mCache(cachingInterface) {}
 
-    CachedBlob BlobCache::Load(const CacheKey& key) {
-        std::lock_guard<std::mutex> lock(mMutex);
-        return LoadInternal(key);
-    }
+CachedBlob BlobCache::Load(const CacheKey& key) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    return LoadInternal(key);
+}
 
-    void BlobCache::Store(const CacheKey& key, size_t valueSize, const void* value) {
-        std::lock_guard<std::mutex> lock(mMutex);
-        StoreInternal(key, valueSize, value);
-    }
+void BlobCache::Store(const CacheKey& key, size_t valueSize, const void* value) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    StoreInternal(key, valueSize, value);
+}
 
-    void BlobCache::Store(const CacheKey& key, const CachedBlob& value) {
-        Store(key, value.Size(), value.Data());
-    }
+void BlobCache::Store(const CacheKey& key, const CachedBlob& value) {
+    Store(key, value.Size(), value.Data());
+}
 
-    CachedBlob BlobCache::LoadInternal(const CacheKey& key) {
-        CachedBlob result;
-        if (mCache == nullptr) {
-            return result;
-        }
-        const size_t expectedSize = mCache->LoadData(nullptr, key.data(), key.size(), nullptr, 0);
-        if (expectedSize > 0) {
-            result.Reset(expectedSize);
-            const size_t actualSize =
-                mCache->LoadData(nullptr, key.data(), key.size(), result.Data(), expectedSize);
-            ASSERT(expectedSize == actualSize);
-        }
+CachedBlob BlobCache::LoadInternal(const CacheKey& key) {
+    CachedBlob result;
+    if (mCache == nullptr) {
         return result;
     }
-
-    void BlobCache::StoreInternal(const CacheKey& key, size_t valueSize, const void* value) {
-        ASSERT(value != nullptr);
-        ASSERT(valueSize > 0);
-        if (mCache == nullptr) {
-            return;
-        }
-        mCache->StoreData(nullptr, key.data(), key.size(), value, valueSize);
+    const size_t expectedSize = mCache->LoadData(nullptr, key.data(), key.size(), nullptr, 0);
+    if (expectedSize > 0) {
+        result.Reset(expectedSize);
+        const size_t actualSize =
+            mCache->LoadData(nullptr, key.data(), key.size(), result.Data(), expectedSize);
+        ASSERT(expectedSize == actualSize);
     }
+    return result;
+}
+
+void BlobCache::StoreInternal(const CacheKey& key, size_t valueSize, const void* value) {
+    ASSERT(value != nullptr);
+    ASSERT(valueSize > 0);
+    if (mCache == nullptr) {
+        return;
+    }
+    mCache->StoreData(nullptr, key.data(), key.size(), value, valueSize);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BlobCache.h b/src/dawn/native/BlobCache.h
index 4e92fba..f4fb965 100644
--- a/src/dawn/native/BlobCache.h
+++ b/src/dawn/native/BlobCache.h
@@ -19,57 +19,57 @@
 #include <mutex>
 
 namespace dawn::platform {
-    class CachingInterface;
+class CachingInterface;
 }
 
 namespace dawn::native {
 
-    class BlobCache;
-    class CacheKey;
-    class InstanceBase;
+class BlobCache;
+class CacheKey;
+class InstanceBase;
 
-    class CachedBlob {
-      public:
-        explicit CachedBlob(size_t size = 0);
+class CachedBlob {
+  public:
+    explicit CachedBlob(size_t size = 0);
 
-        bool Empty() const;
-        const uint8_t* Data() const;
-        uint8_t* Data();
-        size_t Size() const;
-        void Reset(size_t size);
+    bool Empty() const;
+    const uint8_t* Data() const;
+    uint8_t* Data();
+    size_t Size() const;
+    void Reset(size_t size);
 
-      private:
-        std::unique_ptr<uint8_t[]> mData = nullptr;
-        size_t mSize = 0;
-    };
+  private:
+    std::unique_ptr<uint8_t[]> mData = nullptr;
+    size_t mSize = 0;
+};
 
-    // This class should always be thread-safe because it may be called asynchronously. Its purpose
-    // is to wrap the CachingInterface provided via a platform.
-    class BlobCache {
-      public:
-        explicit BlobCache(dawn::platform::CachingInterface* cachingInterface = nullptr);
+// This class should always be thread-safe because it may be called asynchronously. Its purpose
+// is to wrap the CachingInterface provided via a platform.
+class BlobCache {
+  public:
+    explicit BlobCache(dawn::platform::CachingInterface* cachingInterface = nullptr);
 
-        // Returns empty blob if the key is not found in the cache.
-        CachedBlob Load(const CacheKey& key);
+    // Returns empty blob if the key is not found in the cache.
+    CachedBlob Load(const CacheKey& key);
 
-        // Value to store must be non-empty/non-null.
-        void Store(const CacheKey& key, size_t valueSize, const void* value);
-        void Store(const CacheKey& key, const CachedBlob& value);
+    // Value to store must be non-empty/non-null.
+    void Store(const CacheKey& key, size_t valueSize, const void* value);
+    void Store(const CacheKey& key, const CachedBlob& value);
 
-      private:
-        // Non-thread safe internal implementations of load and store. Exposed callers that use
-        // these helpers need to make sure that these are entered with `mMutex` held.
-        CachedBlob LoadInternal(const CacheKey& key);
-        void StoreInternal(const CacheKey& key, size_t valueSize, const void* value);
+  private:
+    // Non-thread safe internal implementations of load and store. Exposed callers that use
+    // these helpers need to make sure that these are entered with `mMutex` held.
+    CachedBlob LoadInternal(const CacheKey& key);
+    void StoreInternal(const CacheKey& key, size_t valueSize, const void* value);
 
-        // Protects thread safety of access to mCache.
-        std::mutex mMutex;
+    // Protects thread safety of access to mCache.
+    std::mutex mMutex;
 
-        // TODO(dawn:549): Current CachingInterface declaration requires passing a device to each
-        //   call, but this might be unnecessary. This class just passes nullptr for those calls
-        //   right now. Eventually we can just change the interface to be more generic.
-        dawn::platform::CachingInterface* mCache;
-    };
+    // TODO(dawn:549): Current CachingInterface declaration requires passing a device to each
+    //   call, but this might be unnecessary. This class just passes nullptr for those calls
+    //   right now. Eventually we can just change the interface to be more generic.
+    dawn::platform::CachingInterface* mCache;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/BuddyAllocator.cpp b/src/dawn/native/BuddyAllocator.cpp
index 76d7a65..2d7de75 100644
--- a/src/dawn/native/BuddyAllocator.cpp
+++ b/src/dawn/native/BuddyAllocator.cpp
@@ -19,246 +19,246 @@
 
 namespace dawn::native {
 
-    BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
-        ASSERT(IsPowerOfTwo(maxSize));
+BuddyAllocator::BuddyAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+    ASSERT(IsPowerOfTwo(maxSize));
 
-        mFreeLists.resize(Log2(mMaxBlockSize) + 1);
+    mFreeLists.resize(Log2(mMaxBlockSize) + 1);
 
-        // Insert the level0 free block.
-        mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
-        mFreeLists[0] = {mRoot};
+    // Insert the level0 free block.
+    mRoot = new BuddyBlock(maxSize, /*offset*/ 0);
+    mFreeLists[0] = {mRoot};
+}
+
+BuddyAllocator::~BuddyAllocator() {
+    if (mRoot) {
+        DeleteBlock(mRoot);
     }
+}
 
-    BuddyAllocator::~BuddyAllocator() {
-        if (mRoot) {
-            DeleteBlock(mRoot);
+uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
+    return ComputeNumOfFreeBlocks(mRoot);
+}
+
+uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
+    if (block->mState == BlockState::Free) {
+        return 1;
+    } else if (block->mState == BlockState::Split) {
+        return ComputeNumOfFreeBlocks(block->split.pLeft) +
+               ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
+    }
+    return 0;
+}
+
+uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
+    // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
+    // However, mFreeList is zero-indexed by level.
+    // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
+    return Log2(mMaxBlockSize) - Log2(blockSize);
+}
+
+uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
+                                                 uint64_t alignment) const {
+    ASSERT(IsPowerOfTwo(alignment));
+    // The current level is the level that corresponds to the allocation size. The free list may
+    // not contain a block at that level until a larger one gets allocated (and splits).
+    // Continue to go up the tree until such a larger block exists.
+    //
+    //  Even if the block exists at the level, it cannot be used if its offset is unaligned.
+    // When the alignment is also a power-of-two, we simply use the next free block whose size
+    // is greater than or equal to the alignment value.
+    //
+    //  After one 8-byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       F2     |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   F1  |              |       A - allocated
+    //                 --------------------------------
+    //
+    //  Allocate(size=8, alignment=8) will be satisfied by using F1.
+    //  Allocate(size=8, alignment=4) will be satisfied by using F1.
+    //  Allocate(size=8, alignment=16) will be satisfied by using F2.
+    //
+    for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
+        size_t currLevel = allocationBlockLevel - ii;
+        BuddyBlock* freeBlock = mFreeLists[currLevel].head;
+        if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
+            return currLevel;
         }
     }
+    return kInvalidOffset;  // No free block exists at any level.
+}
 
-    uint64_t BuddyAllocator::ComputeTotalNumOfFreeBlocksForTesting() const {
-        return ComputeNumOfFreeBlocks(mRoot);
+// Inserts existing free block into the free-list.
+// Called by allocate upon splitting to insert a child block into a free-list.
+// Note: Always insert into the head of the free-list. As when a larger free block at a lower
+// level was split, there were no smaller free blocks at a higher level to allocate.
+void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
+    ASSERT(block->mState == BlockState::Free);
+
+    // Inserted block is now the front (no prev).
+    block->free.pPrev = nullptr;
+
+    // Old head is now the inserted block's next.
+    block->free.pNext = mFreeLists[level].head;
+
+    // Block already in HEAD position (ex. right child was inserted first).
+    if (mFreeLists[level].head != nullptr) {
+        // Old head's previous is the inserted block.
+        mFreeLists[level].head->free.pPrev = block;
     }
 
-    uint64_t BuddyAllocator::ComputeNumOfFreeBlocks(BuddyBlock* block) const {
-        if (block->mState == BlockState::Free) {
-            return 1;
-        } else if (block->mState == BlockState::Split) {
-            return ComputeNumOfFreeBlocks(block->split.pLeft) +
-                   ComputeNumOfFreeBlocks(block->split.pLeft->pBuddy);
-        }
-        return 0;
-    }
+    mFreeLists[level].head = block;
+}
 
-    uint32_t BuddyAllocator::ComputeLevelFromBlockSize(uint64_t blockSize) const {
-        // Every level in the buddy system can be indexed by order-n where n = log2(blockSize).
-        // However, mFreeList zero-indexed by level.
-        // For example, blockSize=4 is Level1 if MAX_BLOCK is 8.
-        return Log2(mMaxBlockSize) - Log2(blockSize);
-    }
+void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
+    ASSERT(block->mState == BlockState::Free);
 
-    uint64_t BuddyAllocator::GetNextFreeAlignedBlock(size_t allocationBlockLevel,
-                                                     uint64_t alignment) const {
-        ASSERT(IsPowerOfTwo(alignment));
-        // The current level is the level that corresponds to the allocation size. The free list may
-        // not contain a block at that level until a larger one gets allocated (and splits).
-        // Continue to go up the tree until such a larger block exists.
-        //
-        // Even if the block exists at the level, it cannot be used if it's offset is unaligned.
-        // When the alignment is also a power-of-two, we simply use the next free block whose size
-        // is greater than or equal to the alignment value.
-        //
-        //  After one 8-byte allocation:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       F2     |       S - split
-        //                 --------------------------------       F - free
-        //      2       8  |   Aa  |   F1  |              |       A - allocated
-        //                 --------------------------------
-        //
-        //  Allocate(size=8, alignment=8) will be satisfied by using F1.
-        //  Allocate(size=8, alignment=4) will be satified by using F1.
-        //  Allocate(size=8, alignment=16) will be satisified by using F2.
-        //
-        for (size_t ii = 0; ii <= allocationBlockLevel; ++ii) {
-            size_t currLevel = allocationBlockLevel - ii;
-            BuddyBlock* freeBlock = mFreeLists[currLevel].head;
-            if (freeBlock && (freeBlock->mOffset % alignment == 0)) {
-                return currLevel;
-            }
-        }
-        return kInvalidOffset;  // No free block exists at any level.
-    }
+    if (mFreeLists[level].head == block) {
+        // Block is in HEAD position.
+        mFreeLists[level].head = mFreeLists[level].head->free.pNext;
+    } else {
+        // Block is after HEAD position.
+        BuddyBlock* pPrev = block->free.pPrev;
+        BuddyBlock* pNext = block->free.pNext;
 
-    // Inserts existing free block into the free-list.
-    // Called by allocate upon splitting to insert a child block into a free-list.
-    // Note: Always insert into the head of the free-list. As when a larger free block at a lower
-    // level was split, there were no smaller free blocks at a higher level to allocate.
-    void BuddyAllocator::InsertFreeBlock(BuddyBlock* block, size_t level) {
-        ASSERT(block->mState == BlockState::Free);
+        ASSERT(pPrev != nullptr);
+        ASSERT(pPrev->mState == BlockState::Free);
 
-        // Inserted block is now the front (no prev).
-        block->free.pPrev = nullptr;
+        pPrev->free.pNext = pNext;
 
-        // Old head is now the inserted block's next.
-        block->free.pNext = mFreeLists[level].head;
-
-        // Block already in HEAD position (ex. right child was inserted first).
-        if (mFreeLists[level].head != nullptr) {
-            // Old head's previous is the inserted block.
-            mFreeLists[level].head->free.pPrev = block;
-        }
-
-        mFreeLists[level].head = block;
-    }
-
-    void BuddyAllocator::RemoveFreeBlock(BuddyBlock* block, size_t level) {
-        ASSERT(block->mState == BlockState::Free);
-
-        if (mFreeLists[level].head == block) {
-            // Block is in HEAD position.
-            mFreeLists[level].head = mFreeLists[level].head->free.pNext;
-        } else {
-            // Block is after HEAD position.
-            BuddyBlock* pPrev = block->free.pPrev;
-            BuddyBlock* pNext = block->free.pNext;
-
-            ASSERT(pPrev != nullptr);
-            ASSERT(pPrev->mState == BlockState::Free);
-
-            pPrev->free.pNext = pNext;
-
-            if (pNext != nullptr) {
-                ASSERT(pNext->mState == BlockState::Free);
-                pNext->free.pPrev = pPrev;
-            }
+        if (pNext != nullptr) {
+            ASSERT(pNext->mState == BlockState::Free);
+            pNext->free.pPrev = pPrev;
         }
     }
+}
 
-    uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
-        if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
-            return kInvalidOffset;
-        }
+uint64_t BuddyAllocator::Allocate(uint64_t allocationSize, uint64_t alignment) {
+    if (allocationSize == 0 || allocationSize > mMaxBlockSize) {
+        return kInvalidOffset;
+    }
 
-        // Compute the level
-        const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
+    // Compute the level
+    const uint32_t allocationSizeToLevel = ComputeLevelFromBlockSize(allocationSize);
 
-        ASSERT(allocationSizeToLevel < mFreeLists.size());
+    ASSERT(allocationSizeToLevel < mFreeLists.size());
 
-        uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
+    uint64_t currBlockLevel = GetNextFreeAlignedBlock(allocationSizeToLevel, alignment);
 
-        // Error when no free blocks exist (allocator is full)
-        if (currBlockLevel == kInvalidOffset) {
-            return kInvalidOffset;
-        }
+    // Error when no free blocks exist (allocator is full)
+    if (currBlockLevel == kInvalidOffset) {
+        return kInvalidOffset;
+    }
 
-        // Split free blocks level-by-level.
-        // Terminate when the current block level is equal to the computed level of the requested
-        // allocation.
-        BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
+    // Split free blocks level-by-level.
+    // Terminate when the current block level is equal to the computed level of the requested
+    // allocation.
+    BuddyBlock* currBlock = mFreeLists[currBlockLevel].head;
 
-        for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
-            ASSERT(currBlock->mState == BlockState::Free);
+    for (; currBlockLevel < allocationSizeToLevel; currBlockLevel++) {
+        ASSERT(currBlock->mState == BlockState::Free);
 
-            // Remove curr block (about to be split).
-            RemoveFreeBlock(currBlock, currBlockLevel);
-
-            // Create two free child blocks (the buddies).
-            const uint64_t nextLevelSize = currBlock->mSize / 2;
-            BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
-            BuddyBlock* rightChildBlock =
-                new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
-
-            // Remember the parent to merge these back upon de-allocation.
-            rightChildBlock->pParent = currBlock;
-            leftChildBlock->pParent = currBlock;
-
-            // Make them buddies.
-            leftChildBlock->pBuddy = rightChildBlock;
-            rightChildBlock->pBuddy = leftChildBlock;
-
-            // Insert the children back into the free list into the next level.
-            // The free list does not require a specific order. However, an order is specified as
-            // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
-            InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
-            InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
-
-            // Curr block is now split.
-            currBlock->mState = BlockState::Split;
-            currBlock->split.pLeft = leftChildBlock;
-
-            // Decend down into the next level.
-            currBlock = leftChildBlock;
-        }
-
-        // Remove curr block from free-list (now allocated).
+        // Remove curr block (about to be split).
         RemoveFreeBlock(currBlock, currBlockLevel);
-        currBlock->mState = BlockState::Allocated;
 
-        return currBlock->mOffset;
+        // Create two free child blocks (the buddies).
+        const uint64_t nextLevelSize = currBlock->mSize / 2;
+        BuddyBlock* leftChildBlock = new BuddyBlock(nextLevelSize, currBlock->mOffset);
+        BuddyBlock* rightChildBlock =
+            new BuddyBlock(nextLevelSize, currBlock->mOffset + nextLevelSize);
+
+        // Remember the parent to merge these back upon de-allocation.
+        rightChildBlock->pParent = currBlock;
+        leftChildBlock->pParent = currBlock;
+
+        // Make them buddies.
+        leftChildBlock->pBuddy = rightChildBlock;
+        rightChildBlock->pBuddy = leftChildBlock;
+
+        // Insert the children back into the free list into the next level.
+        // The free list does not require a specific order. However, an order is specified as
+        // it's ideal to allocate lower addresses first by having the leftmost child in HEAD.
+        InsertFreeBlock(rightChildBlock, currBlockLevel + 1);
+        InsertFreeBlock(leftChildBlock, currBlockLevel + 1);
+
+        // Curr block is now split.
+        currBlock->mState = BlockState::Split;
+        currBlock->split.pLeft = leftChildBlock;
+
+        // Decend down into the next level.
+        currBlock = leftChildBlock;
     }
 
-    void BuddyAllocator::Deallocate(uint64_t offset) {
-        BuddyBlock* curr = mRoot;
+    // Remove curr block from free-list (now allocated).
+    RemoveFreeBlock(currBlock, currBlockLevel);
+    currBlock->mState = BlockState::Allocated;
 
-        // TODO(crbug.com/dawn/827): Optimize de-allocation.
-        // Passing allocationSize directly will avoid the following level-by-level search;
-        // however, it requires the size information to be stored outside the allocator.
+    return currBlock->mOffset;
+}
 
-        // Search for the free block node that corresponds to the block offset.
-        size_t currBlockLevel = 0;
-        while (curr->mState == BlockState::Split) {
-            if (offset < curr->split.pLeft->pBuddy->mOffset) {
-                curr = curr->split.pLeft;
-            } else {
-                curr = curr->split.pLeft->pBuddy;
-            }
+void BuddyAllocator::Deallocate(uint64_t offset) {
+    BuddyBlock* curr = mRoot;
 
-            currBlockLevel++;
+    // TODO(crbug.com/dawn/827): Optimize de-allocation.
+    // Passing allocationSize directly will avoid the following level-by-level search;
+    // however, it requires the size information to be stored outside the allocator.
+
+    // Search for the free block node that corresponds to the block offset.
+    size_t currBlockLevel = 0;
+    while (curr->mState == BlockState::Split) {
+        if (offset < curr->split.pLeft->pBuddy->mOffset) {
+            curr = curr->split.pLeft;
+        } else {
+            curr = curr->split.pLeft->pBuddy;
         }
 
-        ASSERT(curr->mState == BlockState::Allocated);
-
-        // Ensure the block is at the correct level
-        ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
-
-        // Mark curr free so we can merge.
-        curr->mState = BlockState::Free;
-
-        // Merge the buddies (LevelN-to-Level0).
-        while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
-            // Remove the buddy.
-            RemoveFreeBlock(curr->pBuddy, currBlockLevel);
-
-            BuddyBlock* parent = curr->pParent;
-
-            // The buddies were inserted in a specific order but
-            // could be deleted in any order.
-            DeleteBlock(curr->pBuddy);
-            DeleteBlock(curr);
-
-            // Parent is now free.
-            parent->mState = BlockState::Free;
-
-            // Ascend up to the next level (parent block).
-            curr = parent;
-            currBlockLevel--;
-        }
-
-        InsertFreeBlock(curr, currBlockLevel);
+        currBlockLevel++;
     }
 
-    // Helper which deletes a block in the tree recursively (post-order).
-    void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
-        ASSERT(block != nullptr);
+    ASSERT(curr->mState == BlockState::Allocated);
 
-        if (block->mState == BlockState::Split) {
-            // Delete the pair in same order we inserted.
-            DeleteBlock(block->split.pLeft->pBuddy);
-            DeleteBlock(block->split.pLeft);
-        }
-        delete block;
+    // Ensure the block is at the correct level
+    ASSERT(currBlockLevel == ComputeLevelFromBlockSize(curr->mSize));
+
+    // Mark curr free so we can merge.
+    curr->mState = BlockState::Free;
+
+    // Merge the buddies (LevelN-to-Level0).
+    while (currBlockLevel > 0 && curr->pBuddy->mState == BlockState::Free) {
+        // Remove the buddy.
+        RemoveFreeBlock(curr->pBuddy, currBlockLevel);
+
+        BuddyBlock* parent = curr->pParent;
+
+        // The buddies were inserted in a specific order but
+        // could be deleted in any order.
+        DeleteBlock(curr->pBuddy);
+        DeleteBlock(curr);
+
+        // Parent is now free.
+        parent->mState = BlockState::Free;
+
+        // Ascend up to the next level (parent block).
+        curr = parent;
+        currBlockLevel--;
     }
 
+    InsertFreeBlock(curr, currBlockLevel);
+}
+
+// Helper which deletes a block in the tree recursively (post-order).
+void BuddyAllocator::DeleteBlock(BuddyBlock* block) {
+    ASSERT(block != nullptr);
+
+    if (block->mState == BlockState::Split) {
+        // Delete the pair in same order we inserted.
+        DeleteBlock(block->split.pLeft->pBuddy);
+        DeleteBlock(block->split.pLeft);
+    }
+    delete block;
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/BuddyAllocator.h b/src/dawn/native/BuddyAllocator.h
index d22bd58..e0c478b 100644
--- a/src/dawn/native/BuddyAllocator.h
+++ b/src/dawn/native/BuddyAllocator.h
@@ -22,96 +22,96 @@
 
 namespace dawn::native {
 
-    // Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
-    // Memory is split into halves until just large enough to fit to the request. This
-    // requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
-    // returning the starting offset whose size is guaranteed to be greater than or equal to the
-    // allocation size. To deallocate, the same offset is used to find the corresponding block.
-    //
-    // Internally, it manages a free list to track free blocks in a full binary tree.
-    // Every index in the free list corresponds to a level in the tree. That level also determines
-    // the size of the block to be used to satisfy the request. The first level (index=0) represents
-    // the root whose size is also called the max block size.
-    //
-    class BuddyAllocator {
-      public:
-        explicit BuddyAllocator(uint64_t maxSize);
-        ~BuddyAllocator();
+// Buddy allocator uses the buddy memory allocation technique to satisfy an allocation request.
+// Memory is split into halves until just large enough to fit to the request. This
+// requires the allocation size to be a power-of-two value. The allocator "allocates" a block by
+// returning the starting offset whose size is guaranteed to be greater than or equal to the
+// allocation size. To deallocate, the same offset is used to find the corresponding block.
+//
+// Internally, it manages a free list to track free blocks in a full binary tree.
+// Every index in the free list corresponds to a level in the tree. That level also determines
+// the size of the block to be used to satisfy the request. The first level (index=0) represents
+// the root whose size is also called the max block size.
+//
+class BuddyAllocator {
+  public:
+    explicit BuddyAllocator(uint64_t maxSize);
+    ~BuddyAllocator();
 
-        // Required methods.
-        uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
-        void Deallocate(uint64_t offset);
+    // Required methods.
+    uint64_t Allocate(uint64_t allocationSize, uint64_t alignment = 1);
+    void Deallocate(uint64_t offset);
 
-        // For testing purposes only.
-        uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
+    // For testing purposes only.
+    uint64_t ComputeTotalNumOfFreeBlocksForTesting() const;
 
-        static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+    static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
 
-      private:
-        uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
-        uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
+  private:
+    uint32_t ComputeLevelFromBlockSize(uint64_t blockSize) const;
+    uint64_t GetNextFreeAlignedBlock(size_t allocationBlockLevel, uint64_t alignment) const;
 
-        enum class BlockState { Free, Split, Allocated };
+    enum class BlockState { Free, Split, Allocated };
 
-        struct BuddyBlock {
-            BuddyBlock(uint64_t size, uint64_t offset)
-                : mOffset(offset), mSize(size), mState(BlockState::Free) {
-                free.pPrev = nullptr;
-                free.pNext = nullptr;
-            }
+    struct BuddyBlock {
+        BuddyBlock(uint64_t size, uint64_t offset)
+            : mOffset(offset), mSize(size), mState(BlockState::Free) {
+            free.pPrev = nullptr;
+            free.pNext = nullptr;
+        }
 
-            uint64_t mOffset;
-            uint64_t mSize;
+        uint64_t mOffset;
+        uint64_t mSize;
 
-            // Pointer to this block's buddy, iff parent is split.
-            // Used to quickly merge buddy blocks upon de-allocate.
-            BuddyBlock* pBuddy = nullptr;
-            BuddyBlock* pParent = nullptr;
+        // Pointer to this block's buddy, iff parent is split.
+        // Used to quickly merge buddy blocks upon de-allocate.
+        BuddyBlock* pBuddy = nullptr;
+        BuddyBlock* pParent = nullptr;
 
-            // Track whether this block has been split or not.
-            BlockState mState;
+        // Track whether this block has been split or not.
+        BlockState mState;
 
-            struct FreeLinks {
-                BuddyBlock* pPrev;
-                BuddyBlock* pNext;
-            };
-
-            struct SplitLink {
-                BuddyBlock* pLeft;
-            };
-
-            union {
-                // Used upon allocation.
-                // Avoids searching for the next free block.
-                FreeLinks free;
-
-                // Used upon de-allocation.
-                // Had this block split upon allocation, it and it's buddy is to be deleted.
-                SplitLink split;
-            };
+        struct FreeLinks {
+            BuddyBlock* pPrev;
+            BuddyBlock* pNext;
         };
 
-        void InsertFreeBlock(BuddyBlock* block, size_t level);
-        void RemoveFreeBlock(BuddyBlock* block, size_t level);
-        void DeleteBlock(BuddyBlock* block);
-
-        uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
-
-        // Keep track the head and tail (for faster insertion/removal).
-        struct BlockList {
-            BuddyBlock* head = nullptr;  // First free block in level.
-            // TODO(crbug.com/dawn/827): Track the tail.
+        struct SplitLink {
+            BuddyBlock* pLeft;
         };
 
-        BuddyBlock* mRoot = nullptr;  // Used to deallocate non-free blocks.
+        union {
+            // Used upon allocation.
+            // Avoids searching for the next free block.
+            FreeLinks free;
 
-        uint64_t mMaxBlockSize = 0;
-
-        // List of linked-lists of free blocks where the index is a level that
-        // corresponds to a power-of-two sized block.
-        std::vector<BlockList> mFreeLists;
+            // Used upon de-allocation.
+            // Had this block split upon allocation, it and it's buddy is to be deleted.
+            SplitLink split;
+        };
     };
 
+    void InsertFreeBlock(BuddyBlock* block, size_t level);
+    void RemoveFreeBlock(BuddyBlock* block, size_t level);
+    void DeleteBlock(BuddyBlock* block);
+
+    uint64_t ComputeNumOfFreeBlocks(BuddyBlock* block) const;
+
+    // Keep track the head and tail (for faster insertion/removal).
+    struct BlockList {
+        BuddyBlock* head = nullptr;  // First free block in level.
+        // TODO(crbug.com/dawn/827): Track the tail.
+    };
+
+    BuddyBlock* mRoot = nullptr;  // Used to deallocate non-free blocks.
+
+    uint64_t mMaxBlockSize = 0;
+
+    // List of linked-lists of free blocks where the index is a level that
+    // corresponds to a power-of-two sized block.
+    std::vector<BlockList> mFreeLists;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_BUDDYALLOCATOR_H_
diff --git a/src/dawn/native/BuddyMemoryAllocator.cpp b/src/dawn/native/BuddyMemoryAllocator.cpp
index df3874e..d21ecf1 100644
--- a/src/dawn/native/BuddyMemoryAllocator.cpp
+++ b/src/dawn/native/BuddyMemoryAllocator.cpp
@@ -21,102 +21,102 @@
 
 namespace dawn::native {
 
-    BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
-                                               uint64_t memoryBlockSize,
-                                               ResourceHeapAllocator* heapAllocator)
-        : mMemoryBlockSize(memoryBlockSize),
-          mBuddyBlockAllocator(maxSystemSize),
-          mHeapAllocator(heapAllocator) {
-        ASSERT(memoryBlockSize <= maxSystemSize);
-        ASSERT(IsPowerOfTwo(mMemoryBlockSize));
-        ASSERT(maxSystemSize % mMemoryBlockSize == 0);
+BuddyMemoryAllocator::BuddyMemoryAllocator(uint64_t maxSystemSize,
+                                           uint64_t memoryBlockSize,
+                                           ResourceHeapAllocator* heapAllocator)
+    : mMemoryBlockSize(memoryBlockSize),
+      mBuddyBlockAllocator(maxSystemSize),
+      mHeapAllocator(heapAllocator) {
+    ASSERT(memoryBlockSize <= maxSystemSize);
+    ASSERT(IsPowerOfTwo(mMemoryBlockSize));
+    ASSERT(maxSystemSize % mMemoryBlockSize == 0);
 
-        mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
+    mTrackedSubAllocations.resize(maxSystemSize / mMemoryBlockSize);
+}
+
+uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
+    ASSERT(offset != BuddyAllocator::kInvalidOffset);
+    return offset / mMemoryBlockSize;
+}
+
+ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
+                                                                       uint64_t alignment) {
+    ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+
+    if (allocationSize == 0) {
+        return std::move(invalidAllocation);
     }
 
-    uint64_t BuddyMemoryAllocator::GetMemoryIndex(uint64_t offset) const {
-        ASSERT(offset != BuddyAllocator::kInvalidOffset);
-        return offset / mMemoryBlockSize;
+    // Check the unaligned size to avoid overflowing NextPowerOfTwo.
+    if (allocationSize > mMemoryBlockSize) {
+        return std::move(invalidAllocation);
     }
 
-    ResultOrError<ResourceMemoryAllocation> BuddyMemoryAllocator::Allocate(uint64_t allocationSize,
-                                                                           uint64_t alignment) {
-        ResourceMemoryAllocation invalidAllocation = ResourceMemoryAllocation{};
+    // Round allocation size to nearest power-of-two.
+    allocationSize = NextPowerOfTwo(allocationSize);
 
-        if (allocationSize == 0) {
-            return std::move(invalidAllocation);
-        }
-
-        // Check the unaligned size to avoid overflowing NextPowerOfTwo.
-        if (allocationSize > mMemoryBlockSize) {
-            return std::move(invalidAllocation);
-        }
-
-        // Round allocation size to nearest power-of-two.
-        allocationSize = NextPowerOfTwo(allocationSize);
-
-        // Allocation cannot exceed the memory size.
-        if (allocationSize > mMemoryBlockSize) {
-            return std::move(invalidAllocation);
-        }
-
-        // Attempt to sub-allocate a block of the requested size.
-        const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
-        if (blockOffset == BuddyAllocator::kInvalidOffset) {
-            return std::move(invalidAllocation);
-        }
-
-        const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
-        if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
-            // Transfer ownership to this allocator
-            std::unique_ptr<ResourceHeapBase> memory;
-            DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
-            mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
-        }
-
-        mTrackedSubAllocations[memoryIndex].refcount++;
-
-        AllocationInfo info;
-        info.mBlockOffset = blockOffset;
-        info.mMethod = AllocationMethod::kSubAllocated;
-
-        // Allocation offset is always local to the memory.
-        const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
-
-        return ResourceMemoryAllocation{
-            info, memoryOffset, mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+    // Allocation cannot exceed the memory size.
+    if (allocationSize > mMemoryBlockSize) {
+        return std::move(invalidAllocation);
     }
 
-    void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
-        const AllocationInfo info = allocation.GetInfo();
+    // Attempt to sub-allocate a block of the requested size.
+    const uint64_t blockOffset = mBuddyBlockAllocator.Allocate(allocationSize, alignment);
+    if (blockOffset == BuddyAllocator::kInvalidOffset) {
+        return std::move(invalidAllocation);
+    }
 
-        ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+    const uint64_t memoryIndex = GetMemoryIndex(blockOffset);
+    if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+        // Transfer ownership to this allocator
+        std::unique_ptr<ResourceHeapBase> memory;
+        DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(mMemoryBlockSize));
+        mTrackedSubAllocations[memoryIndex] = {/*refcount*/ 0, std::move(memory)};
+    }
 
-        const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+    mTrackedSubAllocations[memoryIndex].refcount++;
 
-        ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
-        mTrackedSubAllocations[memoryIndex].refcount--;
+    AllocationInfo info;
+    info.mBlockOffset = blockOffset;
+    info.mMethod = AllocationMethod::kSubAllocated;
 
-        if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
-            mHeapAllocator->DeallocateResourceHeap(
-                std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+    // Allocation offset is always local to the memory.
+    const uint64_t memoryOffset = blockOffset % mMemoryBlockSize;
+
+    return ResourceMemoryAllocation{info, memoryOffset,
+                                    mTrackedSubAllocations[memoryIndex].mMemoryAllocation.get()};
+}
+
+void BuddyMemoryAllocator::Deallocate(const ResourceMemoryAllocation& allocation) {
+    const AllocationInfo info = allocation.GetInfo();
+
+    ASSERT(info.mMethod == AllocationMethod::kSubAllocated);
+
+    const uint64_t memoryIndex = GetMemoryIndex(info.mBlockOffset);
+
+    ASSERT(mTrackedSubAllocations[memoryIndex].refcount > 0);
+    mTrackedSubAllocations[memoryIndex].refcount--;
+
+    if (mTrackedSubAllocations[memoryIndex].refcount == 0) {
+        mHeapAllocator->DeallocateResourceHeap(
+            std::move(mTrackedSubAllocations[memoryIndex].mMemoryAllocation));
+    }
+
+    mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
+}
+
+uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
+    return mMemoryBlockSize;
+}
+
+uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
+    uint64_t count = 0;
+    for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
+        if (allocation.refcount > 0) {
+            count++;
         }
-
-        mBuddyBlockAllocator.Deallocate(info.mBlockOffset);
     }
-
-    uint64_t BuddyMemoryAllocator::GetMemoryBlockSize() const {
-        return mMemoryBlockSize;
-    }
-
-    uint64_t BuddyMemoryAllocator::ComputeTotalNumOfHeapsForTesting() const {
-        uint64_t count = 0;
-        for (const TrackedSubAllocations& allocation : mTrackedSubAllocations) {
-            if (allocation.refcount > 0) {
-                count++;
-            }
-        }
-        return count;
-    }
+    return count;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/BuddyMemoryAllocator.h b/src/dawn/native/BuddyMemoryAllocator.h
index 299dc3d..a58cbae 100644
--- a/src/dawn/native/BuddyMemoryAllocator.h
+++ b/src/dawn/native/BuddyMemoryAllocator.h
@@ -24,51 +24,50 @@
 
 namespace dawn::native {
 
-    class ResourceHeapAllocator;
+class ResourceHeapAllocator;
 
-    // BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
-    // memory created by MemoryAllocator clients. It creates a very large buddy system
-    // where backing device memory blocks equal a specified level in the system.
-    //
-    // Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
-    // memory index and should the memory not exist, it is created. If two sub-allocations share the
-    // same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
-    // release the other prematurely.
-    //
-    // The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
-    // It should also outlive all the resources that are in the buddy allocator.
-    class BuddyMemoryAllocator {
-      public:
-        BuddyMemoryAllocator(uint64_t maxSystemSize,
-                             uint64_t memoryBlockSize,
-                             ResourceHeapAllocator* heapAllocator);
-        ~BuddyMemoryAllocator() = default;
+// BuddyMemoryAllocator uses the buddy allocator to sub-allocate blocks of device
+// memory created by MemoryAllocator clients. It creates a very large buddy system
+// where backing device memory blocks equal a specified level in the system.
+//
+// Upon sub-allocating, the offset gets mapped to device memory by computing the corresponding
+// memory index and should the memory not exist, it is created. If two sub-allocations share the
+// same memory index, the memory refcount is incremented to ensure de-allocating one doesn't
+// release the other prematurely.
+//
+// The MemoryAllocator should return ResourceHeaps that are all compatible with each other.
+// It should also outlive all the resources that are in the buddy allocator.
+class BuddyMemoryAllocator {
+  public:
+    BuddyMemoryAllocator(uint64_t maxSystemSize,
+                         uint64_t memoryBlockSize,
+                         ResourceHeapAllocator* heapAllocator);
+    ~BuddyMemoryAllocator() = default;
 
-        ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize,
-                                                         uint64_t alignment);
-        void Deallocate(const ResourceMemoryAllocation& allocation);
+    ResultOrError<ResourceMemoryAllocation> Allocate(uint64_t allocationSize, uint64_t alignment);
+    void Deallocate(const ResourceMemoryAllocation& allocation);
 
-        uint64_t GetMemoryBlockSize() const;
+    uint64_t GetMemoryBlockSize() const;
 
-        // For testing purposes.
-        uint64_t ComputeTotalNumOfHeapsForTesting() const;
+    // For testing purposes.
+    uint64_t ComputeTotalNumOfHeapsForTesting() const;
 
-      private:
-        uint64_t GetMemoryIndex(uint64_t offset) const;
+  private:
+    uint64_t GetMemoryIndex(uint64_t offset) const;
 
-        uint64_t mMemoryBlockSize = 0;
+    uint64_t mMemoryBlockSize = 0;
 
-        BuddyAllocator mBuddyBlockAllocator;
-        ResourceHeapAllocator* mHeapAllocator;
+    BuddyAllocator mBuddyBlockAllocator;
+    ResourceHeapAllocator* mHeapAllocator;
 
-        struct TrackedSubAllocations {
-            size_t refcount = 0;
-            std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
-        };
-
-        std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+    struct TrackedSubAllocations {
+        size_t refcount = 0;
+        std::unique_ptr<ResourceHeapBase> mMemoryAllocation;
     };
 
+    std::vector<TrackedSubAllocations> mTrackedSubAllocations;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_BUDDYMEMORYALLOCATOR_H_
diff --git a/src/dawn/native/Buffer.cpp b/src/dawn/native/Buffer.cpp
index 9161f11..80e905b 100644
--- a/src/dawn/native/Buffer.cpp
+++ b/src/dawn/native/Buffer.cpp
@@ -33,543 +33,526 @@
 
 namespace dawn::native {
 
-    namespace {
-        struct MapRequestTask : QueueBase::TaskInFlight {
-            MapRequestTask(Ref<BufferBase> buffer, MapRequestID id)
-                : buffer(std::move(buffer)), id(id) {
-            }
-            void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
-                TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
-                             uint64_t(serial));
-                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
-            }
-            void HandleDeviceLoss() override {
-                buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
-            }
-            ~MapRequestTask() override = default;
-
-          private:
-            Ref<BufferBase> buffer;
-            MapRequestID id;
-        };
-
-        class ErrorBuffer final : public BufferBase {
-          public:
-            ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
-                : BufferBase(device, descriptor, ObjectBase::kError) {
-                if (descriptor->mappedAtCreation) {
-                    // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
-                    // is invalid, and on 32bit systems we should avoid a narrowing conversion that
-                    // would make size = 1 << 32 + 1 allocate one byte.
-                    bool isValidSize =
-                        descriptor->size != 0 &&
-                        descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
-
-                    if (isValidSize) {
-                        mFakeMappedData =
-                            std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
-                    }
-                    // Since error buffers in this case may allocate memory, we need to track them
-                    // for destruction on the device.
-                    TrackInDevice();
-                }
-            }
-
-          private:
-            bool IsCPUWritableAtCreation() const override {
-                UNREACHABLE();
-            }
-
-            MaybeError MapAtCreationImpl() override {
-                UNREACHABLE();
-            }
-
-            MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
-                UNREACHABLE();
-            }
-
-            void* GetMappedPointerImpl() override {
-                return mFakeMappedData.get();
-            }
-
-            void UnmapImpl() override {
-                mFakeMappedData.reset();
-            }
-
-            std::unique_ptr<uint8_t[]> mFakeMappedData;
-        };
-
-    }  // anonymous namespace
-
-    MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
-        DAWN_TRY(ValidateBufferUsage(descriptor->usage));
-
-        wgpu::BufferUsage usage = descriptor->usage;
-
-        DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
-
-        const wgpu::BufferUsage kMapWriteAllowedUsages =
-            wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
-        DAWN_INVALID_IF(
-            usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
-            "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
-            "usage is %s.",
-            usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
-
-        const wgpu::BufferUsage kMapReadAllowedUsages =
-            wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
-        DAWN_INVALID_IF(
-            usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
-            "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
-            "usage is %s.",
-            usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
-
-        DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
-                        "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
-                        descriptor->size);
-
-        return {};
+namespace {
+struct MapRequestTask : QueueBase::TaskInFlight {
+    MapRequestTask(Ref<BufferBase> buffer, MapRequestID id) : buffer(std::move(buffer)), id(id) {}
+    void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
+        TRACE_EVENT1(platform, General, "Buffer::TaskInFlight::Finished", "serial",
+                     uint64_t(serial));
+        buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_Success);
     }
-
-    // Buffer
-
-    BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
-        : ApiObjectBase(device, descriptor->label),
-          mSize(descriptor->size),
-          mUsage(descriptor->usage),
-          mState(BufferState::Unmapped) {
-        // Add readonly storage usage if the buffer has a storage usage. The validation rules in
-        // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
-        if (mUsage & wgpu::BufferUsage::Storage) {
-            mUsage |= kReadOnlyStorageBuffer;
-        }
-
-        // The query resolve buffer need to be used as a storage buffer in the internal compute
-        // pipeline which does timestamp uint conversion for timestamp query, it requires the buffer
-        // has Storage usage in the binding group. Implicitly add an InternalStorage usage which is
-        // only compatible with InternalStorageBuffer binding type in BGL. It shouldn't be
-        // compatible with StorageBuffer binding type and the query resolve buffer cannot be bound
-        // as storage buffer if it's created without Storage usage.
-        if (mUsage & wgpu::BufferUsage::QueryResolve) {
-            mUsage |= kInternalStorageBuffer;
-        }
-
-        // We also add internal storage usage for Indirect buffers for some transformations before
-        // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
-        // D3D12), since these transformations involve binding them as storage buffers for use in a
-        // compute pass.
-        if (mUsage & wgpu::BufferUsage::Indirect) {
-            mUsage |= kInternalStorageBuffer;
-        }
-
-        TrackInDevice();
+    void HandleDeviceLoss() override {
+        buffer->OnMapRequestCompleted(id, WGPUBufferMapAsyncStatus_DeviceLost);
     }
+    ~MapRequestTask() override = default;
 
-    BufferBase::BufferBase(DeviceBase* device,
-                           const BufferDescriptor* descriptor,
-                           ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+  private:
+    Ref<BufferBase> buffer;
+    MapRequestID id;
+};
+
+class ErrorBuffer final : public BufferBase {
+  public:
+    ErrorBuffer(DeviceBase* device, const BufferDescriptor* descriptor)
+        : BufferBase(device, descriptor, ObjectBase::kError) {
         if (descriptor->mappedAtCreation) {
-            mState = BufferState::MappedAtCreation;
-            mMapOffset = 0;
-            mMapSize = mSize;
-        }
-    }
+            // Check that the size can be used to allocate an mFakeMappedData. A malloc(0)
+            // is invalid, and on 32bit systems we should avoid a narrowing conversion that
+            // would make size = 1 << 32 + 1 allocate one byte.
+            bool isValidSize = descriptor->size != 0 &&
+                               descriptor->size < uint64_t(std::numeric_limits<size_t>::max());
 
-    BufferBase::BufferBase(DeviceBase* device, BufferState state)
-        : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
-        TrackInDevice();
-    }
-
-    BufferBase::~BufferBase() {
-        ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
-    }
-
-    void BufferBase::DestroyImpl() {
-        if (mState == BufferState::Mapped) {
-            UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
-        } else if (mState == BufferState::MappedAtCreation) {
-            if (mStagingBuffer != nullptr) {
-                mStagingBuffer.reset();
-            } else if (mSize != 0) {
-                UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+            if (isValidSize) {
+                mFakeMappedData =
+                    std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(descriptor->size));
             }
+            // Since error buffers in this case may allocate memory, we need to track them
+            // for destruction on the device.
+            TrackInDevice();
         }
-        mState = BufferState::Destroyed;
     }
 
-    // static
-    BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
-        return new ErrorBuffer(device, descriptor);
+  private:
+    bool IsCPUWritableAtCreation() const override { UNREACHABLE(); }
+
+    MaybeError MapAtCreationImpl() override { UNREACHABLE(); }
+
+    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override {
+        UNREACHABLE();
     }
 
-    ObjectType BufferBase::GetType() const {
-        return ObjectType::Buffer;
+    void* GetMappedPointerImpl() override { return mFakeMappedData.get(); }
+
+    void UnmapImpl() override { mFakeMappedData.reset(); }
+
+    std::unique_ptr<uint8_t[]> mFakeMappedData;
+};
+
+}  // anonymous namespace
+
+MaybeError ValidateBufferDescriptor(DeviceBase*, const BufferDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+    DAWN_TRY(ValidateBufferUsage(descriptor->usage));
+
+    wgpu::BufferUsage usage = descriptor->usage;
+
+    DAWN_INVALID_IF(usage == wgpu::BufferUsage::None, "Buffer usages must not be 0.");
+
+    const wgpu::BufferUsage kMapWriteAllowedUsages =
+        wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    DAWN_INVALID_IF(
+        usage & wgpu::BufferUsage::MapWrite && !IsSubset(usage, kMapWriteAllowedUsages),
+        "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+        "usage is %s.",
+        usage, wgpu::BufferUsage::MapWrite, wgpu::BufferUsage::CopySrc);
+
+    const wgpu::BufferUsage kMapReadAllowedUsages =
+        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopyDst;
+    DAWN_INVALID_IF(
+        usage & wgpu::BufferUsage::MapRead && !IsSubset(usage, kMapReadAllowedUsages),
+        "Buffer usages (%s) is invalid. If a buffer usage contains %s the only other allowed "
+        "usage is %s.",
+        usage, wgpu::BufferUsage::MapRead, wgpu::BufferUsage::CopyDst);
+
+    DAWN_INVALID_IF(descriptor->mappedAtCreation && descriptor->size % 4 != 0,
+                    "Buffer is mapped at creation but its size (%u) is not a multiple of 4.",
+                    descriptor->size);
+
+    return {};
+}
+
+// Buffer
+
+BufferBase::BufferBase(DeviceBase* device, const BufferDescriptor* descriptor)
+    : ApiObjectBase(device, descriptor->label),
+      mSize(descriptor->size),
+      mUsage(descriptor->usage),
+      mState(BufferState::Unmapped) {
+    // Add readonly storage usage if the buffer has a storage usage. The validation rules in
+    // ValidateSyncScopeResourceUsage will make sure we don't use both at the same time.
+    if (mUsage & wgpu::BufferUsage::Storage) {
+        mUsage |= kReadOnlyStorageBuffer;
     }
 
-    uint64_t BufferBase::GetSize() const {
-        ASSERT(!IsError());
-        return mSize;
+    // The query resolve buffer needs to be used as a storage buffer in the internal compute
+    // pipeline which does timestamp uint conversion for timestamp query, it requires the buffer
+    // has Storage usage in the binding group. Implicitly add an InternalStorage usage which is
+    // only compatible with InternalStorageBuffer binding type in BGL. It shouldn't be
+    // compatible with StorageBuffer binding type and the query resolve buffer cannot be bound
+    // as storage buffer if it's created without Storage usage.
+    if (mUsage & wgpu::BufferUsage::QueryResolve) {
+        mUsage |= kInternalStorageBuffer;
     }
 
-    uint64_t BufferBase::GetAllocatedSize() const {
-        ASSERT(!IsError());
-        // The backend must initialize this value.
-        ASSERT(mAllocatedSize != 0);
-        return mAllocatedSize;
+    // We also add internal storage usage for Indirect buffers for some transformations before
+    // DispatchIndirect calls on the backend (e.g. validations, support of [[num_workgroups]] on
+    // D3D12), since these transformations involve binding them as storage buffers for use in a
+    // compute pass.
+    if (mUsage & wgpu::BufferUsage::Indirect) {
+        mUsage |= kInternalStorageBuffer;
     }
 
-    wgpu::BufferUsage BufferBase::GetUsage() const {
-        ASSERT(!IsError());
-        return mUsage;
-    }
+    TrackInDevice();
+}
 
-    wgpu::BufferUsage BufferBase::GetUsageExternalOnly() const {
-        return GetUsage() & ~kAllInternalBufferUsages;
-    }
-
-    MaybeError BufferBase::MapAtCreation() {
-        DAWN_TRY(MapAtCreationInternal());
-
-        void* ptr;
-        size_t size;
-        if (mSize == 0) {
-            return {};
-        } else if (mStagingBuffer) {
-            // If there is a staging buffer for initialization, clear its contents directly.
-            // It should be exactly as large as the buffer allocation.
-            ptr = mStagingBuffer->GetMappedPointer();
-            size = mStagingBuffer->GetSize();
-            ASSERT(size == GetAllocatedSize());
-        } else {
-            // Otherwise, the buffer is directly mappable on the CPU.
-            ptr = GetMappedPointerImpl();
-            size = GetAllocatedSize();
-        }
-
-        DeviceBase* device = GetDevice();
-        if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-            memset(ptr, uint8_t(0u), size);
-            SetIsDataInitialized();
-            device->IncrementLazyClearCountForTesting();
-        } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-            memset(ptr, uint8_t(1u), size);
-        }
-
-        return {};
-    }
-
-    MaybeError BufferBase::MapAtCreationInternal() {
-        ASSERT(!IsError());
+BufferBase::BufferBase(DeviceBase* device,
+                       const BufferDescriptor* descriptor,
+                       ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag), mSize(descriptor->size), mState(BufferState::Unmapped) {
+    if (descriptor->mappedAtCreation) {
+        mState = BufferState::MappedAtCreation;
         mMapOffset = 0;
         mMapSize = mSize;
-
-        // 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
-        // Skip handling 0-sized buffers so we don't try to map them in the backend.
-        if (mSize != 0) {
-            // Mappable buffers don't use a staging buffer and are just as if mapped through
-            // MapAsync.
-            if (IsCPUWritableAtCreation()) {
-                DAWN_TRY(MapAtCreationImpl());
-            } else {
-                // If any of these fail, the buffer will be deleted and replaced with an error
-                // buffer. The staging buffer is used to return mappable data to inititalize the
-                // buffer contents. Allocate one as large as the real buffer size so that every byte
-                // is initialized.
-                // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
-                // buffer so we don't create many small buffers.
-                DAWN_TRY_ASSIGN(mStagingBuffer,
-                                GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
-            }
-        }
-
-        // Only set the state to mapped at creation if we did no fail any point in this helper.
-        // Otherwise, if we override the default unmapped state before succeeding to create a
-        // staging buffer, we will have issues when we try to destroy the buffer.
-        mState = BufferState::MappedAtCreation;
-        return {};
     }
+}
 
-    MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
-        ASSERT(!IsError());
+BufferBase::BufferBase(DeviceBase* device, BufferState state)
+    : ApiObjectBase(device, kLabelNotImplemented), mState(state) {
+    TrackInDevice();
+}
 
-        switch (mState) {
-            case BufferState::Destroyed:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
-            case BufferState::Mapped:
-            case BufferState::MappedAtCreation:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
-            case BufferState::Unmapped:
-                return {};
-        }
-        UNREACHABLE();
-    }
+BufferBase::~BufferBase() {
+    ASSERT(mState == BufferState::Unmapped || mState == BufferState::Destroyed);
+}
 
-    void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
-        ASSERT(!IsError());
-        if (mMapCallback != nullptr && mapID == mLastMapID) {
-            // Tag the callback as fired before firing it, otherwise it could fire a second time if
-            // for example buffer.Unmap() is called inside the application-provided callback.
-            WGPUBufferMapCallback callback = mMapCallback;
-            mMapCallback = nullptr;
-
-            if (GetDevice()->IsLost()) {
-                callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
-            } else {
-                callback(status, mMapUserdata);
-            }
-        }
-    }
-
-    void BufferBase::APIMapAsync(wgpu::MapMode mode,
-                                 size_t offset,
-                                 size_t size,
-                                 WGPUBufferMapCallback callback,
-                                 void* userdata) {
-        // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
-        // possible to default the function argument (because there is the callback later in the
-        // argument list)
-        if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
-            size = mSize - offset;
-        }
-
-        WGPUBufferMapAsyncStatus status;
-        if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
-                                       "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
-                                       size)) {
-            if (callback) {
-                callback(status, userdata);
-            }
-            return;
-        }
-        ASSERT(!IsError());
-
-        mLastMapID++;
-        mMapMode = mode;
-        mMapOffset = offset;
-        mMapSize = size;
-        mMapCallback = callback;
-        mMapUserdata = userdata;
-        mState = BufferState::Mapped;
-
-        if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
-            CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
-            return;
-        }
-        std::unique_ptr<MapRequestTask> request =
-            std::make_unique<MapRequestTask>(this, mLastMapID);
-        TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
-                     uint64_t(GetDevice()->GetPendingCommandSerial()));
-        GetDevice()->GetQueue()->TrackTask(std::move(request),
-                                           GetDevice()->GetPendingCommandSerial());
-    }
-
-    void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
-        return GetMappedRange(offset, size, true);
-    }
-
-    const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
-        return GetMappedRange(offset, size, false);
-    }
-
-    void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
-        if (!CanGetMappedRange(writable, offset, size)) {
-            return nullptr;
-        }
-
+void BufferBase::DestroyImpl() {
+    if (mState == BufferState::Mapped) {
+        UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+    } else if (mState == BufferState::MappedAtCreation) {
         if (mStagingBuffer != nullptr) {
-            return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
+            mStagingBuffer.reset();
+        } else if (mSize != 0) {
+            UnmapInternal(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
         }
-        if (mSize == 0) {
-            return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
-        }
-        uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
-        return start == nullptr ? nullptr : start + offset;
     }
+    mState = BufferState::Destroyed;
+}
 
-    void BufferBase::APIDestroy() {
-        Destroy();
-    }
+// static
+BufferBase* BufferBase::MakeError(DeviceBase* device, const BufferDescriptor* descriptor) {
+    return new ErrorBuffer(device, descriptor);
+}
 
-    MaybeError BufferBase::CopyFromStagingBuffer() {
-        ASSERT(mStagingBuffer);
-        if (mSize == 0) {
-            // Staging buffer is not created if zero size.
-            ASSERT(mStagingBuffer == nullptr);
-            return {};
-        }
+ObjectType BufferBase::GetType() const {
+    return ObjectType::Buffer;
+}
 
-        DAWN_TRY(GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0,
-                                                      GetAllocatedSize()));
+uint64_t BufferBase::GetSize() const {
+    ASSERT(!IsError());
+    return mSize;
+}
 
-        DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
-        uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+uint64_t BufferBase::GetAllocatedSize() const {
+    ASSERT(!IsError());
+    // The backend must initialize this value.
+    ASSERT(mAllocatedSize != 0);
+    return mAllocatedSize;
+}
 
+wgpu::BufferUsage BufferBase::GetUsage() const {
+    ASSERT(!IsError());
+    return mUsage;
+}
+
+wgpu::BufferUsage BufferBase::GetUsageExternalOnly() const {
+    return GetUsage() & ~kAllInternalBufferUsages;
+}
+
+MaybeError BufferBase::MapAtCreation() {
+    DAWN_TRY(MapAtCreationInternal());
+
+    void* ptr;
+    size_t size;
+    if (mSize == 0) {
         return {};
+    } else if (mStagingBuffer) {
+        // If there is a staging buffer for initialization, clear its contents directly.
+        // It should be exactly as large as the buffer allocation.
+        ptr = mStagingBuffer->GetMappedPointer();
+        size = mStagingBuffer->GetSize();
+        ASSERT(size == GetAllocatedSize());
+    } else {
+        // Otherwise, the buffer is directly mappable on the CPU.
+        ptr = GetMappedPointerImpl();
+        size = GetAllocatedSize();
     }
 
-    void BufferBase::APIUnmap() {
-        if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
-            return;
-        }
-        Unmap();
+    DeviceBase* device = GetDevice();
+    if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+        memset(ptr, uint8_t(0u), size);
+        SetIsDataInitialized();
+        device->IncrementLazyClearCountForTesting();
+    } else if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        memset(ptr, uint8_t(1u), size);
     }
 
-    void BufferBase::Unmap() {
-        UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
-    }
+    return {};
+}
 
-    void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
-        if (mState == BufferState::Mapped) {
-            // A map request can only be called once, so this will fire only if the request wasn't
-            // completed before the Unmap.
-            // Callbacks are not fired if there is no callback registered, so this is correct for
-            // mappedAtCreation = true.
-            CallMapCallback(mLastMapID, callbackStatus);
-            UnmapImpl();
+MaybeError BufferBase::MapAtCreationInternal() {
+    ASSERT(!IsError());
+    mMapOffset = 0;
+    mMapSize = mSize;
 
-            mMapCallback = nullptr;
-            mMapUserdata = 0;
-        } else if (mState == BufferState::MappedAtCreation) {
-            if (mStagingBuffer != nullptr) {
-                GetDevice()->ConsumedError(CopyFromStagingBuffer());
-            } else if (mSize != 0) {
-                UnmapImpl();
-            }
-        }
-
-        mState = BufferState::Unmapped;
-    }
-
-    MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
-                                            size_t offset,
-                                            size_t size,
-                                            WGPUBufferMapAsyncStatus* status) const {
-        *status = WGPUBufferMapAsyncStatus_DeviceLost;
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-
-        *status = WGPUBufferMapAsyncStatus_Error;
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-
-        DAWN_INVALID_IF(uint64_t(offset) > mSize,
-                        "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize,
-                        this);
-
-        DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
-        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
-
-        DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
-                        "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
-                        offset, size, mSize, this);
-
-        switch (mState) {
-            case BufferState::Mapped:
-            case BufferState::MappedAtCreation:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
-            case BufferState::Destroyed:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
-            case BufferState::Unmapped:
-                break;
-        }
-
-        bool isReadMode = mode & wgpu::MapMode::Read;
-        bool isWriteMode = mode & wgpu::MapMode::Write;
-        DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
-                        wgpu::MapMode::Write, wgpu::MapMode::Read);
-
-        if (mode & wgpu::MapMode::Read) {
-            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
-                            "The buffer usages (%s) do not contain %s.", mUsage,
-                            wgpu::BufferUsage::MapRead);
+    // 0-sized buffers are not supposed to be written to. Return back any non-null pointer.
+    // Skip handling 0-sized buffers so we don't try to map them in the backend.
+    if (mSize != 0) {
+        // Mappable buffers don't use a staging buffer and are just as if mapped through
+        // MapAsync.
+        if (IsCPUWritableAtCreation()) {
+            DAWN_TRY(MapAtCreationImpl());
         } else {
-            ASSERT(mode & wgpu::MapMode::Write);
-            DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
-                            "The buffer usages (%s) do not contain %s.", mUsage,
-                            wgpu::BufferUsage::MapWrite);
+            // If any of these fail, the buffer will be deleted and replaced with an error
+            // buffer. The staging buffer is used to return mappable data to initialize the
+            // buffer contents. Allocate one as large as the real buffer size so that every byte
+            // is initialized.
+            // TODO(crbug.com/dawn/828): Suballocate and reuse memory from a larger staging
+            // buffer so we don't create many small buffers.
+            DAWN_TRY_ASSIGN(mStagingBuffer, GetDevice()->CreateStagingBuffer(GetAllocatedSize()));
         }
+    }
 
-        *status = WGPUBufferMapAsyncStatus_Success;
+    // Only set the state to mapped at creation if we did not fail at any point in this helper.
+    // Otherwise, if we override the default unmapped state before succeeding to create a
+    // staging buffer, we will have issues when we try to destroy the buffer.
+    mState = BufferState::MappedAtCreation;
+    return {};
+}
+
+MaybeError BufferBase::ValidateCanUseOnQueueNow() const {
+    ASSERT(!IsError());
+
+    switch (mState) {
+        case BufferState::Destroyed:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while destroyed.", this);
+        case BufferState::Mapped:
+        case BufferState::MappedAtCreation:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s used in submit while mapped.", this);
+        case BufferState::Unmapped:
+            return {};
+    }
+    UNREACHABLE();
+}
+
+void BufferBase::CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+    ASSERT(!IsError());
+    if (mMapCallback != nullptr && mapID == mLastMapID) {
+        // Tag the callback as fired before firing it, otherwise it could fire a second time if
+        // for example buffer.Unmap() is called inside the application-provided callback.
+        WGPUBufferMapCallback callback = mMapCallback;
+        mMapCallback = nullptr;
+
+        if (GetDevice()->IsLost()) {
+            callback(WGPUBufferMapAsyncStatus_DeviceLost, mMapUserdata);
+        } else {
+            callback(status, mMapUserdata);
+        }
+    }
+}
+
+void BufferBase::APIMapAsync(wgpu::MapMode mode,
+                             size_t offset,
+                             size_t size,
+                             WGPUBufferMapCallback callback,
+                             void* userdata) {
+    // Handle the defaulting of size required by WebGPU, even if in webgpu_cpp.h it is not
+    // possible to default the function argument (because there is the callback later in the
+    // argument list)
+    if ((size == wgpu::kWholeMapSize) && (offset <= mSize)) {
+        size = mSize - offset;
+    }
+
+    WGPUBufferMapAsyncStatus status;
+    if (GetDevice()->ConsumedError(ValidateMapAsync(mode, offset, size, &status),
+                                   "calling %s.MapAsync(%s, %u, %u, ...).", this, mode, offset,
+                                   size)) {
+        if (callback) {
+            callback(status, userdata);
+        }
+        return;
+    }
+    ASSERT(!IsError());
+
+    mLastMapID++;
+    mMapMode = mode;
+    mMapOffset = offset;
+    mMapSize = size;
+    mMapCallback = callback;
+    mMapUserdata = userdata;
+    mState = BufferState::Mapped;
+
+    if (GetDevice()->ConsumedError(MapAsyncImpl(mode, offset, size))) {
+        CallMapCallback(mLastMapID, WGPUBufferMapAsyncStatus_DeviceLost);
+        return;
+    }
+    std::unique_ptr<MapRequestTask> request = std::make_unique<MapRequestTask>(this, mLastMapID);
+    TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Buffer::APIMapAsync", "serial",
+                 uint64_t(GetDevice()->GetPendingCommandSerial()));
+    GetDevice()->GetQueue()->TrackTask(std::move(request), GetDevice()->GetPendingCommandSerial());
+}
+
+void* BufferBase::APIGetMappedRange(size_t offset, size_t size) {
+    return GetMappedRange(offset, size, true);
+}
+
+const void* BufferBase::APIGetConstMappedRange(size_t offset, size_t size) {
+    return GetMappedRange(offset, size, false);
+}
+
+void* BufferBase::GetMappedRange(size_t offset, size_t size, bool writable) {
+    if (!CanGetMappedRange(writable, offset, size)) {
+        return nullptr;
+    }
+
+    if (mStagingBuffer != nullptr) {
+        return static_cast<uint8_t*>(mStagingBuffer->GetMappedPointer()) + offset;
+    }
+    if (mSize == 0) {
+        return reinterpret_cast<uint8_t*>(intptr_t(0xCAFED00D));
+    }
+    uint8_t* start = static_cast<uint8_t*>(GetMappedPointerImpl());
+    return start == nullptr ? nullptr : start + offset;
+}
+
+void BufferBase::APIDestroy() {
+    Destroy();
+}
+
+MaybeError BufferBase::CopyFromStagingBuffer() {
+    ASSERT(mStagingBuffer);
+    if (mSize == 0) {
+        // Staging buffer is not created if zero size.
+        ASSERT(mStagingBuffer == nullptr);
         return {};
     }
 
-    bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
-        if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
+    DAWN_TRY(
+        GetDevice()->CopyFromStagingToBuffer(mStagingBuffer.get(), 0, this, 0, GetAllocatedSize()));
+
+    DynamicUploader* uploader = GetDevice()->GetDynamicUploader();
+    uploader->ReleaseStagingBuffer(std::move(mStagingBuffer));
+
+    return {};
+}
+
+void BufferBase::APIUnmap() {
+    if (GetDevice()->ConsumedError(ValidateUnmap(), "calling %s.Unmap().", this)) {
+        return;
+    }
+    Unmap();
+}
+
+void BufferBase::Unmap() {
+    UnmapInternal(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback);
+}
+
+void BufferBase::UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus) {
+    if (mState == BufferState::Mapped) {
+        // A map request can only be called once, so this will fire only if the request wasn't
+        // completed before the Unmap.
+        // Callbacks are not fired if there is no callback registered, so this is correct for
+        // mappedAtCreation = true.
+        CallMapCallback(mLastMapID, callbackStatus);
+        UnmapImpl();
+
+        mMapCallback = nullptr;
+        mMapUserdata = 0;
+    } else if (mState == BufferState::MappedAtCreation) {
+        if (mStagingBuffer != nullptr) {
+            GetDevice()->ConsumedError(CopyFromStagingBuffer());
+        } else if (mSize != 0) {
+            UnmapImpl();
+        }
+    }
+
+    mState = BufferState::Unmapped;
+}
+
+MaybeError BufferBase::ValidateMapAsync(wgpu::MapMode mode,
+                                        size_t offset,
+                                        size_t size,
+                                        WGPUBufferMapAsyncStatus* status) const {
+    *status = WGPUBufferMapAsyncStatus_DeviceLost;
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+    *status = WGPUBufferMapAsyncStatus_Error;
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    DAWN_INVALID_IF(uint64_t(offset) > mSize,
+                    "Mapping offset (%u) is larger than the size (%u) of %s.", offset, mSize, this);
+
+    DAWN_INVALID_IF(offset % 8 != 0, "Offset (%u) must be a multiple of 8.", offset);
+    DAWN_INVALID_IF(size % 4 != 0, "Size (%u) must be a multiple of 4.", size);
+
+    DAWN_INVALID_IF(uint64_t(size) > mSize - uint64_t(offset),
+                    "Mapping range (offset:%u, size: %u) doesn't fit in the size (%u) of %s.",
+                    offset, size, mSize, this);
+
+    switch (mState) {
+        case BufferState::Mapped:
+        case BufferState::MappedAtCreation:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is already mapped.", this);
+        case BufferState::Destroyed:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
+        case BufferState::Unmapped:
+            break;
+    }
+
+    bool isReadMode = mode & wgpu::MapMode::Read;
+    bool isWriteMode = mode & wgpu::MapMode::Write;
+    DAWN_INVALID_IF(!(isReadMode ^ isWriteMode), "Map mode (%s) is not one of %s or %s.", mode,
+                    wgpu::MapMode::Write, wgpu::MapMode::Read);
+
+    if (mode & wgpu::MapMode::Read) {
+        DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapRead),
+                        "The buffer usages (%s) do not contain %s.", mUsage,
+                        wgpu::BufferUsage::MapRead);
+    } else {
+        ASSERT(mode & wgpu::MapMode::Write);
+        DAWN_INVALID_IF(!(mUsage & wgpu::BufferUsage::MapWrite),
+                        "The buffer usages (%s) do not contain %s.", mUsage,
+                        wgpu::BufferUsage::MapWrite);
+    }
+
+    *status = WGPUBufferMapAsyncStatus_Success;
+    return {};
+}
+
+bool BufferBase::CanGetMappedRange(bool writable, size_t offset, size_t size) const {
+    if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
+        return false;
+    }
+
+    size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
+
+    if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
+        return false;
+    }
+
+    size_t offsetInMappedRange = offset - mMapOffset;
+    if (offsetInMappedRange > mMapSize - rangeSize) {
+        return false;
+    }
+
+    // Note that:
+    //
+    //   - We don't check that the device is alive because the application can ask for the
+    //     mapped pointer before it knows, and even Dawn knows, that the device was lost, and
+    //     still needs to work properly.
+    //   - We don't check that the object is alive because we need to return mapped pointers
+    //     for error buffers too.
+
+    switch (mState) {
+        // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
+        case BufferState::MappedAtCreation:
+            return true;
+
+        case BufferState::Mapped:
+            ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^ bool{mMapMode & wgpu::MapMode::Write});
+            return !writable || (mMapMode & wgpu::MapMode::Write);
+
+        case BufferState::Unmapped:
+        case BufferState::Destroyed:
             return false;
-        }
-
-        size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
-
-        if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
-            return false;
-        }
-
-        size_t offsetInMappedRange = offset - mMapOffset;
-        if (offsetInMappedRange > mMapSize - rangeSize) {
-            return false;
-        }
-
-        // Note that:
-        //
-        //   - We don't check that the device is alive because the application can ask for the
-        //     mapped pointer before it knows, and even Dawn knows, that the device was lost, and
-        //     still needs to work properly.
-        //   - We don't check that the object is alive because we need to return mapped pointers
-        //     for error buffers too.
-
-        switch (mState) {
-            // Writeable Buffer::GetMappedRange is always allowed when mapped at creation.
-            case BufferState::MappedAtCreation:
-                return true;
-
-            case BufferState::Mapped:
-                ASSERT(bool{mMapMode & wgpu::MapMode::Read} ^
-                       bool{mMapMode & wgpu::MapMode::Write});
-                return !writable || (mMapMode & wgpu::MapMode::Write);
-
-            case BufferState::Unmapped:
-            case BufferState::Destroyed:
-                return false;
-        }
-        UNREACHABLE();
     }
+    UNREACHABLE();
+}
 
-    MaybeError BufferBase::ValidateUnmap() const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
+MaybeError BufferBase::ValidateUnmap() const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
 
-        switch (mState) {
-            case BufferState::Mapped:
-            case BufferState::MappedAtCreation:
-                // A buffer may be in the Mapped state if it was created with mappedAtCreation
-                // even if it did not have a mappable usage.
-                return {};
-            case BufferState::Unmapped:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
-            case BufferState::Destroyed:
-                return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
-        }
-        UNREACHABLE();
+    switch (mState) {
+        case BufferState::Mapped:
+        case BufferState::MappedAtCreation:
+            // A buffer may be in the Mapped state if it was created with mappedAtCreation
+            // even if it did not have a mappable usage.
+            return {};
+        case BufferState::Unmapped:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is unmapped.", this);
+        case BufferState::Destroyed:
+            return DAWN_FORMAT_VALIDATION_ERROR("%s is destroyed.", this);
     }
+    UNREACHABLE();
+}
 
-    void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
-        CallMapCallback(mapID, status);
-    }
+void BufferBase::OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status) {
+    CallMapCallback(mapID, status);
+}
 
-    bool BufferBase::NeedsInitialization() const {
-        return !mIsDataInitialized &&
-               GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
-    }
+bool BufferBase::NeedsInitialization() const {
+    return !mIsDataInitialized && GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse);
+}
 
-    bool BufferBase::IsDataInitialized() const {
-        return mIsDataInitialized;
-    }
+bool BufferBase::IsDataInitialized() const {
+    return mIsDataInitialized;
+}
 
-    void BufferBase::SetIsDataInitialized() {
-        mIsDataInitialized = true;
-    }
+void BufferBase::SetIsDataInitialized() {
+    mIsDataInitialized = true;
+}
 
-    bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
-        return offset == 0 && size == GetSize();
-    }
+bool BufferBase::IsFullBufferRange(uint64_t offset, uint64_t size) const {
+    return offset == 0 && size == GetSize();
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Buffer.h b/src/dawn/native/Buffer.h
index fd138c2..e5c6150 100644
--- a/src/dawn/native/Buffer.h
+++ b/src/dawn/native/Buffer.h
@@ -26,114 +26,112 @@
 
 namespace dawn::native {
 
-    struct CopyTextureToBufferCmd;
+struct CopyTextureToBufferCmd;
 
-    enum class MapType : uint32_t;
+enum class MapType : uint32_t;
 
-    MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
+MaybeError ValidateBufferDescriptor(DeviceBase* device, const BufferDescriptor* descriptor);
 
-    static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
-        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
-        wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
-        wgpu::BufferUsage::Indirect;
+static constexpr wgpu::BufferUsage kReadOnlyBufferUsages =
+    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::Index |
+    wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform | kReadOnlyStorageBuffer |
+    wgpu::BufferUsage::Indirect;
 
-    static constexpr wgpu::BufferUsage kMappableBufferUsages =
-        wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
+static constexpr wgpu::BufferUsage kMappableBufferUsages =
+    wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite;
 
-    class BufferBase : public ApiObjectBase {
-      public:
-        enum class BufferState {
-            Unmapped,
-            Mapped,
-            MappedAtCreation,
-            Destroyed,
-        };
-        BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
-
-        static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
-
-        ObjectType GetType() const override;
-
-        uint64_t GetSize() const;
-        uint64_t GetAllocatedSize() const;
-
-        // |GetUsageExternalOnly| returns the usage with which the buffer was created using the
-        // base WebGPU API. Additional usages may be added for internal state tracking. |GetUsage|
-        // returns the union of base usage and the usages added internally.
-        wgpu::BufferUsage GetUsage() const;
-        wgpu::BufferUsage GetUsageExternalOnly() const;
-
-        MaybeError MapAtCreation();
-        void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
-
-        MaybeError ValidateCanUseOnQueueNow() const;
-
-        bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
-        bool NeedsInitialization() const;
-        bool IsDataInitialized() const;
-        void SetIsDataInitialized();
-
-        void* GetMappedRange(size_t offset, size_t size, bool writable = true);
-        void Unmap();
-
-        // Dawn API
-        void APIMapAsync(wgpu::MapMode mode,
-                         size_t offset,
-                         size_t size,
-                         WGPUBufferMapCallback callback,
-                         void* userdata);
-        void* APIGetMappedRange(size_t offset, size_t size);
-        const void* APIGetConstMappedRange(size_t offset, size_t size);
-        void APIUnmap();
-        void APIDestroy();
-
-      protected:
-        BufferBase(DeviceBase* device,
-                   const BufferDescriptor* descriptor,
-                   ObjectBase::ErrorTag tag);
-
-        // Constructor used only for mocking and testing.
-        BufferBase(DeviceBase* device, BufferState state);
-        void DestroyImpl() override;
-
-        ~BufferBase() override;
-
-        MaybeError MapAtCreationInternal();
-
-        uint64_t mAllocatedSize = 0;
-
-      private:
-        virtual MaybeError MapAtCreationImpl() = 0;
-        virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
-        virtual void UnmapImpl() = 0;
-        virtual void* GetMappedPointerImpl() = 0;
-
-        virtual bool IsCPUWritableAtCreation() const = 0;
-        MaybeError CopyFromStagingBuffer();
-        void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
-
-        MaybeError ValidateMapAsync(wgpu::MapMode mode,
-                                    size_t offset,
-                                    size_t size,
-                                    WGPUBufferMapAsyncStatus* status) const;
-        MaybeError ValidateUnmap() const;
-        bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
-        void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
-
-        uint64_t mSize = 0;
-        wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
-        BufferState mState;
-        bool mIsDataInitialized = false;
-
-        std::unique_ptr<StagingBufferBase> mStagingBuffer;
-
-        WGPUBufferMapCallback mMapCallback = nullptr;
-        void* mMapUserdata = 0;
-        MapRequestID mLastMapID = MapRequestID(0);
-        wgpu::MapMode mMapMode = wgpu::MapMode::None;
-        size_t mMapOffset = 0;
-        size_t mMapSize = 0;
+class BufferBase : public ApiObjectBase {
+  public:
+    enum class BufferState {
+        Unmapped,
+        Mapped,
+        MappedAtCreation,
+        Destroyed,
     };
+    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor);
+
+    static BufferBase* MakeError(DeviceBase* device, const BufferDescriptor* descriptor);
+
+    ObjectType GetType() const override;
+
+    uint64_t GetSize() const;
+    uint64_t GetAllocatedSize() const;
+
+    // |GetUsageExternalOnly| returns the usage with which the buffer was created using the
+    // base WebGPU API. Additional usages may be added for internal state tracking. |GetUsage|
+    // returns the union of base usage and the usages added internally.
+    wgpu::BufferUsage GetUsage() const;
+    wgpu::BufferUsage GetUsageExternalOnly() const;
+
+    MaybeError MapAtCreation();
+    void OnMapRequestCompleted(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+    MaybeError ValidateCanUseOnQueueNow() const;
+
+    bool IsFullBufferRange(uint64_t offset, uint64_t size) const;
+    bool NeedsInitialization() const;
+    bool IsDataInitialized() const;
+    void SetIsDataInitialized();
+
+    void* GetMappedRange(size_t offset, size_t size, bool writable = true);
+    void Unmap();
+
+    // Dawn API
+    void APIMapAsync(wgpu::MapMode mode,
+                     size_t offset,
+                     size_t size,
+                     WGPUBufferMapCallback callback,
+                     void* userdata);
+    void* APIGetMappedRange(size_t offset, size_t size);
+    const void* APIGetConstMappedRange(size_t offset, size_t size);
+    void APIUnmap();
+    void APIDestroy();
+
+  protected:
+    BufferBase(DeviceBase* device, const BufferDescriptor* descriptor, ObjectBase::ErrorTag tag);
+
+    // Constructor used only for mocking and testing.
+    BufferBase(DeviceBase* device, BufferState state);
+    void DestroyImpl() override;
+
+    ~BufferBase() override;
+
+    MaybeError MapAtCreationInternal();
+
+    uint64_t mAllocatedSize = 0;
+
+  private:
+    virtual MaybeError MapAtCreationImpl() = 0;
+    virtual MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) = 0;
+    virtual void UnmapImpl() = 0;
+    virtual void* GetMappedPointerImpl() = 0;
+
+    virtual bool IsCPUWritableAtCreation() const = 0;
+    MaybeError CopyFromStagingBuffer();
+    void CallMapCallback(MapRequestID mapID, WGPUBufferMapAsyncStatus status);
+
+    MaybeError ValidateMapAsync(wgpu::MapMode mode,
+                                size_t offset,
+                                size_t size,
+                                WGPUBufferMapAsyncStatus* status) const;
+    MaybeError ValidateUnmap() const;
+    bool CanGetMappedRange(bool writable, size_t offset, size_t size) const;
+    void UnmapInternal(WGPUBufferMapAsyncStatus callbackStatus);
+
+    uint64_t mSize = 0;
+    wgpu::BufferUsage mUsage = wgpu::BufferUsage::None;
+    BufferState mState;
+    bool mIsDataInitialized = false;
+
+    std::unique_ptr<StagingBufferBase> mStagingBuffer;
+
+    WGPUBufferMapCallback mMapCallback = nullptr;
+    void* mMapUserdata = 0;
+    MapRequestID mLastMapID = MapRequestID(0);
+    wgpu::MapMode mMapMode = wgpu::MapMode::None;
+    size_t mMapOffset = 0;
+    size_t mMapSize = 0;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CacheKey.cpp b/src/dawn/native/CacheKey.cpp
index dea67f8..495b013 100644
--- a/src/dawn/native/CacheKey.cpp
+++ b/src/dawn/native/CacheKey.cpp
@@ -18,26 +18,26 @@
 
 namespace dawn::native {
 
-    std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
-        os << std::hex;
-        for (const int b : key) {
-            os << std::setfill('0') << std::setw(2) << b << " ";
-        }
-        os << std::dec;
-        return os;
+std::ostream& operator<<(std::ostream& os, const CacheKey& key) {
+    os << std::hex;
+    for (const int b : key) {
+        os << std::setfill('0') << std::setw(2) << b << " ";
     }
+    os << std::dec;
+    return os;
+}
 
-    template <>
-    void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
-        key->Record(static_cast<size_t>(t.length()));
-        key->insert(key->end(), t.begin(), t.end());
-    }
+template <>
+void CacheKeySerializer<std::string>::Serialize(CacheKey* key, const std::string& t) {
+    key->Record(static_cast<size_t>(t.length()));
+    key->insert(key->end(), t.begin(), t.end());
+}
 
-    template <>
-    void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
-        // For nested cache keys, we do not record the length, and just copy the key so that it
-        // appears we just flatten the keys into a single key.
-        key->insert(key->end(), t.begin(), t.end());
-    }
+template <>
+void CacheKeySerializer<CacheKey>::Serialize(CacheKey* key, const CacheKey& t) {
+    // For nested cache keys, we do not record the length, and just copy the key so that it
+    // appears we just flatten the keys into a single key.
+    key->insert(key->end(), t.begin(), t.end());
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CacheKey.h b/src/dawn/native/CacheKey.h
index 8b920b5..357ce4b 100644
--- a/src/dawn/native/CacheKey.h
+++ b/src/dawn/native/CacheKey.h
@@ -27,179 +27,175 @@
 
 namespace dawn::native {
 
-    // Forward declare classes because of co-dependency.
-    class CacheKey;
-    class CachedObject;
+// Forward declare classes because of co-dependency.
+class CacheKey;
+class CachedObject;
 
-    // Stream operator for CacheKey for debugging.
-    std::ostream& operator<<(std::ostream& os, const CacheKey& key);
+// Stream operator for CacheKey for debugging.
+std::ostream& operator<<(std::ostream& os, const CacheKey& key);
 
-    // Overridable serializer struct that should be implemented for cache key serializable
-    // types/classes.
-    template <typename T, typename SFINAE = void>
-    class CacheKeySerializer {
-      public:
-        static void Serialize(CacheKey* key, const T& t);
-    };
+// Overridable serializer struct that should be implemented for cache key serializable
+// types/classes.
+template <typename T, typename SFINAE = void>
+class CacheKeySerializer {
+  public:
+    static void Serialize(CacheKey* key, const T& t);
+};
 
-    class CacheKey : public std::vector<uint8_t> {
-      public:
-        using std::vector<uint8_t>::vector;
+class CacheKey : public std::vector<uint8_t> {
+  public:
+    using std::vector<uint8_t>::vector;
 
-        enum class Type { ComputePipeline, RenderPipeline, Shader };
+    enum class Type { ComputePipeline, RenderPipeline, Shader };
 
-        template <typename T>
-        CacheKey& Record(const T& t) {
-            CacheKeySerializer<T>::Serialize(this, t);
-            return *this;
-        }
-        template <typename T, typename... Args>
-        CacheKey& Record(const T& t, const Args&... args) {
-            CacheKeySerializer<T>::Serialize(this, t);
-            return Record(args...);
-        }
-
-        // Records iterables by prepending the number of elements. Some common iterables are have a
-        // CacheKeySerializer implemented to avoid needing to split them out when recording, i.e.
-        // strings and CacheKeys, but they fundamentally do the same as this function.
-        template <typename IterableT>
-        CacheKey& RecordIterable(const IterableT& iterable) {
-            // Always record the size of generic iterables as a size_t for now.
-            Record(static_cast<size_t>(iterable.size()));
-            for (auto it = iterable.begin(); it != iterable.end(); ++it) {
-                Record(*it);
-            }
-            return *this;
-        }
-        template <typename Index, typename Value, size_t Size>
-        CacheKey& RecordIterable(const ityp::array<Index, Value, Size>& iterable) {
-            Record(static_cast<Index>(iterable.size()));
-            for (auto it = iterable.begin(); it != iterable.end(); ++it) {
-                Record(*it);
-            }
-            return *this;
-        }
-        template <typename Ptr>
-        CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
-            Record(n);
-            for (size_t i = 0; i < n; ++i) {
-                Record(ptr[i]);
-            }
-            return *this;
-        }
-    };
-
-    // Specialized overload for fundamental types.
     template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T t) {
-            const char* it = reinterpret_cast<const char*>(&t);
-            key->insert(key->end(), it, (it + sizeof(T)));
-        }
-    };
+    CacheKey& Record(const T& t) {
+        CacheKeySerializer<T>::Serialize(this, t);
+        return *this;
+    }
+    template <typename T, typename... Args>
+    CacheKey& Record(const T& t, const Args&... args) {
+        CacheKeySerializer<T>::Serialize(this, t);
+        return Record(args...);
+    }
 
-    // Specialized overload for bitsets that are smaller than 64.
-    template <size_t N>
-    class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
-      public:
-        static void Serialize(CacheKey* key, const std::bitset<N>& t) {
-            key->Record(t.to_ullong());
+    // Records iterables by prepending the number of elements. Some common iterables have a
+    // CacheKeySerializer implemented to avoid needing to split them out when recording, i.e.
+    // strings and CacheKeys, but they fundamentally do the same as this function.
+    template <typename IterableT>
+    CacheKey& RecordIterable(const IterableT& iterable) {
+        // Always record the size of generic iterables as a size_t for now.
+        Record(static_cast<size_t>(iterable.size()));
+        for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+            Record(*it);
         }
-    };
+        return *this;
+    }
+    template <typename Index, typename Value, size_t Size>
+    CacheKey& RecordIterable(const ityp::array<Index, Value, Size>& iterable) {
+        Record(static_cast<Index>(iterable.size()));
+        for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+            Record(*it);
+        }
+        return *this;
+    }
+    template <typename Ptr>
+    CacheKey& RecordIterable(const Ptr* ptr, size_t n) {
+        Record(n);
+        for (size_t i = 0; i < n; ++i) {
+            Record(ptr[i]);
+        }
+        return *this;
+    }
+};
 
-    // Specialized overload for bitsets since using the built-in to_ullong have a size limit.
-    template <size_t N>
-    class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
-      public:
-        static void Serialize(CacheKey* key, const std::bitset<N>& t) {
-            // Serializes the bitset into series of uint8_t, along with recording the size.
-            static_assert(N > 0);
-            key->Record(static_cast<size_t>(N));
-            uint8_t value = 0;
-            for (size_t i = 0; i < N; i++) {
-                value <<= 1;
-                // Explicitly convert to numeric since MSVC doesn't like mixing of bools.
-                value |= t[i] ? 1 : 0;
-                if (i % 8 == 7) {
-                    // Whenever we fill an 8 bit value, record it and zero it out.
-                    key->Record(value);
-                    value = 0;
-                }
-            }
-            // Serialize the last value if we are not a multiple of 8.
-            if (N % 8 != 0) {
+// Specialized overload for fundamental types.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_fundamental_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T t) {
+        const char* it = reinterpret_cast<const char*>(&t);
+        key->insert(key->end(), it, (it + sizeof(T)));
+    }
+};
+
+// Specialized overload for bitsets with 64 or fewer bits.
+template <size_t N>
+class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N <= 64)>> {
+  public:
+    static void Serialize(CacheKey* key, const std::bitset<N>& t) { key->Record(t.to_ullong()); }
+};
+
+// Specialized overload for larger bitsets, since the built-in to_ullong has a size limit.
+template <size_t N>
+class CacheKeySerializer<std::bitset<N>, std::enable_if_t<(N > 64)>> {
+  public:
+    static void Serialize(CacheKey* key, const std::bitset<N>& t) {
+        // Serializes the bitset into series of uint8_t, along with recording the size.
+        static_assert(N > 0);
+        key->Record(static_cast<size_t>(N));
+        uint8_t value = 0;
+        for (size_t i = 0; i < N; i++) {
+            value <<= 1;
+            // Explicitly convert to numeric since MSVC doesn't like mixing of bools.
+            value |= t[i] ? 1 : 0;
+            if (i % 8 == 7) {
+                // Whenever we fill an 8 bit value, record it and zero it out.
                 key->Record(value);
+                value = 0;
             }
         }
-    };
-
-    // Specialized overload for enums.
-    template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T t) {
-            CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
-                key, static_cast<std::underlying_type_t<T>>(t));
+        // Serialize the last value if we are not a multiple of 8.
+        if (N % 8 != 0) {
+            key->Record(value);
         }
-    };
+    }
+};
 
-    // Specialized overload for TypedInteger.
-    template <typename Tag, typename Integer>
-    class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
-      public:
-        static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
-            CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
-        }
-    };
+// Specialized overload for enums.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_enum_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T t) {
+        CacheKeySerializer<std::underlying_type_t<T>>::Serialize(
+            key, static_cast<std::underlying_type_t<T>>(t));
+    }
+};
 
-    // Specialized overload for pointers. Since we are serializing for a cache key, we always
-    // serialize via value, not by pointer. To handle nullptr scenarios, we always serialize whether
-    // the pointer was nullptr followed by the contents if applicable.
-    template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T t) {
-            key->Record(t == nullptr);
-            if (t != nullptr) {
-                CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
-            }
-        }
-    };
+// Specialized overload for TypedInteger.
+template <typename Tag, typename Integer>
+class CacheKeySerializer<::detail::TypedIntegerImpl<Tag, Integer>> {
+  public:
+    static void Serialize(CacheKey* key, const ::detail::TypedIntegerImpl<Tag, Integer> t) {
+        CacheKeySerializer<Integer>::Serialize(key, static_cast<Integer>(t));
+    }
+};
 
-    // Specialized overload for fixed arrays of primitives.
-    template <typename T, size_t N>
-    class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T (&t)[N]) {
-            static_assert(N > 0);
-            key->Record(static_cast<size_t>(N));
-            const char* it = reinterpret_cast<const char*>(t);
-            key->insert(key->end(), it, it + sizeof(t));
+// Specialized overload for pointers. Since we are serializing for a cache key, we always
+// serialize via value, not by pointer. To handle nullptr scenarios, we always serialize whether
+// the pointer was nullptr followed by the contents if applicable.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_pointer_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T t) {
+        key->Record(t == nullptr);
+        if (t != nullptr) {
+            CacheKeySerializer<std::remove_cv_t<std::remove_pointer_t<T>>>::Serialize(key, *t);
         }
-    };
+    }
+};
 
-    // Specialized overload for fixed arrays of non-primitives.
-    template <typename T, size_t N>
-    class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T (&t)[N]) {
-            static_assert(N > 0);
-            key->Record(static_cast<size_t>(N));
-            for (size_t i = 0; i < N; i++) {
-                key->Record(t[i]);
-            }
-        }
-    };
+// Specialized overload for fixed arrays of primitives.
+template <typename T, size_t N>
+class CacheKeySerializer<T[N], std::enable_if_t<std::is_fundamental_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T (&t)[N]) {
+        static_assert(N > 0);
+        key->Record(static_cast<size_t>(N));
+        const char* it = reinterpret_cast<const char*>(t);
+        key->insert(key->end(), it, it + sizeof(t));
+    }
+};
 
-    // Specialized overload for CachedObjects.
-    template <typename T>
-    class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
-      public:
-        static void Serialize(CacheKey* key, const T& t) {
-            key->Record(t.GetCacheKey());
+// Specialized overload for fixed arrays of non-primitives.
+template <typename T, size_t N>
+class CacheKeySerializer<T[N], std::enable_if_t<!std::is_fundamental_v<T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T (&t)[N]) {
+        static_assert(N > 0);
+        key->Record(static_cast<size_t>(N));
+        for (size_t i = 0; i < N; i++) {
+            key->Record(t[i]);
         }
-    };
+    }
+};
+
+// Specialized overload for CachedObjects.
+template <typename T>
+class CacheKeySerializer<T, std::enable_if_t<std::is_base_of_v<CachedObject, T>>> {
+  public:
+    static void Serialize(CacheKey* key, const T& t) { key->Record(t.GetCacheKey()); }
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CachedObject.cpp b/src/dawn/native/CachedObject.cpp
index e7e7cd8..26c58b9 100644
--- a/src/dawn/native/CachedObject.cpp
+++ b/src/dawn/native/CachedObject.cpp
@@ -19,35 +19,35 @@
 
 namespace dawn::native {
 
-    bool CachedObject::IsCachedReference() const {
-        return mIsCachedReference;
-    }
+bool CachedObject::IsCachedReference() const {
+    return mIsCachedReference;
+}
 
-    void CachedObject::SetIsCachedReference() {
-        mIsCachedReference = true;
-    }
+void CachedObject::SetIsCachedReference() {
+    mIsCachedReference = true;
+}
 
-    size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
-        return obj->GetContentHash();
-    }
+size_t CachedObject::HashFunc::operator()(const CachedObject* obj) const {
+    return obj->GetContentHash();
+}
 
-    size_t CachedObject::GetContentHash() const {
-        ASSERT(mIsContentHashInitialized);
-        return mContentHash;
-    }
+size_t CachedObject::GetContentHash() const {
+    ASSERT(mIsContentHashInitialized);
+    return mContentHash;
+}
 
-    void CachedObject::SetContentHash(size_t contentHash) {
-        ASSERT(!mIsContentHashInitialized);
-        mContentHash = contentHash;
-        mIsContentHashInitialized = true;
-    }
+void CachedObject::SetContentHash(size_t contentHash) {
+    ASSERT(!mIsContentHashInitialized);
+    mContentHash = contentHash;
+    mIsContentHashInitialized = true;
+}
 
-    const CacheKey& CachedObject::GetCacheKey() const {
-        return mCacheKey;
-    }
+const CacheKey& CachedObject::GetCacheKey() const {
+    return mCacheKey;
+}
 
-    CacheKey* CachedObject::GetCacheKey() {
-        return &mCacheKey;
-    }
+CacheKey* CachedObject::GetCacheKey() {
+    return &mCacheKey;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CachedObject.h b/src/dawn/native/CachedObject.h
index f275e02..6fda516 100644
--- a/src/dawn/native/CachedObject.h
+++ b/src/dawn/native/CachedObject.h
@@ -23,43 +23,43 @@
 
 namespace dawn::native {
 
-    // Some objects are cached so that instead of creating new duplicate objects,
-    // we increase the refcount of an existing object.
-    // When an object is successfully created, the device should call
-    // SetIsCachedReference() and insert the object into the cache.
-    class CachedObject {
-      public:
-        bool IsCachedReference() const;
+// Some objects are cached so that instead of creating new duplicate objects,
+// we increase the refcount of an existing object.
+// When an object is successfully created, the device should call
+// SetIsCachedReference() and insert the object into the cache.
+class CachedObject {
+  public:
+    bool IsCachedReference() const;
 
-        // Functor necessary for the unordered_set<CachedObject*>-based cache.
-        struct HashFunc {
-            size_t operator()(const CachedObject* obj) const;
-        };
-
-        size_t GetContentHash() const;
-        void SetContentHash(size_t contentHash);
-
-        // Returns the cache key for the object only, i.e. without device/adapter information.
-        const CacheKey& GetCacheKey() const;
-
-      protected:
-        // Protected accessor for derived classes to access and modify the key.
-        CacheKey* GetCacheKey();
-
-      private:
-        friend class DeviceBase;
-        void SetIsCachedReference();
-
-        bool mIsCachedReference = false;
-
-        // Called by ObjectContentHasher upon creation to record the object.
-        virtual size_t ComputeContentHash() = 0;
-
-        size_t mContentHash = 0;
-        bool mIsContentHashInitialized = false;
-        CacheKey mCacheKey;
+    // Functor necessary for the unordered_set<CachedObject*>-based cache.
+    struct HashFunc {
+        size_t operator()(const CachedObject* obj) const;
     };
 
+    size_t GetContentHash() const;
+    void SetContentHash(size_t contentHash);
+
+    // Returns the cache key for the object only, i.e. without device/adapter information.
+    const CacheKey& GetCacheKey() const;
+
+  protected:
+    // Protected accessor for derived classes to access and modify the key.
+    CacheKey* GetCacheKey();
+
+  private:
+    friend class DeviceBase;
+    void SetIsCachedReference();
+
+    bool mIsCachedReference = false;
+
+    // Called by ObjectContentHasher upon creation to record the object.
+    virtual size_t ComputeContentHash() = 0;
+
+    size_t mContentHash = 0;
+    bool mIsContentHashInitialized = false;
+    CacheKey mCacheKey;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_CACHEDOBJECT_H_
diff --git a/src/dawn/native/CallbackTaskManager.cpp b/src/dawn/native/CallbackTaskManager.cpp
index d9589a7..40146f8 100644
--- a/src/dawn/native/CallbackTaskManager.cpp
+++ b/src/dawn/native/CallbackTaskManager.cpp
@@ -18,22 +18,22 @@
 
 namespace dawn::native {
 
-    bool CallbackTaskManager::IsEmpty() {
-        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
-        return mCallbackTaskQueue.empty();
-    }
+bool CallbackTaskManager::IsEmpty() {
+    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+    return mCallbackTaskQueue.empty();
+}
 
-    std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
-        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+std::vector<std::unique_ptr<CallbackTask>> CallbackTaskManager::AcquireCallbackTasks() {
+    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
 
-        std::vector<std::unique_ptr<CallbackTask>> allTasks;
-        allTasks.swap(mCallbackTaskQueue);
-        return allTasks;
-    }
+    std::vector<std::unique_ptr<CallbackTask>> allTasks;
+    allTasks.swap(mCallbackTaskQueue);
+    return allTasks;
+}
 
-    void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
-        std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
-        mCallbackTaskQueue.push_back(std::move(callbackTask));
-    }
+void CallbackTaskManager::AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask) {
+    std::lock_guard<std::mutex> lock(mCallbackTaskQueueMutex);
+    mCallbackTaskQueue.push_back(std::move(callbackTask));
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CallbackTaskManager.h b/src/dawn/native/CallbackTaskManager.h
index 0a4253a..479ba01 100644
--- a/src/dawn/native/CallbackTaskManager.h
+++ b/src/dawn/native/CallbackTaskManager.h
@@ -21,24 +21,24 @@
 
 namespace dawn::native {
 
-    struct CallbackTask {
-      public:
-        virtual ~CallbackTask() = default;
-        virtual void Finish() = 0;
-        virtual void HandleShutDown() = 0;
-        virtual void HandleDeviceLoss() = 0;
-    };
+struct CallbackTask {
+  public:
+    virtual ~CallbackTask() = default;
+    virtual void Finish() = 0;
+    virtual void HandleShutDown() = 0;
+    virtual void HandleDeviceLoss() = 0;
+};
 
-    class CallbackTaskManager {
-      public:
-        void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
-        bool IsEmpty();
-        std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
+class CallbackTaskManager {
+  public:
+    void AddCallbackTask(std::unique_ptr<CallbackTask> callbackTask);
+    bool IsEmpty();
+    std::vector<std::unique_ptr<CallbackTask>> AcquireCallbackTasks();
 
-      private:
-        std::mutex mCallbackTaskQueueMutex;
-        std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
-    };
+  private:
+    std::mutex mCallbackTaskQueueMutex;
+    std::vector<std::unique_ptr<CallbackTask>> mCallbackTaskQueue;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CommandAllocator.cpp b/src/dawn/native/CommandAllocator.cpp
index 587e1af..7f1c022 100644
--- a/src/dawn/native/CommandAllocator.cpp
+++ b/src/dawn/native/CommandAllocator.cpp
@@ -24,205 +24,203 @@
 
 namespace dawn::native {
 
-    // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
+// TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator
 
-    CommandIterator::CommandIterator() {
-        Reset();
+CommandIterator::CommandIterator() {
+    Reset();
+}
+
+CommandIterator::~CommandIterator() {
+    ASSERT(IsEmpty());
+}
+
+CommandIterator::CommandIterator(CommandIterator&& other) {
+    if (!other.IsEmpty()) {
+        mBlocks = std::move(other.mBlocks);
+        other.Reset();
     }
+    Reset();
+}
 
-    CommandIterator::~CommandIterator() {
-        ASSERT(IsEmpty());
+CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
+    ASSERT(IsEmpty());
+    if (!other.IsEmpty()) {
+        mBlocks = std::move(other.mBlocks);
+        other.Reset();
     }
+    Reset();
+    return *this;
+}
 
-    CommandIterator::CommandIterator(CommandIterator&& other) {
-        if (!other.IsEmpty()) {
-            mBlocks = std::move(other.mBlocks);
-            other.Reset();
-        }
-        Reset();
-    }
+CommandIterator::CommandIterator(CommandAllocator allocator) : mBlocks(allocator.AcquireBlocks()) {
+    Reset();
+}
 
-    CommandIterator& CommandIterator::operator=(CommandIterator&& other) {
-        ASSERT(IsEmpty());
-        if (!other.IsEmpty()) {
-            mBlocks = std::move(other.mBlocks);
-            other.Reset();
-        }
-        Reset();
-        return *this;
-    }
-
-    CommandIterator::CommandIterator(CommandAllocator allocator)
-        : mBlocks(allocator.AcquireBlocks()) {
-        Reset();
-    }
-
-    void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
-        ASSERT(IsEmpty());
-        mBlocks.clear();
-        for (CommandAllocator& allocator : allocators) {
-            CommandBlocks blocks = allocator.AcquireBlocks();
-            if (!blocks.empty()) {
-                mBlocks.reserve(mBlocks.size() + blocks.size());
-                for (BlockDef& block : blocks) {
-                    mBlocks.push_back(std::move(block));
-                }
+void CommandIterator::AcquireCommandBlocks(std::vector<CommandAllocator> allocators) {
+    ASSERT(IsEmpty());
+    mBlocks.clear();
+    for (CommandAllocator& allocator : allocators) {
+        CommandBlocks blocks = allocator.AcquireBlocks();
+        if (!blocks.empty()) {
+            mBlocks.reserve(mBlocks.size() + blocks.size());
+            for (BlockDef& block : blocks) {
+                mBlocks.push_back(std::move(block));
             }
         }
+    }
+    Reset();
+}
+
+bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
+    mCurrentBlock++;
+    if (mCurrentBlock >= mBlocks.size()) {
         Reset();
+        *commandId = detail::kEndOfBlock;
+        return false;
+    }
+    mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
+    return NextCommandId(commandId);
+}
+
+void CommandIterator::Reset() {
+    mCurrentBlock = 0;
+
+    if (mBlocks.empty()) {
+        // This will cause the first NextCommandId call to try to move to the next block and stop
+        // the iteration immediately, without special casing the initialization.
+        mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
+        mBlocks.emplace_back();
+        mBlocks[0].size = sizeof(mEndOfBlock);
+        mBlocks[0].block = mCurrentPtr;
+    } else {
+        mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
+    }
+}
+
+void CommandIterator::MakeEmptyAsDataWasDestroyed() {
+    if (IsEmpty()) {
+        return;
     }
 
-    bool CommandIterator::NextCommandIdInNewBlock(uint32_t* commandId) {
-        mCurrentBlock++;
-        if (mCurrentBlock >= mBlocks.size()) {
-            Reset();
-            *commandId = detail::kEndOfBlock;
-            return false;
-        }
-        mCurrentPtr = AlignPtr(mBlocks[mCurrentBlock].block, alignof(uint32_t));
-        return NextCommandId(commandId);
+    for (BlockDef& block : mBlocks) {
+        free(block.block);
     }
+    mBlocks.clear();
+    Reset();
+    ASSERT(IsEmpty());
+}
 
-    void CommandIterator::Reset() {
-        mCurrentBlock = 0;
+bool CommandIterator::IsEmpty() const {
+    return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
+}
 
-        if (mBlocks.empty()) {
-            // This will case the first NextCommandId call to try to move to the next block and stop
-            // the iteration immediately, without special casing the initialization.
-            mCurrentPtr = reinterpret_cast<uint8_t*>(&mEndOfBlock);
-            mBlocks.emplace_back();
-            mBlocks[0].size = sizeof(mEndOfBlock);
-            mBlocks[0].block = mCurrentPtr;
-        } else {
-            mCurrentPtr = AlignPtr(mBlocks[0].block, alignof(uint32_t));
-        }
-    }
+// Potential TODO(crbug.com/dawn/835):
+//  - Host the size and pointer to next block in the block itself to avoid having an allocation
+//    in the vector
+//  - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
+//    in Allocate
+//  - Be able to optimize allocation to one block, for command buffers expected to live long to
+//    avoid cache misses
+//  - Better block allocation, maybe have Dawn API to say command buffer is going to have size
+//    close to another
 
-    void CommandIterator::MakeEmptyAsDataWasDestroyed() {
-        if (IsEmpty()) {
-            return;
-        }
+CommandAllocator::CommandAllocator() {
+    ResetPointers();
+}
 
-        for (BlockDef& block : mBlocks) {
-            free(block.block);
-        }
-        mBlocks.clear();
-        Reset();
-        ASSERT(IsEmpty());
-    }
+CommandAllocator::~CommandAllocator() {
+    Reset();
+}
 
-    bool CommandIterator::IsEmpty() const {
-        return mBlocks[0].block == reinterpret_cast<const uint8_t*>(&mEndOfBlock);
-    }
-
-    // Potential TODO(crbug.com/dawn/835):
-    //  - Host the size and pointer to next block in the block itself to avoid having an allocation
-    //    in the vector
-    //  - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant
-    //    in Allocate
-    //  - Be able to optimize allocation to one block, for command buffers expected to live long to
-    //    avoid cache misses
-    //  - Better block allocation, maybe have Dawn API to say command buffer is going to have size
-    //    close to another
-
-    CommandAllocator::CommandAllocator() {
+CommandAllocator::CommandAllocator(CommandAllocator&& other)
+    : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
+    other.mBlocks.clear();
+    if (!other.IsEmpty()) {
+        mCurrentPtr = other.mCurrentPtr;
+        mEndPtr = other.mEndPtr;
+    } else {
         ResetPointers();
     }
+    other.Reset();
+}
 
-    CommandAllocator::~CommandAllocator() {
-        Reset();
+CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
+    Reset();
+    if (!other.IsEmpty()) {
+        std::swap(mBlocks, other.mBlocks);
+        mLastAllocationSize = other.mLastAllocationSize;
+        mCurrentPtr = other.mCurrentPtr;
+        mEndPtr = other.mEndPtr;
+    }
+    other.Reset();
+    return *this;
+}
+
+void CommandAllocator::Reset() {
+    for (BlockDef& block : mBlocks) {
+        free(block.block);
+    }
+    mBlocks.clear();
+    mLastAllocationSize = kDefaultBaseAllocationSize;
+    ResetPointers();
+}
+
+bool CommandAllocator::IsEmpty() const {
+    return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
+}
+
+CommandBlocks&& CommandAllocator::AcquireBlocks() {
+    ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
+    ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+    ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
+    *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
+
+    mCurrentPtr = nullptr;
+    mEndPtr = nullptr;
+    return std::move(mBlocks);
+}
+
+uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
+                                              size_t commandSize,
+                                              size_t commandAlignment) {
+    // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
+    // to move to the next one. kEndOfBlock on the last block means the end of the commands.
+    uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+    *idAlloc = detail::kEndOfBlock;
+
+    // We'll request a block that can contain at least the command ID, the command and an
+    // additional ID to contain the kEndOfBlock tag.
+    size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
+
+    // The computation of the request could overflow.
+    if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
+        return nullptr;
     }
 
-    CommandAllocator::CommandAllocator(CommandAllocator&& other)
-        : mBlocks(std::move(other.mBlocks)), mLastAllocationSize(other.mLastAllocationSize) {
-        other.mBlocks.clear();
-        if (!other.IsEmpty()) {
-            mCurrentPtr = other.mCurrentPtr;
-            mEndPtr = other.mEndPtr;
-        } else {
-            ResetPointers();
-        }
-        other.Reset();
+    if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
+        return nullptr;
+    }
+    return Allocate(commandId, commandSize, commandAlignment);
+}
+
+bool CommandAllocator::GetNewBlock(size_t minimumSize) {
+    // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
+    mLastAllocationSize = std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
+
+    uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
+    if (DAWN_UNLIKELY(block == nullptr)) {
+        return false;
     }
 
-    CommandAllocator& CommandAllocator::operator=(CommandAllocator&& other) {
-        Reset();
-        if (!other.IsEmpty()) {
-            std::swap(mBlocks, other.mBlocks);
-            mLastAllocationSize = other.mLastAllocationSize;
-            mCurrentPtr = other.mCurrentPtr;
-            mEndPtr = other.mEndPtr;
-        }
-        other.Reset();
-        return *this;
-    }
+    mBlocks.push_back({mLastAllocationSize, block});
+    mCurrentPtr = AlignPtr(block, alignof(uint32_t));
+    mEndPtr = block + mLastAllocationSize;
+    return true;
+}
 
-    void CommandAllocator::Reset() {
-        for (BlockDef& block : mBlocks) {
-            free(block.block);
-        }
-        mBlocks.clear();
-        mLastAllocationSize = kDefaultBaseAllocationSize;
-        ResetPointers();
-    }
-
-    bool CommandAllocator::IsEmpty() const {
-        return mCurrentPtr == reinterpret_cast<const uint8_t*>(&mPlaceholderEnum[0]);
-    }
-
-    CommandBlocks&& CommandAllocator::AcquireBlocks() {
-        ASSERT(mCurrentPtr != nullptr && mEndPtr != nullptr);
-        ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
-        ASSERT(mCurrentPtr + sizeof(uint32_t) <= mEndPtr);
-        *reinterpret_cast<uint32_t*>(mCurrentPtr) = detail::kEndOfBlock;
-
-        mCurrentPtr = nullptr;
-        mEndPtr = nullptr;
-        return std::move(mBlocks);
-    }
-
-    uint8_t* CommandAllocator::AllocateInNewBlock(uint32_t commandId,
-                                                  size_t commandSize,
-                                                  size_t commandAlignment) {
-        // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows
-        // to move to the next one. kEndOfBlock on the last block means the end of the commands.
-        uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
-        *idAlloc = detail::kEndOfBlock;
-
-        // We'll request a block that can contain at least the command ID, the command and an
-        // additional ID to contain the kEndOfBlock tag.
-        size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize;
-
-        // The computation of the request could overflow.
-        if (DAWN_UNLIKELY(requestedBlockSize <= commandSize)) {
-            return nullptr;
-        }
-
-        if (DAWN_UNLIKELY(!GetNewBlock(requestedBlockSize))) {
-            return nullptr;
-        }
-        return Allocate(commandId, commandSize, commandAlignment);
-    }
-
-    bool CommandAllocator::GetNewBlock(size_t minimumSize) {
-        // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize).
-        mLastAllocationSize =
-            std::max(minimumSize, std::min(mLastAllocationSize * 2, size_t(16384)));
-
-        uint8_t* block = static_cast<uint8_t*>(malloc(mLastAllocationSize));
-        if (DAWN_UNLIKELY(block == nullptr)) {
-            return false;
-        }
-
-        mBlocks.push_back({mLastAllocationSize, block});
-        mCurrentPtr = AlignPtr(block, alignof(uint32_t));
-        mEndPtr = block + mLastAllocationSize;
-        return true;
-    }
-
-    void CommandAllocator::ResetPointers() {
-        mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
-        mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
-    }
+void CommandAllocator::ResetPointers() {
+    mCurrentPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[0]);
+    mEndPtr = reinterpret_cast<uint8_t*>(&mPlaceholderEnum[1]);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CommandAllocator.h b/src/dawn/native/CommandAllocator.h
index b9d4d15..c3e999e 100644
--- a/src/dawn/native/CommandAllocator.h
+++ b/src/dawn/native/CommandAllocator.h
@@ -26,248 +26,246 @@
 
 namespace dawn::native {
 
-    // Allocation for command buffers should be fast. To avoid doing an allocation per command
-    // or to avoid copying commands when reallocing, we use a linear allocator in a growing set
-    // of large memory blocks. We also use this to have the format to be (u32 commandId, command),
-    // so that iteration over the commands is easy.
+// Allocation for command buffers should be fast. To avoid doing an allocation per command
+// or to avoid copying commands when reallocing, we use a linear allocator in a growing set
+// of large memory blocks. We also use this to have the format to be (u32 commandId, command),
+// so that iteration over the commands is easy.
 
-    // Usage of the allocator and iterator:
-    //     CommandAllocator allocator;
-    //     DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
-    //     // Fill command
-    //     // Repeat allocation and filling commands
-    //
-    //     CommandIterator commands(allocator);
-    //     CommandType type;
-    //     while(commands.NextCommandId(&type)) {
-    //         switch(type) {
-    //              case CommandType::Draw:
-    //                  DrawCommand* draw = commands.NextCommand<DrawCommand>();
-    //                  // Do the draw
-    //                  break;
-    //              // other cases
-    //         }
-    //     }
+// Usage of the allocator and iterator:
+//     CommandAllocator allocator;
+//     DrawCommand* cmd = allocator.Allocate<DrawCommand>(CommandType::Draw);
+//     // Fill command
+//     // Repeat allocation and filling commands
+//
+//     CommandIterator commands(allocator);
+//     CommandType type;
+//     while(commands.NextCommandId(&type)) {
+//         switch(type) {
+//              case CommandType::Draw:
+//                  DrawCommand* draw = commands.NextCommand<DrawCommand>();
+//                  // Do the draw
+//                  break;
+//              // other cases
+//         }
+//     }
 
-    // Note that you need to extract the commands from the CommandAllocator before destroying it
-    // and must tell the CommandIterator when the allocated commands have been processed for
-    // deletion.
+// Note that you need to extract the commands from the CommandAllocator before destroying it
+// and must tell the CommandIterator when the allocated commands have been processed for
+// deletion.
 
-    // These are the lists of blocks, should not be used directly, only through CommandAllocator
-    // and CommandIterator
-    struct BlockDef {
-        size_t size;
-        uint8_t* block;
-    };
-    using CommandBlocks = std::vector<BlockDef>;
+// These are the lists of blocks, should not be used directly, only through CommandAllocator
+// and CommandIterator
+struct BlockDef {
+    size_t size;
+    uint8_t* block;
+};
+using CommandBlocks = std::vector<BlockDef>;
 
-    namespace detail {
-        constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
-        constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
-    }  // namespace detail
+namespace detail {
+constexpr uint32_t kEndOfBlock = std::numeric_limits<uint32_t>::max();
+constexpr uint32_t kAdditionalData = std::numeric_limits<uint32_t>::max() - 1;
+}  // namespace detail
 
-    class CommandAllocator;
+class CommandAllocator;
 
-    class CommandIterator : public NonCopyable {
-      public:
-        CommandIterator();
-        ~CommandIterator();
+class CommandIterator : public NonCopyable {
+  public:
+    CommandIterator();
+    ~CommandIterator();
 
-        CommandIterator(CommandIterator&& other);
-        CommandIterator& operator=(CommandIterator&& other);
+    CommandIterator(CommandIterator&& other);
+    CommandIterator& operator=(CommandIterator&& other);
 
-        // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
-        explicit CommandIterator(CommandAllocator allocator);
+    // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator.
+    explicit CommandIterator(CommandAllocator allocator);
 
-        void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
+    void AcquireCommandBlocks(std::vector<CommandAllocator> allocators);
 
-        template <typename E>
-        bool NextCommandId(E* commandId) {
-            return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
+    template <typename E>
+    bool NextCommandId(E* commandId) {
+        return NextCommandId(reinterpret_cast<uint32_t*>(commandId));
+    }
+    template <typename T>
+    T* NextCommand() {
+        return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
+    }
+    template <typename T>
+    T* NextData(size_t count) {
+        return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
+    }
+
+    // Sets iterator to the beginning of the commands without emptying the list. This method can
+    // be used if iteration was stopped early and the iterator needs to be restarted.
+    void Reset();
+
+    // This method must be called after commands have been deleted. This indicates that the
+    // commands have been submitted and they are no longer valid.
+    void MakeEmptyAsDataWasDestroyed();
+
+  private:
+    bool IsEmpty() const;
+
+    DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
+        uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
+        ASSERT(idPtr + sizeof(uint32_t) <=
+               mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+        uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
+
+        if (id != detail::kEndOfBlock) {
+            mCurrentPtr = idPtr + sizeof(uint32_t);
+            *commandId = id;
+            return true;
         }
-        template <typename T>
-        T* NextCommand() {
-            return static_cast<T*>(NextCommand(sizeof(T), alignof(T)));
+        return NextCommandIdInNewBlock(commandId);
+    }
+
+    bool NextCommandIdInNewBlock(uint32_t* commandId);
+
+    DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
+        uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
+        ASSERT(commandPtr + sizeof(commandSize) <=
+               mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+
+        mCurrentPtr = commandPtr + commandSize;
+        return commandPtr;
+    }
+
+    DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
+        uint32_t id;
+        bool hasId = NextCommandId(&id);
+        ASSERT(hasId);
+        ASSERT(id == detail::kAdditionalData);
+
+        return NextCommand(dataSize, dataAlignment);
+    }
+
+    CommandBlocks mBlocks;
+    uint8_t* mCurrentPtr = nullptr;
+    size_t mCurrentBlock = 0;
+    // Used to avoid a special case for empty iterators.
+    uint32_t mEndOfBlock = detail::kEndOfBlock;
+};
+
+class CommandAllocator : public NonCopyable {
+  public:
+    CommandAllocator();
+    ~CommandAllocator();
+
+    // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
+    CommandAllocator(CommandAllocator&&);
+    CommandAllocator& operator=(CommandAllocator&&);
+
+    // Frees all blocks held by the allocator and restores it to its initial empty state.
+    void Reset();
+
+    bool IsEmpty() const;
+
+    template <typename T, typename E>
+    T* Allocate(E commandId) {
+        static_assert(sizeof(E) == sizeof(uint32_t));
+        static_assert(alignof(E) == alignof(uint32_t));
+        static_assert(alignof(T) <= kMaxSupportedAlignment);
+        T* result =
+            reinterpret_cast<T*>(Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
+        if (!result) {
+            return nullptr;
         }
-        template <typename T>
-        T* NextData(size_t count) {
-            return static_cast<T*>(NextData(sizeof(T) * count, alignof(T)));
+        new (result) T;
+        return result;
+    }
+
+    template <typename T>
+    T* AllocateData(size_t count) {
+        static_assert(alignof(T) <= kMaxSupportedAlignment);
+        T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
+        if (!result) {
+            return nullptr;
         }
-
-        // Sets iterator to the beginning of the commands without emptying the list. This method can
-        // be used if iteration was stopped early and the iterator needs to be restarted.
-        void Reset();
-
-        // This method must to be called after commands have been deleted. This indicates that the
-        // commands have been submitted and they are no longer valid.
-        void MakeEmptyAsDataWasDestroyed();
-
-      private:
-        bool IsEmpty() const;
-
-        DAWN_FORCE_INLINE bool NextCommandId(uint32_t* commandId) {
-            uint8_t* idPtr = AlignPtr(mCurrentPtr, alignof(uint32_t));
-            ASSERT(idPtr + sizeof(uint32_t) <=
-                   mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
-
-            uint32_t id = *reinterpret_cast<uint32_t*>(idPtr);
-
-            if (id != detail::kEndOfBlock) {
-                mCurrentPtr = idPtr + sizeof(uint32_t);
-                *commandId = id;
-                return true;
-            }
-            return NextCommandIdInNewBlock(commandId);
+        for (size_t i = 0; i < count; i++) {
+            new (result + i) T;
         }
+        return result;
+    }
 
-        bool NextCommandIdInNewBlock(uint32_t* commandId);
+  private:
+    // This is used for some internal computations and can be any power of two as long as code
+    // using the CommandAllocator passes the static_asserts.
+    static constexpr size_t kMaxSupportedAlignment = 8;
 
-        DAWN_FORCE_INLINE void* NextCommand(size_t commandSize, size_t commandAlignment) {
-            uint8_t* commandPtr = AlignPtr(mCurrentPtr, commandAlignment);
-            ASSERT(commandPtr + sizeof(commandSize) <=
-                   mBlocks[mCurrentBlock].block + mBlocks[mCurrentBlock].size);
+    // To avoid checking for overflows at every step of the computations we compute an upper
+    // bound of the space that will be needed in addition to the command data.
+    static constexpr size_t kWorstCaseAdditionalSize =
+        sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
 
-            mCurrentPtr = commandPtr + commandSize;
-            return commandPtr;
+    // The default value of mLastAllocationSize.
+    static constexpr size_t kDefaultBaseAllocationSize = 2048;
+
+    friend CommandIterator;
+    CommandBlocks&& AcquireBlocks();
+
+    DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
+                                        size_t commandSize,
+                                        size_t commandAlignment) {
+        ASSERT(mCurrentPtr != nullptr);
+        ASSERT(mEndPtr != nullptr);
+        ASSERT(commandId != detail::kEndOfBlock);
+
+        // It should always be possible to allocate one id, for kEndOfBlock tagging,
+        ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
+        ASSERT(mEndPtr >= mCurrentPtr);
+        ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
+
+        // The memory after the ID will contain the following:
+        //   - the current ID
+        //   - padding to align the command, maximum kMaxSupportedAlignment
+        //   - the command of size commandSize
+        //   - padding to align the next ID, maximum alignof(uint32_t)
+        //   - the next ID of size sizeof(uint32_t)
+
+        // This can't overflow because by construction mCurrentPtr always has space for the next
+        // ID.
+        size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
+
+        // The good case were we have enough space for the command data and upper bound of the
+        // extra required space.
+        if ((remainingSize >= kWorstCaseAdditionalSize) &&
+            (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
+            uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
+            *idAlloc = commandId;
+
+            uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
+            mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
+
+            return commandAlloc;
         }
+        return AllocateInNewBlock(commandId, commandSize, commandAlignment);
+    }
 
-        DAWN_FORCE_INLINE void* NextData(size_t dataSize, size_t dataAlignment) {
-            uint32_t id;
-            bool hasId = NextCommandId(&id);
-            ASSERT(hasId);
-            ASSERT(id == detail::kAdditionalData);
+    uint8_t* AllocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment);
 
-            return NextCommand(dataSize, dataAlignment);
-        }
+    DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
+        return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
+    }
 
-        CommandBlocks mBlocks;
-        uint8_t* mCurrentPtr = nullptr;
-        size_t mCurrentBlock = 0;
-        // Used to avoid a special case for empty iterators.
-        uint32_t mEndOfBlock = detail::kEndOfBlock;
-    };
+    bool GetNewBlock(size_t minimumSize);
 
-    class CommandAllocator : public NonCopyable {
-      public:
-        CommandAllocator();
-        ~CommandAllocator();
+    void ResetPointers();
 
-        // NOTE: A moved-from CommandAllocator is reset to its initial empty state.
-        CommandAllocator(CommandAllocator&&);
-        CommandAllocator& operator=(CommandAllocator&&);
+    CommandBlocks mBlocks;
+    size_t mLastAllocationSize = kDefaultBaseAllocationSize;
 
-        // Frees all blocks held by the allocator and restores it to its initial empty state.
-        void Reset();
+    // Data used for the block range at initialization so that the first call to Allocate sees
+    // there is not enough space and calls GetNewBlock. This avoids having to special case the
+    // initialization in Allocate.
+    uint32_t mPlaceholderEnum[1] = {0};
 
-        bool IsEmpty() const;
-
-        template <typename T, typename E>
-        T* Allocate(E commandId) {
-            static_assert(sizeof(E) == sizeof(uint32_t));
-            static_assert(alignof(E) == alignof(uint32_t));
-            static_assert(alignof(T) <= kMaxSupportedAlignment);
-            T* result = reinterpret_cast<T*>(
-                Allocate(static_cast<uint32_t>(commandId), sizeof(T), alignof(T)));
-            if (!result) {
-                return nullptr;
-            }
-            new (result) T;
-            return result;
-        }
-
-        template <typename T>
-        T* AllocateData(size_t count) {
-            static_assert(alignof(T) <= kMaxSupportedAlignment);
-            T* result = reinterpret_cast<T*>(AllocateData(sizeof(T) * count, alignof(T)));
-            if (!result) {
-                return nullptr;
-            }
-            for (size_t i = 0; i < count; i++) {
-                new (result + i) T;
-            }
-            return result;
-        }
-
-      private:
-        // This is used for some internal computations and can be any power of two as long as code
-        // using the CommandAllocator passes the static_asserts.
-        static constexpr size_t kMaxSupportedAlignment = 8;
-
-        // To avoid checking for overflows at every step of the computations we compute an upper
-        // bound of the space that will be needed in addition to the command data.
-        static constexpr size_t kWorstCaseAdditionalSize =
-            sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t);
-
-        // The default value of mLastAllocationSize.
-        static constexpr size_t kDefaultBaseAllocationSize = 2048;
-
-        friend CommandIterator;
-        CommandBlocks&& AcquireBlocks();
-
-        DAWN_FORCE_INLINE uint8_t* Allocate(uint32_t commandId,
-                                            size_t commandSize,
-                                            size_t commandAlignment) {
-            ASSERT(mCurrentPtr != nullptr);
-            ASSERT(mEndPtr != nullptr);
-            ASSERT(commandId != detail::kEndOfBlock);
-
-            // It should always be possible to allocate one id, for kEndOfBlock tagging,
-            ASSERT(IsPtrAligned(mCurrentPtr, alignof(uint32_t)));
-            ASSERT(mEndPtr >= mCurrentPtr);
-            ASSERT(static_cast<size_t>(mEndPtr - mCurrentPtr) >= sizeof(uint32_t));
-
-            // The memory after the ID will contain the following:
-            //   - the current ID
-            //   - padding to align the command, maximum kMaxSupportedAlignment
-            //   - the command of size commandSize
-            //   - padding to align the next ID, maximum alignof(uint32_t)
-            //   - the next ID of size sizeof(uint32_t)
-
-            // This can't overflow because by construction mCurrentPtr always has space for the next
-            // ID.
-            size_t remainingSize = static_cast<size_t>(mEndPtr - mCurrentPtr);
-
-            // The good case were we have enough space for the command data and upper bound of the
-            // extra required space.
-            if ((remainingSize >= kWorstCaseAdditionalSize) &&
-                (remainingSize - kWorstCaseAdditionalSize >= commandSize)) {
-                uint32_t* idAlloc = reinterpret_cast<uint32_t*>(mCurrentPtr);
-                *idAlloc = commandId;
-
-                uint8_t* commandAlloc = AlignPtr(mCurrentPtr + sizeof(uint32_t), commandAlignment);
-                mCurrentPtr = AlignPtr(commandAlloc + commandSize, alignof(uint32_t));
-
-                return commandAlloc;
-            }
-            return AllocateInNewBlock(commandId, commandSize, commandAlignment);
-        }
-
-        uint8_t* AllocateInNewBlock(uint32_t commandId,
-                                    size_t commandSize,
-                                    size_t commandAlignment);
-
-        DAWN_FORCE_INLINE uint8_t* AllocateData(size_t commandSize, size_t commandAlignment) {
-            return Allocate(detail::kAdditionalData, commandSize, commandAlignment);
-        }
-
-        bool GetNewBlock(size_t minimumSize);
-
-        void ResetPointers();
-
-        CommandBlocks mBlocks;
-        size_t mLastAllocationSize = kDefaultBaseAllocationSize;
-
-        // Data used for the block range at initialization so that the first call to Allocate sees
-        // there is not enough space and calls GetNewBlock. This avoids having to special case the
-        // initialization in Allocate.
-        uint32_t mPlaceholderEnum[1] = {0};
-
-        // Pointers to the current range of allocation in the block. Guaranteed to allow for at
-        // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
-        // be written. Nullptr iff the blocks were moved out.
-        uint8_t* mCurrentPtr = nullptr;
-        uint8_t* mEndPtr = nullptr;
-    };
+    // Pointers to the current range of allocation in the block. Guaranteed to allow for at
+    // least one uint32_t if not nullptr, so that the special kEndOfBlock command id can always
+    // be written. Nullptr iff the blocks were moved out.
+    uint8_t* mCurrentPtr = nullptr;
+    uint8_t* mEndPtr = nullptr;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CommandBuffer.cpp b/src/dawn/native/CommandBuffer.cpp
index e96b1d2..b5ae04a 100644
--- a/src/dawn/native/CommandBuffer.cpp
+++ b/src/dawn/native/CommandBuffer.cpp
@@ -25,225 +25,221 @@
 
 namespace dawn::native {
 
-    CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
-                                         const CommandBufferDescriptor* descriptor)
-        : ApiObjectBase(encoder->GetDevice(), descriptor->label),
-          mCommands(encoder->AcquireCommands()),
-          mResourceUsages(encoder->AcquireResourceUsages()) {
-        TrackInDevice();
+CommandBufferBase::CommandBufferBase(CommandEncoder* encoder,
+                                     const CommandBufferDescriptor* descriptor)
+    : ApiObjectBase(encoder->GetDevice(), descriptor->label),
+      mCommands(encoder->AcquireCommands()),
+      mResourceUsages(encoder->AcquireResourceUsages()) {
+    TrackInDevice();
+}
+
+CommandBufferBase::CommandBufferBase(DeviceBase* device)
+    : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+// static
+CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
+    return new CommandBufferBase(device, ObjectBase::kError);
+}
+
+ObjectType CommandBufferBase::GetType() const {
+    return ObjectType::CommandBuffer;
+}
+
+MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
+    ASSERT(!IsError());
+
+    DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
+    return {};
+}
+
+void CommandBufferBase::DestroyImpl() {
+    FreeCommands(&mCommands);
+    mResourceUsages = {};
+}
+
+const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
+    return mResourceUsages;
+}
+
+CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
+    return &mCommands;
+}
+
+bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+                                   const Extent3D copySize,
+                                   const uint32_t mipLevel) {
+    Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
+
+    switch (texture->GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            return extent.width == copySize.width;
+        case wgpu::TextureDimension::e2D:
+            return extent.width == copySize.width && extent.height == copySize.height;
+        case wgpu::TextureDimension::e3D:
+            return extent.width == copySize.width && extent.height == copySize.height &&
+                   extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
     }
 
-    CommandBufferBase::CommandBufferBase(DeviceBase* device)
-        : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
+    UNREACHABLE();
+}
+
+SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize) {
+    switch (copy.texture->GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
+            ASSERT(copy.mipLevel == 0);
+            return {copy.aspect, {0, 1}, {0, 1}};
+        case wgpu::TextureDimension::e2D:
+            return {copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
+        case wgpu::TextureDimension::e3D:
+            return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
     }
 
-    CommandBufferBase::CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
+    UNREACHABLE();
+}
 
-    // static
-    CommandBufferBase* CommandBufferBase::MakeError(DeviceBase* device) {
-        return new CommandBufferBase(device, ObjectBase::kError);
-    }
+void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
+    for (ColorAttachmentIndex i :
+         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+        auto& attachmentInfo = renderPass->colorAttachments[i];
+        TextureViewBase* view = attachmentInfo.view.Get();
+        bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
 
-    ObjectType CommandBufferBase::GetType() const {
-        return ObjectType::CommandBuffer;
-    }
+        ASSERT(view->GetLayerCount() == 1);
+        ASSERT(view->GetLevelCount() == 1);
+        SubresourceRange range = view->GetSubresourceRange();
 
-    MaybeError CommandBufferBase::ValidateCanUseInSubmitNow() const {
-        ASSERT(!IsError());
-
-        DAWN_INVALID_IF(!IsAlive(), "%s cannot be submitted more than once.", this);
-        return {};
-    }
-
-    void CommandBufferBase::DestroyImpl() {
-        FreeCommands(&mCommands);
-        mResourceUsages = {};
-    }
-
-    const CommandBufferResourceUsage& CommandBufferBase::GetResourceUsages() const {
-        return mResourceUsages;
-    }
-
-    CommandIterator* CommandBufferBase::GetCommandIteratorForTesting() {
-        return &mCommands;
-    }
-
-    bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
-                                       const Extent3D copySize,
-                                       const uint32_t mipLevel) {
-        Extent3D extent = texture->GetMipLevelPhysicalSize(mipLevel);
-
-        switch (texture->GetDimension()) {
-            case wgpu::TextureDimension::e1D:
-                return extent.width == copySize.width;
-            case wgpu::TextureDimension::e2D:
-                return extent.width == copySize.width && extent.height == copySize.height;
-            case wgpu::TextureDimension::e3D:
-                return extent.width == copySize.width && extent.height == copySize.height &&
-                       extent.depthOrArrayLayers == copySize.depthOrArrayLayers;
+        // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
+        if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
+            !view->GetTexture()->IsSubresourceContentInitialized(range)) {
+            attachmentInfo.loadOp = wgpu::LoadOp::Clear;
+            attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
         }
 
-        UNREACHABLE();
-    }
-
-    SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
-                                                   const Extent3D& copySize) {
-        switch (copy.texture->GetDimension()) {
-            case wgpu::TextureDimension::e1D:
-                ASSERT(copy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
-                ASSERT(copy.mipLevel == 0);
-                return {copy.aspect, {0, 1}, {0, 1}};
-            case wgpu::TextureDimension::e2D:
-                return {
-                    copy.aspect, {copy.origin.z, copySize.depthOrArrayLayers}, {copy.mipLevel, 1}};
-            case wgpu::TextureDimension::e3D:
-                return {copy.aspect, {0, 1}, {copy.mipLevel, 1}};
+        if (hasResolveTarget) {
+            // We need to set the resolve target to initialized so that it does not get
+            // cleared later in the pipeline. The texture will be resolved from the
+            // source color attachment, which will be correctly initialized.
+            TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
+            ASSERT(resolveView->GetLayerCount() == 1);
+            ASSERT(resolveView->GetLevelCount() == 1);
+            resolveView->GetTexture()->SetIsSubresourceContentInitialized(
+                true, resolveView->GetSubresourceRange());
         }
 
-        UNREACHABLE();
-    }
+        switch (attachmentInfo.storeOp) {
+            case wgpu::StoreOp::Store:
+                view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
+                break;
 
-    void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass) {
-        for (ColorAttachmentIndex i :
-             IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-            auto& attachmentInfo = renderPass->colorAttachments[i];
-            TextureViewBase* view = attachmentInfo.view.Get();
-            bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+            case wgpu::StoreOp::Discard:
+                view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
+                break;
 
-            ASSERT(view->GetLayerCount() == 1);
-            ASSERT(view->GetLevelCount() == 1);
-            SubresourceRange range = view->GetSubresourceRange();
-
-            // If the loadOp is Load, but the subresource is not initialized, use Clear instead.
-            if (attachmentInfo.loadOp == wgpu::LoadOp::Load &&
-                !view->GetTexture()->IsSubresourceContentInitialized(range)) {
-                attachmentInfo.loadOp = wgpu::LoadOp::Clear;
-                attachmentInfo.clearColor = {0.f, 0.f, 0.f, 0.f};
-            }
-
-            if (hasResolveTarget) {
-                // We need to set the resolve target to initialized so that it does not get
-                // cleared later in the pipeline. The texture will be resolved from the
-                // source color attachment, which will be correctly initialized.
-                TextureViewBase* resolveView = attachmentInfo.resolveTarget.Get();
-                ASSERT(resolveView->GetLayerCount() == 1);
-                ASSERT(resolveView->GetLevelCount() == 1);
-                resolveView->GetTexture()->SetIsSubresourceContentInitialized(
-                    true, resolveView->GetSubresourceRange());
-            }
-
-            switch (attachmentInfo.storeOp) {
-                case wgpu::StoreOp::Store:
-                    view->GetTexture()->SetIsSubresourceContentInitialized(true, range);
-                    break;
-
-                case wgpu::StoreOp::Discard:
-                    view->GetTexture()->SetIsSubresourceContentInitialized(false, range);
-                    break;
-
-                case wgpu::StoreOp::Undefined:
-                    UNREACHABLE();
-                    break;
-            }
-        }
-
-        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-            auto& attachmentInfo = renderPass->depthStencilAttachment;
-            TextureViewBase* view = attachmentInfo.view.Get();
-            ASSERT(view->GetLayerCount() == 1);
-            ASSERT(view->GetLevelCount() == 1);
-            SubresourceRange range = view->GetSubresourceRange();
-
-            SubresourceRange depthRange = range;
-            depthRange.aspects = range.aspects & Aspect::Depth;
-
-            SubresourceRange stencilRange = range;
-            stencilRange.aspects = range.aspects & Aspect::Stencil;
-
-            // If the depth stencil texture has not been initialized, we want to use loadop
-            // clear to init the contents to 0's
-            if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
-                attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
-                attachmentInfo.clearDepth = 0.0f;
-                attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
-            }
-
-            if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
-                attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
-                attachmentInfo.clearStencil = 0u;
-                attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-            }
-
-            view->GetTexture()->SetIsSubresourceContentInitialized(
-                attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
-
-            view->GetTexture()->SetIsSubresourceContentInitialized(
-                attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
+            case wgpu::StoreOp::Undefined:
+                UNREACHABLE();
+                break;
         }
     }
 
-    bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
-        ASSERT(copy != nullptr);
+    if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+        auto& attachmentInfo = renderPass->depthStencilAttachment;
+        TextureViewBase* view = attachmentInfo.view.Get();
+        ASSERT(view->GetLayerCount() == 1);
+        ASSERT(view->GetLevelCount() == 1);
+        SubresourceRange range = view->GetSubresourceRange();
 
-        if (copy->destination.offset > 0) {
-            // The copy doesn't touch the start of the buffer.
-            return false;
+        SubresourceRange depthRange = range;
+        depthRange.aspects = range.aspects & Aspect::Depth;
+
+        SubresourceRange stencilRange = range;
+        stencilRange.aspects = range.aspects & Aspect::Stencil;
+
+        // If the depth stencil texture has not been initialized, we want to use loadop
+        // clear to init the contents to 0's
+        if (!view->GetTexture()->IsSubresourceContentInitialized(depthRange) &&
+            attachmentInfo.depthLoadOp == wgpu::LoadOp::Load) {
+            attachmentInfo.clearDepth = 0.0f;
+            attachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
         }
 
-        const TextureBase* texture = copy->source.texture.Get();
-        const TexelBlockInfo& blockInfo =
-            texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
-        const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
-        const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
-        const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
-        const bool multiRow = multiSlice || heightInBlocks > 1;
-
-        if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
-            // There are gaps between slices that aren't overwritten
-            return false;
+        if (!view->GetTexture()->IsSubresourceContentInitialized(stencilRange) &&
+            attachmentInfo.stencilLoadOp == wgpu::LoadOp::Load) {
+            attachmentInfo.clearStencil = 0u;
+            attachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
         }
 
-        const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
-        if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
-            // There are gaps between rows that aren't overwritten
-            return false;
-        }
+        view->GetTexture()->SetIsSubresourceContentInitialized(
+            attachmentInfo.depthStoreOp == wgpu::StoreOp::Store, depthRange);
 
-        // After the above checks, we're sure the copy has no gaps.
-        // Now, compute the total number of bytes written.
-        const uint64_t writtenBytes =
-            ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
-                                       copy->destination.rowsPerImage)
-                .AcquireSuccess();
-        if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
-            // The written bytes don't cover the whole buffer.
-            return false;
-        }
+        view->GetTexture()->SetIsSubresourceContentInitialized(
+            attachmentInfo.stencilStoreOp == wgpu::StoreOp::Store, stencilRange);
+    }
+}
 
-        return true;
+bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy) {
+    ASSERT(copy != nullptr);
+
+    if (copy->destination.offset > 0) {
+        // The copy doesn't touch the start of the buffer.
+        return false;
     }
 
-    std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
-        const std::array<float, 4> outputValue = {
-            static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
-            static_cast<float>(color.a)};
-        return outputValue;
-    }
-    std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
-        const std::array<int32_t, 4> outputValue = {
-            static_cast<int32_t>(color.r), static_cast<int32_t>(color.g),
-            static_cast<int32_t>(color.b), static_cast<int32_t>(color.a)};
-        return outputValue;
+    const TextureBase* texture = copy->source.texture.Get();
+    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(copy->source.aspect).block;
+    const uint64_t widthInBlocks = copy->copySize.width / blockInfo.width;
+    const uint64_t heightInBlocks = copy->copySize.height / blockInfo.height;
+    const bool multiSlice = copy->copySize.depthOrArrayLayers > 1;
+    const bool multiRow = multiSlice || heightInBlocks > 1;
+
+    if (multiSlice && copy->destination.rowsPerImage > heightInBlocks) {
+        // There are gaps between slices that aren't overwritten
+        return false;
     }
 
-    std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
-        const std::array<uint32_t, 4> outputValue = {
-            static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
-            static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
-        return outputValue;
+    const uint64_t copyTextureDataSizePerRow = widthInBlocks * blockInfo.byteSize;
+    if (multiRow && copy->destination.bytesPerRow > copyTextureDataSizePerRow) {
+        // There are gaps between rows that aren't overwritten
+        return false;
     }
 
+    // After the above checks, we're sure the copy has no gaps.
+    // Now, compute the total number of bytes written.
+    const uint64_t writtenBytes =
+        ComputeRequiredBytesInCopy(blockInfo, copy->copySize, copy->destination.bytesPerRow,
+                                   copy->destination.rowsPerImage)
+            .AcquireSuccess();
+    if (!copy->destination.buffer->IsFullBufferRange(copy->destination.offset, writtenBytes)) {
+        // The written bytes don't cover the whole buffer.
+        return false;
+    }
+
+    return true;
+}
+
+std::array<float, 4> ConvertToFloatColor(dawn::native::Color color) {
+    const std::array<float, 4> outputValue = {
+        static_cast<float>(color.r), static_cast<float>(color.g), static_cast<float>(color.b),
+        static_cast<float>(color.a)};
+    return outputValue;
+}
+std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color) {
+    const std::array<int32_t, 4> outputValue = {
+        static_cast<int32_t>(color.r), static_cast<int32_t>(color.g), static_cast<int32_t>(color.b),
+        static_cast<int32_t>(color.a)};
+    return outputValue;
+}
+
+std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color) {
+    const std::array<uint32_t, 4> outputValue = {
+        static_cast<uint32_t>(color.r), static_cast<uint32_t>(color.g),
+        static_cast<uint32_t>(color.b), static_cast<uint32_t>(color.a)};
+    return outputValue;
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/CommandBuffer.h b/src/dawn/native/CommandBuffer.h
index 455a8d9..19d9f68 100644
--- a/src/dawn/native/CommandBuffer.h
+++ b/src/dawn/native/CommandBuffer.h
@@ -26,50 +26,49 @@
 
 namespace dawn::native {
 
-    struct BeginRenderPassCmd;
-    struct CopyTextureToBufferCmd;
-    struct TextureCopy;
+struct BeginRenderPassCmd;
+struct CopyTextureToBufferCmd;
+struct TextureCopy;
 
-    class CommandBufferBase : public ApiObjectBase {
-      public:
-        CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+class CommandBufferBase : public ApiObjectBase {
+  public:
+    CommandBufferBase(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
 
-        static CommandBufferBase* MakeError(DeviceBase* device);
+    static CommandBufferBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        MaybeError ValidateCanUseInSubmitNow() const;
+    MaybeError ValidateCanUseInSubmitNow() const;
 
-        const CommandBufferResourceUsage& GetResourceUsages() const;
+    const CommandBufferResourceUsage& GetResourceUsages() const;
 
-        CommandIterator* GetCommandIteratorForTesting();
+    CommandIterator* GetCommandIteratorForTesting();
 
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit CommandBufferBase(DeviceBase* device);
-        void DestroyImpl() override;
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit CommandBufferBase(DeviceBase* device);
+    void DestroyImpl() override;
 
-        CommandIterator mCommands;
+    CommandIterator mCommands;
 
-      private:
-        CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+  private:
+    CommandBufferBase(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        CommandBufferResourceUsage mResourceUsages;
-    };
+    CommandBufferResourceUsage mResourceUsages;
+};
 
-    bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
-                                       const Extent3D copySize,
-                                       const uint32_t mipLevel);
-    SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy,
-                                                   const Extent3D& copySize);
+bool IsCompleteSubresourceCopiedTo(const TextureBase* texture,
+                                   const Extent3D copySize,
+                                   const uint32_t mipLevel);
+SubresourceRange GetSubresourcesAffectedByCopy(const TextureCopy& copy, const Extent3D& copySize);
 
-    void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
+void LazyClearRenderPassAttachments(BeginRenderPassCmd* renderPass);
 
-    bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
+bool IsFullBufferOverwrittenInTextureToBufferCopy(const CopyTextureToBufferCmd* copy);
 
-    std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
-    std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
-    std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);
+std::array<float, 4> ConvertToFloatColor(dawn::native::Color color);
+std::array<int32_t, 4> ConvertToSignedIntegerColor(dawn::native::Color color);
+std::array<uint32_t, 4> ConvertToUnsignedIntegerColor(dawn::native::Color color);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CommandBufferStateTracker.cpp b/src/dawn/native/CommandBufferStateTracker.cpp
index ee164c7..b0624a3 100644
--- a/src/dawn/native/CommandBufferStateTracker.cpp
+++ b/src/dawn/native/CommandBufferStateTracker.cpp
@@ -30,392 +30,385 @@
 
 namespace dawn::native {
 
-    namespace {
-        bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
-                                     const std::vector<uint64_t>& pipelineMinBufferSizes) {
-            ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());
+namespace {
+bool BufferSizesAtLeastAsBig(const ityp::span<uint32_t, uint64_t> unverifiedBufferSizes,
+                             const std::vector<uint64_t>& pipelineMinBufferSizes) {
+    ASSERT(unverifiedBufferSizes.size() == pipelineMinBufferSizes.size());
 
-            for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
-                if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
-                    return false;
-                }
-            }
-
-            return true;
+    for (uint32_t i = 0; i < unverifiedBufferSizes.size(); ++i) {
+        if (unverifiedBufferSizes[i] < pipelineMinBufferSizes[i]) {
+            return false;
         }
-    }  // namespace
-
-    enum ValidationAspect {
-        VALIDATION_ASPECT_PIPELINE,
-        VALIDATION_ASPECT_BIND_GROUPS,
-        VALIDATION_ASPECT_VERTEX_BUFFERS,
-        VALIDATION_ASPECT_INDEX_BUFFER,
-
-        VALIDATION_ASPECT_COUNT
-    };
-    static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);
-
-    static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
-        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
-
-    static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
-        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
-        1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
-
-    static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
-        1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
-        1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;
-
-    static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
-        1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
-        1 << VALIDATION_ASPECT_INDEX_BUFFER;
-
-    MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
-        return ValidateOperation(kDispatchAspects);
     }
 
-    MaybeError CommandBufferStateTracker::ValidateCanDraw() {
-        return ValidateOperation(kDrawAspects);
+    return true;
+}
+}  // namespace
+
+enum ValidationAspect {
+    VALIDATION_ASPECT_PIPELINE,
+    VALIDATION_ASPECT_BIND_GROUPS,
+    VALIDATION_ASPECT_VERTEX_BUFFERS,
+    VALIDATION_ASPECT_INDEX_BUFFER,
+
+    VALIDATION_ASPECT_COUNT
+};
+static_assert(VALIDATION_ASPECT_COUNT == CommandBufferStateTracker::kNumAspects);
+
+static constexpr CommandBufferStateTracker::ValidationAspects kDispatchAspects =
+    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS;
+
+static constexpr CommandBufferStateTracker::ValidationAspects kDrawAspects =
+    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+    1 << VALIDATION_ASPECT_VERTEX_BUFFERS;
+
+static constexpr CommandBufferStateTracker::ValidationAspects kDrawIndexedAspects =
+    1 << VALIDATION_ASPECT_PIPELINE | 1 << VALIDATION_ASPECT_BIND_GROUPS |
+    1 << VALIDATION_ASPECT_VERTEX_BUFFERS | 1 << VALIDATION_ASPECT_INDEX_BUFFER;
+
+static constexpr CommandBufferStateTracker::ValidationAspects kLazyAspects =
+    1 << VALIDATION_ASPECT_BIND_GROUPS | 1 << VALIDATION_ASPECT_VERTEX_BUFFERS |
+    1 << VALIDATION_ASPECT_INDEX_BUFFER;
+
+MaybeError CommandBufferStateTracker::ValidateCanDispatch() {
+    return ValidateOperation(kDispatchAspects);
+}
+
+MaybeError CommandBufferStateTracker::ValidateCanDraw() {
+    return ValidateOperation(kDrawAspects);
+}
+
+MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
+    return ValidateOperation(kDrawIndexedAspects);
+}
+
+MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount,
+                                                                           uint32_t firstVertex) {
+    RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsVertexBuffer =
+        lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
+
+    for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
+        const VertexBufferInfo& vertexBuffer = lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
+        uint64_t arrayStride = vertexBuffer.arrayStride;
+        uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
+
+        if (arrayStride == 0) {
+            DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+                            "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+                            "is smaller than the required size for all attributes (%u)",
+                            bufferSize, static_cast<uint8_t>(usedSlotVertex),
+                            vertexBuffer.usedBytesInStride);
+        } else {
+            uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
+            if (strideCount != 0u) {
+                uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
+                // firstVertex and vertexCount are in uint32_t,
+                // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
+                // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
+                // sizeof(attribute.format)) with attribute.offset being no larger than
+                // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
+                // overflows.
+                DAWN_INVALID_IF(
+                    requiredSize > bufferSize,
+                    "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than "
+                    "the "
+                    "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
+                    firstVertex, vertexCount, requiredSize, bufferSize,
+                    static_cast<uint8_t>(usedSlotVertex), arrayStride);
+            }
+        }
     }
 
-    MaybeError CommandBufferStateTracker::ValidateCanDrawIndexed() {
-        return ValidateOperation(kDrawIndexedAspects);
+    return {};
+}
+
+MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
+    uint32_t instanceCount,
+    uint32_t firstInstance) {
+    RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& vertexBufferSlotsUsedAsInstanceBuffer =
+        lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();
+
+    for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
+        const VertexBufferInfo& vertexBuffer =
+            lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
+        uint64_t arrayStride = vertexBuffer.arrayStride;
+        uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
+        if (arrayStride == 0) {
+            DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
+                            "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
+                            "is smaller than the required size for all attributes (%u)",
+                            bufferSize, static_cast<uint8_t>(usedSlotInstance),
+                            vertexBuffer.usedBytesInStride);
+        } else {
+            uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
+            if (strideCount != 0u) {
+                uint64_t requiredSize = (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
+                // firstInstance and instanceCount are in uint32_t,
+                // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
+                // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
+                // sizeof(attribute.format)) with attribute.offset being no larger than
+                // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
+                // overflows.
+                DAWN_INVALID_IF(
+                    requiredSize > bufferSize,
+                    "Instance range (first: %u, count: %u) requires a larger buffer (%u) than "
+                    "the "
+                    "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
+                    firstInstance, instanceCount, requiredSize, bufferSize,
+                    static_cast<uint8_t>(usedSlotInstance), arrayStride);
+            }
+        }
     }
 
-    MaybeError CommandBufferStateTracker::ValidateBufferInRangeForVertexBuffer(
-        uint32_t vertexCount,
-        uint32_t firstVertex) {
+    return {};
+}
+
+MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
+                                                                 uint32_t firstIndex) {
+    // Validate the range of index buffer
+    // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
+    // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
+    // uint64_t we avoid overflows.
+    DAWN_INVALID_IF(
+        (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
+            mIndexBufferSize,
+        "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
+        "(%u).",
+        firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
+    return {};
+}
+
+MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
+    // Fast return-true path if everything is good
+    ValidationAspects missingAspects = requiredAspects & ~mAspects;
+    if (missingAspects.none()) {
+        return {};
+    }
+
+    // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
+    // requires the pipeline to be set.
+    DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));
+
+    RecomputeLazyAspects(missingAspects);
+
+    DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));
+
+    return {};
+}
+
+void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
+    ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
+    ASSERT((aspects & ~kLazyAspects).none());
+
+    if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
+        bool matches = true;
+
+        for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+            if (mBindgroups[i] == nullptr ||
+                mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
+                !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+                                         (*mMinBufferSizes)[i])) {
+                matches = false;
+                break;
+            }
+        }
+
+        if (matches) {
+            mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
+        }
+    }
+
+    if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
         RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
 
-        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
-            vertexBufferSlotsUsedAsVertexBuffer =
-                lastRenderPipeline->GetVertexBufferSlotsUsedAsVertexBuffer();
-
-        for (auto usedSlotVertex : IterateBitSet(vertexBufferSlotsUsedAsVertexBuffer)) {
-            const VertexBufferInfo& vertexBuffer =
-                lastRenderPipeline->GetVertexBuffer(usedSlotVertex);
-            uint64_t arrayStride = vertexBuffer.arrayStride;
-            uint64_t bufferSize = mVertexBufferSizes[usedSlotVertex];
-
-            if (arrayStride == 0) {
-                DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
-                                "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
-                                "is smaller than the required size for all attributes (%u)",
-                                bufferSize, static_cast<uint8_t>(usedSlotVertex),
-                                vertexBuffer.usedBytesInStride);
-            } else {
-                uint64_t strideCount = static_cast<uint64_t>(firstVertex) + vertexCount;
-                if (strideCount != 0u) {
-                    uint64_t requiredSize =
-                        (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
-                    // firstVertex and vertexCount are in uint32_t,
-                    // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
-                    // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
-                    // sizeof(attribute.format)) with attribute.offset being no larger than
-                    // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
-                    // overflows.
-                    DAWN_INVALID_IF(
-                        requiredSize > bufferSize,
-                        "Vertex range (first: %u, count: %u) requires a larger buffer (%u) than "
-                        "the "
-                        "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
-                        firstVertex, vertexCount, requiredSize, bufferSize,
-                        static_cast<uint8_t>(usedSlotVertex), arrayStride);
-                }
-            }
+        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
+            lastRenderPipeline->GetVertexBufferSlotsUsed();
+        if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
+            mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
         }
-
-        return {};
     }
 
-    MaybeError CommandBufferStateTracker::ValidateBufferInRangeForInstanceBuffer(
-        uint32_t instanceCount,
-        uint32_t firstInstance) {
+    if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
         RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-
-        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
-            vertexBufferSlotsUsedAsInstanceBuffer =
-                lastRenderPipeline->GetVertexBufferSlotsUsedAsInstanceBuffer();
-
-        for (auto usedSlotInstance : IterateBitSet(vertexBufferSlotsUsedAsInstanceBuffer)) {
-            const VertexBufferInfo& vertexBuffer =
-                lastRenderPipeline->GetVertexBuffer(usedSlotInstance);
-            uint64_t arrayStride = vertexBuffer.arrayStride;
-            uint64_t bufferSize = mVertexBufferSizes[usedSlotInstance];
-            if (arrayStride == 0) {
-                DAWN_INVALID_IF(vertexBuffer.usedBytesInStride > bufferSize,
-                                "Bound vertex buffer size (%u) at slot %u with an arrayStride of 0 "
-                                "is smaller than the required size for all attributes (%u)",
-                                bufferSize, static_cast<uint8_t>(usedSlotInstance),
-                                vertexBuffer.usedBytesInStride);
-            } else {
-                uint64_t strideCount = static_cast<uint64_t>(firstInstance) + instanceCount;
-                if (strideCount != 0u) {
-                    uint64_t requiredSize =
-                        (strideCount - 1u) * arrayStride + vertexBuffer.lastStride;
-                    // firstInstance and instanceCount are in uint32_t,
-                    // arrayStride must not be larger than kMaxVertexBufferArrayStride, which is
-                    // currently 2048, and vertexBuffer.lastStride = max(attribute.offset +
-                    // sizeof(attribute.format)) with attribute.offset being no larger than
-                    // kMaxVertexBufferArrayStride, so by doing checks in uint64_t we avoid
-                    // overflows.
-                    DAWN_INVALID_IF(
-                        requiredSize > bufferSize,
-                        "Instance range (first: %u, count: %u) requires a larger buffer (%u) than "
-                        "the "
-                        "bound buffer size (%u) of the vertex buffer at slot %u with stride %u.",
-                        firstInstance, instanceCount, requiredSize, bufferSize,
-                        static_cast<uint8_t>(usedSlotInstance), arrayStride);
-                }
-            }
+        if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
+            mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
+            mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
         }
+    }
+}
 
+MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
+    if (!aspects.any()) {
         return {};
     }
 
-    MaybeError CommandBufferStateTracker::ValidateIndexBufferInRange(uint32_t indexCount,
-                                                                     uint32_t firstIndex) {
-        // Validate the range of index buffer
-        // firstIndex and indexCount are in uint32_t, while IndexFormatSize is 2 (for
-        // wgpu::IndexFormat::Uint16) or 4 (for wgpu::IndexFormat::Uint32), so by doing checks in
-        // uint64_t we avoid overflows.
-        DAWN_INVALID_IF(
-            (static_cast<uint64_t>(firstIndex) + indexCount) * IndexFormatSize(mIndexFormat) >
-                mIndexBufferSize,
-            "Index range (first: %u, count: %u, format: %s) does not fit in index buffer size "
-            "(%u).",
-            firstIndex, indexCount, mIndexFormat, mIndexBufferSize);
-        return {};
-    }
+    DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
 
-    MaybeError CommandBufferStateTracker::ValidateOperation(ValidationAspects requiredAspects) {
-        // Fast return-true path if everything is good
-        ValidationAspects missingAspects = requiredAspects & ~mAspects;
-        if (missingAspects.none()) {
-            return {};
+    if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
+        DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
+
+        RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
+        wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();
+
+        if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
+            DAWN_INVALID_IF(
+                pipelineIndexFormat == wgpu::IndexFormat::Undefined,
+                "%s has a strip primitive topology (%s) but a strip index format of %s, which "
+                "prevents it for being used for indexed draw calls.",
+                lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
+                pipelineIndexFormat);
+
+            DAWN_INVALID_IF(
+                mIndexFormat != pipelineIndexFormat,
+                "Strip index format (%s) of %s does not match index buffer format (%s).",
+                pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
         }
 
-        // Generate an error immediately if a non-lazy aspect is missing as computing lazy aspects
-        // requires the pipeline to be set.
-        DAWN_TRY(CheckMissingAspects(missingAspects & ~kLazyAspects));
-
-        RecomputeLazyAspects(missingAspects);
-
-        DAWN_TRY(CheckMissingAspects(requiredAspects & ~mAspects));
-
-        return {};
-    }
-
-    void CommandBufferStateTracker::RecomputeLazyAspects(ValidationAspects aspects) {
-        ASSERT(mAspects[VALIDATION_ASPECT_PIPELINE]);
-        ASSERT((aspects & ~kLazyAspects).none());
-
-        if (aspects[VALIDATION_ASPECT_BIND_GROUPS]) {
-            bool matches = true;
-
-            for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
-                if (mBindgroups[i] == nullptr ||
-                    mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout() ||
-                    !BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
-                                             (*mMinBufferSizes)[i])) {
-                    matches = false;
-                    break;
-                }
-            }
-
-            if (matches) {
-                mAspects.set(VALIDATION_ASPECT_BIND_GROUPS);
-            }
-        }
-
-        if (aspects[VALIDATION_ASPECT_VERTEX_BUFFERS]) {
-            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-
-            const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& requiredVertexBuffers =
-                lastRenderPipeline->GetVertexBufferSlotsUsed();
-            if (IsSubset(requiredVertexBuffers, mVertexBufferSlotsUsed)) {
-                mAspects.set(VALIDATION_ASPECT_VERTEX_BUFFERS);
-            }
-        }
-
-        if (aspects[VALIDATION_ASPECT_INDEX_BUFFER] && mIndexBufferSet) {
-            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-            if (!IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology()) ||
-                mIndexFormat == lastRenderPipeline->GetStripIndexFormat()) {
-                mAspects.set(VALIDATION_ASPECT_INDEX_BUFFER);
-            }
-        }
-    }
-
-    MaybeError CommandBufferStateTracker::CheckMissingAspects(ValidationAspects aspects) {
-        if (!aspects.any()) {
-            return {};
-        }
-
-        DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_PIPELINE], "No pipeline set.");
-
-        if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_INDEX_BUFFER])) {
-            DAWN_INVALID_IF(!mIndexBufferSet, "Index buffer was not set.");
-
-            RenderPipelineBase* lastRenderPipeline = GetRenderPipeline();
-            wgpu::IndexFormat pipelineIndexFormat = lastRenderPipeline->GetStripIndexFormat();
-
-            if (IsStripPrimitiveTopology(lastRenderPipeline->GetPrimitiveTopology())) {
-                DAWN_INVALID_IF(
-                    pipelineIndexFormat == wgpu::IndexFormat::Undefined,
-                    "%s has a strip primitive topology (%s) but a strip index format of %s, which "
-                    "prevents it for being used for indexed draw calls.",
-                    lastRenderPipeline, lastRenderPipeline->GetPrimitiveTopology(),
-                    pipelineIndexFormat);
-
-                DAWN_INVALID_IF(
-                    mIndexFormat != pipelineIndexFormat,
-                    "Strip index format (%s) of %s does not match index buffer format (%s).",
-                    pipelineIndexFormat, lastRenderPipeline, mIndexFormat);
-            }
-
-            // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
-            // It returns the first invalid state found. We shouldn't be able to reach this line
-            // because to have invalid aspects one of the above conditions must have failed earlier.
-            // If this is reached, make sure lazy aspects and the error checks above are consistent.
-            UNREACHABLE();
-            return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
-        }
-
-        // TODO(dawn:563): Indicate which slots were not set.
-        DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
-                        "Vertex buffer slots required by %s were not set.", GetRenderPipeline());
-
-        if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
-            for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
-                ASSERT(HasPipeline());
-
-                DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
-                                static_cast<uint32_t>(i));
-
-                BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
-                BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();
-
-                DAWN_INVALID_IF(
-                    requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
-                        currentBGL->GetPipelineCompatibilityToken() !=
-                            requiredBGL->GetPipelineCompatibilityToken(),
-                    "The current pipeline (%s) was created with a default layout, and is not "
-                    "compatible with the %s at index %u which uses a %s that was not created by "
-                    "the pipeline. Either use the bind group layout returned by calling "
-                    "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
-                    "provide an explicit pipeline layout when creating the pipeline.",
-                    mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
-                    static_cast<uint32_t>(i));
-
-                DAWN_INVALID_IF(
-                    requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
-                        currentBGL->GetPipelineCompatibilityToken() !=
-                            PipelineCompatibilityToken(0),
-                    "%s at index %u uses a %s which was created as part of the default layout for "
-                    "a different pipeline than the current one (%s), and as a result is not "
-                    "compatible. Use an explicit bind group layout when creating bind groups and "
-                    "an explicit pipeline layout when creating pipelines to share bind groups "
-                    "between pipelines.",
-                    mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);
-
-                DAWN_INVALID_IF(
-                    mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
-                    "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
-                    "group %s at index %u.",
-                    requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
-                    static_cast<uint32_t>(i));
-
-                // TODO(dawn:563): Report the binding sizes and which ones are failing.
-                DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
-                                                         (*mMinBufferSizes)[i]),
-                                "Binding sizes are too small for bind group %s at index %u",
-                                mBindgroups[i], static_cast<uint32_t>(i));
-            }
-
-            // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
-            // It returns the first invalid state found. We shouldn't be able to reach this line
-            // because to have invalid aspects one of the above conditions must have failed earlier.
-            // If this is reached, make sure lazy aspects and the error checks above are consistent.
-            UNREACHABLE();
-            return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
-        }
-
+        // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+        // It returns the first invalid state found. We shouldn't be able to reach this line
+        // because to have invalid aspects one of the above conditions must have failed earlier.
+        // If this is reached, make sure lazy aspects and the error checks above are consistent.
         UNREACHABLE();
+        return DAWN_FORMAT_VALIDATION_ERROR("Index buffer is invalid.");
     }
 
-    void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
-        SetPipelineCommon(pipeline);
+    // TODO(dawn:563): Indicate which slots were not set.
+    DAWN_INVALID_IF(aspects[VALIDATION_ASPECT_VERTEX_BUFFERS],
+                    "Vertex buffer slots required by %s were not set.", GetRenderPipeline());
+
+    if (DAWN_UNLIKELY(aspects[VALIDATION_ASPECT_BIND_GROUPS])) {
+        for (BindGroupIndex i : IterateBitSet(mLastPipelineLayout->GetBindGroupLayoutsMask())) {
+            ASSERT(HasPipeline());
+
+            DAWN_INVALID_IF(mBindgroups[i] == nullptr, "No bind group set at index %u.",
+                            static_cast<uint32_t>(i));
+
+            BindGroupLayoutBase* requiredBGL = mLastPipelineLayout->GetBindGroupLayout(i);
+            BindGroupLayoutBase* currentBGL = mBindgroups[i]->GetLayout();
+
+            DAWN_INVALID_IF(
+                requiredBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0) &&
+                    currentBGL->GetPipelineCompatibilityToken() !=
+                        requiredBGL->GetPipelineCompatibilityToken(),
+                "The current pipeline (%s) was created with a default layout, and is not "
+                "compatible with the %s at index %u which uses a %s that was not created by "
+                "the pipeline. Either use the bind group layout returned by calling "
+                "getBindGroupLayout(%u) on the pipeline when creating the bind group, or "
+                "provide an explicit pipeline layout when creating the pipeline.",
+                mLastPipeline, mBindgroups[i], static_cast<uint32_t>(i), currentBGL,
+                static_cast<uint32_t>(i));
+
+            DAWN_INVALID_IF(
+                requiredBGL->GetPipelineCompatibilityToken() == PipelineCompatibilityToken(0) &&
+                    currentBGL->GetPipelineCompatibilityToken() != PipelineCompatibilityToken(0),
+                "%s at index %u uses a %s which was created as part of the default layout for "
+                "a different pipeline than the current one (%s), and as a result is not "
+                "compatible. Use an explicit bind group layout when creating bind groups and "
+                "an explicit pipeline layout when creating pipelines to share bind groups "
+                "between pipelines.",
+                mBindgroups[i], static_cast<uint32_t>(i), currentBGL, mLastPipeline);
+
+            DAWN_INVALID_IF(
+                mLastPipelineLayout->GetBindGroupLayout(i) != mBindgroups[i]->GetLayout(),
+                "Bind group layout %s of pipeline layout %s does not match layout %s of bind "
+                "group %s at index %u.",
+                requiredBGL, mLastPipelineLayout, currentBGL, mBindgroups[i],
+                static_cast<uint32_t>(i));
+
+            // TODO(dawn:563): Report the binding sizes and which ones are failing.
+            DAWN_INVALID_IF(!BufferSizesAtLeastAsBig(mBindgroups[i]->GetUnverifiedBufferSizes(),
+                                                     (*mMinBufferSizes)[i]),
+                            "Binding sizes are too small for bind group %s at index %u",
+                            mBindgroups[i], static_cast<uint32_t>(i));
+        }
+
+        // The chunk of code above should be similar to the one in |RecomputeLazyAspects|.
+        // It returns the first invalid state found. We shouldn't be able to reach this line
+        // because to have invalid aspects one of the above conditions must have failed earlier.
+        // If this is reached, make sure lazy aspects and the error checks above are consistent.
+        UNREACHABLE();
+        return DAWN_FORMAT_VALIDATION_ERROR("Bind groups are invalid.");
     }
 
-    void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
-        SetPipelineCommon(pipeline);
-    }
+    UNREACHABLE();
+}
 
-    void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
-                                                 BindGroupBase* bindgroup,
-                                                 uint32_t dynamicOffsetCount,
-                                                 const uint32_t* dynamicOffsets) {
-        mBindgroups[index] = bindgroup;
-        mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
-        mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
-    }
+void CommandBufferStateTracker::SetComputePipeline(ComputePipelineBase* pipeline) {
+    SetPipelineCommon(pipeline);
+}
 
-    void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
-        mIndexBufferSet = true;
-        mIndexFormat = format;
-        mIndexBufferSize = size;
-    }
+void CommandBufferStateTracker::SetRenderPipeline(RenderPipelineBase* pipeline) {
+    SetPipelineCommon(pipeline);
+}
 
-    void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
-        mVertexBufferSlotsUsed.set(slot);
-        mVertexBufferSizes[slot] = size;
-    }
+void CommandBufferStateTracker::SetBindGroup(BindGroupIndex index,
+                                             BindGroupBase* bindgroup,
+                                             uint32_t dynamicOffsetCount,
+                                             const uint32_t* dynamicOffsets) {
+    mBindgroups[index] = bindgroup;
+    mDynamicOffsets[index].assign(dynamicOffsets, dynamicOffsets + dynamicOffsetCount);
+    mAspects.reset(VALIDATION_ASPECT_BIND_GROUPS);
+}
 
-    void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
-        mLastPipeline = pipeline;
-        mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
-        mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;
+void CommandBufferStateTracker::SetIndexBuffer(wgpu::IndexFormat format, uint64_t size) {
+    mIndexBufferSet = true;
+    mIndexFormat = format;
+    mIndexBufferSize = size;
+}
 
-        mAspects.set(VALIDATION_ASPECT_PIPELINE);
+void CommandBufferStateTracker::SetVertexBuffer(VertexBufferSlot slot, uint64_t size) {
+    mVertexBufferSlotsUsed.set(slot);
+    mVertexBufferSizes[slot] = size;
+}
 
-        // Reset lazy aspects so they get recomputed on the next operation.
-        mAspects &= ~kLazyAspects;
-    }
+void CommandBufferStateTracker::SetPipelineCommon(PipelineBase* pipeline) {
+    mLastPipeline = pipeline;
+    mLastPipelineLayout = pipeline != nullptr ? pipeline->GetLayout() : nullptr;
+    mMinBufferSizes = pipeline != nullptr ? &pipeline->GetMinBufferSizes() : nullptr;
 
-    BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
-        return mBindgroups[index];
-    }
+    mAspects.set(VALIDATION_ASPECT_PIPELINE);
 
-    const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
-        BindGroupIndex index) const {
-        return mDynamicOffsets[index];
-    }
+    // Reset lazy aspects so they get recomputed on the next operation.
+    mAspects &= ~kLazyAspects;
+}
 
-    bool CommandBufferStateTracker::HasPipeline() const {
-        return mLastPipeline != nullptr;
-    }
+BindGroupBase* CommandBufferStateTracker::GetBindGroup(BindGroupIndex index) const {
+    return mBindgroups[index];
+}
 
-    RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
-        ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
-        return static_cast<RenderPipelineBase*>(mLastPipeline);
-    }
+const std::vector<uint32_t>& CommandBufferStateTracker::GetDynamicOffsets(
+    BindGroupIndex index) const {
+    return mDynamicOffsets[index];
+}
 
-    ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
-        ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
-        return static_cast<ComputePipelineBase*>(mLastPipeline);
-    }
+bool CommandBufferStateTracker::HasPipeline() const {
+    return mLastPipeline != nullptr;
+}
 
-    PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
-        return mLastPipelineLayout;
-    }
+RenderPipelineBase* CommandBufferStateTracker::GetRenderPipeline() const {
+    ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::RenderPipeline);
+    return static_cast<RenderPipelineBase*>(mLastPipeline);
+}
 
-    wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
-        return mIndexFormat;
-    }
+ComputePipelineBase* CommandBufferStateTracker::GetComputePipeline() const {
+    ASSERT(HasPipeline() && mLastPipeline->GetType() == ObjectType::ComputePipeline);
+    return static_cast<ComputePipelineBase*>(mLastPipeline);
+}
 
-    uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
-        return mIndexBufferSize;
-    }
+PipelineLayoutBase* CommandBufferStateTracker::GetPipelineLayout() const {
+    return mLastPipelineLayout;
+}
+
+wgpu::IndexFormat CommandBufferStateTracker::GetIndexFormat() const {
+    return mIndexFormat;
+}
+
+uint64_t CommandBufferStateTracker::GetIndexBufferSize() const {
+    return mIndexBufferSize;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CommandBufferStateTracker.h b/src/dawn/native/CommandBufferStateTracker.h
index 5c87ea3..a29d8d1 100644
--- a/src/dawn/native/CommandBufferStateTracker.h
+++ b/src/dawn/native/CommandBufferStateTracker.h
@@ -26,62 +26,62 @@
 
 namespace dawn::native {
 
-    class CommandBufferStateTracker {
-      public:
-        // Non-state-modifying validation functions
-        MaybeError ValidateCanDispatch();
-        MaybeError ValidateCanDraw();
-        MaybeError ValidateCanDrawIndexed();
-        MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
-        MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
-                                                          uint32_t firstInstance);
-        MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);
+class CommandBufferStateTracker {
+  public:
+    // Non-state-modifying validation functions
+    MaybeError ValidateCanDispatch();
+    MaybeError ValidateCanDraw();
+    MaybeError ValidateCanDrawIndexed();
+    MaybeError ValidateBufferInRangeForVertexBuffer(uint32_t vertexCount, uint32_t firstVertex);
+    MaybeError ValidateBufferInRangeForInstanceBuffer(uint32_t instanceCount,
+                                                      uint32_t firstInstance);
+    MaybeError ValidateIndexBufferInRange(uint32_t indexCount, uint32_t firstIndex);
 
-        // State-modifying methods
-        void SetComputePipeline(ComputePipelineBase* pipeline);
-        void SetRenderPipeline(RenderPipelineBase* pipeline);
-        void SetBindGroup(BindGroupIndex index,
-                          BindGroupBase* bindgroup,
-                          uint32_t dynamicOffsetCount,
-                          const uint32_t* dynamicOffsets);
-        void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
-        void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);
+    // State-modifying methods
+    void SetComputePipeline(ComputePipelineBase* pipeline);
+    void SetRenderPipeline(RenderPipelineBase* pipeline);
+    void SetBindGroup(BindGroupIndex index,
+                      BindGroupBase* bindgroup,
+                      uint32_t dynamicOffsetCount,
+                      const uint32_t* dynamicOffsets);
+    void SetIndexBuffer(wgpu::IndexFormat format, uint64_t size);
+    void SetVertexBuffer(VertexBufferSlot slot, uint64_t size);
 
-        static constexpr size_t kNumAspects = 4;
-        using ValidationAspects = std::bitset<kNumAspects>;
+    static constexpr size_t kNumAspects = 4;
+    using ValidationAspects = std::bitset<kNumAspects>;
 
-        BindGroupBase* GetBindGroup(BindGroupIndex index) const;
-        const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
-        bool HasPipeline() const;
-        RenderPipelineBase* GetRenderPipeline() const;
-        ComputePipelineBase* GetComputePipeline() const;
-        PipelineLayoutBase* GetPipelineLayout() const;
-        wgpu::IndexFormat GetIndexFormat() const;
-        uint64_t GetIndexBufferSize() const;
+    BindGroupBase* GetBindGroup(BindGroupIndex index) const;
+    const std::vector<uint32_t>& GetDynamicOffsets(BindGroupIndex index) const;
+    bool HasPipeline() const;
+    RenderPipelineBase* GetRenderPipeline() const;
+    ComputePipelineBase* GetComputePipeline() const;
+    PipelineLayoutBase* GetPipelineLayout() const;
+    wgpu::IndexFormat GetIndexFormat() const;
+    uint64_t GetIndexBufferSize() const;
 
-      private:
-        MaybeError ValidateOperation(ValidationAspects requiredAspects);
-        void RecomputeLazyAspects(ValidationAspects aspects);
-        MaybeError CheckMissingAspects(ValidationAspects aspects);
+  private:
+    MaybeError ValidateOperation(ValidationAspects requiredAspects);
+    void RecomputeLazyAspects(ValidationAspects aspects);
+    MaybeError CheckMissingAspects(ValidationAspects aspects);
 
-        void SetPipelineCommon(PipelineBase* pipeline);
+    void SetPipelineCommon(PipelineBase* pipeline);
 
-        ValidationAspects mAspects;
+    ValidationAspects mAspects;
 
-        ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
-        ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
-        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
-        bool mIndexBufferSet = false;
-        wgpu::IndexFormat mIndexFormat;
-        uint64_t mIndexBufferSize = 0;
+    ityp::array<BindGroupIndex, BindGroupBase*, kMaxBindGroups> mBindgroups = {};
+    ityp::array<BindGroupIndex, std::vector<uint32_t>, kMaxBindGroups> mDynamicOffsets = {};
+    ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+    bool mIndexBufferSet = false;
+    wgpu::IndexFormat mIndexFormat;
+    uint64_t mIndexBufferSize = 0;
 
-        ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};
+    ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferSizes = {};
 
-        PipelineLayoutBase* mLastPipelineLayout = nullptr;
-        PipelineBase* mLastPipeline = nullptr;
+    PipelineLayoutBase* mLastPipelineLayout = nullptr;
+    PipelineBase* mLastPipeline = nullptr;
 
-        const RequiredBufferSizes* mMinBufferSizes = nullptr;
-    };
+    const RequiredBufferSizes* mMinBufferSizes = nullptr;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CommandEncoder.cpp b/src/dawn/native/CommandEncoder.cpp
index 8642a38..3900d07 100644
--- a/src/dawn/native/CommandEncoder.cpp
+++ b/src/dawn/native/CommandEncoder.cpp
@@ -41,1385 +41,1357 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        bool HasDeprecatedColor(const RenderPassColorAttachment& attachment) {
-            return !std::isnan(attachment.clearColor.r) || !std::isnan(attachment.clearColor.g) ||
-                   !std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
+bool HasDeprecatedColor(const RenderPassColorAttachment& attachment) {
+    return !std::isnan(attachment.clearColor.r) || !std::isnan(attachment.clearColor.g) ||
+           !std::isnan(attachment.clearColor.b) || !std::isnan(attachment.clearColor.a);
+}
+
+MaybeError ValidateB2BCopyAlignment(uint64_t dataSize, uint64_t srcOffset, uint64_t dstOffset) {
+    // Copy size must be a multiple of 4 bytes on macOS.
+    DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
+
+    // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
+    DAWN_INVALID_IF(srcOffset % 4 != 0 || dstOffset % 4 != 0,
+                    "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
+                    srcOffset, dstOffset);
+
+    return {};
+}
+
+MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
+    DAWN_INVALID_IF(texture->GetSampleCount() > 1,
+                    "%s sample count (%u) is not 1 when copying to or from a buffer.", texture,
+                    texture->GetSampleCount());
+
+    return {};
+}
+
+MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
+                                           const TexelBlockInfo& blockInfo,
+                                           const bool hasDepthOrStencil) {
+    if (hasDepthOrStencil) {
+        // For depth-stencil texture, buffer offset must be a multiple of 4.
+        DAWN_INVALID_IF(layout.offset % 4 != 0,
+                        "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
+                        layout.offset);
+    } else {
+        DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
+                        "Offset (%u) is not a multiple of the texel block byte size (%u).",
+                        layout.offset, blockInfo.byteSize);
+    }
+    return {};
+}
+
+MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(const ImageCopyTexture& src) {
+    Aspect aspectUsed;
+    DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
+    if (aspectUsed == Aspect::Depth) {
+        switch (src.texture->GetFormat().format) {
+            case wgpu::TextureFormat::Depth24Plus:
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "The depth aspect of %s format %s cannot be selected in a texture to "
+                    "buffer copy.",
+                    src.texture, src.texture->GetFormat().format);
+            case wgpu::TextureFormat::Depth32Float:
+            case wgpu::TextureFormat::Depth16Unorm:
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                break;
+
+            default:
+                UNREACHABLE();
         }
+    }
 
-        MaybeError ValidateB2BCopyAlignment(uint64_t dataSize,
-                                            uint64_t srcOffset,
-                                            uint64_t dstOffset) {
-            // Copy size must be a multiple of 4 bytes on macOS.
-            DAWN_INVALID_IF(dataSize % 4 != 0, "Copy size (%u) is not a multiple of 4.", dataSize);
+    return {};
+}
 
-            // SourceOffset and destinationOffset must be multiples of 4 bytes on macOS.
-            DAWN_INVALID_IF(
-                srcOffset % 4 != 0 || dstOffset % 4 != 0,
-                "Source offset (%u) or destination offset (%u) is not a multiple of 4 bytes,",
-                srcOffset, dstOffset);
+MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
+    // Currently we do not support layered rendering.
+    DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
+                    "The layer count (%u) of %s used as attachment is greater than 1.",
+                    attachment->GetLayerCount(), attachment);
 
-            return {};
-        }
+    DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
+                    "The mip level count (%u) of %s used as attachment is greater than 1.",
+                    attachment->GetLevelCount(), attachment);
 
-        MaybeError ValidateTextureSampleCountInBufferCopyCommands(const TextureBase* texture) {
-            DAWN_INVALID_IF(texture->GetSampleCount() > 1,
-                            "%s sample count (%u) is not 1 when copying to or from a buffer.",
-                            texture, texture->GetSampleCount());
+    return {};
+}
 
-            return {};
-        }
+MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
+                                       uint32_t* width,
+                                       uint32_t* height) {
+    const Extent3D& attachmentSize =
+        attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
 
-        MaybeError ValidateLinearTextureCopyOffset(const TextureDataLayout& layout,
-                                                   const TexelBlockInfo& blockInfo,
-                                                   const bool hasDepthOrStencil) {
-            if (hasDepthOrStencil) {
-                // For depth-stencil texture, buffer offset must be a multiple of 4.
-                DAWN_INVALID_IF(layout.offset % 4 != 0,
-                                "Offset (%u) is not a multiple of 4 for depth/stencil texture.",
-                                layout.offset);
-            } else {
-                DAWN_INVALID_IF(layout.offset % blockInfo.byteSize != 0,
-                                "Offset (%u) is not a multiple of the texel block byte size (%u).",
-                                layout.offset, blockInfo.byteSize);
-            }
-            return {};
-        }
+    if (*width == 0) {
+        DAWN_ASSERT(*height == 0);
+        *width = attachmentSize.width;
+        *height = attachmentSize.height;
+        DAWN_ASSERT(*width != 0 && *height != 0);
+    } else {
+        DAWN_INVALID_IF(*width != attachmentSize.width || *height != attachmentSize.height,
+                        "Attachment %s size (width: %u, height: %u) does not match the size of the "
+                        "other attachments (width: %u, height: %u).",
+                        attachment, attachmentSize.width, attachmentSize.height, *width, *height);
+    }
 
-        MaybeError ValidateTextureDepthStencilToBufferCopyRestrictions(
-            const ImageCopyTexture& src) {
-            Aspect aspectUsed;
-            DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(src));
-            if (aspectUsed == Aspect::Depth) {
-                switch (src.texture->GetFormat().format) {
-                    case wgpu::TextureFormat::Depth24Plus:
-                    case wgpu::TextureFormat::Depth24PlusStencil8:
-                    case wgpu::TextureFormat::Depth24UnormStencil8:
-                        return DAWN_FORMAT_VALIDATION_ERROR(
-                            "The depth aspect of %s format %s cannot be selected in a texture to "
-                            "buffer copy.",
-                            src.texture, src.texture->GetFormat().format);
-                    case wgpu::TextureFormat::Depth32Float:
-                    case wgpu::TextureFormat::Depth16Unorm:
-                    case wgpu::TextureFormat::Depth32FloatStencil8:
-                        break;
+    return {};
+}
 
-                    default:
-                        UNREACHABLE();
-                }
-            }
+MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
+                                                   uint32_t* sampleCount) {
+    if (*sampleCount == 0) {
+        *sampleCount = colorAttachment->GetTexture()->GetSampleCount();
+        DAWN_ASSERT(*sampleCount != 0);
+    } else {
+        DAWN_INVALID_IF(
+            *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
+            "Color attachment %s sample count (%u) does not match the sample count of the "
+            "other attachments (%u).",
+            colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
+    }
 
-            return {};
-        }
+    return {};
+}
 
-        MaybeError ValidateAttachmentArrayLayersAndLevelCount(const TextureViewBase* attachment) {
-            // Currently we do not support layered rendering.
-            DAWN_INVALID_IF(attachment->GetLayerCount() > 1,
-                            "The layer count (%u) of %s used as attachment is greater than 1.",
-                            attachment->GetLayerCount(), attachment);
-
-            DAWN_INVALID_IF(attachment->GetLevelCount() > 1,
-                            "The mip level count (%u) of %s used as attachment is greater than 1.",
-                            attachment->GetLevelCount(), attachment);
-
-            return {};
-        }
-
-        MaybeError ValidateOrSetAttachmentSize(const TextureViewBase* attachment,
-                                               uint32_t* width,
-                                               uint32_t* height) {
-            const Extent3D& attachmentSize =
-                attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
-
-            if (*width == 0) {
-                DAWN_ASSERT(*height == 0);
-                *width = attachmentSize.width;
-                *height = attachmentSize.height;
-                DAWN_ASSERT(*width != 0 && *height != 0);
-            } else {
-                DAWN_INVALID_IF(
-                    *width != attachmentSize.width || *height != attachmentSize.height,
-                    "Attachment %s size (width: %u, height: %u) does not match the size of the "
-                    "other attachments (width: %u, height: %u).",
-                    attachment, attachmentSize.width, attachmentSize.height, *width, *height);
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateOrSetColorAttachmentSampleCount(const TextureViewBase* colorAttachment,
-                                                           uint32_t* sampleCount) {
-            if (*sampleCount == 0) {
-                *sampleCount = colorAttachment->GetTexture()->GetSampleCount();
-                DAWN_ASSERT(*sampleCount != 0);
-            } else {
-                DAWN_INVALID_IF(
-                    *sampleCount != colorAttachment->GetTexture()->GetSampleCount(),
-                    "Color attachment %s sample count (%u) does not match the sample count of the "
-                    "other attachments (%u).",
-                    colorAttachment, colorAttachment->GetTexture()->GetSampleCount(), *sampleCount);
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateResolveTarget(const DeviceBase* device,
-                                         const RenderPassColorAttachment& colorAttachment,
-                                         UsageValidationMode usageValidationMode) {
-            if (colorAttachment.resolveTarget == nullptr) {
-                return {};
-            }
-
-            const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
-            const TextureViewBase* attachment = colorAttachment.view;
-            DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
-            DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
-                                      wgpu::TextureUsage::RenderAttachment, usageValidationMode));
-
-            DAWN_INVALID_IF(
-                !attachment->GetTexture()->IsMultisampledTexture(),
-                "Cannot set %s as a resolve target when the color attachment %s has a sample "
-                "count of 1.",
-                resolveTarget, attachment);
-
-            DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
-                            "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
-                            resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
-
-            DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
-                            "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
-                            resolveTarget->GetLayerCount());
-
-            DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
-                            "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
-                            resolveTarget->GetLevelCount());
-
-            const Extent3D& colorTextureSize =
-                attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
-            const Extent3D& resolveTextureSize =
-                resolveTarget->GetTexture()->GetMipLevelVirtualSize(
-                    resolveTarget->GetBaseMipLevel());
-            DAWN_INVALID_IF(
-                colorTextureSize.width != resolveTextureSize.width ||
-                    colorTextureSize.height != resolveTextureSize.height,
-                "The Resolve target %s size (width: %u, height: %u) does not match the color "
-                "attachment %s size (width: %u, height: %u).",
-                resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
-                colorTextureSize.width, colorTextureSize.height);
-
-            wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
-            DAWN_INVALID_IF(
-                resolveTargetFormat != attachment->GetFormat().format,
-                "The resolve target %s format (%s) does not match the color attachment %s format "
-                "(%s).",
-                resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
-            DAWN_INVALID_IF(
-                !resolveTarget->GetFormat().supportsResolveTarget,
-                "The resolve target %s format (%s) does not support being used as resolve target.",
-                resolveTarget, resolveTargetFormat);
-
-            return {};
-        }
-
-        MaybeError ValidateRenderPassColorAttachment(
-            DeviceBase* device,
-            const RenderPassColorAttachment& colorAttachment,
-            uint32_t* width,
-            uint32_t* height,
-            uint32_t* sampleCount,
-            UsageValidationMode usageValidationMode) {
-            TextureViewBase* attachment = colorAttachment.view;
-            if (attachment == nullptr) {
-                return {};
-            }
-            DAWN_TRY(device->ValidateObject(attachment));
-            DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
-                                      wgpu::TextureUsage::RenderAttachment, usageValidationMode));
-
-            DAWN_INVALID_IF(!(attachment->GetAspects() & Aspect::Color) ||
-                                !attachment->GetFormat().isRenderable,
-                            "The color attachment %s format (%s) is not color renderable.",
-                            attachment, attachment->GetFormat().format);
-
-            DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
-            DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
-            DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined,
-                            "loadOp must be set.");
-            DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined,
-                            "storeOp must be set.");
-
-            // TODO(dawn:1269): Remove after the deprecation period.
-            bool useClearColor = HasDeprecatedColor(colorAttachment);
-            const dawn::native::Color& clearValue =
-                useClearColor ? colorAttachment.clearColor : colorAttachment.clearValue;
-            if (useClearColor) {
-                device->EmitDeprecationWarning(
-                    "clearColor is deprecated, prefer using clearValue instead.");
-            }
-
-            if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
-                DAWN_INVALID_IF(std::isnan(clearValue.r) || std::isnan(clearValue.g) ||
-                                    std::isnan(clearValue.b) || std::isnan(clearValue.a),
-                                "Color clear value (%s) contain a NaN.", &clearValue);
-            }
-
-            DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
-
-            DAWN_TRY(ValidateResolveTarget(device, colorAttachment, usageValidationMode));
-
-            DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
-            DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
-
-            return {};
-        }
-
-        MaybeError ValidateRenderPassDepthStencilAttachment(
-            DeviceBase* device,
-            const RenderPassDepthStencilAttachment* depthStencilAttachment,
-            uint32_t* width,
-            uint32_t* height,
-            uint32_t* sampleCount,
-            UsageValidationMode usageValidationMode) {
-            DAWN_ASSERT(depthStencilAttachment != nullptr);
-
-            TextureViewBase* attachment = depthStencilAttachment->view;
-            DAWN_TRY(device->ValidateObject(attachment));
-            DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(),
-                                      wgpu::TextureUsage::RenderAttachment, usageValidationMode));
-
-            const Format& format = attachment->GetFormat();
-            DAWN_INVALID_IF(
-                !format.HasDepthOrStencil(),
-                "The depth stencil attachment %s format (%s) is not a depth stencil format.",
-                attachment, format.format);
-
-            DAWN_INVALID_IF(!format.isRenderable,
-                            "The depth stencil attachment %s format (%s) is not renderable.",
-                            attachment, format.format);
-
-            DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
-                            "The depth stencil attachment %s must encompass all aspects.",
-                            attachment);
-
-            DAWN_INVALID_IF(
-                attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
-                    depthStencilAttachment->depthReadOnly !=
-                        depthStencilAttachment->stencilReadOnly,
-                "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
-                "is 'all'.",
-                depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
-
-            // Read only, or depth doesn't exist.
-            if (depthStencilAttachment->depthReadOnly ||
-                !IsSubset(Aspect::Depth, attachment->GetAspects())) {
-                if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Load &&
-                    depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Store) {
-                    // TODO(dawn:1269): Remove this branch after the deprecation period.
-                    device->EmitDeprecationWarning(
-                        "Setting depthLoadOp and depthStoreOp when "
-                        "the attachment has no depth aspect or depthReadOnly is true is "
-                        "deprecated.");
-                } else {
-                    DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Undefined,
-                                    "depthLoadOp (%s) must not be set if the attachment (%s) has "
-                                    "no depth aspect or depthReadOnly (%u) is true.",
-                                    depthStencilAttachment->depthLoadOp, attachment,
-                                    depthStencilAttachment->depthReadOnly);
-                    DAWN_INVALID_IF(
-                        depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
-                        "depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
-                        "aspect or depthReadOnly (%u) is true.",
-                        depthStencilAttachment->depthStoreOp, attachment,
-                        depthStencilAttachment->depthReadOnly);
-                }
-            } else {
-                DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
-                DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Undefined,
-                                "depthLoadOp must be set if the attachment (%s) has a depth aspect "
-                                "and depthReadOnly (%u) is false.",
-                                attachment, depthStencilAttachment->depthReadOnly);
-                DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
-                DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Undefined,
-                                "depthStoreOp must be set if the attachment (%s) has a depth "
-                                "aspect and depthReadOnly (%u) is false.",
-                                attachment, depthStencilAttachment->depthReadOnly);
-            }
-
-            // Read only, or stencil doesn't exist.
-            if (depthStencilAttachment->stencilReadOnly ||
-                !IsSubset(Aspect::Stencil, attachment->GetAspects())) {
-                if (depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Load &&
-                    depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Store) {
-                    // TODO(dawn:1269): Remove this branch after the deprecation period.
-                    device->EmitDeprecationWarning(
-                        "Setting stencilLoadOp and stencilStoreOp when "
-                        "the attachment has no stencil aspect or stencilReadOnly is true is "
-                        "deprecated.");
-                } else {
-                    DAWN_INVALID_IF(
-                        depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Undefined,
-                        "stencilLoadOp (%s) must not be set if the attachment (%s) has no stencil "
-                        "aspect or stencilReadOnly (%u) is true.",
-                        depthStencilAttachment->stencilLoadOp, attachment,
-                        depthStencilAttachment->stencilReadOnly);
-                    DAWN_INVALID_IF(
-                        depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Undefined,
-                        "stencilStoreOp (%s) must not be set if the attachment (%s) has no stencil "
-                        "aspect or stencilReadOnly (%u) is true.",
-                        depthStencilAttachment->stencilStoreOp, attachment,
-                        depthStencilAttachment->stencilReadOnly);
-                }
-            } else {
-                DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
-                DAWN_INVALID_IF(
-                    depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
-                    "stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
-                    "aspect and stencilReadOnly (%u) is false.",
-                    depthStencilAttachment->stencilLoadOp, attachment,
-                    depthStencilAttachment->stencilReadOnly);
-                DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
-                DAWN_INVALID_IF(
-                    depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
-                    "stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
-                    "aspect and stencilReadOnly (%u) is false.",
-                    depthStencilAttachment->stencilStoreOp, attachment,
-                    depthStencilAttachment->stencilReadOnly);
-            }
-
-            if (!std::isnan(depthStencilAttachment->clearDepth)) {
-                // TODO(dawn:1269): Remove this branch after the deprecation period.
-                device->EmitDeprecationWarning(
-                    "clearDepth is deprecated, prefer depthClearValue instead.");
-            } else {
-                DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
-                                    std::isnan(depthStencilAttachment->depthClearValue),
-                                "depthClearValue is NaN.");
-            }
-
-            // TODO(dawn:1269): Remove after the deprecation period.
-            if (depthStencilAttachment->stencilClearValue == 0 &&
-                depthStencilAttachment->clearStencil != 0) {
-                device->EmitDeprecationWarning(
-                    "clearStencil is deprecated, prefer stencilClearValue instead.");
-            }
-
-            // *sampleCount == 0 must only happen when there is no color attachment. In that case we
-            // do not need to validate the sample count of the depth stencil attachment.
-            const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
-            if (*sampleCount != 0) {
-                DAWN_INVALID_IF(
-                    depthStencilSampleCount != *sampleCount,
-                    "The depth stencil attachment %s sample count (%u) does not match the sample "
-                    "count of the other attachments (%u).",
-                    attachment, depthStencilSampleCount, *sampleCount);
-            } else {
-                *sampleCount = depthStencilSampleCount;
-            }
-
-            DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
-            DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
-
-            return {};
-        }
-
-        MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
-                                                const RenderPassDescriptor* descriptor,
-                                                uint32_t* width,
-                                                uint32_t* height,
-                                                uint32_t* sampleCount,
-                                                UsageValidationMode usageValidationMode) {
-            DAWN_INVALID_IF(
-                descriptor->colorAttachmentCount > kMaxColorAttachments,
-                "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
-                descriptor->colorAttachmentCount, kMaxColorAttachments);
-
-            bool isAllColorAttachmentNull = true;
-            for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
-                DAWN_TRY_CONTEXT(ValidateRenderPassColorAttachment(
-                                     device, descriptor->colorAttachments[i], width, height,
-                                     sampleCount, usageValidationMode),
-                                 "validating colorAttachments[%u].", i);
-                if (descriptor->colorAttachments[i].view) {
-                    isAllColorAttachmentNull = false;
-                }
-            }
-
-            if (descriptor->depthStencilAttachment != nullptr) {
-                DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
-                                     device, descriptor->depthStencilAttachment, width, height,
-                                     sampleCount, usageValidationMode),
-                                 "validating depthStencilAttachment.");
-            } else {
-                DAWN_INVALID_IF(
-                    isAllColorAttachmentNull,
-                    "No color or depthStencil attachments specified. At least one is required.");
-            }
-
-            if (descriptor->occlusionQuerySet != nullptr) {
-                DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
-
-                DAWN_INVALID_IF(
-                    descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
-                    "The occlusionQuerySet %s type (%s) is not %s.", descriptor->occlusionQuerySet,
-                    descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
-            }
-
-            if (descriptor->timestampWriteCount > 0) {
-                DAWN_ASSERT(descriptor->timestampWrites != nullptr);
-
-                // Record the query set and query index used on render passes for validating query
-                // index overwrite. The TrackQueryAvailability of
-                // RenderPassResourceUsageTracker is not used here because the timestampWrites are
-                // not validated and encoded one by one, but encoded together after passing the
-                // validation.
-                QueryAvailabilityMap usedQueries;
-                for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
-                    QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
-                    DAWN_ASSERT(querySet != nullptr);
-                    uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
-                    DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
-                                     "validating querySet and queryIndex of timestampWrites[%u].",
-                                     i);
-                    DAWN_TRY_CONTEXT(ValidateRenderPassTimestampLocation(
-                                         descriptor->timestampWrites[i].location),
-                                     "validating location of timestampWrites[%u].", i);
-
-                    auto checkIt = usedQueries.find(querySet);
-                    DAWN_INVALID_IF(checkIt != usedQueries.end() && checkIt->second[queryIndex],
-                                    "Query index %u of %s is written to twice in a render pass.",
-                                    queryIndex, querySet);
-
-                    // Gets the iterator for that querySet or create a new vector of bool set to
-                    // false if the querySet wasn't registered.
-                    auto addIt = usedQueries.emplace(querySet, querySet->GetQueryCount()).first;
-                    addIt->second[queryIndex] = true;
-                }
-            }
-
-            DAWN_INVALID_IF(descriptor->colorAttachmentCount == 0 &&
-                                descriptor->depthStencilAttachment == nullptr,
-                            "Render pass has no attachments.");
-
-            return {};
-        }
-
-        MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
-                                                 const ComputePassDescriptor* descriptor) {
-            if (descriptor == nullptr) {
-                return {};
-            }
-
-            if (descriptor->timestampWriteCount > 0) {
-                DAWN_ASSERT(descriptor->timestampWrites != nullptr);
-
-                for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
-                    DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
-                    DAWN_TRY_CONTEXT(
-                        ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
-                                               descriptor->timestampWrites[i].queryIndex),
-                        "validating querySet and queryIndex of timestampWrites[%u].", i);
-                    DAWN_TRY_CONTEXT(ValidateComputePassTimestampLocation(
-                                         descriptor->timestampWrites[i].location),
-                                     "validating location of timestampWrites[%u].", i);
-                }
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
-                                           uint32_t firstQuery,
-                                           uint32_t queryCount,
-                                           const BufferBase* destination,
-                                           uint64_t destinationOffset) {
-            DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
-                            "First query (%u) exceeds the number of queries (%u) in %s.",
-                            firstQuery, querySet->GetQueryCount(), querySet);
-
-            DAWN_INVALID_IF(
-                queryCount > querySet->GetQueryCount() - firstQuery,
-                "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
-                "(%u) in %s.",
-                firstQuery, queryCount, querySet->GetQueryCount(), querySet);
-
-            DAWN_INVALID_IF(destinationOffset % 256 != 0,
-                            "The destination buffer %s offset (%u) is not a multiple of 256.",
-                            destination, destinationOffset);
-
-            uint64_t bufferSize = destination->GetSize();
-            // The destination buffer must have enough storage, from destination offset, to contain
-            // the result of resolved queries
-            bool fitsInBuffer = destinationOffset <= bufferSize &&
-                                (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <=
-                                 (bufferSize - destinationOffset));
-            DAWN_INVALID_IF(
-                !fitsInBuffer,
-                "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
-                querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination,
-                bufferSize, destinationOffset);
-
-            return {};
-        }
-
-        MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
-                                                           QuerySetBase* querySet,
-                                                           uint32_t firstQuery,
-                                                           uint32_t queryCount,
-                                                           BufferBase* destination,
-                                                           uint64_t destinationOffset) {
-            DeviceBase* device = encoder->GetDevice();
-
-            // The availability got from query set is a reference to vector<bool>, need to covert
-            // bool to uint32_t due to a user input in pipeline must not contain a bool type in
-            // WGSL.
-            std::vector<uint32_t> availability{querySet->GetQueryAvailability().begin(),
-                                               querySet->GetQueryAvailability().end()};
-
-            // Timestamp availability storage buffer
-            BufferDescriptor availabilityDesc = {};
-            availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
-            availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
-            Ref<BufferBase> availabilityBuffer;
-            DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
-
-            DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0,
-                                                     availability.data(),
-                                                     availability.size() * sizeof(uint32_t)));
-
-            // Timestamp params uniform buffer
-            TimestampParams params(firstQuery, queryCount, static_cast<uint32_t>(destinationOffset),
-                                   device->GetTimestampPeriodInNS());
-
-            BufferDescriptor parmsDesc = {};
-            parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
-            parmsDesc.size = sizeof(params);
-            Ref<BufferBase> paramsBuffer;
-            DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
-
-            DAWN_TRY(
-                device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
-
-            return EncodeConvertTimestampsToNanoseconds(
-                encoder, destination, availabilityBuffer.Get(), paramsBuffer.Get());
-        }
-
-        bool IsReadOnlyDepthStencilAttachment(
-            const RenderPassDepthStencilAttachment* depthStencilAttachment) {
-            DAWN_ASSERT(depthStencilAttachment != nullptr);
-            Aspect aspects = depthStencilAttachment->view->GetAspects();
-            DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
-
-            if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
-                return false;
-            }
-            if (aspects & Aspect::Stencil && !depthStencilAttachment->stencilReadOnly) {
-                return false;
-            }
-            return true;
-        }
-
-    }  // namespace
-
-    MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
-                                                const CommandEncoderDescriptor* descriptor) {
-        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
-                                     wgpu::SType::DawnEncoderInternalUsageDescriptor));
-
-        const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &internalUsageDesc);
-
-        DAWN_INVALID_IF(internalUsageDesc != nullptr &&
-                            !device->APIHasFeature(wgpu::FeatureName::DawnInternalUsages),
-                        "%s is not available.", wgpu::FeatureName::DawnInternalUsages);
+MaybeError ValidateResolveTarget(const DeviceBase* device,
+                                 const RenderPassColorAttachment& colorAttachment,
+                                 UsageValidationMode usageValidationMode) {
+    if (colorAttachment.resolveTarget == nullptr) {
         return {};
     }
 
-    // static
-    Ref<CommandEncoder> CommandEncoder::Create(DeviceBase* device,
-                                               const CommandEncoderDescriptor* descriptor) {
-        return AcquireRef(new CommandEncoder(device, descriptor));
+    const TextureViewBase* resolveTarget = colorAttachment.resolveTarget;
+    const TextureViewBase* attachment = colorAttachment.view;
+    DAWN_TRY(device->ValidateObject(colorAttachment.resolveTarget));
+    DAWN_TRY(ValidateCanUseAs(colorAttachment.resolveTarget->GetTexture(),
+                              wgpu::TextureUsage::RenderAttachment, usageValidationMode));
+
+    DAWN_INVALID_IF(!attachment->GetTexture()->IsMultisampledTexture(),
+                    "Cannot set %s as a resolve target when the color attachment %s has a sample "
+                    "count of 1.",
+                    resolveTarget, attachment);
+
+    DAWN_INVALID_IF(resolveTarget->GetTexture()->IsMultisampledTexture(),
+                    "Cannot use %s as resolve target. Sample count (%u) is greater than 1.",
+                    resolveTarget, resolveTarget->GetTexture()->GetSampleCount());
+
+    DAWN_INVALID_IF(resolveTarget->GetLayerCount() > 1,
+                    "The resolve target %s array layer count (%u) is not 1.", resolveTarget,
+                    resolveTarget->GetLayerCount());
+
+    DAWN_INVALID_IF(resolveTarget->GetLevelCount() > 1,
+                    "The resolve target %s mip level count (%u) is not 1.", resolveTarget,
+                    resolveTarget->GetLevelCount());
+
+    const Extent3D& colorTextureSize =
+        attachment->GetTexture()->GetMipLevelVirtualSize(attachment->GetBaseMipLevel());
+    const Extent3D& resolveTextureSize =
+        resolveTarget->GetTexture()->GetMipLevelVirtualSize(resolveTarget->GetBaseMipLevel());
+    DAWN_INVALID_IF(colorTextureSize.width != resolveTextureSize.width ||
+                        colorTextureSize.height != resolveTextureSize.height,
+                    "The Resolve target %s size (width: %u, height: %u) does not match the color "
+                    "attachment %s size (width: %u, height: %u).",
+                    resolveTarget, resolveTextureSize.width, resolveTextureSize.height, attachment,
+                    colorTextureSize.width, colorTextureSize.height);
+
+    wgpu::TextureFormat resolveTargetFormat = resolveTarget->GetFormat().format;
+    DAWN_INVALID_IF(
+        resolveTargetFormat != attachment->GetFormat().format,
+        "The resolve target %s format (%s) does not match the color attachment %s format "
+        "(%s).",
+        resolveTarget, resolveTargetFormat, attachment, attachment->GetFormat().format);
+    DAWN_INVALID_IF(
+        !resolveTarget->GetFormat().supportsResolveTarget,
+        "The resolve target %s format (%s) does not support being used as resolve target.",
+        resolveTarget, resolveTargetFormat);
+
+    return {};
+}
+
+MaybeError ValidateRenderPassColorAttachment(DeviceBase* device,
+                                             const RenderPassColorAttachment& colorAttachment,
+                                             uint32_t* width,
+                                             uint32_t* height,
+                                             uint32_t* sampleCount,
+                                             UsageValidationMode usageValidationMode) {
+    TextureViewBase* attachment = colorAttachment.view;
+    if (attachment == nullptr) {
+        return {};
+    }
+    DAWN_TRY(device->ValidateObject(attachment));
+    DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
+                              usageValidationMode));
+
+    DAWN_INVALID_IF(
+        !(attachment->GetAspects() & Aspect::Color) || !attachment->GetFormat().isRenderable,
+        "The color attachment %s format (%s) is not color renderable.", attachment,
+        attachment->GetFormat().format);
+
+    DAWN_TRY(ValidateLoadOp(colorAttachment.loadOp));
+    DAWN_TRY(ValidateStoreOp(colorAttachment.storeOp));
+    DAWN_INVALID_IF(colorAttachment.loadOp == wgpu::LoadOp::Undefined, "loadOp must be set.");
+    DAWN_INVALID_IF(colorAttachment.storeOp == wgpu::StoreOp::Undefined, "storeOp must be set.");
+
+    // TODO(dawn:1269): Remove after the deprecation period.
+    bool useClearColor = HasDeprecatedColor(colorAttachment);
+    const dawn::native::Color& clearValue =
+        useClearColor ? colorAttachment.clearColor : colorAttachment.clearValue;
+    if (useClearColor) {
+        device->EmitDeprecationWarning(
+            "clearColor is deprecated, prefer using clearValue instead.");
     }
 
-    // static
-    CommandEncoder* CommandEncoder::MakeError(DeviceBase* device) {
-        return new CommandEncoder(device, ObjectBase::kError);
+    if (colorAttachment.loadOp == wgpu::LoadOp::Clear) {
+        DAWN_INVALID_IF(std::isnan(clearValue.r) || std::isnan(clearValue.g) ||
+                            std::isnan(clearValue.b) || std::isnan(clearValue.a),
+                        "Color clear value (%s) contain a NaN.", &clearValue);
     }
 
-    CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
-        : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
-        TrackInDevice();
+    DAWN_TRY(ValidateOrSetColorAttachmentSampleCount(attachment, sampleCount));
 
-        const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &internalUsageDesc);
+    DAWN_TRY(ValidateResolveTarget(device, colorAttachment, usageValidationMode));
 
-        if (internalUsageDesc != nullptr && internalUsageDesc->useInternalUsages) {
-            mUsageValidationMode = UsageValidationMode::Internal;
+    DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+    DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+    return {};
+}
+
+MaybeError ValidateRenderPassDepthStencilAttachment(
+    DeviceBase* device,
+    const RenderPassDepthStencilAttachment* depthStencilAttachment,
+    uint32_t* width,
+    uint32_t* height,
+    uint32_t* sampleCount,
+    UsageValidationMode usageValidationMode) {
+    DAWN_ASSERT(depthStencilAttachment != nullptr);
+
+    TextureViewBase* attachment = depthStencilAttachment->view;
+    DAWN_TRY(device->ValidateObject(attachment));
+    DAWN_TRY(ValidateCanUseAs(attachment->GetTexture(), wgpu::TextureUsage::RenderAttachment,
+                              usageValidationMode));
+
+    const Format& format = attachment->GetFormat();
+    DAWN_INVALID_IF(!format.HasDepthOrStencil(),
+                    "The depth stencil attachment %s format (%s) is not a depth stencil format.",
+                    attachment, format.format);
+
+    DAWN_INVALID_IF(!format.isRenderable,
+                    "The depth stencil attachment %s format (%s) is not renderable.", attachment,
+                    format.format);
+
+    DAWN_INVALID_IF(attachment->GetAspects() != format.aspects,
+                    "The depth stencil attachment %s must encompass all aspects.", attachment);
+
+    DAWN_INVALID_IF(
+        attachment->GetAspects() == (Aspect::Depth | Aspect::Stencil) &&
+            depthStencilAttachment->depthReadOnly != depthStencilAttachment->stencilReadOnly,
+        "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when texture aspect "
+        "is 'all'.",
+        depthStencilAttachment->depthReadOnly, depthStencilAttachment->stencilReadOnly);
+
+    // Read only, or depth doesn't exist.
+    if (depthStencilAttachment->depthReadOnly ||
+        !IsSubset(Aspect::Depth, attachment->GetAspects())) {
+        if (depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Load &&
+            depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Store) {
+            // TODO(dawn:1269): Remove this branch after the deprecation period.
+            device->EmitDeprecationWarning(
+                "Setting depthLoadOp and depthStoreOp when "
+                "the attachment has no depth aspect or depthReadOnly is true is "
+                "deprecated.");
         } else {
-            mUsageValidationMode = UsageValidationMode::Default;
+            DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp != wgpu::LoadOp::Undefined,
+                            "depthLoadOp (%s) must not be set if the attachment (%s) has "
+                            "no depth aspect or depthReadOnly (%u) is true.",
+                            depthStencilAttachment->depthLoadOp, attachment,
+                            depthStencilAttachment->depthReadOnly);
+            DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp != wgpu::StoreOp::Undefined,
+                            "depthStoreOp (%s) must not be set if the attachment (%s) has no depth "
+                            "aspect or depthReadOnly (%u) is true.",
+                            depthStencilAttachment->depthStoreOp, attachment,
+                            depthStencilAttachment->depthReadOnly);
+        }
+    } else {
+        DAWN_TRY(ValidateLoadOp(depthStencilAttachment->depthLoadOp));
+        DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Undefined,
+                        "depthLoadOp must be set if the attachment (%s) has a depth aspect "
+                        "and depthReadOnly (%u) is false.",
+                        attachment, depthStencilAttachment->depthReadOnly);
+        DAWN_TRY(ValidateStoreOp(depthStencilAttachment->depthStoreOp));
+        DAWN_INVALID_IF(depthStencilAttachment->depthStoreOp == wgpu::StoreOp::Undefined,
+                        "depthStoreOp must be set if the attachment (%s) has a depth "
+                        "aspect and depthReadOnly (%u) is false.",
+                        attachment, depthStencilAttachment->depthReadOnly);
+    }
+
+    // Read only, or stencil doesn't exist.
+    if (depthStencilAttachment->stencilReadOnly ||
+        !IsSubset(Aspect::Stencil, attachment->GetAspects())) {
+        if (depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Load &&
+            depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Store) {
+            // TODO(dawn:1269): Remove this branch after the deprecation period.
+            device->EmitDeprecationWarning(
+                "Setting stencilLoadOp and stencilStoreOp when "
+                "the attachment has no stencil aspect or stencilReadOnly is true is "
+                "deprecated.");
+        } else {
+            DAWN_INVALID_IF(
+                depthStencilAttachment->stencilLoadOp != wgpu::LoadOp::Undefined,
+                "stencilLoadOp (%s) must not be set if the attachment (%s) has no stencil "
+                "aspect or stencilReadOnly (%u) is true.",
+                depthStencilAttachment->stencilLoadOp, attachment,
+                depthStencilAttachment->stencilReadOnly);
+            DAWN_INVALID_IF(
+                depthStencilAttachment->stencilStoreOp != wgpu::StoreOp::Undefined,
+                "stencilStoreOp (%s) must not be set if the attachment (%s) has no stencil "
+                "aspect or stencilReadOnly (%u) is true.",
+                depthStencilAttachment->stencilStoreOp, attachment,
+                depthStencilAttachment->stencilReadOnly);
+        }
+    } else {
+        DAWN_TRY(ValidateLoadOp(depthStencilAttachment->stencilLoadOp));
+        DAWN_INVALID_IF(depthStencilAttachment->stencilLoadOp == wgpu::LoadOp::Undefined,
+                        "stencilLoadOp (%s) must be set if the attachment (%s) has a stencil "
+                        "aspect and stencilReadOnly (%u) is false.",
+                        depthStencilAttachment->stencilLoadOp, attachment,
+                        depthStencilAttachment->stencilReadOnly);
+        DAWN_TRY(ValidateStoreOp(depthStencilAttachment->stencilStoreOp));
+        DAWN_INVALID_IF(depthStencilAttachment->stencilStoreOp == wgpu::StoreOp::Undefined,
+                        "stencilStoreOp (%s) must be set if the attachment (%s) has a stencil "
+                        "aspect and stencilReadOnly (%u) is false.",
+                        depthStencilAttachment->stencilStoreOp, attachment,
+                        depthStencilAttachment->stencilReadOnly);
+    }
+
+    if (!std::isnan(depthStencilAttachment->clearDepth)) {
+        // TODO(dawn:1269): Remove this branch after the deprecation period.
+        device->EmitDeprecationWarning("clearDepth is deprecated, prefer depthClearValue instead.");
+    } else {
+        DAWN_INVALID_IF(depthStencilAttachment->depthLoadOp == wgpu::LoadOp::Clear &&
+                            std::isnan(depthStencilAttachment->depthClearValue),
+                        "depthClearValue is NaN.");
+    }
+
+    // TODO(dawn:1269): Remove after the deprecation period.
+    if (depthStencilAttachment->stencilClearValue == 0 &&
+        depthStencilAttachment->clearStencil != 0) {
+        device->EmitDeprecationWarning(
+            "clearStencil is deprecated, prefer stencilClearValue instead.");
+    }
+
+    // *sampleCount == 0 must only happen when there is no color attachment. In that case we
+    // do not need to validate the sample count of the depth stencil attachment.
+    const uint32_t depthStencilSampleCount = attachment->GetTexture()->GetSampleCount();
+    if (*sampleCount != 0) {
+        DAWN_INVALID_IF(
+            depthStencilSampleCount != *sampleCount,
+            "The depth stencil attachment %s sample count (%u) does not match the sample "
+            "count of the other attachments (%u).",
+            attachment, depthStencilSampleCount, *sampleCount);
+    } else {
+        *sampleCount = depthStencilSampleCount;
+    }
+
+    DAWN_TRY(ValidateAttachmentArrayLayersAndLevelCount(attachment));
+    DAWN_TRY(ValidateOrSetAttachmentSize(attachment, width, height));
+
+    return {};
+}
+
+MaybeError ValidateRenderPassDescriptor(DeviceBase* device,
+                                        const RenderPassDescriptor* descriptor,
+                                        uint32_t* width,
+                                        uint32_t* height,
+                                        uint32_t* sampleCount,
+                                        UsageValidationMode usageValidationMode) {
+    DAWN_INVALID_IF(
+        descriptor->colorAttachmentCount > kMaxColorAttachments,
+        "Color attachment count (%u) exceeds the maximum number of color attachments (%u).",
+        descriptor->colorAttachmentCount, kMaxColorAttachments);
+
+    bool isAllColorAttachmentNull = true;
+    for (uint32_t i = 0; i < descriptor->colorAttachmentCount; ++i) {
+        DAWN_TRY_CONTEXT(
+            ValidateRenderPassColorAttachment(device, descriptor->colorAttachments[i], width,
+                                              height, sampleCount, usageValidationMode),
+            "validating colorAttachments[%u].", i);
+        if (descriptor->colorAttachments[i].view) {
+            isAllColorAttachmentNull = false;
         }
     }
 
-    CommandEncoder::CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag),
-          mEncodingContext(device, this),
-          mUsageValidationMode(UsageValidationMode::Default) {
-        mEncodingContext.HandleError(DAWN_FORMAT_VALIDATION_ERROR("%s is invalid.", this));
+    if (descriptor->depthStencilAttachment != nullptr) {
+        DAWN_TRY_CONTEXT(ValidateRenderPassDepthStencilAttachment(
+                             device, descriptor->depthStencilAttachment, width, height, sampleCount,
+                             usageValidationMode),
+                         "validating depthStencilAttachment.");
+    } else {
+        DAWN_INVALID_IF(
+            isAllColorAttachmentNull,
+            "No color or depthStencil attachments specified. At least one is required.");
     }
 
-    ObjectType CommandEncoder::GetType() const {
-        return ObjectType::CommandEncoder;
+    if (descriptor->occlusionQuerySet != nullptr) {
+        DAWN_TRY(device->ValidateObject(descriptor->occlusionQuerySet));
+
+        DAWN_INVALID_IF(descriptor->occlusionQuerySet->GetQueryType() != wgpu::QueryType::Occlusion,
+                        "The occlusionQuerySet %s type (%s) is not %s.",
+                        descriptor->occlusionQuerySet,
+                        descriptor->occlusionQuerySet->GetQueryType(), wgpu::QueryType::Occlusion);
     }
 
-    void CommandEncoder::DestroyImpl() {
-        mEncodingContext.Destroy();
-    }
+    if (descriptor->timestampWriteCount > 0) {
+        DAWN_ASSERT(descriptor->timestampWrites != nullptr);
 
-    CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
-        return CommandBufferResourceUsage{
-            mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
-            std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
-    }
+        // Record the query set and query index used on render passes for validating query
+        // index overwrite. The TrackQueryAvailability of
+        // RenderPassResourceUsageTracker is not used here because the timestampWrites are
+        // not validated and encoded one by one, but encoded together after passing the
+        // validation.
+        QueryAvailabilityMap usedQueries;
+        for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
+            QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+            DAWN_ASSERT(querySet != nullptr);
+            uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
+            DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, querySet, queryIndex),
+                             "validating querySet and queryIndex of timestampWrites[%u].", i);
+            DAWN_TRY_CONTEXT(
+                ValidateRenderPassTimestampLocation(descriptor->timestampWrites[i].location),
+                "validating location of timestampWrites[%u].", i);
 
-    CommandIterator CommandEncoder::AcquireCommands() {
-        return mEncodingContext.AcquireCommands();
-    }
+            auto checkIt = usedQueries.find(querySet);
+            DAWN_INVALID_IF(checkIt != usedQueries.end() && checkIt->second[queryIndex],
+                            "Query index %u of %s is written to twice in a render pass.",
+                            queryIndex, querySet);
 
-    void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
-        mUsedQuerySets.insert(querySet);
-    }
-
-    void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
-        DAWN_ASSERT(querySet != nullptr);
-
-        if (GetDevice()->IsValidationEnabled()) {
-            TrackUsedQuerySet(querySet);
+            // Gets the iterator for that querySet, or creates a new vector of bools
+            // initialized to false if the querySet wasn't registered yet.
+            auto addIt = usedQueries.emplace(querySet, querySet->GetQueryCount()).first;
+            addIt->second[queryIndex] = true;
         }
-
-        // Set the query at queryIndex to available for resolving in query set.
-        querySet->SetQueryAvailability(queryIndex, true);
     }
 
-    // Implementation of the API's command recording methods
+    DAWN_INVALID_IF(
+        descriptor->colorAttachmentCount == 0 && descriptor->depthStencilAttachment == nullptr,
+        "Render pass has no attachments.");
 
-    ComputePassEncoder* CommandEncoder::APIBeginComputePass(
-        const ComputePassDescriptor* descriptor) {
-        return BeginComputePass(descriptor).Detach();
+    return {};
+}
+
+MaybeError ValidateComputePassDescriptor(const DeviceBase* device,
+                                         const ComputePassDescriptor* descriptor) {
+    if (descriptor == nullptr) {
+        return {};
     }
 
-    Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(
-        const ComputePassDescriptor* descriptor) {
-        DeviceBase* device = GetDevice();
+    if (descriptor->timestampWriteCount > 0) {
+        DAWN_ASSERT(descriptor->timestampWrites != nullptr);
 
-        std::vector<TimestampWrite> timestampWritesAtBeginning;
-        std::vector<TimestampWrite> timestampWritesAtEnd;
-        bool success = mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
+        for (uint32_t i = 0; i < descriptor->timestampWriteCount; ++i) {
+            DAWN_ASSERT(descriptor->timestampWrites[i].querySet != nullptr);
+            DAWN_TRY_CONTEXT(ValidateTimestampQuery(device, descriptor->timestampWrites[i].querySet,
+                                                    descriptor->timestampWrites[i].queryIndex),
+                             "validating querySet and queryIndex of timestampWrites[%u].", i);
+            DAWN_TRY_CONTEXT(
+                ValidateComputePassTimestampLocation(descriptor->timestampWrites[i].location),
+                "validating location of timestampWrites[%u].", i);
+        }
+    }
 
-                BeginComputePassCmd* cmd =
-                    allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
+    return {};
+}
 
-                if (descriptor == nullptr) {
-                    return {};
-                }
+MaybeError ValidateQuerySetResolve(const QuerySetBase* querySet,
+                                   uint32_t firstQuery,
+                                   uint32_t queryCount,
+                                   const BufferBase* destination,
+                                   uint64_t destinationOffset) {
+    DAWN_INVALID_IF(firstQuery >= querySet->GetQueryCount(),
+                    "First query (%u) exceeds the number of queries (%u) in %s.", firstQuery,
+                    querySet->GetQueryCount(), querySet);
 
-                // Split the timestampWrites used in BeginComputePassCmd and EndComputePassCmd
-                for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
-                    QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
-                    uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
+    DAWN_INVALID_IF(
+        queryCount > querySet->GetQueryCount() - firstQuery,
+        "The query range (firstQuery: %u, queryCount: %u) exceeds the number of queries "
+        "(%u) in %s.",
+        firstQuery, queryCount, querySet->GetQueryCount(), querySet);
 
-                    switch (descriptor->timestampWrites[i].location) {
-                        case wgpu::ComputePassTimestampLocation::Beginning:
-                            timestampWritesAtBeginning.push_back({querySet, queryIndex});
-                            break;
-                        case wgpu::ComputePassTimestampLocation::End:
-                            timestampWritesAtEnd.push_back({querySet, queryIndex});
-                            break;
-                        default:
-                            break;
-                    }
+    DAWN_INVALID_IF(destinationOffset % 256 != 0,
+                    "The destination buffer %s offset (%u) is not a multiple of 256.", destination,
+                    destinationOffset);
 
-                    TrackQueryAvailability(querySet, queryIndex);
-                }
+    uint64_t bufferSize = destination->GetSize();
+    // The destination buffer must have enough storage, from destination offset, to contain
+    // the result of resolved queries
+    bool fitsInBuffer =
+        destinationOffset <= bufferSize &&
+        (static_cast<uint64_t>(queryCount) * sizeof(uint64_t) <= (bufferSize - destinationOffset));
+    DAWN_INVALID_IF(
+        !fitsInBuffer,
+        "The resolved %s data size (%u) would not fit in %s with size %u at the offset %u.",
+        querySet, static_cast<uint64_t>(queryCount) * sizeof(uint64_t), destination, bufferSize,
+        destinationOffset);
 
-                cmd->timestampWrites = std::move(timestampWritesAtBeginning);
+    return {};
+}
 
-                return {};
-            },
-            "encoding %s.BeginComputePass(%s).", this, descriptor);
+MaybeError EncodeTimestampsToNanosecondsConversion(CommandEncoder* encoder,
+                                                   QuerySetBase* querySet,
+                                                   uint32_t firstQuery,
+                                                   uint32_t queryCount,
+                                                   BufferBase* destination,
+                                                   uint64_t destinationOffset) {
+    DeviceBase* device = encoder->GetDevice();
 
-        if (success) {
-            const ComputePassDescriptor defaultDescriptor = {};
+    // The availability obtained from the query set is a reference to vector<bool>; convert
+    // bool to uint32_t because a user input in a pipeline must not contain a bool type in
+    // WGSL.
+    std::vector<uint32_t> availability{querySet->GetQueryAvailability().begin(),
+                                       querySet->GetQueryAvailability().end()};
+
+    // Timestamp availability storage buffer
+    BufferDescriptor availabilityDesc = {};
+    availabilityDesc.usage = wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst;
+    availabilityDesc.size = querySet->GetQueryCount() * sizeof(uint32_t);
+    Ref<BufferBase> availabilityBuffer;
+    DAWN_TRY_ASSIGN(availabilityBuffer, device->CreateBuffer(&availabilityDesc));
+
+    DAWN_TRY(device->GetQueue()->WriteBuffer(availabilityBuffer.Get(), 0, availability.data(),
+                                             availability.size() * sizeof(uint32_t)));
+
+    // Timestamp params uniform buffer
+    TimestampParams params(firstQuery, queryCount, static_cast<uint32_t>(destinationOffset),
+                           device->GetTimestampPeriodInNS());
+
+    BufferDescriptor parmsDesc = {};
+    parmsDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+    parmsDesc.size = sizeof(params);
+    Ref<BufferBase> paramsBuffer;
+    DAWN_TRY_ASSIGN(paramsBuffer, device->CreateBuffer(&parmsDesc));
+
+    DAWN_TRY(device->GetQueue()->WriteBuffer(paramsBuffer.Get(), 0, &params, sizeof(params)));
+
+    return EncodeConvertTimestampsToNanoseconds(encoder, destination, availabilityBuffer.Get(),
+                                                paramsBuffer.Get());
+}
+
+bool IsReadOnlyDepthStencilAttachment(
+    const RenderPassDepthStencilAttachment* depthStencilAttachment) {
+    DAWN_ASSERT(depthStencilAttachment != nullptr);
+    Aspect aspects = depthStencilAttachment->view->GetAspects();
+    DAWN_ASSERT(IsSubset(aspects, Aspect::Depth | Aspect::Stencil));
+
+    if ((aspects & Aspect::Depth) && !depthStencilAttachment->depthReadOnly) {
+        return false;
+    }
+    if (aspects & Aspect::Stencil && !depthStencilAttachment->stencilReadOnly) {
+        return false;
+    }
+    return true;
+}
+
+}  // namespace
+
+MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+                                            const CommandEncoderDescriptor* descriptor) {
+    DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+                                 wgpu::SType::DawnEncoderInternalUsageDescriptor));
+
+    const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+    DAWN_INVALID_IF(internalUsageDesc != nullptr &&
+                        !device->APIHasFeature(wgpu::FeatureName::DawnInternalUsages),
+                    "%s is not available.", wgpu::FeatureName::DawnInternalUsages);
+    return {};
+}
+
+// static
+Ref<CommandEncoder> CommandEncoder::Create(DeviceBase* device,
+                                           const CommandEncoderDescriptor* descriptor) {
+    return AcquireRef(new CommandEncoder(device, descriptor));
+}
+
+// static
+CommandEncoder* CommandEncoder::MakeError(DeviceBase* device) {
+    return new CommandEncoder(device, ObjectBase::kError);
+}
+
+CommandEncoder::CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor)
+    : ApiObjectBase(device, descriptor->label), mEncodingContext(device, this) {
+    TrackInDevice();
+
+    const DawnEncoderInternalUsageDescriptor* internalUsageDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+    if (internalUsageDesc != nullptr && internalUsageDesc->useInternalUsages) {
+        mUsageValidationMode = UsageValidationMode::Internal;
+    } else {
+        mUsageValidationMode = UsageValidationMode::Default;
+    }
+}
+
+CommandEncoder::CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag),
+      mEncodingContext(device, this),
+      mUsageValidationMode(UsageValidationMode::Default) {
+    mEncodingContext.HandleError(DAWN_FORMAT_VALIDATION_ERROR("%s is invalid.", this));
+}
+
+ObjectType CommandEncoder::GetType() const {
+    return ObjectType::CommandEncoder;
+}
+
+void CommandEncoder::DestroyImpl() {
+    mEncodingContext.Destroy();
+}
+
+CommandBufferResourceUsage CommandEncoder::AcquireResourceUsages() {
+    return CommandBufferResourceUsage{
+        mEncodingContext.AcquireRenderPassUsages(), mEncodingContext.AcquireComputePassUsages(),
+        std::move(mTopLevelBuffers), std::move(mTopLevelTextures), std::move(mUsedQuerySets)};
+}
+
+CommandIterator CommandEncoder::AcquireCommands() {
+    return mEncodingContext.AcquireCommands();
+}
+
+void CommandEncoder::TrackUsedQuerySet(QuerySetBase* querySet) {
+    mUsedQuerySets.insert(querySet);
+}
+
+void CommandEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+    DAWN_ASSERT(querySet != nullptr);
+
+    if (GetDevice()->IsValidationEnabled()) {
+        TrackUsedQuerySet(querySet);
+    }
+
+    // Set the query at queryIndex to available for resolving in query set.
+    querySet->SetQueryAvailability(queryIndex, true);
+}
+
+// Implementation of the API's command recording methods
+
+ComputePassEncoder* CommandEncoder::APIBeginComputePass(const ComputePassDescriptor* descriptor) {
+    return BeginComputePass(descriptor).Detach();
+}
+
+Ref<ComputePassEncoder> CommandEncoder::BeginComputePass(const ComputePassDescriptor* descriptor) {
+    DeviceBase* device = GetDevice();
+
+    std::vector<TimestampWrite> timestampWritesAtBeginning;
+    std::vector<TimestampWrite> timestampWritesAtEnd;
+    bool success = mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            DAWN_TRY(ValidateComputePassDescriptor(device, descriptor));
+
+            BeginComputePassCmd* cmd =
+                allocator->Allocate<BeginComputePassCmd>(Command::BeginComputePass);
+
             if (descriptor == nullptr) {
-                descriptor = &defaultDescriptor;
+                return {};
             }
 
-            Ref<ComputePassEncoder> passEncoder = ComputePassEncoder::Create(
-                device, descriptor, this, &mEncodingContext, std::move(timestampWritesAtEnd));
-            mEncodingContext.EnterPass(passEncoder.Get());
-            return passEncoder;
-        }
+            // Split the timestampWrites used in BeginComputePassCmd and EndComputePassCmd
+            for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
+                QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+                uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
 
-        return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
-    }
-
-    RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
-        return BeginRenderPass(descriptor).Detach();
-    }
-
-    Ref<RenderPassEncoder> CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
-        DeviceBase* device = GetDevice();
-
-        RenderPassResourceUsageTracker usageTracker;
-
-        uint32_t width = 0;
-        uint32_t height = 0;
-        bool depthReadOnly = false;
-        bool stencilReadOnly = false;
-        Ref<AttachmentState> attachmentState;
-        std::vector<TimestampWrite> timestampWritesAtBeginning;
-        std::vector<TimestampWrite> timestampWritesAtEnd;
-        bool success = mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                uint32_t sampleCount = 0;
-
-                DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height,
-                                                      &sampleCount, mUsageValidationMode));
-
-                ASSERT(width > 0 && height > 0 && sampleCount > 0);
-
-                mEncodingContext.WillBeginRenderPass();
-                BeginRenderPassCmd* cmd =
-                    allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
-
-                cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
-                attachmentState = cmd->attachmentState;
-
-                // Split the timestampWrites used in BeginRenderPassCmd and EndRenderPassCmd
-                for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
-                    QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
-                    uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
-
-                    switch (descriptor->timestampWrites[i].location) {
-                        case wgpu::RenderPassTimestampLocation::Beginning:
-                            timestampWritesAtBeginning.push_back({querySet, queryIndex});
-                            break;
-                        case wgpu::RenderPassTimestampLocation::End:
-                            timestampWritesAtEnd.push_back({querySet, queryIndex});
-                            break;
-                        default:
-                            break;
-                    }
-
-                    TrackQueryAvailability(querySet, queryIndex);
-                    // Track the query availability with true on render pass again for rewrite
-                    // validation and query reset on Vulkan
-                    usageTracker.TrackQueryAvailability(querySet, queryIndex);
-                }
-
-                for (ColorAttachmentIndex index :
-                     IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
-                    uint8_t i = static_cast<uint8_t>(index);
-                    TextureViewBase* view = descriptor->colorAttachments[i].view;
-                    TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
-
-                    cmd->colorAttachments[index].view = view;
-                    cmd->colorAttachments[index].resolveTarget = resolveTarget;
-                    cmd->colorAttachments[index].loadOp = descriptor->colorAttachments[i].loadOp;
-                    cmd->colorAttachments[index].storeOp = descriptor->colorAttachments[i].storeOp;
-
-                    cmd->colorAttachments[index].clearColor =
-                        HasDeprecatedColor(descriptor->colorAttachments[i])
-                            ? descriptor->colorAttachments[i].clearColor
-                            : descriptor->colorAttachments[i].clearValue;
-
-                    usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
-
-                    if (resolveTarget != nullptr) {
-                        usageTracker.TextureViewUsedAs(resolveTarget,
-                                                       wgpu::TextureUsage::RenderAttachment);
-                    }
-                }
-
-                if (cmd->attachmentState->HasDepthStencilAttachment()) {
-                    TextureViewBase* view = descriptor->depthStencilAttachment->view;
-
-                    cmd->depthStencilAttachment.view = view;
-
-                    if (!std::isnan(descriptor->depthStencilAttachment->clearDepth)) {
-                        // TODO(dawn:1269): Remove this branch after the deprecation period.
-                        cmd->depthStencilAttachment.clearDepth =
-                            descriptor->depthStencilAttachment->clearDepth;
-                    } else {
-                        cmd->depthStencilAttachment.clearDepth =
-                            descriptor->depthStencilAttachment->depthClearValue;
-                    }
-
-                    if (descriptor->depthStencilAttachment->stencilClearValue == 0 &&
-                        descriptor->depthStencilAttachment->clearStencil != 0) {
-                        // TODO(dawn:1269): Remove this branch after the deprecation period.
-                        cmd->depthStencilAttachment.clearStencil =
-                            descriptor->depthStencilAttachment->clearStencil;
-                    } else {
-                        cmd->depthStencilAttachment.clearStencil =
-                            descriptor->depthStencilAttachment->stencilClearValue;
-                    }
-
-                    cmd->depthStencilAttachment.depthReadOnly =
-                        descriptor->depthStencilAttachment->depthReadOnly;
-                    cmd->depthStencilAttachment.stencilReadOnly =
-                        descriptor->depthStencilAttachment->stencilReadOnly;
-
-                    if (descriptor->depthStencilAttachment->depthReadOnly ||
-                        !IsSubset(Aspect::Depth,
-                                  descriptor->depthStencilAttachment->view->GetAspects())) {
-                        cmd->depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Load;
-                        cmd->depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
-                    } else {
-                        cmd->depthStencilAttachment.depthLoadOp =
-                            descriptor->depthStencilAttachment->depthLoadOp;
-                        cmd->depthStencilAttachment.depthStoreOp =
-                            descriptor->depthStencilAttachment->depthStoreOp;
-                    }
-
-                    if (descriptor->depthStencilAttachment->stencilReadOnly ||
-                        !IsSubset(Aspect::Stencil,
-                                  descriptor->depthStencilAttachment->view->GetAspects())) {
-                        cmd->depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Load;
-                        cmd->depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
-                    } else {
-                        cmd->depthStencilAttachment.stencilLoadOp =
-                            descriptor->depthStencilAttachment->stencilLoadOp;
-                        cmd->depthStencilAttachment.stencilStoreOp =
-                            descriptor->depthStencilAttachment->stencilStoreOp;
-                    }
-
-                    if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
-                        usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
-                    } else {
-                        usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
-                    }
-
-                    depthReadOnly = descriptor->depthStencilAttachment->depthReadOnly;
-                    stencilReadOnly = descriptor->depthStencilAttachment->stencilReadOnly;
-                }
-
-                cmd->width = width;
-                cmd->height = height;
-
-                cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
-
-                cmd->timestampWrites = std::move(timestampWritesAtBeginning);
-
-                return {};
-            },
-            "encoding %s.BeginRenderPass(%s).", this, descriptor);
-
-        if (success) {
-            Ref<RenderPassEncoder> passEncoder = RenderPassEncoder::Create(
-                device, descriptor, this, &mEncodingContext, std::move(usageTracker),
-                std::move(attachmentState), std::move(timestampWritesAtEnd), width, height,
-                depthReadOnly, stencilReadOnly);
-            mEncodingContext.EnterPass(passEncoder.Get());
-            return passEncoder;
-        }
-
-        return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
-    }
-
-    void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
-                                               uint64_t sourceOffset,
-                                               BufferBase* destination,
-                                               uint64_t destinationOffset,
-                                               uint64_t size) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(source));
-                    DAWN_TRY(GetDevice()->ValidateObject(destination));
-
-                    DAWN_INVALID_IF(source == destination,
-                                    "Source and destination are the same buffer (%s).", source);
-
-                    DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
-                                     "validating source %s copy size.", source);
-                    DAWN_TRY_CONTEXT(
-                        ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
-                        "validating destination %s copy size.", destination);
-                    DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
-
-                    DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
-                                     "validating source %s usage.", source);
-                    DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
-                                     "validating destination %s usage.", destination);
-
-                    mTopLevelBuffers.insert(source);
-                    mTopLevelBuffers.insert(destination);
-                }
-
-                CopyBufferToBufferCmd* copy =
-                    allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
-                copy->source = source;
-                copy->sourceOffset = sourceOffset;
-                copy->destination = destination;
-                copy->destinationOffset = destinationOffset;
-                copy->size = size;
-
-                return {};
-            },
-            "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
-            destination, destinationOffset, size);
-    }
-
-    void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
-                                                const ImageCopyTexture* destination,
-                                                const Extent3D* copySize) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
-                    DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
-                                     "validating source %s usage.", source->buffer);
-
-                    DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
-                    DAWN_TRY_CONTEXT(
-                        ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
-                                         mUsageValidationMode),
-                        "validating destination %s usage.", destination->texture);
-                    DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
-
-                    DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
-                    // We validate texture copy range before validating linear texture data,
-                    // because in the latter we divide copyExtent.width by blockWidth and
-                    // copyExtent.height by blockHeight while the divisibility conditions are
-                    // checked in validating texture copy range.
-                    DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
-                }
-                const TexelBlockInfo& blockInfo =
-                    destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(ValidateLinearTextureCopyOffset(
-                        source->layout, blockInfo,
-                        destination->texture->GetFormat().HasDepthOrStencil()));
-                    DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
-                                                       blockInfo, *copySize));
-
-                    mTopLevelBuffers.insert(source->buffer);
-                    mTopLevelTextures.insert(destination->texture);
-                }
-
-                TextureDataLayout srcLayout = source->layout;
-                ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
-
-                CopyBufferToTextureCmd* copy =
-                    allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
-                copy->source.buffer = source->buffer;
-                copy->source.offset = srcLayout.offset;
-                copy->source.bytesPerRow = srcLayout.bytesPerRow;
-                copy->source.rowsPerImage = srcLayout.rowsPerImage;
-                copy->destination.texture = destination->texture;
-                copy->destination.origin = destination->origin;
-                copy->destination.mipLevel = destination->mipLevel;
-                copy->destination.aspect =
-                    ConvertAspect(destination->texture->GetFormat(), destination->aspect);
-                copy->copySize = *copySize;
-
-                return {};
-            },
-            "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer,
-            destination->texture, copySize);
-    }
-
-    void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
-                                                const ImageCopyBuffer* destination,
-                                                const Extent3D* copySize) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
-                    DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
-                                                      mUsageValidationMode),
-                                     "validating source %s usage.", source->texture);
-                    DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
-                    DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
-
-                    DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
-                    DAWN_TRY_CONTEXT(
-                        ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
-                        "validating destination %s usage.", destination->buffer);
-
-                    // We validate texture copy range before validating linear texture data,
-                    // because in the latter we divide copyExtent.width by blockWidth and
-                    // copyExtent.height by blockHeight while the divisibility conditions are
-                    // checked in validating texture copy range.
-                    DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
-                }
-                const TexelBlockInfo& blockInfo =
-                    source->texture->GetFormat().GetAspectInfo(source->aspect).block;
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(ValidateLinearTextureCopyOffset(
-                        destination->layout, blockInfo,
-                        source->texture->GetFormat().HasDepthOrStencil()));
-                    DAWN_TRY(ValidateLinearTextureData(
-                        destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
-
-                    mTopLevelTextures.insert(source->texture);
-                    mTopLevelBuffers.insert(destination->buffer);
-                }
-
-                TextureDataLayout dstLayout = destination->layout;
-                ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
-
-                CopyTextureToBufferCmd* copy =
-                    allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
-                copy->source.texture = source->texture;
-                copy->source.origin = source->origin;
-                copy->source.mipLevel = source->mipLevel;
-                copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
-                copy->destination.buffer = destination->buffer;
-                copy->destination.offset = dstLayout.offset;
-                copy->destination.bytesPerRow = dstLayout.bytesPerRow;
-                copy->destination.rowsPerImage = dstLayout.rowsPerImage;
-                copy->copySize = *copySize;
-
-                return {};
-            },
-            "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture,
-            destination->buffer, copySize);
-    }
-
-    void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
-                                                 const ImageCopyTexture* destination,
-                                                 const Extent3D* copySize) {
-        APICopyTextureToTextureHelper<false>(source, destination, copySize);
-    }
-
-    void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
-                                                         const ImageCopyTexture* destination,
-                                                         const Extent3D* copySize) {
-        APICopyTextureToTextureHelper<true>(source, destination, copySize);
-    }
-
-    template <bool Internal>
-    void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
-                                                       const ImageCopyTexture* destination,
-                                                       const Extent3D* copySize) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(source->texture));
-                    DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
-
-                    DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
-                                     "validating source %s.", source->texture);
-                    DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
-                                     "validating destination %s.", destination->texture);
-
-                    DAWN_TRY(
-                        ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
-
-                    DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
-                                     "validating source %s copy range.", source->texture);
-                    DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
-                                     "validating source %s copy range.", destination->texture);
-
-                    // For internal usages (CopyToCopyInternal) we don't care if the user has added
-                    // CopySrc as a usage for this texture, but we will always add it internally.
-                    if (Internal) {
-                        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
-                                                  UsageValidationMode::Internal));
-                        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
-                                                  UsageValidationMode::Internal));
-                    } else {
-                        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
-                                                  mUsageValidationMode));
-                        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
-                                                  mUsageValidationMode));
-                    }
-
-                    mTopLevelTextures.insert(source->texture);
-                    mTopLevelTextures.insert(destination->texture);
-                }
-
-                CopyTextureToTextureCmd* copy =
-                    allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
-                copy->source.texture = source->texture;
-                copy->source.origin = source->origin;
-                copy->source.mipLevel = source->mipLevel;
-                copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
-                copy->destination.texture = destination->texture;
-                copy->destination.origin = destination->origin;
-                copy->destination.mipLevel = destination->mipLevel;
-                copy->destination.aspect =
-                    ConvertAspect(destination->texture->GetFormat(), destination->aspect);
-                copy->copySize = *copySize;
-
-                return {};
-            },
-            "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
-            destination->texture, copySize);
-    }
-
-    void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(buffer));
-
-                    uint64_t bufferSize = buffer->GetSize();
-                    DAWN_INVALID_IF(offset > bufferSize,
-                                    "Buffer offset (%u) is larger than the size (%u) of %s.",
-                                    offset, bufferSize, buffer);
-
-                    uint64_t remainingSize = bufferSize - offset;
-                    if (size == wgpu::kWholeSize) {
-                        size = remainingSize;
-                    } else {
-                        DAWN_INVALID_IF(size > remainingSize,
-                                        "Buffer range (offset: %u, size: %u) doesn't fit in "
-                                        "the size (%u) of %s.",
-                                        offset, size, bufferSize, buffer);
-                    }
-
-                    DAWN_TRY_CONTEXT(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst),
-                                     "validating buffer %s usage.", buffer);
-
-                    // Size must be a multiple of 4 bytes on macOS.
-                    DAWN_INVALID_IF(size % 4 != 0, "Fill size (%u) is not a multiple of 4 bytes.",
-                                    size);
-
-                    // Offset must be multiples of 4 bytes on macOS.
-                    DAWN_INVALID_IF(offset % 4 != 0, "Offset (%u) is not a multiple of 4 bytes,",
-                                    offset);
-
-                    mTopLevelBuffers.insert(buffer);
-                } else {
-                    if (size == wgpu::kWholeSize) {
-                        DAWN_ASSERT(buffer->GetSize() >= offset);
-                        size = buffer->GetSize() - offset;
-                    }
-                }
-
-                ClearBufferCmd* cmd = allocator->Allocate<ClearBufferCmd>(Command::ClearBuffer);
-                cmd->buffer = buffer;
-                cmd->offset = offset;
-                cmd->size = size;
-
-                return {};
-            },
-            "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
-    }
-
-    void CommandEncoder::APIInjectValidationError(const char* message) {
-        if (mEncodingContext.CheckCurrentEncoder(this)) {
-            mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
-        }
-    }
-
-    void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                InsertDebugMarkerCmd* cmd =
-                    allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
-                cmd->length = strlen(groupLabel);
-
-                char* label = allocator->AllocateData<char>(cmd->length + 1);
-                memcpy(label, groupLabel, cmd->length + 1);
-
-                return {};
-            },
-            "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
-    }
-
-    void CommandEncoder::APIPopDebugGroup() {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_INVALID_IF(
-                        mDebugGroupStackSize == 0,
-                        "PopDebugGroup called when no debug groups are currently pushed.");
-                }
-                allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
-                mDebugGroupStackSize--;
-                mEncodingContext.PopDebugGroupLabel();
-
-                return {};
-            },
-            "encoding %s.PopDebugGroup().", this);
-    }
-
-    void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                PushDebugGroupCmd* cmd =
-                    allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
-                cmd->length = strlen(groupLabel);
-
-                char* label = allocator->AllocateData<char>(cmd->length + 1);
-                memcpy(label, groupLabel, cmd->length + 1);
-
-                mDebugGroupStackSize++;
-                mEncodingContext.PushDebugGroupLabel(groupLabel);
-
-                return {};
-            },
-            "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
-    }
-
-    void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
-                                            uint32_t firstQuery,
-                                            uint32_t queryCount,
-                                            BufferBase* destination,
-                                            uint64_t destinationOffset) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(querySet));
-                    DAWN_TRY(GetDevice()->ValidateObject(destination));
-
-                    DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
-                                                     destinationOffset));
-
-                    DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
-
-                    TrackUsedQuerySet(querySet);
-                    mTopLevelBuffers.insert(destination);
-                }
-
-                ResolveQuerySetCmd* cmd =
-                    allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
-                cmd->querySet = querySet;
-                cmd->firstQuery = firstQuery;
-                cmd->queryCount = queryCount;
-                cmd->destination = destination;
-                cmd->destinationOffset = destinationOffset;
-
-                // Encode internal compute pipeline for timestamp query
-                if (querySet->GetQueryType() == wgpu::QueryType::Timestamp &&
-                    !GetDevice()->IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
-                    DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
-                        this, querySet, firstQuery, queryCount, destination, destinationOffset));
-                }
-
-                return {};
-            },
-            "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery,
-            queryCount, destination, destinationOffset);
-    }
-
-    void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
-                                        uint64_t bufferOffset,
-                                        const uint8_t* data,
-                                        uint64_t size) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
-                }
-
-                WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
-                cmd->buffer = buffer;
-                cmd->offset = bufferOffset;
-                cmd->size = size;
-
-                uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
-                memcpy(inlinedData, data, size);
-
-                mTopLevelBuffers.insert(buffer);
-
-                return {};
-            },
-            "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
-    }
-
-    void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
-        mEncodingContext.TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (GetDevice()->IsValidationEnabled()) {
-                    DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+                switch (descriptor->timestampWrites[i].location) {
+                    case wgpu::ComputePassTimestampLocation::Beginning:
+                        timestampWritesAtBeginning.push_back({querySet, queryIndex});
+                        break;
+                    case wgpu::ComputePassTimestampLocation::End:
+                        timestampWritesAtEnd.push_back({querySet, queryIndex});
+                        break;
+                    default:
+                        break;
                 }
 
                 TrackQueryAvailability(querySet, queryIndex);
+            }
 
-                WriteTimestampCmd* cmd =
-                    allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
-                cmd->querySet = querySet;
-                cmd->queryIndex = queryIndex;
+            cmd->timestampWrites = std::move(timestampWritesAtBeginning);
 
-                return {};
-            },
-            "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
-    }
+            return {};
+        },
+        "encoding %s.BeginComputePass(%s).", this, descriptor);
 
-    CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
-        Ref<CommandBufferBase> commandBuffer;
-        if (GetDevice()->ConsumedError(Finish(descriptor), &commandBuffer)) {
-            return CommandBufferBase::MakeError(GetDevice());
-        }
-        ASSERT(!IsError());
-        return commandBuffer.Detach();
-    }
-
-    ResultOrError<Ref<CommandBufferBase>> CommandEncoder::Finish(
-        const CommandBufferDescriptor* descriptor) {
-        DeviceBase* device = GetDevice();
-
-        // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
-        // state of the encoding context. The internal state is set to finished, and subsequent
-        // calls to encode commands will generate errors.
-        DAWN_TRY(mEncodingContext.Finish());
-        DAWN_TRY(device->ValidateIsAlive());
-
-        if (device->IsValidationEnabled()) {
-            DAWN_TRY(ValidateFinish());
-        }
-
-        const CommandBufferDescriptor defaultDescriptor = {};
+    if (success) {
+        const ComputePassDescriptor defaultDescriptor = {};
         if (descriptor == nullptr) {
             descriptor = &defaultDescriptor;
         }
 
-        return device->CreateCommandBuffer(this, descriptor);
+        Ref<ComputePassEncoder> passEncoder = ComputePassEncoder::Create(
+            device, descriptor, this, &mEncodingContext, std::move(timestampWritesAtEnd));
+        mEncodingContext.EnterPass(passEncoder.Get());
+        return passEncoder;
     }
 
-    // Implementation of the command buffer validation that can be precomputed before submit
-    MaybeError CommandEncoder::ValidateFinish() const {
-        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
-        DAWN_TRY(GetDevice()->ValidateObject(this));
+    return ComputePassEncoder::MakeError(device, this, &mEncodingContext);
+}
 
-        for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
-            DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
-                             "validating render pass usage.");
-        }
+RenderPassEncoder* CommandEncoder::APIBeginRenderPass(const RenderPassDescriptor* descriptor) {
+    return BeginRenderPass(descriptor).Detach();
+}
 
-        for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
-            for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
-                DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
-                                 "validating compute pass usage.");
+Ref<RenderPassEncoder> CommandEncoder::BeginRenderPass(const RenderPassDescriptor* descriptor) {
+    DeviceBase* device = GetDevice();
+
+    RenderPassResourceUsageTracker usageTracker;
+
+    uint32_t width = 0;
+    uint32_t height = 0;
+    bool depthReadOnly = false;
+    bool stencilReadOnly = false;
+    Ref<AttachmentState> attachmentState;
+    std::vector<TimestampWrite> timestampWritesAtBeginning;
+    std::vector<TimestampWrite> timestampWritesAtEnd;
+    bool success = mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            uint32_t sampleCount = 0;
+
+            DAWN_TRY(ValidateRenderPassDescriptor(device, descriptor, &width, &height, &sampleCount,
+                                                  mUsageValidationMode));
+
+            ASSERT(width > 0 && height > 0 && sampleCount > 0);
+
+            mEncodingContext.WillBeginRenderPass();
+            BeginRenderPassCmd* cmd =
+                allocator->Allocate<BeginRenderPassCmd>(Command::BeginRenderPass);
+
+            cmd->attachmentState = device->GetOrCreateAttachmentState(descriptor);
+            attachmentState = cmd->attachmentState;
+
+            // Split the timestampWrites used in BeginRenderPassCmd and EndRenderPassCmd
+            for (uint32_t i = 0; i < descriptor->timestampWriteCount; i++) {
+                QuerySetBase* querySet = descriptor->timestampWrites[i].querySet;
+                uint32_t queryIndex = descriptor->timestampWrites[i].queryIndex;
+
+                switch (descriptor->timestampWrites[i].location) {
+                    case wgpu::RenderPassTimestampLocation::Beginning:
+                        timestampWritesAtBeginning.push_back({querySet, queryIndex});
+                        break;
+                    case wgpu::RenderPassTimestampLocation::End:
+                        timestampWritesAtEnd.push_back({querySet, queryIndex});
+                        break;
+                    default:
+                        break;
+                }
+
+                TrackQueryAvailability(querySet, queryIndex);
+                // Also mark the query available on the render pass's own tracker; this
+                // is needed for rewrite validation and for query resets on Vulkan.
+                usageTracker.TrackQueryAvailability(querySet, queryIndex);
             }
-        }
 
-        DAWN_INVALID_IF(
-            mDebugGroupStackSize != 0,
-            "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
-            "calling Finish.",
-            mDebugGroupStackSize);
+            for (ColorAttachmentIndex index :
+                 IterateBitSet(cmd->attachmentState->GetColorAttachmentsMask())) {
+                uint8_t i = static_cast<uint8_t>(index);
+                TextureViewBase* view = descriptor->colorAttachments[i].view;
+                TextureViewBase* resolveTarget = descriptor->colorAttachments[i].resolveTarget;
 
-        return {};
+                cmd->colorAttachments[index].view = view;
+                cmd->colorAttachments[index].resolveTarget = resolveTarget;
+                cmd->colorAttachments[index].loadOp = descriptor->colorAttachments[i].loadOp;
+                cmd->colorAttachments[index].storeOp = descriptor->colorAttachments[i].storeOp;
+
+                cmd->colorAttachments[index].clearColor =
+                    HasDeprecatedColor(descriptor->colorAttachments[i])
+                        ? descriptor->colorAttachments[i].clearColor
+                        : descriptor->colorAttachments[i].clearValue;
+
+                usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+
+                if (resolveTarget != nullptr) {
+                    usageTracker.TextureViewUsedAs(resolveTarget,
+                                                   wgpu::TextureUsage::RenderAttachment);
+                }
+            }
+
+            if (cmd->attachmentState->HasDepthStencilAttachment()) {
+                TextureViewBase* view = descriptor->depthStencilAttachment->view;
+
+                cmd->depthStencilAttachment.view = view;
+
+                if (!std::isnan(descriptor->depthStencilAttachment->clearDepth)) {
+                    // TODO(dawn:1269): Remove this branch after the deprecation period.
+                    cmd->depthStencilAttachment.clearDepth =
+                        descriptor->depthStencilAttachment->clearDepth;
+                } else {
+                    cmd->depthStencilAttachment.clearDepth =
+                        descriptor->depthStencilAttachment->depthClearValue;
+                }
+
+                if (descriptor->depthStencilAttachment->stencilClearValue == 0 &&
+                    descriptor->depthStencilAttachment->clearStencil != 0) {
+                    // TODO(dawn:1269): Remove this branch after the deprecation period.
+                    cmd->depthStencilAttachment.clearStencil =
+                        descriptor->depthStencilAttachment->clearStencil;
+                } else {
+                    cmd->depthStencilAttachment.clearStencil =
+                        descriptor->depthStencilAttachment->stencilClearValue;
+                }
+
+                cmd->depthStencilAttachment.depthReadOnly =
+                    descriptor->depthStencilAttachment->depthReadOnly;
+                cmd->depthStencilAttachment.stencilReadOnly =
+                    descriptor->depthStencilAttachment->stencilReadOnly;
+
+                if (descriptor->depthStencilAttachment->depthReadOnly ||
+                    !IsSubset(Aspect::Depth,
+                              descriptor->depthStencilAttachment->view->GetAspects())) {
+                    cmd->depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Load;
+                    cmd->depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
+                } else {
+                    cmd->depthStencilAttachment.depthLoadOp =
+                        descriptor->depthStencilAttachment->depthLoadOp;
+                    cmd->depthStencilAttachment.depthStoreOp =
+                        descriptor->depthStencilAttachment->depthStoreOp;
+                }
+
+                if (descriptor->depthStencilAttachment->stencilReadOnly ||
+                    !IsSubset(Aspect::Stencil,
+                              descriptor->depthStencilAttachment->view->GetAspects())) {
+                    cmd->depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Load;
+                    cmd->depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
+                } else {
+                    cmd->depthStencilAttachment.stencilLoadOp =
+                        descriptor->depthStencilAttachment->stencilLoadOp;
+                    cmd->depthStencilAttachment.stencilStoreOp =
+                        descriptor->depthStencilAttachment->stencilStoreOp;
+                }
+
+                if (IsReadOnlyDepthStencilAttachment(descriptor->depthStencilAttachment)) {
+                    usageTracker.TextureViewUsedAs(view, kReadOnlyRenderAttachment);
+                } else {
+                    usageTracker.TextureViewUsedAs(view, wgpu::TextureUsage::RenderAttachment);
+                }
+
+                depthReadOnly = descriptor->depthStencilAttachment->depthReadOnly;
+                stencilReadOnly = descriptor->depthStencilAttachment->stencilReadOnly;
+            }
+
+            cmd->width = width;
+            cmd->height = height;
+
+            cmd->occlusionQuerySet = descriptor->occlusionQuerySet;
+
+            cmd->timestampWrites = std::move(timestampWritesAtBeginning);
+
+            return {};
+        },
+        "encoding %s.BeginRenderPass(%s).", this, descriptor);
+
+    if (success) {
+        Ref<RenderPassEncoder> passEncoder = RenderPassEncoder::Create(
+            device, descriptor, this, &mEncodingContext, std::move(usageTracker),
+            std::move(attachmentState), std::move(timestampWritesAtEnd), width, height,
+            depthReadOnly, stencilReadOnly);
+        mEncodingContext.EnterPass(passEncoder.Get());
+        return passEncoder;
     }
 
+    return RenderPassEncoder::MakeError(device, this, &mEncodingContext);
+}
+
+void CommandEncoder::APICopyBufferToBuffer(BufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(source));
+                DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+                DAWN_INVALID_IF(source == destination,
+                                "Source and destination are the same buffer (%s).", source);
+
+                DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(source, sourceOffset, size),
+                                 "validating source %s copy size.", source);
+                DAWN_TRY_CONTEXT(ValidateCopySizeFitsInBuffer(destination, destinationOffset, size),
+                                 "validating destination %s copy size.", destination);
+                DAWN_TRY(ValidateB2BCopyAlignment(size, sourceOffset, destinationOffset));
+
+                DAWN_TRY_CONTEXT(ValidateCanUseAs(source, wgpu::BufferUsage::CopySrc),
+                                 "validating source %s usage.", source);
+                DAWN_TRY_CONTEXT(ValidateCanUseAs(destination, wgpu::BufferUsage::CopyDst),
+                                 "validating destination %s usage.", destination);
+
+                mTopLevelBuffers.insert(source);
+                mTopLevelBuffers.insert(destination);
+            }
+
+            CopyBufferToBufferCmd* copy =
+                allocator->Allocate<CopyBufferToBufferCmd>(Command::CopyBufferToBuffer);
+            copy->source = source;
+            copy->sourceOffset = sourceOffset;
+            copy->destination = destination;
+            copy->destinationOffset = destinationOffset;
+            copy->size = size;
+
+            return {};
+        },
+        "encoding %s.CopyBufferToBuffer(%s, %u, %s, %u, %u).", this, source, sourceOffset,
+        destination, destinationOffset, size);
+}
+
+void CommandEncoder::APICopyBufferToTexture(const ImageCopyBuffer* source,
+                                            const ImageCopyTexture* destination,
+                                            const Extent3D* copySize) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *source));
+                DAWN_TRY_CONTEXT(ValidateCanUseAs(source->buffer, wgpu::BufferUsage::CopySrc),
+                                 "validating source %s usage.", source->buffer);
+
+                DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *copySize));
+                DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                                                  mUsageValidationMode),
+                                 "validating destination %s usage.", destination->texture);
+                DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(destination->texture));
+
+                DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+                // We validate texture copy range before validating linear texture data,
+                // because in the latter we divide copyExtent.width by blockWidth and
+                // copyExtent.height by blockHeight while the divisibility conditions are
+                // checked in validating texture copy range.
+                DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *copySize));
+            }
+            const TexelBlockInfo& blockInfo =
+                destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(ValidateLinearTextureCopyOffset(
+                    source->layout, blockInfo,
+                    destination->texture->GetFormat().HasDepthOrStencil()));
+                DAWN_TRY(ValidateLinearTextureData(source->layout, source->buffer->GetSize(),
+                                                   blockInfo, *copySize));
+
+                mTopLevelBuffers.insert(source->buffer);
+                mTopLevelTextures.insert(destination->texture);
+            }
+
+            TextureDataLayout srcLayout = source->layout;
+            ApplyDefaultTextureDataLayoutOptions(&srcLayout, blockInfo, *copySize);
+
+            CopyBufferToTextureCmd* copy =
+                allocator->Allocate<CopyBufferToTextureCmd>(Command::CopyBufferToTexture);
+            copy->source.buffer = source->buffer;
+            copy->source.offset = srcLayout.offset;
+            copy->source.bytesPerRow = srcLayout.bytesPerRow;
+            copy->source.rowsPerImage = srcLayout.rowsPerImage;
+            copy->destination.texture = destination->texture;
+            copy->destination.origin = destination->origin;
+            copy->destination.mipLevel = destination->mipLevel;
+            copy->destination.aspect =
+                ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+            copy->copySize = *copySize;
+
+            return {};
+        },
+        "encoding %s.CopyBufferToTexture(%s, %s, %s).", this, source->buffer, destination->texture,
+        copySize);
+}
+
+void CommandEncoder::APICopyTextureToBuffer(const ImageCopyTexture* source,
+                                            const ImageCopyBuffer* destination,
+                                            const Extent3D* copySize) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *source, *copySize));
+                DAWN_TRY_CONTEXT(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                                                  mUsageValidationMode),
+                                 "validating source %s usage.", source->texture);
+                DAWN_TRY(ValidateTextureSampleCountInBufferCopyCommands(source->texture));
+                DAWN_TRY(ValidateTextureDepthStencilToBufferCopyRestrictions(*source));
+
+                DAWN_TRY(ValidateImageCopyBuffer(GetDevice(), *destination));
+                DAWN_TRY_CONTEXT(ValidateCanUseAs(destination->buffer, wgpu::BufferUsage::CopyDst),
+                                 "validating destination %s usage.", destination->buffer);
+
+                // We validate texture copy range before validating linear texture data,
+                // because in the latter we divide copyExtent.width by blockWidth and
+                // copyExtent.height by blockHeight while the divisibility conditions are
+                // checked in validating texture copy range.
+                DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *source, *copySize));
+            }
+            const TexelBlockInfo& blockInfo =
+                source->texture->GetFormat().GetAspectInfo(source->aspect).block;
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(ValidateLinearTextureCopyOffset(
+                    destination->layout, blockInfo,
+                    source->texture->GetFormat().HasDepthOrStencil()));
+                DAWN_TRY(ValidateLinearTextureData(
+                    destination->layout, destination->buffer->GetSize(), blockInfo, *copySize));
+
+                mTopLevelTextures.insert(source->texture);
+                mTopLevelBuffers.insert(destination->buffer);
+            }
+
+            TextureDataLayout dstLayout = destination->layout;
+            ApplyDefaultTextureDataLayoutOptions(&dstLayout, blockInfo, *copySize);
+
+            CopyTextureToBufferCmd* copy =
+                allocator->Allocate<CopyTextureToBufferCmd>(Command::CopyTextureToBuffer);
+            copy->source.texture = source->texture;
+            copy->source.origin = source->origin;
+            copy->source.mipLevel = source->mipLevel;
+            copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+            copy->destination.buffer = destination->buffer;
+            copy->destination.offset = dstLayout.offset;
+            copy->destination.bytesPerRow = dstLayout.bytesPerRow;
+            copy->destination.rowsPerImage = dstLayout.rowsPerImage;
+            copy->copySize = *copySize;
+
+            return {};
+        },
+        "encoding %s.CopyTextureToBuffer(%s, %s, %s).", this, source->texture, destination->buffer,
+        copySize);
+}
+
+void CommandEncoder::APICopyTextureToTexture(const ImageCopyTexture* source,
+                                             const ImageCopyTexture* destination,
+                                             const Extent3D* copySize) {
+    APICopyTextureToTextureHelper<false>(source, destination, copySize);
+}
+
+void CommandEncoder::APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+                                                     const ImageCopyTexture* destination,
+                                                     const Extent3D* copySize) {
+    APICopyTextureToTextureHelper<true>(source, destination, copySize);
+}
+
+template <bool Internal>
+void CommandEncoder::APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+                                                   const ImageCopyTexture* destination,
+                                                   const Extent3D* copySize) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(source->texture));
+                DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+                DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *source, *copySize),
+                                 "validating source %s.", source->texture);
+                DAWN_TRY_CONTEXT(ValidateImageCopyTexture(GetDevice(), *destination, *copySize),
+                                 "validating destination %s.", destination->texture);
+
+                DAWN_TRY(
+                    ValidateTextureToTextureCopyRestrictions(*source, *destination, *copySize));
+
+                DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *source, *copySize),
+                                 "validating source %s copy range.", source->texture);
+                DAWN_TRY_CONTEXT(ValidateTextureCopyRange(GetDevice(), *destination, *copySize),
+                                 "validating source %s copy range.", destination->texture);
+
+                // For internal usages (CopyToCopyInternal) we don't care if the user has added
+                // CopySrc as a usage for this texture, but we will always add it internally.
+                if (Internal) {
+                    DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                                              UsageValidationMode::Internal));
+                    DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                                              UsageValidationMode::Internal));
+                } else {
+                    DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                                              mUsageValidationMode));
+                    DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                                              mUsageValidationMode));
+                }
+
+                mTopLevelTextures.insert(source->texture);
+                mTopLevelTextures.insert(destination->texture);
+            }
+
+            CopyTextureToTextureCmd* copy =
+                allocator->Allocate<CopyTextureToTextureCmd>(Command::CopyTextureToTexture);
+            copy->source.texture = source->texture;
+            copy->source.origin = source->origin;
+            copy->source.mipLevel = source->mipLevel;
+            copy->source.aspect = ConvertAspect(source->texture->GetFormat(), source->aspect);
+            copy->destination.texture = destination->texture;
+            copy->destination.origin = destination->origin;
+            copy->destination.mipLevel = destination->mipLevel;
+            copy->destination.aspect =
+                ConvertAspect(destination->texture->GetFormat(), destination->aspect);
+            copy->copySize = *copySize;
+
+            return {};
+        },
+        "encoding %s.CopyTextureToTexture(%s, %s, %s).", this, source->texture,
+        destination->texture, copySize);
+}
+
+void CommandEncoder::APIClearBuffer(BufferBase* buffer, uint64_t offset, uint64_t size) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(buffer));
+
+                uint64_t bufferSize = buffer->GetSize();
+                DAWN_INVALID_IF(offset > bufferSize,
+                                "Buffer offset (%u) is larger than the size (%u) of %s.", offset,
+                                bufferSize, buffer);
+
+                uint64_t remainingSize = bufferSize - offset;
+                if (size == wgpu::kWholeSize) {
+                    size = remainingSize;
+                } else {
+                    DAWN_INVALID_IF(size > remainingSize,
+                                    "Buffer range (offset: %u, size: %u) doesn't fit in "
+                                    "the size (%u) of %s.",
+                                    offset, size, bufferSize, buffer);
+                }
+
+                DAWN_TRY_CONTEXT(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst),
+                                 "validating buffer %s usage.", buffer);
+
+                // Size must be a multiple of 4 bytes on macOS.
+                DAWN_INVALID_IF(size % 4 != 0, "Fill size (%u) is not a multiple of 4 bytes.",
+                                size);
+
+                // Offset must be multiples of 4 bytes on macOS.
+                DAWN_INVALID_IF(offset % 4 != 0, "Offset (%u) is not a multiple of 4 bytes,",
+                                offset);
+
+                mTopLevelBuffers.insert(buffer);
+            } else {
+                if (size == wgpu::kWholeSize) {
+                    DAWN_ASSERT(buffer->GetSize() >= offset);
+                    size = buffer->GetSize() - offset;
+                }
+            }
+
+            ClearBufferCmd* cmd = allocator->Allocate<ClearBufferCmd>(Command::ClearBuffer);
+            cmd->buffer = buffer;
+            cmd->offset = offset;
+            cmd->size = size;
+
+            return {};
+        },
+        "encoding %s.ClearBuffer(%s, %u, %u).", this, buffer, offset, size);
+}
+
+void CommandEncoder::APIInjectValidationError(const char* message) {
+    if (mEncodingContext.CheckCurrentEncoder(this)) {
+        mEncodingContext.HandleError(DAWN_VALIDATION_ERROR(message));
+    }
+}
+
+void CommandEncoder::APIInsertDebugMarker(const char* groupLabel) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            InsertDebugMarkerCmd* cmd =
+                allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+            cmd->length = strlen(groupLabel);
+
+            char* label = allocator->AllocateData<char>(cmd->length + 1);
+            memcpy(label, groupLabel, cmd->length + 1);
+
+            return {};
+        },
+        "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+}
+
+void CommandEncoder::APIPopDebugGroup() {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_INVALID_IF(mDebugGroupStackSize == 0,
+                                "PopDebugGroup called when no debug groups are currently pushed.");
+            }
+            allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+            mDebugGroupStackSize--;
+            mEncodingContext.PopDebugGroupLabel();
+
+            return {};
+        },
+        "encoding %s.PopDebugGroup().", this);
+}
+
+void CommandEncoder::APIPushDebugGroup(const char* groupLabel) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            PushDebugGroupCmd* cmd =
+                allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+            cmd->length = strlen(groupLabel);
+
+            char* label = allocator->AllocateData<char>(cmd->length + 1);
+            memcpy(label, groupLabel, cmd->length + 1);
+
+            mDebugGroupStackSize++;
+            mEncodingContext.PushDebugGroupLabel(groupLabel);
+
+            return {};
+        },
+        "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+}
+
+void CommandEncoder::APIResolveQuerySet(QuerySetBase* querySet,
+                                        uint32_t firstQuery,
+                                        uint32_t queryCount,
+                                        BufferBase* destination,
+                                        uint64_t destinationOffset) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(querySet));
+                DAWN_TRY(GetDevice()->ValidateObject(destination));
+
+                DAWN_TRY(ValidateQuerySetResolve(querySet, firstQuery, queryCount, destination,
+                                                 destinationOffset));
+
+                DAWN_TRY(ValidateCanUseAs(destination, wgpu::BufferUsage::QueryResolve));
+
+                TrackUsedQuerySet(querySet);
+                mTopLevelBuffers.insert(destination);
+            }
+
+            ResolveQuerySetCmd* cmd =
+                allocator->Allocate<ResolveQuerySetCmd>(Command::ResolveQuerySet);
+            cmd->querySet = querySet;
+            cmd->firstQuery = firstQuery;
+            cmd->queryCount = queryCount;
+            cmd->destination = destination;
+            cmd->destinationOffset = destinationOffset;
+
+            // Encode internal compute pipeline for timestamp query
+            if (querySet->GetQueryType() == wgpu::QueryType::Timestamp &&
+                !GetDevice()->IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+                DAWN_TRY(EncodeTimestampsToNanosecondsConversion(
+                    this, querySet, firstQuery, queryCount, destination, destinationOffset));
+            }
+
+            return {};
+        },
+        "encoding %s.ResolveQuerySet(%s, %u, %u, %s, %u).", this, querySet, firstQuery, queryCount,
+        destination, destinationOffset);
+}
+
+void CommandEncoder::APIWriteBuffer(BufferBase* buffer,
+                                    uint64_t bufferOffset,
+                                    const uint8_t* data,
+                                    uint64_t size) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+            }
+
+            WriteBufferCmd* cmd = allocator->Allocate<WriteBufferCmd>(Command::WriteBuffer);
+            cmd->buffer = buffer;
+            cmd->offset = bufferOffset;
+            cmd->size = size;
+
+            uint8_t* inlinedData = allocator->AllocateData<uint8_t>(size);
+            memcpy(inlinedData, data, size);
+
+            mTopLevelBuffers.insert(buffer);
+
+            return {};
+        },
+        "encoding %s.WriteBuffer(%s, %u, ..., %u).", this, buffer, bufferOffset, size);
+}
+
+void CommandEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+    mEncodingContext.TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (GetDevice()->IsValidationEnabled()) {
+                DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+            }
+
+            TrackQueryAvailability(querySet, queryIndex);
+
+            WriteTimestampCmd* cmd =
+                allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+            cmd->querySet = querySet;
+            cmd->queryIndex = queryIndex;
+
+            return {};
+        },
+        "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+}
+
+CommandBufferBase* CommandEncoder::APIFinish(const CommandBufferDescriptor* descriptor) {
+    Ref<CommandBufferBase> commandBuffer;
+    if (GetDevice()->ConsumedError(Finish(descriptor), &commandBuffer)) {
+        return CommandBufferBase::MakeError(GetDevice());
+    }
+    ASSERT(!IsError());
+    return commandBuffer.Detach();
+}
+
+ResultOrError<Ref<CommandBufferBase>> CommandEncoder::Finish(
+    const CommandBufferDescriptor* descriptor) {
+    DeviceBase* device = GetDevice();
+
+    // Even if mEncodingContext.Finish() validation fails, calling it will mutate the internal
+    // state of the encoding context. The internal state is set to finished, and subsequent
+    // calls to encode commands will generate errors.
+    DAWN_TRY(mEncodingContext.Finish());
+    DAWN_TRY(device->ValidateIsAlive());
+
+    if (device->IsValidationEnabled()) {
+        DAWN_TRY(ValidateFinish());
+    }
+
+    const CommandBufferDescriptor defaultDescriptor = {};
+    if (descriptor == nullptr) {
+        descriptor = &defaultDescriptor;
+    }
+
+    return device->CreateCommandBuffer(this, descriptor);
+}
+
+// Implementation of the command buffer validation that can be precomputed before submit
+MaybeError CommandEncoder::ValidateFinish() const {
+    TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "CommandEncoder::ValidateFinish");
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    for (const RenderPassResourceUsage& passUsage : mEncodingContext.GetRenderPassUsages()) {
+        DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(passUsage),
+                         "validating render pass usage.");
+    }
+
+    for (const ComputePassResourceUsage& passUsage : mEncodingContext.GetComputePassUsages()) {
+        for (const SyncScopeResourceUsage& scope : passUsage.dispatchUsages) {
+            DAWN_TRY_CONTEXT(ValidateSyncScopeResourceUsage(scope),
+                             "validating compute pass usage.");
+        }
+    }
+
+    DAWN_INVALID_IF(
+        mDebugGroupStackSize != 0,
+        "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup prior to "
+        "calling Finish.",
+        mDebugGroupStackSize);
+
+    return {};
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/CommandEncoder.h b/src/dawn/native/CommandEncoder.h
index 1f70b40..79e6e96 100644
--- a/src/dawn/native/CommandEncoder.h
+++ b/src/dawn/native/CommandEncoder.h
@@ -27,96 +27,96 @@
 
 namespace dawn::native {
 
-    enum class UsageValidationMode;
+enum class UsageValidationMode;
 
-    MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
-                                                const CommandEncoderDescriptor* descriptor);
+MaybeError ValidateCommandEncoderDescriptor(const DeviceBase* device,
+                                            const CommandEncoderDescriptor* descriptor);
 
-    class CommandEncoder final : public ApiObjectBase {
-      public:
-        static Ref<CommandEncoder> Create(DeviceBase* device,
-                                          const CommandEncoderDescriptor* descriptor);
-        static CommandEncoder* MakeError(DeviceBase* device);
+class CommandEncoder final : public ApiObjectBase {
+  public:
+    static Ref<CommandEncoder> Create(DeviceBase* device,
+                                      const CommandEncoderDescriptor* descriptor);
+    static CommandEncoder* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        CommandIterator AcquireCommands();
-        CommandBufferResourceUsage AcquireResourceUsages();
+    CommandIterator AcquireCommands();
+    CommandBufferResourceUsage AcquireResourceUsages();
 
-        void TrackUsedQuerySet(QuerySetBase* querySet);
-        void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+    void TrackUsedQuerySet(QuerySetBase* querySet);
+    void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
 
-        // Dawn API
-        ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
-        RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
+    // Dawn API
+    ComputePassEncoder* APIBeginComputePass(const ComputePassDescriptor* descriptor);
+    RenderPassEncoder* APIBeginRenderPass(const RenderPassDescriptor* descriptor);
 
-        void APICopyBufferToBuffer(BufferBase* source,
-                                   uint64_t sourceOffset,
-                                   BufferBase* destination,
-                                   uint64_t destinationOffset,
-                                   uint64_t size);
-        void APICopyBufferToTexture(const ImageCopyBuffer* source,
-                                    const ImageCopyTexture* destination,
-                                    const Extent3D* copySize);
-        void APICopyTextureToBuffer(const ImageCopyTexture* source,
-                                    const ImageCopyBuffer* destination,
-                                    const Extent3D* copySize);
-        void APICopyTextureToTexture(const ImageCopyTexture* source,
-                                     const ImageCopyTexture* destination,
-                                     const Extent3D* copySize);
-        void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
-                                             const ImageCopyTexture* destination,
-                                             const Extent3D* copySize);
-        void APIClearBuffer(BufferBase* destination, uint64_t destinationOffset, uint64_t size);
+    void APICopyBufferToBuffer(BufferBase* source,
+                               uint64_t sourceOffset,
+                               BufferBase* destination,
+                               uint64_t destinationOffset,
+                               uint64_t size);
+    void APICopyBufferToTexture(const ImageCopyBuffer* source,
+                                const ImageCopyTexture* destination,
+                                const Extent3D* copySize);
+    void APICopyTextureToBuffer(const ImageCopyTexture* source,
+                                const ImageCopyBuffer* destination,
+                                const Extent3D* copySize);
+    void APICopyTextureToTexture(const ImageCopyTexture* source,
+                                 const ImageCopyTexture* destination,
+                                 const Extent3D* copySize);
+    void APICopyTextureToTextureInternal(const ImageCopyTexture* source,
+                                         const ImageCopyTexture* destination,
+                                         const Extent3D* copySize);
+    void APIClearBuffer(BufferBase* destination, uint64_t destinationOffset, uint64_t size);
 
-        void APIInjectValidationError(const char* message);
-        void APIInsertDebugMarker(const char* groupLabel);
-        void APIPopDebugGroup();
-        void APIPushDebugGroup(const char* groupLabel);
+    void APIInjectValidationError(const char* message);
+    void APIInsertDebugMarker(const char* groupLabel);
+    void APIPopDebugGroup();
+    void APIPushDebugGroup(const char* groupLabel);
 
-        void APIResolveQuerySet(QuerySetBase* querySet,
-                                uint32_t firstQuery,
-                                uint32_t queryCount,
-                                BufferBase* destination,
-                                uint64_t destinationOffset);
-        void APIWriteBuffer(BufferBase* buffer,
-                            uint64_t bufferOffset,
-                            const uint8_t* data,
-                            uint64_t size);
-        void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+    void APIResolveQuerySet(QuerySetBase* querySet,
+                            uint32_t firstQuery,
+                            uint32_t queryCount,
+                            BufferBase* destination,
+                            uint64_t destinationOffset);
+    void APIWriteBuffer(BufferBase* buffer,
+                        uint64_t bufferOffset,
+                        const uint8_t* data,
+                        uint64_t size);
+    void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
 
-        CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
+    CommandBufferBase* APIFinish(const CommandBufferDescriptor* descriptor = nullptr);
 
-        Ref<ComputePassEncoder> BeginComputePass(const ComputePassDescriptor* descriptor = nullptr);
-        Ref<RenderPassEncoder> BeginRenderPass(const RenderPassDescriptor* descriptor);
-        ResultOrError<Ref<CommandBufferBase>> Finish(
-            const CommandBufferDescriptor* descriptor = nullptr);
+    Ref<ComputePassEncoder> BeginComputePass(const ComputePassDescriptor* descriptor = nullptr);
+    Ref<RenderPassEncoder> BeginRenderPass(const RenderPassDescriptor* descriptor);
+    ResultOrError<Ref<CommandBufferBase>> Finish(
+        const CommandBufferDescriptor* descriptor = nullptr);
 
-      private:
-        CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
-        CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag);
+  private:
+    CommandEncoder(DeviceBase* device, const CommandEncoderDescriptor* descriptor);
+    CommandEncoder(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        // Helper to be able to implement both APICopyTextureToTexture and
-        // APICopyTextureToTextureInternal. The only difference between both
-        // copies, is that the Internal one will also check internal usage.
-        template <bool Internal>
-        void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
-                                           const ImageCopyTexture* destination,
-                                           const Extent3D* copySize);
+    // Helper to be able to implement both APICopyTextureToTexture and
+    // APICopyTextureToTextureInternal. The only difference between both
+    // copies, is that the Internal one will also check internal usage.
+    template <bool Internal>
+    void APICopyTextureToTextureHelper(const ImageCopyTexture* source,
+                                       const ImageCopyTexture* destination,
+                                       const Extent3D* copySize);
 
-        MaybeError ValidateFinish() const;
+    MaybeError ValidateFinish() const;
 
-        EncodingContext mEncodingContext;
-        std::set<BufferBase*> mTopLevelBuffers;
-        std::set<TextureBase*> mTopLevelTextures;
-        std::set<QuerySetBase*> mUsedQuerySets;
+    EncodingContext mEncodingContext;
+    std::set<BufferBase*> mTopLevelBuffers;
+    std::set<TextureBase*> mTopLevelTextures;
+    std::set<QuerySetBase*> mUsedQuerySets;
 
-        uint64_t mDebugGroupStackSize = 0;
+    uint64_t mDebugGroupStackSize = 0;
 
-        UsageValidationMode mUsageValidationMode;
-    };
+    UsageValidationMode mUsageValidationMode;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CommandValidation.cpp b/src/dawn/native/CommandValidation.cpp
index d2d38f7..3df90f1 100644
--- a/src/dawn/native/CommandValidation.cpp
+++ b/src/dawn/native/CommandValidation.cpp
@@ -32,461 +32,453 @@
 
 namespace dawn::native {
 
-    // Performs validation of the "synchronization scope" rules of WebGPU.
-    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
-        // Buffers can only be used as single-write or multiple read.
-        for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
-            const wgpu::BufferUsage usage = scope.bufferUsages[i];
-            bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
-            bool singleUse = wgpu::HasZeroOrOneBits(usage);
+// Performs validation of the "synchronization scope" rules of WebGPU.
+MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& scope) {
+    // Buffers can only be used as single-write or multiple read.
+    for (size_t i = 0; i < scope.bufferUsages.size(); ++i) {
+        const wgpu::BufferUsage usage = scope.bufferUsages[i];
+        bool readOnly = IsSubset(usage, kReadOnlyBufferUsages);
+        bool singleUse = wgpu::HasZeroOrOneBits(usage);
 
-            DAWN_INVALID_IF(!readOnly && !singleUse,
-                            "%s usage (%s) includes writable usage and another usage in the same "
-                            "synchronization scope.",
-                            scope.buffers[i], usage);
-        }
-
-        // Check that every single subresource is used as either a single-write usage or a
-        // combination of readonly usages.
-        for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
-            const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
-            MaybeError error = {};
-            textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
-                bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
-                bool singleUse = wgpu::HasZeroOrOneBits(usage);
-                if (!readOnly && !singleUse && !error.IsError()) {
-                    error = DAWN_FORMAT_VALIDATION_ERROR(
+        DAWN_INVALID_IF(!readOnly && !singleUse,
                         "%s usage (%s) includes writable usage and another usage in the same "
                         "synchronization scope.",
-                        scope.textures[i], usage);
-                }
-            });
-            DAWN_TRY(std::move(error));
-        }
-        return {};
+                        scope.buffers[i], usage);
     }
 
-    MaybeError ValidateTimestampQuery(const DeviceBase* device,
-                                      const QuerySetBase* querySet,
-                                      uint32_t queryIndex) {
-        DAWN_TRY(device->ValidateObject(querySet));
+    // Check that every single subresource is used as either a single-write usage or a
+    // combination of readonly usages.
+    for (size_t i = 0; i < scope.textureUsages.size(); ++i) {
+        const TextureSubresourceUsage& textureUsage = scope.textureUsages[i];
+        MaybeError error = {};
+        textureUsage.Iterate([&](const SubresourceRange&, const wgpu::TextureUsage& usage) {
+            bool readOnly = IsSubset(usage, kReadOnlyTextureUsages);
+            bool singleUse = wgpu::HasZeroOrOneBits(usage);
+            if (!readOnly && !singleUse && !error.IsError()) {
+                error = DAWN_FORMAT_VALIDATION_ERROR(
+                    "%s usage (%s) includes writable usage and another usage in the same "
+                    "synchronization scope.",
+                    scope.textures[i], usage);
+            }
+        });
+        DAWN_TRY(std::move(error));
+    }
+    return {};
+}
 
-        DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
-                        "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
+MaybeError ValidateTimestampQuery(const DeviceBase* device,
+                                  const QuerySetBase* querySet,
+                                  uint32_t queryIndex) {
+    DAWN_TRY(device->ValidateObject(querySet));
 
-        DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
-                        "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
-                        querySet->GetQueryCount(), querySet);
+    DAWN_INVALID_IF(querySet->GetQueryType() != wgpu::QueryType::Timestamp,
+                    "The type of %s is not %s.", querySet, wgpu::QueryType::Timestamp);
 
-        return {};
+    DAWN_INVALID_IF(queryIndex >= querySet->GetQueryCount(),
+                    "Query index (%u) exceeds the number of queries (%u) in %s.", queryIndex,
+                    querySet->GetQueryCount(), querySet);
+
+    return {};
+}
+
+MaybeError ValidateWriteBuffer(const DeviceBase* device,
+                               const BufferBase* buffer,
+                               uint64_t bufferOffset,
+                               uint64_t size) {
+    DAWN_TRY(device->ValidateObject(buffer));
+
+    DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
+                    bufferOffset);
+
+    DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
+
+    uint64_t bufferSize = buffer->GetSize();
+    DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
+                    "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
+                    bufferOffset, size, buffer, bufferSize);
+
+    DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst));
+
+    return {};
+}
+
+bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
+    uint32_t maxStart = std::max(startA, startB);
+    uint32_t minStart = std::min(startA, startB);
+    return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
+           static_cast<uint64_t>(maxStart);
+}
+
+ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+                                                   const Extent3D& copySize,
+                                                   uint32_t bytesPerRow,
+                                                   uint32_t rowsPerImage) {
+    ASSERT(copySize.width % blockInfo.width == 0);
+    ASSERT(copySize.height % blockInfo.height == 0);
+    uint32_t widthInBlocks = copySize.width / blockInfo.width;
+    uint32_t heightInBlocks = copySize.height / blockInfo.height;
+    uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
+
+    if (copySize.depthOrArrayLayers == 0) {
+        return 0;
     }
 
-    MaybeError ValidateWriteBuffer(const DeviceBase* device,
-                                   const BufferBase* buffer,
-                                   uint64_t bufferOffset,
-                                   uint64_t size) {
-        DAWN_TRY(device->ValidateObject(buffer));
+    // Check for potential overflows for the rest of the computations. We have the following
+    // inequalities:
+    //
+    //   bytesInLastRow <= bytesPerRow
+    //   heightInBlocks <= rowsPerImage
+    //
+    // So:
+    //
+    //   bytesInLastImage  = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
+    //                    <= bytesPerRow * heightInBlocks
+    //                    <= bytesPerRow * rowsPerImage
+    //                    <= bytesPerImage
+    //
+    // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
+    // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
+    ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
+                                                rowsPerImage != wgpu::kCopyStrideUndefined));
+    uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
+    DAWN_INVALID_IF(
+        bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+        "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
+        bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
+        copySize.depthOrArrayLayers);
 
-        DAWN_INVALID_IF(bufferOffset % 4 != 0, "BufferOffset (%u) is not a multiple of 4.",
-                        bufferOffset);
-
-        DAWN_INVALID_IF(size % 4 != 0, "Size (%u) is not a multiple of 4.", size);
-
-        uint64_t bufferSize = buffer->GetSize();
-        DAWN_INVALID_IF(bufferOffset > bufferSize || size > (bufferSize - bufferOffset),
-                        "Write range (bufferOffset: %u, size: %u) does not fit in %s size (%u).",
-                        bufferOffset, size, buffer, bufferSize);
-
-        DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::CopyDst));
-
-        return {};
+    uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
+    if (heightInBlocks > 0) {
+        ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
+        uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
+        requiredBytesInCopy += bytesInLastImage;
     }
+    return requiredBytesInCopy;
+}
 
-    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length) {
-        uint32_t maxStart = std::max(startA, startB);
-        uint32_t minStart = std::min(startA, startB);
-        return static_cast<uint64_t>(minStart) + static_cast<uint64_t>(length) >
-               static_cast<uint64_t>(maxStart);
-    }
+MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+                                        uint64_t offset,
+                                        uint64_t size) {
+    uint64_t bufferSize = buffer->GetSize();
+    bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
+    DAWN_INVALID_IF(!fitsInBuffer,
+                    "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset, size,
+                    buffer.Get(), bufferSize);
 
-    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
-                                                       const Extent3D& copySize,
-                                                       uint32_t bytesPerRow,
-                                                       uint32_t rowsPerImage) {
-        ASSERT(copySize.width % blockInfo.width == 0);
-        ASSERT(copySize.height % blockInfo.height == 0);
-        uint32_t widthInBlocks = copySize.width / blockInfo.width;
-        uint32_t heightInBlocks = copySize.height / blockInfo.height;
-        uint64_t bytesInLastRow = Safe32x32(widthInBlocks, blockInfo.byteSize);
+    return {};
+}
 
-        if (copySize.depthOrArrayLayers == 0) {
-            return 0;
-        }
+// Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
+// it.
+void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+                                          const TexelBlockInfo& blockInfo,
+                                          const Extent3D& copyExtent) {
+    ASSERT(layout != nullptr);
+    ASSERT(copyExtent.height % blockInfo.height == 0);
+    uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
 
-        // Check for potential overflows for the rest of the computations. We have the following
-        // inequalities:
-        //
-        //   bytesInLastRow <= bytesPerRow
-        //   heightInBlocks <= rowsPerImage
-        //
-        // So:
-        //
-        //   bytesInLastImage  = bytesPerRow * (heightInBlocks - 1) + bytesInLastRow
-        //                    <= bytesPerRow * heightInBlocks
-        //                    <= bytesPerRow * rowsPerImage
-        //                    <= bytesPerImage
-        //
-        // This means that if the computation of depth * bytesPerImage doesn't overflow, none of the
-        // computations for requiredBytesInCopy will. (and it's not a very pessimizing check)
-        ASSERT(copySize.depthOrArrayLayers <= 1 || (bytesPerRow != wgpu::kCopyStrideUndefined &&
-                                                    rowsPerImage != wgpu::kCopyStrideUndefined));
-        uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
-        DAWN_INVALID_IF(
-            bytesPerImage > std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
-            "The number of bytes per image (%u) exceeds the maximum (%u) when copying %u images.",
-            bytesPerImage, std::numeric_limits<uint64_t>::max() / copySize.depthOrArrayLayers,
-            copySize.depthOrArrayLayers);
-
-        uint64_t requiredBytesInCopy = bytesPerImage * (copySize.depthOrArrayLayers - 1);
-        if (heightInBlocks > 0) {
-            ASSERT(heightInBlocks <= 1 || bytesPerRow != wgpu::kCopyStrideUndefined);
-            uint64_t bytesInLastImage = Safe32x32(bytesPerRow, heightInBlocks - 1) + bytesInLastRow;
-            requiredBytesInCopy += bytesInLastImage;
-        }
-        return requiredBytesInCopy;
-    }
-
-    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
-                                            uint64_t offset,
-                                            uint64_t size) {
-        uint64_t bufferSize = buffer->GetSize();
-        bool fitsInBuffer = offset <= bufferSize && (size <= (bufferSize - offset));
-        DAWN_INVALID_IF(!fitsInBuffer,
-                        "Copy range (offset: %u, size: %u) does not fit in %s size (%u).", offset,
-                        size, buffer.Get(), bufferSize);
-
-        return {};
-    }
-
-    // Replace wgpu::kCopyStrideUndefined with real values, so backends don't have to think about
-    // it.
-    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
-                                              const TexelBlockInfo& blockInfo,
-                                              const Extent3D& copyExtent) {
-        ASSERT(layout != nullptr);
-        ASSERT(copyExtent.height % blockInfo.height == 0);
-        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
-
-        if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
-            ASSERT(copyExtent.width % blockInfo.width == 0);
-            uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
-            uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
-
-            ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
-            layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
-        }
-        if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
-            ASSERT(copyExtent.depthOrArrayLayers <= 1);
-            layout->rowsPerImage = heightInBlocks;
-        }
-    }
-
-    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
-                                         uint64_t byteSize,
-                                         const TexelBlockInfo& blockInfo,
-                                         const Extent3D& copyExtent) {
-        ASSERT(copyExtent.height % blockInfo.height == 0);
-        uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
-
-        // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
-        // validation message. Investigate ways to make it print as a more readable symbol.
-        DAWN_INVALID_IF(
-            copyExtent.depthOrArrayLayers > 1 &&
-                (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
-                 layout.rowsPerImage == wgpu::kCopyStrideUndefined),
-            "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
-            copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
-
-        DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
-                        "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
-                        heightInBlocks);
-
-        // Validation for other members in layout:
+    if (layout->bytesPerRow == wgpu::kCopyStrideUndefined) {
         ASSERT(copyExtent.width % blockInfo.width == 0);
         uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
-        ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <=
-               std::numeric_limits<uint32_t>::max());
         uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
 
-        // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
-        // but they should get optimized out.
-        DAWN_INVALID_IF(
-            layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
-            "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
-            layout.bytesPerRow);
+        ASSERT(heightInBlocks <= 1 && copyExtent.depthOrArrayLayers <= 1);
+        layout->bytesPerRow = Align(bytesInLastRow, kTextureBytesPerRowAlignment);
+    }
+    if (layout->rowsPerImage == wgpu::kCopyStrideUndefined) {
+        ASSERT(copyExtent.depthOrArrayLayers <= 1);
+        layout->rowsPerImage = heightInBlocks;
+    }
+}
 
-        DAWN_INVALID_IF(layout.rowsPerImage != wgpu::kCopyStrideUndefined &&
-                            heightInBlocks > layout.rowsPerImage,
-                        "The height of each image in blocks (%u) is > rowsPerImage (%u).",
-                        heightInBlocks, layout.rowsPerImage);
+MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+                                     uint64_t byteSize,
+                                     const TexelBlockInfo& blockInfo,
+                                     const Extent3D& copyExtent) {
+    ASSERT(copyExtent.height % blockInfo.height == 0);
+    uint32_t heightInBlocks = copyExtent.height / blockInfo.height;
 
-        // We compute required bytes in copy after validating texel block alignments
-        // because the divisibility conditions are necessary for the algorithm to be valid,
-        // also the bytesPerRow bound is necessary to avoid overflows.
-        uint64_t requiredBytesInCopy;
-        DAWN_TRY_ASSIGN(requiredBytesInCopy,
-                        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow,
-                                                   layout.rowsPerImage));
+    // TODO(dawn:563): Right now kCopyStrideUndefined will be formatted as a large value in the
+    // validation message. Investigate ways to make it print as a more readable symbol.
+    DAWN_INVALID_IF(
+        copyExtent.depthOrArrayLayers > 1 && (layout.bytesPerRow == wgpu::kCopyStrideUndefined ||
+                                              layout.rowsPerImage == wgpu::kCopyStrideUndefined),
+        "Copy depth (%u) is > 1, but bytesPerRow (%u) or rowsPerImage (%u) are not specified.",
+        copyExtent.depthOrArrayLayers, layout.bytesPerRow, layout.rowsPerImage);
 
-        bool fitsInData =
-            layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
-        DAWN_INVALID_IF(
-            !fitsInData,
-            "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
-            "offset (%u).",
-            requiredBytesInCopy, byteSize, layout.offset);
+    DAWN_INVALID_IF(heightInBlocks > 1 && layout.bytesPerRow == wgpu::kCopyStrideUndefined,
+                    "HeightInBlocks (%u) is > 1, but bytesPerRow is not specified.",
+                    heightInBlocks);
 
-        return {};
+    // Validation for other members in layout:
+    ASSERT(copyExtent.width % blockInfo.width == 0);
+    uint32_t widthInBlocks = copyExtent.width / blockInfo.width;
+    ASSERT(Safe32x32(widthInBlocks, blockInfo.byteSize) <= std::numeric_limits<uint32_t>::max());
+    uint32_t bytesInLastRow = widthInBlocks * blockInfo.byteSize;
+
+    // These != wgpu::kCopyStrideUndefined checks are technically redundant with the > checks,
+    // but they should get optimized out.
+    DAWN_INVALID_IF(
+        layout.bytesPerRow != wgpu::kCopyStrideUndefined && bytesInLastRow > layout.bytesPerRow,
+        "The byte size of each row (%u) is > bytesPerRow (%u).", bytesInLastRow,
+        layout.bytesPerRow);
+
+    DAWN_INVALID_IF(
+        layout.rowsPerImage != wgpu::kCopyStrideUndefined && heightInBlocks > layout.rowsPerImage,
+        "The height of each image in blocks (%u) is > rowsPerImage (%u).", heightInBlocks,
+        layout.rowsPerImage);
+
+    // We compute required bytes in copy after validating texel block alignments
+    // because the divisibility conditions are necessary for the algorithm to be valid,
+    // also the bytesPerRow bound is necessary to avoid overflows.
+    uint64_t requiredBytesInCopy;
+    DAWN_TRY_ASSIGN(
+        requiredBytesInCopy,
+        ComputeRequiredBytesInCopy(blockInfo, copyExtent, layout.bytesPerRow, layout.rowsPerImage));
+
+    bool fitsInData =
+        layout.offset <= byteSize && (requiredBytesInCopy <= (byteSize - layout.offset));
+    DAWN_INVALID_IF(
+        !fitsInData,
+        "Required size for texture data layout (%u) exceeds the linear data size (%u) with "
+        "offset (%u).",
+        requiredBytesInCopy, byteSize, layout.offset);
+
+    return {};
+}
+
+MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+                                   const ImageCopyBuffer& imageCopyBuffer) {
+    DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
+    if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
+        DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
+                        "bytesPerRow (%u) is not a multiple of %u.",
+                        imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
     }
 
-    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
-                                       const ImageCopyBuffer& imageCopyBuffer) {
-        DAWN_TRY(device->ValidateObject(imageCopyBuffer.buffer));
-        if (imageCopyBuffer.layout.bytesPerRow != wgpu::kCopyStrideUndefined) {
-            DAWN_INVALID_IF(imageCopyBuffer.layout.bytesPerRow % kTextureBytesPerRowAlignment != 0,
-                            "bytesPerRow (%u) is not a multiple of %u.",
-                            imageCopyBuffer.layout.bytesPerRow, kTextureBytesPerRowAlignment);
-        }
+    return {};
+}
 
-        return {};
-    }
+MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+                                    const ImageCopyTexture& textureCopy,
+                                    const Extent3D& copySize) {
+    const TextureBase* texture = textureCopy.texture;
+    DAWN_TRY(device->ValidateObject(texture));
 
-    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
-                                        const ImageCopyTexture& textureCopy,
-                                        const Extent3D& copySize) {
-        const TextureBase* texture = textureCopy.texture;
-        DAWN_TRY(device->ValidateObject(texture));
+    DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
+                    "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
+                    textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
 
-        DAWN_INVALID_IF(textureCopy.mipLevel >= texture->GetNumMipLevels(),
-                        "MipLevel (%u) is greater than the number of mip levels (%u) in %s.",
-                        textureCopy.mipLevel, texture->GetNumMipLevels(), texture);
+    DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
+    DAWN_INVALID_IF(SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
+                    "%s format (%s) does not have the selected aspect (%s).", texture,
+                    texture->GetFormat().format, textureCopy.aspect);
 
-        DAWN_TRY(ValidateTextureAspect(textureCopy.aspect));
+    if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
+        Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+        ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
         DAWN_INVALID_IF(
-            SelectFormatAspects(texture->GetFormat(), textureCopy.aspect) == Aspect::None,
-            "%s format (%s) does not have the selected aspect (%s).", texture,
-            texture->GetFormat().format, textureCopy.aspect);
-
-        if (texture->GetSampleCount() > 1 || texture->GetFormat().HasDepthOrStencil()) {
-            Extent3D subresourceSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
-            ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
-            DAWN_INVALID_IF(
-                textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
-                    subresourceSize.width != copySize.width ||
-                    subresourceSize.height != copySize.height,
-                "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
-                "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
-                "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
-                &textureCopy.origin, &copySize, &subresourceSize, texture,
-                texture->GetFormat().format, texture->GetSampleCount());
-        }
-
-        return {};
+            textureCopy.origin.x != 0 || textureCopy.origin.y != 0 ||
+                subresourceSize.width != copySize.width ||
+                subresourceSize.height != copySize.height,
+            "Copy origin (%s) and size (%s) does not cover the entire subresource (origin: "
+            "[x: 0, y: 0], size: %s) of %s. The entire subresource must be copied when the "
+            "format (%s) is a depth/stencil format or the sample count (%u) is > 1.",
+            &textureCopy.origin, &copySize, &subresourceSize, texture, texture->GetFormat().format,
+            texture->GetSampleCount());
     }
 
-    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
-                                        const ImageCopyTexture& textureCopy,
-                                        const Extent3D& copySize) {
-        const TextureBase* texture = textureCopy.texture;
+    return {};
+}
 
-        // Validation for the copy being in-bounds:
-        Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
-        // For 1D/2D textures, include the array layer as depth so it can be checked with other
-        // dimensions.
-        if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
-            mipSize.depthOrArrayLayers = texture->GetArrayLayers();
-        }
-        // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
-        // overflows.
+MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+                                    const ImageCopyTexture& textureCopy,
+                                    const Extent3D& copySize) {
+    const TextureBase* texture = textureCopy.texture;
+
+    // Validation for the copy being in-bounds:
+    Extent3D mipSize = texture->GetMipLevelPhysicalSize(textureCopy.mipLevel);
+    // For 1D/2D textures, include the array layer as depth so it can be checked with other
+    // dimensions.
+    if (texture->GetDimension() != wgpu::TextureDimension::e3D) {
+        mipSize.depthOrArrayLayers = texture->GetArrayLayers();
+    }
+    // All texture dimensions are in uint32_t so by doing checks in uint64_t we avoid
+    // overflows.
+    DAWN_INVALID_IF(
+        static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
+                static_cast<uint64_t>(mipSize.width) ||
+            static_cast<uint64_t>(textureCopy.origin.y) + static_cast<uint64_t>(copySize.height) >
+                static_cast<uint64_t>(mipSize.height) ||
+            static_cast<uint64_t>(textureCopy.origin.z) +
+                    static_cast<uint64_t>(copySize.depthOrArrayLayers) >
+                static_cast<uint64_t>(mipSize.depthOrArrayLayers),
+        "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
+        "size (%s).",
+        &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
+
+    // Validation for the texel block alignments:
+    const Format& format = textureCopy.texture->GetFormat();
+    if (format.isCompressed) {
+        const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
         DAWN_INVALID_IF(
-            static_cast<uint64_t>(textureCopy.origin.x) + static_cast<uint64_t>(copySize.width) >
-                    static_cast<uint64_t>(mipSize.width) ||
-                static_cast<uint64_t>(textureCopy.origin.y) +
-                        static_cast<uint64_t>(copySize.height) >
-                    static_cast<uint64_t>(mipSize.height) ||
-                static_cast<uint64_t>(textureCopy.origin.z) +
-                        static_cast<uint64_t>(copySize.depthOrArrayLayers) >
-                    static_cast<uint64_t>(mipSize.depthOrArrayLayers),
-            "Texture copy range (origin: %s, copySize: %s) touches outside of %s mip level %u "
-            "size (%s).",
-            &textureCopy.origin, &copySize, texture, textureCopy.mipLevel, &mipSize);
-
-        // Validation for the texel block alignments:
-        const Format& format = textureCopy.texture->GetFormat();
-        if (format.isCompressed) {
-            const TexelBlockInfo& blockInfo = format.GetAspectInfo(textureCopy.aspect).block;
-            DAWN_INVALID_IF(
-                textureCopy.origin.x % blockInfo.width != 0,
-                "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
-                "width (%u).",
-                textureCopy.origin.x, blockInfo.width);
-            DAWN_INVALID_IF(
-                textureCopy.origin.y % blockInfo.height != 0,
-                "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
-                "height (%u).",
-                textureCopy.origin.y, blockInfo.height);
-            DAWN_INVALID_IF(
-                copySize.width % blockInfo.width != 0,
-                "copySize.width (%u) is not a multiple of compressed texture format block width "
-                "(%u).",
-                copySize.width, blockInfo.width);
-            DAWN_INVALID_IF(
-                copySize.height % blockInfo.height != 0,
-                "copySize.height (%u) is not a multiple of compressed texture format block "
-                "height (%u).",
-                copySize.height, blockInfo.height);
-        }
-
-        return {};
+            textureCopy.origin.x % blockInfo.width != 0,
+            "Texture copy origin.x (%u) is not a multiple of compressed texture format block "
+            "width (%u).",
+            textureCopy.origin.x, blockInfo.width);
+        DAWN_INVALID_IF(
+            textureCopy.origin.y % blockInfo.height != 0,
+            "Texture copy origin.y (%u) is not a multiple of compressed texture format block "
+            "height (%u).",
+            textureCopy.origin.y, blockInfo.height);
+        DAWN_INVALID_IF(
+            copySize.width % blockInfo.width != 0,
+            "copySize.width (%u) is not a multiple of compressed texture format block width "
+            "(%u).",
+            copySize.width, blockInfo.width);
+        DAWN_INVALID_IF(copySize.height % blockInfo.height != 0,
+                        "copySize.height (%u) is not a multiple of compressed texture format block "
+                        "height (%u).",
+                        copySize.height, blockInfo.height);
     }
 
-    // Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
-    // formats).
-    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
-        const Format& format = view.texture->GetFormat();
-        switch (view.aspect) {
-            case wgpu::TextureAspect::All: {
+    return {};
+}
+
+// Always returns a single aspect (color, stencil, depth, or ith plane for multi-planar
+// formats).
+ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view) {
+    const Format& format = view.texture->GetFormat();
+    switch (view.aspect) {
+        case wgpu::TextureAspect::All: {
+            DAWN_INVALID_IF(
+                !HasOneBit(format.aspects),
+                "More than a single aspect (%s) is selected for multi-planar format (%s) in "
+                "%s <-> linear data copy.",
+                view.aspect, format.format, view.texture);
+
+            Aspect single = format.aspects;
+            return single;
+        }
+        case wgpu::TextureAspect::DepthOnly:
+            ASSERT(format.aspects & Aspect::Depth);
+            return Aspect::Depth;
+        case wgpu::TextureAspect::StencilOnly:
+            ASSERT(format.aspects & Aspect::Stencil);
+            return Aspect::Stencil;
+        case wgpu::TextureAspect::Plane0Only:
+        case wgpu::TextureAspect::Plane1Only:
+            break;
+    }
+    UNREACHABLE();
+}
+
+MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
+    Aspect aspectUsed;
+    DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
+
+    const Format& format = dst.texture->GetFormat();
+    switch (format.format) {
+        case wgpu::TextureFormat::Depth16Unorm:
+            return {};
+        default:
+            DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
+                            "Cannot copy into the depth aspect of %s with format %s.", dst.texture,
+                            format.format);
+            break;
+    }
+
+    return {};
+}
+
+MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+                                                          const ImageCopyTexture& dst,
+                                                          const Extent3D& copySize) {
+    const uint32_t srcSamples = src.texture->GetSampleCount();
+    const uint32_t dstSamples = dst.texture->GetSampleCount();
+
+    DAWN_INVALID_IF(
+        srcSamples != dstSamples,
+        "Source %s sample count (%u) and destination %s sample count (%u) does not match.",
+        src.texture, srcSamples, dst.texture, dstSamples);
+
+    // Metal cannot select a single aspect for texture-to-texture copies.
+    const Format& format = src.texture->GetFormat();
+    DAWN_INVALID_IF(
+        SelectFormatAspects(format, src.aspect) != format.aspects,
+        "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
+        src.texture, src.aspect, format.format);
+
+    DAWN_INVALID_IF(
+        SelectFormatAspects(format, dst.aspect) != format.aspects,
+        "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
+        "(%s).",
+        dst.texture, dst.aspect, format.format);
+
+    if (src.texture == dst.texture) {
+        switch (src.texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D:
+                ASSERT(src.mipLevel == 0 && src.origin.z == 0 && dst.origin.z == 0);
+                return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
+
+            case wgpu::TextureDimension::e2D:
                 DAWN_INVALID_IF(
-                    !HasOneBit(format.aspects),
-                    "More than a single aspect (%s) is selected for multi-planar format (%s) in "
-                    "%s <-> linear data copy.",
-                    view.aspect, format.format, view.texture);
+                    src.mipLevel == dst.mipLevel &&
+                        IsRangeOverlapped(src.origin.z, dst.origin.z, copySize.depthOrArrayLayers),
+                    "Copy source and destination are overlapping layer ranges "
+                    "([%u, %u) and [%u, %u)) of %s mip level %u",
+                    src.origin.z, src.origin.z + copySize.depthOrArrayLayers, dst.origin.z,
+                    dst.origin.z + copySize.depthOrArrayLayers, src.texture, src.mipLevel);
+                break;
 
-                Aspect single = format.aspects;
-                return single;
-            }
-            case wgpu::TextureAspect::DepthOnly:
-                ASSERT(format.aspects & Aspect::Depth);
-                return Aspect::Depth;
-            case wgpu::TextureAspect::StencilOnly:
-                ASSERT(format.aspects & Aspect::Stencil);
-                return Aspect::Stencil;
-            case wgpu::TextureAspect::Plane0Only:
-            case wgpu::TextureAspect::Plane1Only:
+            case wgpu::TextureDimension::e3D:
+                DAWN_INVALID_IF(src.mipLevel == dst.mipLevel,
+                                "Copy is from %s mip level %u to itself.", src.texture,
+                                src.mipLevel);
                 break;
         }
-        UNREACHABLE();
     }
 
-    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst) {
-        Aspect aspectUsed;
-        DAWN_TRY_ASSIGN(aspectUsed, SingleAspectUsedByImageCopyTexture(dst));
+    return {};
+}
 
-        const Format& format = dst.texture->GetFormat();
-        switch (format.format) {
-            case wgpu::TextureFormat::Depth16Unorm:
-                return {};
-            default:
-                DAWN_INVALID_IF(aspectUsed == Aspect::Depth,
-                                "Cannot copy into the depth aspect of %s with format %s.",
-                                dst.texture, format.format);
-                break;
-        }
+MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+                                                    const ImageCopyTexture& dst,
+                                                    const Extent3D& copySize) {
+    // Metal requires texture-to-texture copies happens between texture formats that equal to
+    // each other or only have diff on srgb-ness.
+    DAWN_INVALID_IF(!src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
+                    "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
+                    src.texture, src.texture->GetFormat().format, dst.texture,
+                    dst.texture->GetFormat().format);
 
-        return {};
+    return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
+}
+
+MaybeError ValidateCanUseAs(const TextureBase* texture,
+                            wgpu::TextureUsage usage,
+                            UsageValidationMode mode) {
+    ASSERT(wgpu::HasZeroOrOneBits(usage));
+    switch (mode) {
+        case UsageValidationMode::Default:
+            DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
+                            texture, texture->GetUsage(), usage);
+            break;
+        case UsageValidationMode::Internal:
+            DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
+                            "%s internal usage (%s) doesn't include %s.", texture,
+                            texture->GetInternalUsage(), usage);
+            break;
     }
 
-    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
-                                                              const ImageCopyTexture& dst,
-                                                              const Extent3D& copySize) {
-        const uint32_t srcSamples = src.texture->GetSampleCount();
-        const uint32_t dstSamples = dst.texture->GetSampleCount();
+    return {};
+}
 
-        DAWN_INVALID_IF(
-            srcSamples != dstSamples,
-            "Source %s sample count (%u) and destination %s sample count (%u) does not match.",
-            src.texture, srcSamples, dst.texture, dstSamples);
-
-        // Metal cannot select a single aspect for texture-to-texture copies.
-        const Format& format = src.texture->GetFormat();
-        DAWN_INVALID_IF(
-            SelectFormatAspects(format, src.aspect) != format.aspects,
-            "Source %s aspect (%s) doesn't select all the aspects of the source format (%s).",
-            src.texture, src.aspect, format.format);
-
-        DAWN_INVALID_IF(
-            SelectFormatAspects(format, dst.aspect) != format.aspects,
-            "Destination %s aspect (%s) doesn't select all the aspects of the destination format "
-            "(%s).",
-            dst.texture, dst.aspect, format.format);
-
-        if (src.texture == dst.texture) {
-            switch (src.texture->GetDimension()) {
-                case wgpu::TextureDimension::e1D:
-                    ASSERT(src.mipLevel == 0 && src.origin.z == 0 && dst.origin.z == 0);
-                    return DAWN_FORMAT_VALIDATION_ERROR("Copy is from %s to itself.", src.texture);
-
-                case wgpu::TextureDimension::e2D:
-                    DAWN_INVALID_IF(src.mipLevel == dst.mipLevel &&
-                                        IsRangeOverlapped(src.origin.z, dst.origin.z,
-                                                          copySize.depthOrArrayLayers),
-                                    "Copy source and destination are overlapping layer ranges "
-                                    "([%u, %u) and [%u, %u)) of %s mip level %u",
-                                    src.origin.z, src.origin.z + copySize.depthOrArrayLayers,
-                                    dst.origin.z, dst.origin.z + copySize.depthOrArrayLayers,
-                                    src.texture, src.mipLevel);
-                    break;
-
-                case wgpu::TextureDimension::e3D:
-                    DAWN_INVALID_IF(src.mipLevel == dst.mipLevel,
-                                    "Copy is from %s mip level %u to itself.", src.texture,
-                                    src.mipLevel);
-                    break;
-            }
-        }
-
-        return {};
-    }
-
-    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
-                                                        const ImageCopyTexture& dst,
-                                                        const Extent3D& copySize) {
-        // Metal requires texture-to-texture copies happens between texture formats that equal to
-        // each other or only have diff on srgb-ness.
-        DAWN_INVALID_IF(
-            !src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()),
-            "Source %s format (%s) and destination %s format (%s) are not copy compatible.",
-            src.texture, src.texture->GetFormat().format, dst.texture,
-            dst.texture->GetFormat().format);
-
-        return ValidateTextureToTextureCopyCommonRestrictions(src, dst, copySize);
-    }
-
-    MaybeError ValidateCanUseAs(const TextureBase* texture,
-                                wgpu::TextureUsage usage,
-                                UsageValidationMode mode) {
-        ASSERT(wgpu::HasZeroOrOneBits(usage));
-        switch (mode) {
-            case UsageValidationMode::Default:
-                DAWN_INVALID_IF(!(texture->GetUsage() & usage), "%s usage (%s) doesn't include %s.",
-                                texture, texture->GetUsage(), usage);
-                break;
-            case UsageValidationMode::Internal:
-                DAWN_INVALID_IF(!(texture->GetInternalUsage() & usage),
-                                "%s internal usage (%s) doesn't include %s.", texture,
-                                texture->GetInternalUsage(), usage);
-                break;
-        }
-
-        return {};
-    }
-
-    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
-        ASSERT(wgpu::HasZeroOrOneBits(usage));
-        DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage),
-                        "%s usage (%s) doesn't include %s.", buffer, buffer->GetUsageExternalOnly(),
-                        usage);
-        return {};
-    }
+MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage) {
+    ASSERT(wgpu::HasZeroOrOneBits(usage));
+    DAWN_INVALID_IF(!(buffer->GetUsageExternalOnly() & usage), "%s usage (%s) doesn't include %s.",
+                    buffer, buffer->GetUsageExternalOnly(), usage);
+    return {};
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CommandValidation.h b/src/dawn/native/CommandValidation.h
index 477a3f5..ede6b31 100644
--- a/src/dawn/native/CommandValidation.h
+++ b/src/dawn/native/CommandValidation.h
@@ -23,74 +23,74 @@
 
 namespace dawn::native {
 
-    class QuerySetBase;
-    struct SyncScopeResourceUsage;
-    struct TexelBlockInfo;
+class QuerySetBase;
+struct SyncScopeResourceUsage;
+struct TexelBlockInfo;
 
-    MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
+MaybeError ValidateSyncScopeResourceUsage(const SyncScopeResourceUsage& usage);
 
-    MaybeError ValidateTimestampQuery(const DeviceBase* device,
-                                      const QuerySetBase* querySet,
-                                      uint32_t queryIndex);
+MaybeError ValidateTimestampQuery(const DeviceBase* device,
+                                  const QuerySetBase* querySet,
+                                  uint32_t queryIndex);
 
-    MaybeError ValidateWriteBuffer(const DeviceBase* device,
-                                   const BufferBase* buffer,
-                                   uint64_t bufferOffset,
-                                   uint64_t size);
+MaybeError ValidateWriteBuffer(const DeviceBase* device,
+                               const BufferBase* buffer,
+                               uint64_t bufferOffset,
+                               uint64_t size);
 
-    template <typename A, typename B>
-    DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
-        static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
-        static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
-        return uint64_t(a) * uint64_t(b);
-    }
+template <typename A, typename B>
+DAWN_FORCE_INLINE uint64_t Safe32x32(A a, B b) {
+    static_assert(std::is_same<A, uint32_t>::value, "'a' must be uint32_t");
+    static_assert(std::is_same<B, uint32_t>::value, "'b' must be uint32_t");
+    return uint64_t(a) * uint64_t(b);
+}
 
-    ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
-                                                       const Extent3D& copySize,
-                                                       uint32_t bytesPerRow,
-                                                       uint32_t rowsPerImage);
+ResultOrError<uint64_t> ComputeRequiredBytesInCopy(const TexelBlockInfo& blockInfo,
+                                                   const Extent3D& copySize,
+                                                   uint32_t bytesPerRow,
+                                                   uint32_t rowsPerImage);
 
-    void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
-                                              const TexelBlockInfo& blockInfo,
-                                              const Extent3D& copyExtent);
-    MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
-                                         uint64_t byteSize,
-                                         const TexelBlockInfo& blockInfo,
-                                         const Extent3D& copyExtent);
-    MaybeError ValidateTextureCopyRange(DeviceBase const* device,
-                                        const ImageCopyTexture& imageCopyTexture,
-                                        const Extent3D& copySize);
-    ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
-    MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
+void ApplyDefaultTextureDataLayoutOptions(TextureDataLayout* layout,
+                                          const TexelBlockInfo& blockInfo,
+                                          const Extent3D& copyExtent);
+MaybeError ValidateLinearTextureData(const TextureDataLayout& layout,
+                                     uint64_t byteSize,
+                                     const TexelBlockInfo& blockInfo,
+                                     const Extent3D& copyExtent);
+MaybeError ValidateTextureCopyRange(DeviceBase const* device,
+                                    const ImageCopyTexture& imageCopyTexture,
+                                    const Extent3D& copySize);
+ResultOrError<Aspect> SingleAspectUsedByImageCopyTexture(const ImageCopyTexture& view);
+MaybeError ValidateLinearToDepthStencilCopyRestrictions(const ImageCopyTexture& dst);
 
-    MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
-                                       const ImageCopyBuffer& imageCopyBuffer);
-    MaybeError ValidateImageCopyTexture(DeviceBase const* device,
-                                        const ImageCopyTexture& imageCopyTexture,
-                                        const Extent3D& copySize);
+MaybeError ValidateImageCopyBuffer(DeviceBase const* device,
+                                   const ImageCopyBuffer& imageCopyBuffer);
+MaybeError ValidateImageCopyTexture(DeviceBase const* device,
+                                    const ImageCopyTexture& imageCopyTexture,
+                                    const Extent3D& copySize);
 
-    MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
-                                            uint64_t offset,
-                                            uint64_t size);
+MaybeError ValidateCopySizeFitsInBuffer(const Ref<BufferBase>& buffer,
+                                        uint64_t offset,
+                                        uint64_t size);
 
-    bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
+bool IsRangeOverlapped(uint32_t startA, uint32_t startB, uint32_t length);
 
-    MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
-                                                              const ImageCopyTexture& dst,
-                                                              const Extent3D& copySize);
-    MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
-                                                        const ImageCopyTexture& dst,
-                                                        const Extent3D& copySize);
+MaybeError ValidateTextureToTextureCopyCommonRestrictions(const ImageCopyTexture& src,
+                                                          const ImageCopyTexture& dst,
+                                                          const Extent3D& copySize);
+MaybeError ValidateTextureToTextureCopyRestrictions(const ImageCopyTexture& src,
+                                                    const ImageCopyTexture& dst,
+                                                    const Extent3D& copySize);
 
-    enum class UsageValidationMode {
-        Default,
-        Internal,
-    };
+enum class UsageValidationMode {
+    Default,
+    Internal,
+};
 
-    MaybeError ValidateCanUseAs(const TextureBase* texture,
-                                wgpu::TextureUsage usage,
-                                UsageValidationMode mode);
-    MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
+MaybeError ValidateCanUseAs(const TextureBase* texture,
+                            wgpu::TextureUsage usage,
+                            UsageValidationMode mode);
+MaybeError ValidateCanUseAs(const BufferBase* buffer, wgpu::BufferUsage usage);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Commands.cpp b/src/dawn/native/Commands.cpp
index 3337cbd..019da13 100644
--- a/src/dawn/native/Commands.cpp
+++ b/src/dawn/native/Commands.cpp
@@ -25,341 +25,340 @@
 
 namespace dawn::native {
 
-    void FreeCommands(CommandIterator* commands) {
-        commands->Reset();
+void FreeCommands(CommandIterator* commands) {
+    commands->Reset();
 
-        Command type;
-        while (commands->NextCommandId(&type)) {
-            switch (type) {
-                case Command::BeginComputePass: {
-                    BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
-                    begin->~BeginComputePassCmd();
-                    break;
-                }
-                case Command::BeginOcclusionQuery: {
-                    BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
-                    begin->~BeginOcclusionQueryCmd();
-                    break;
-                }
-                case Command::BeginRenderPass: {
-                    BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
-                    begin->~BeginRenderPassCmd();
-                    break;
-                }
-                case Command::CopyBufferToBuffer: {
-                    CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
-                    copy->~CopyBufferToBufferCmd();
-                    break;
-                }
-                case Command::CopyBufferToTexture: {
-                    CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
-                    copy->~CopyBufferToTextureCmd();
-                    break;
-                }
-                case Command::CopyTextureToBuffer: {
-                    CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
-                    copy->~CopyTextureToBufferCmd();
-                    break;
-                }
-                case Command::CopyTextureToTexture: {
-                    CopyTextureToTextureCmd* copy =
-                        commands->NextCommand<CopyTextureToTextureCmd>();
-                    copy->~CopyTextureToTextureCmd();
-                    break;
-                }
-                case Command::Dispatch: {
-                    DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
-                    dispatch->~DispatchCmd();
-                    break;
-                }
-                case Command::DispatchIndirect: {
-                    DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
-                    dispatch->~DispatchIndirectCmd();
-                    break;
-                }
-                case Command::Draw: {
-                    DrawCmd* draw = commands->NextCommand<DrawCmd>();
-                    draw->~DrawCmd();
-                    break;
-                }
-                case Command::DrawIndexed: {
-                    DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
-                    draw->~DrawIndexedCmd();
-                    break;
-                }
-                case Command::DrawIndirect: {
-                    DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
-                    draw->~DrawIndirectCmd();
-                    break;
-                }
-                case Command::DrawIndexedIndirect: {
-                    DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
-                    draw->~DrawIndexedIndirectCmd();
-                    break;
-                }
-                case Command::EndComputePass: {
-                    EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
-                    cmd->~EndComputePassCmd();
-                    break;
-                }
-                case Command::EndOcclusionQuery: {
-                    EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
-                    cmd->~EndOcclusionQueryCmd();
-                    break;
-                }
-                case Command::EndRenderPass: {
-                    EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
-                    cmd->~EndRenderPassCmd();
-                    break;
-                }
-                case Command::ExecuteBundles: {
-                    ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
-                    auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
-                    for (size_t i = 0; i < cmd->count; ++i) {
-                        (&bundles[i])->~Ref<RenderBundleBase>();
-                    }
-                    cmd->~ExecuteBundlesCmd();
-                    break;
-                }
-                case Command::ClearBuffer: {
-                    ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
-                    cmd->~ClearBufferCmd();
-                    break;
-                }
-                case Command::InsertDebugMarker: {
-                    InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
-                    commands->NextData<char>(cmd->length + 1);
-                    cmd->~InsertDebugMarkerCmd();
-                    break;
-                }
-                case Command::PopDebugGroup: {
-                    PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
-                    cmd->~PopDebugGroupCmd();
-                    break;
-                }
-                case Command::PushDebugGroup: {
-                    PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
-                    commands->NextData<char>(cmd->length + 1);
-                    cmd->~PushDebugGroupCmd();
-                    break;
-                }
-                case Command::ResolveQuerySet: {
-                    ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
-                    cmd->~ResolveQuerySetCmd();
-                    break;
-                }
-                case Command::SetComputePipeline: {
-                    SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
-                    cmd->~SetComputePipelineCmd();
-                    break;
-                }
-                case Command::SetRenderPipeline: {
-                    SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
-                    cmd->~SetRenderPipelineCmd();
-                    break;
-                }
-                case Command::SetStencilReference: {
-                    SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
-                    cmd->~SetStencilReferenceCmd();
-                    break;
-                }
-                case Command::SetViewport: {
-                    SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
-                    cmd->~SetViewportCmd();
-                    break;
-                }
-                case Command::SetScissorRect: {
-                    SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
-                    cmd->~SetScissorRectCmd();
-                    break;
-                }
-                case Command::SetBlendConstant: {
-                    SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
-                    cmd->~SetBlendConstantCmd();
-                    break;
-                }
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
-                    if (cmd->dynamicOffsetCount > 0) {
-                        commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-                    cmd->~SetBindGroupCmd();
-                    break;
-                }
-                case Command::SetIndexBuffer: {
-                    SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
-                    cmd->~SetIndexBufferCmd();
-                    break;
-                }
-                case Command::SetVertexBuffer: {
-                    SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
-                    cmd->~SetVertexBufferCmd();
-                    break;
-                }
-                case Command::WriteBuffer: {
-                    WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
-                    commands->NextData<uint8_t>(write->size);
-                    write->~WriteBufferCmd();
-                    break;
-                }
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
-                    cmd->~WriteTimestampCmd();
-                    break;
-                }
-            }
-        }
-
-        commands->MakeEmptyAsDataWasDestroyed();
-    }
-
-    void SkipCommand(CommandIterator* commands, Command type) {
+    Command type;
+    while (commands->NextCommandId(&type)) {
         switch (type) {
-            case Command::BeginComputePass:
-                commands->NextCommand<BeginComputePassCmd>();
-                break;
-
-            case Command::BeginOcclusionQuery:
-                commands->NextCommand<BeginOcclusionQueryCmd>();
-                break;
-
-            case Command::BeginRenderPass:
-                commands->NextCommand<BeginRenderPassCmd>();
-                break;
-
-            case Command::CopyBufferToBuffer:
-                commands->NextCommand<CopyBufferToBufferCmd>();
-                break;
-
-            case Command::CopyBufferToTexture:
-                commands->NextCommand<CopyBufferToTextureCmd>();
-                break;
-
-            case Command::CopyTextureToBuffer:
-                commands->NextCommand<CopyTextureToBufferCmd>();
-                break;
-
-            case Command::CopyTextureToTexture:
-                commands->NextCommand<CopyTextureToTextureCmd>();
-                break;
-
-            case Command::Dispatch:
-                commands->NextCommand<DispatchCmd>();
-                break;
-
-            case Command::DispatchIndirect:
-                commands->NextCommand<DispatchIndirectCmd>();
-                break;
-
-            case Command::Draw:
-                commands->NextCommand<DrawCmd>();
-                break;
-
-            case Command::DrawIndexed:
-                commands->NextCommand<DrawIndexedCmd>();
-                break;
-
-            case Command::DrawIndirect:
-                commands->NextCommand<DrawIndirectCmd>();
-                break;
-
-            case Command::DrawIndexedIndirect:
-                commands->NextCommand<DrawIndexedIndirectCmd>();
-                break;
-
-            case Command::EndComputePass:
-                commands->NextCommand<EndComputePassCmd>();
-                break;
-
-            case Command::EndOcclusionQuery:
-                commands->NextCommand<EndOcclusionQueryCmd>();
-                break;
-
-            case Command::EndRenderPass:
-                commands->NextCommand<EndRenderPassCmd>();
-                break;
-
-            case Command::ExecuteBundles: {
-                auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
-                commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+            case Command::BeginComputePass: {
+                BeginComputePassCmd* begin = commands->NextCommand<BeginComputePassCmd>();
+                begin->~BeginComputePassCmd();
                 break;
             }
-
-            case Command::ClearBuffer:
-                commands->NextCommand<ClearBufferCmd>();
+            case Command::BeginOcclusionQuery: {
+                BeginOcclusionQueryCmd* begin = commands->NextCommand<BeginOcclusionQueryCmd>();
+                begin->~BeginOcclusionQueryCmd();
                 break;
-
+            }
+            case Command::BeginRenderPass: {
+                BeginRenderPassCmd* begin = commands->NextCommand<BeginRenderPassCmd>();
+                begin->~BeginRenderPassCmd();
+                break;
+            }
+            case Command::CopyBufferToBuffer: {
+                CopyBufferToBufferCmd* copy = commands->NextCommand<CopyBufferToBufferCmd>();
+                copy->~CopyBufferToBufferCmd();
+                break;
+            }
+            case Command::CopyBufferToTexture: {
+                CopyBufferToTextureCmd* copy = commands->NextCommand<CopyBufferToTextureCmd>();
+                copy->~CopyBufferToTextureCmd();
+                break;
+            }
+            case Command::CopyTextureToBuffer: {
+                CopyTextureToBufferCmd* copy = commands->NextCommand<CopyTextureToBufferCmd>();
+                copy->~CopyTextureToBufferCmd();
+                break;
+            }
+            case Command::CopyTextureToTexture: {
+                CopyTextureToTextureCmd* copy = commands->NextCommand<CopyTextureToTextureCmd>();
+                copy->~CopyTextureToTextureCmd();
+                break;
+            }
+            case Command::Dispatch: {
+                DispatchCmd* dispatch = commands->NextCommand<DispatchCmd>();
+                dispatch->~DispatchCmd();
+                break;
+            }
+            case Command::DispatchIndirect: {
+                DispatchIndirectCmd* dispatch = commands->NextCommand<DispatchIndirectCmd>();
+                dispatch->~DispatchIndirectCmd();
+                break;
+            }
+            case Command::Draw: {
+                DrawCmd* draw = commands->NextCommand<DrawCmd>();
+                draw->~DrawCmd();
+                break;
+            }
+            case Command::DrawIndexed: {
+                DrawIndexedCmd* draw = commands->NextCommand<DrawIndexedCmd>();
+                draw->~DrawIndexedCmd();
+                break;
+            }
+            case Command::DrawIndirect: {
+                DrawIndirectCmd* draw = commands->NextCommand<DrawIndirectCmd>();
+                draw->~DrawIndirectCmd();
+                break;
+            }
+            case Command::DrawIndexedIndirect: {
+                DrawIndexedIndirectCmd* draw = commands->NextCommand<DrawIndexedIndirectCmd>();
+                draw->~DrawIndexedIndirectCmd();
+                break;
+            }
+            case Command::EndComputePass: {
+                EndComputePassCmd* cmd = commands->NextCommand<EndComputePassCmd>();
+                cmd->~EndComputePassCmd();
+                break;
+            }
+            case Command::EndOcclusionQuery: {
+                EndOcclusionQueryCmd* cmd = commands->NextCommand<EndOcclusionQueryCmd>();
+                cmd->~EndOcclusionQueryCmd();
+                break;
+            }
+            case Command::EndRenderPass: {
+                EndRenderPassCmd* cmd = commands->NextCommand<EndRenderPassCmd>();
+                cmd->~EndRenderPassCmd();
+                break;
+            }
+            case Command::ExecuteBundles: {
+                ExecuteBundlesCmd* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+                auto bundles = commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+                for (size_t i = 0; i < cmd->count; ++i) {
+                    (&bundles[i])->~Ref<RenderBundleBase>();
+                }
+                cmd->~ExecuteBundlesCmd();
+                break;
+            }
+            case Command::ClearBuffer: {
+                ClearBufferCmd* cmd = commands->NextCommand<ClearBufferCmd>();
+                cmd->~ClearBufferCmd();
+                break;
+            }
             case Command::InsertDebugMarker: {
                 InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
                 commands->NextData<char>(cmd->length + 1);
+                cmd->~InsertDebugMarkerCmd();
                 break;
             }
-
-            case Command::PopDebugGroup:
-                commands->NextCommand<PopDebugGroupCmd>();
+            case Command::PopDebugGroup: {
+                PopDebugGroupCmd* cmd = commands->NextCommand<PopDebugGroupCmd>();
+                cmd->~PopDebugGroupCmd();
                 break;
-
+            }
             case Command::PushDebugGroup: {
                 PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
                 commands->NextData<char>(cmd->length + 1);
+                cmd->~PushDebugGroupCmd();
                 break;
             }
-
             case Command::ResolveQuerySet: {
-                commands->NextCommand<ResolveQuerySetCmd>();
+                ResolveQuerySetCmd* cmd = commands->NextCommand<ResolveQuerySetCmd>();
+                cmd->~ResolveQuerySetCmd();
                 break;
             }
-
-            case Command::SetComputePipeline:
-                commands->NextCommand<SetComputePipelineCmd>();
+            case Command::SetComputePipeline: {
+                SetComputePipelineCmd* cmd = commands->NextCommand<SetComputePipelineCmd>();
+                cmd->~SetComputePipelineCmd();
                 break;
-
-            case Command::SetRenderPipeline:
-                commands->NextCommand<SetRenderPipelineCmd>();
+            }
+            case Command::SetRenderPipeline: {
+                SetRenderPipelineCmd* cmd = commands->NextCommand<SetRenderPipelineCmd>();
+                cmd->~SetRenderPipelineCmd();
                 break;
-
-            case Command::SetStencilReference:
-                commands->NextCommand<SetStencilReferenceCmd>();
+            }
+            case Command::SetStencilReference: {
+                SetStencilReferenceCmd* cmd = commands->NextCommand<SetStencilReferenceCmd>();
+                cmd->~SetStencilReferenceCmd();
                 break;
-
-            case Command::SetViewport:
-                commands->NextCommand<SetViewportCmd>();
+            }
+            case Command::SetViewport: {
+                SetViewportCmd* cmd = commands->NextCommand<SetViewportCmd>();
+                cmd->~SetViewportCmd();
                 break;
-
-            case Command::SetScissorRect:
-                commands->NextCommand<SetScissorRectCmd>();
+            }
+            case Command::SetScissorRect: {
+                SetScissorRectCmd* cmd = commands->NextCommand<SetScissorRectCmd>();
+                cmd->~SetScissorRectCmd();
                 break;
-
-            case Command::SetBlendConstant:
-                commands->NextCommand<SetBlendConstantCmd>();
+            }
+            case Command::SetBlendConstant: {
+                SetBlendConstantCmd* cmd = commands->NextCommand<SetBlendConstantCmd>();
+                cmd->~SetBlendConstantCmd();
                 break;
-
+            }
             case Command::SetBindGroup: {
                 SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
                 if (cmd->dynamicOffsetCount > 0) {
                     commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
                 }
+                cmd->~SetBindGroupCmd();
                 break;
             }
-
-            case Command::SetIndexBuffer:
-                commands->NextCommand<SetIndexBufferCmd>();
+            case Command::SetIndexBuffer: {
+                SetIndexBufferCmd* cmd = commands->NextCommand<SetIndexBufferCmd>();
+                cmd->~SetIndexBufferCmd();
                 break;
-
+            }
             case Command::SetVertexBuffer: {
-                commands->NextCommand<SetVertexBufferCmd>();
+                SetVertexBufferCmd* cmd = commands->NextCommand<SetVertexBufferCmd>();
+                cmd->~SetVertexBufferCmd();
                 break;
             }
-
-            case Command::WriteBuffer:
-                commands->NextCommand<WriteBufferCmd>();
+            case Command::WriteBuffer: {
+                WriteBufferCmd* write = commands->NextCommand<WriteBufferCmd>();
+                commands->NextData<uint8_t>(write->size);
+                write->~WriteBufferCmd();
                 break;
-
+            }
             case Command::WriteTimestamp: {
-                commands->NextCommand<WriteTimestampCmd>();
+                WriteTimestampCmd* cmd = commands->NextCommand<WriteTimestampCmd>();
+                cmd->~WriteTimestampCmd();
                 break;
             }
         }
     }
 
+    commands->MakeEmptyAsDataWasDestroyed();
+}
+
+void SkipCommand(CommandIterator* commands, Command type) {
+    switch (type) {
+        case Command::BeginComputePass:
+            commands->NextCommand<BeginComputePassCmd>();
+            break;
+
+        case Command::BeginOcclusionQuery:
+            commands->NextCommand<BeginOcclusionQueryCmd>();
+            break;
+
+        case Command::BeginRenderPass:
+            commands->NextCommand<BeginRenderPassCmd>();
+            break;
+
+        case Command::CopyBufferToBuffer:
+            commands->NextCommand<CopyBufferToBufferCmd>();
+            break;
+
+        case Command::CopyBufferToTexture:
+            commands->NextCommand<CopyBufferToTextureCmd>();
+            break;
+
+        case Command::CopyTextureToBuffer:
+            commands->NextCommand<CopyTextureToBufferCmd>();
+            break;
+
+        case Command::CopyTextureToTexture:
+            commands->NextCommand<CopyTextureToTextureCmd>();
+            break;
+
+        case Command::Dispatch:
+            commands->NextCommand<DispatchCmd>();
+            break;
+
+        case Command::DispatchIndirect:
+            commands->NextCommand<DispatchIndirectCmd>();
+            break;
+
+        case Command::Draw:
+            commands->NextCommand<DrawCmd>();
+            break;
+
+        case Command::DrawIndexed:
+            commands->NextCommand<DrawIndexedCmd>();
+            break;
+
+        case Command::DrawIndirect:
+            commands->NextCommand<DrawIndirectCmd>();
+            break;
+
+        case Command::DrawIndexedIndirect:
+            commands->NextCommand<DrawIndexedIndirectCmd>();
+            break;
+
+        case Command::EndComputePass:
+            commands->NextCommand<EndComputePassCmd>();
+            break;
+
+        case Command::EndOcclusionQuery:
+            commands->NextCommand<EndOcclusionQueryCmd>();
+            break;
+
+        case Command::EndRenderPass:
+            commands->NextCommand<EndRenderPassCmd>();
+            break;
+
+        case Command::ExecuteBundles: {
+            auto* cmd = commands->NextCommand<ExecuteBundlesCmd>();
+            commands->NextData<Ref<RenderBundleBase>>(cmd->count);
+            break;
+        }
+
+        case Command::ClearBuffer:
+            commands->NextCommand<ClearBufferCmd>();
+            break;
+
+        case Command::InsertDebugMarker: {
+            InsertDebugMarkerCmd* cmd = commands->NextCommand<InsertDebugMarkerCmd>();
+            commands->NextData<char>(cmd->length + 1);
+            break;
+        }
+
+        case Command::PopDebugGroup:
+            commands->NextCommand<PopDebugGroupCmd>();
+            break;
+
+        case Command::PushDebugGroup: {
+            PushDebugGroupCmd* cmd = commands->NextCommand<PushDebugGroupCmd>();
+            commands->NextData<char>(cmd->length + 1);
+            break;
+        }
+
+        case Command::ResolveQuerySet: {
+            commands->NextCommand<ResolveQuerySetCmd>();
+            break;
+        }
+
+        case Command::SetComputePipeline:
+            commands->NextCommand<SetComputePipelineCmd>();
+            break;
+
+        case Command::SetRenderPipeline:
+            commands->NextCommand<SetRenderPipelineCmd>();
+            break;
+
+        case Command::SetStencilReference:
+            commands->NextCommand<SetStencilReferenceCmd>();
+            break;
+
+        case Command::SetViewport:
+            commands->NextCommand<SetViewportCmd>();
+            break;
+
+        case Command::SetScissorRect:
+            commands->NextCommand<SetScissorRectCmd>();
+            break;
+
+        case Command::SetBlendConstant:
+            commands->NextCommand<SetBlendConstantCmd>();
+            break;
+
+        case Command::SetBindGroup: {
+            SetBindGroupCmd* cmd = commands->NextCommand<SetBindGroupCmd>();
+            if (cmd->dynamicOffsetCount > 0) {
+                commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
+            }
+            break;
+        }
+
+        case Command::SetIndexBuffer:
+            commands->NextCommand<SetIndexBufferCmd>();
+            break;
+
+        case Command::SetVertexBuffer: {
+            commands->NextCommand<SetVertexBufferCmd>();
+            break;
+        }
+
+        case Command::WriteBuffer:
+            commands->NextCommand<WriteBufferCmd>();
+            break;
+
+        case Command::WriteTimestamp: {
+            commands->NextCommand<WriteTimestampCmd>();
+            break;
+        }
+    }
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/Commands.h b/src/dawn/native/Commands.h
index 7be232d..1c7024a 100644
--- a/src/dawn/native/Commands.h
+++ b/src/dawn/native/Commands.h
@@ -29,271 +29,271 @@
 
 namespace dawn::native {
 
-    // Definition of the commands that are present in the CommandIterator given by the
-    // CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
-    // dependencies: Ref<Object> needs Object to be defined.
+// Definition of the commands that are present in the CommandIterator given by the
+// CommandBufferBuilder. There are not defined in CommandBuffer.h to break some header
+// dependencies: Ref<Object> needs Object to be defined.
 
-    enum class Command {
-        BeginComputePass,
-        BeginOcclusionQuery,
-        BeginRenderPass,
-        ClearBuffer,
-        CopyBufferToBuffer,
-        CopyBufferToTexture,
-        CopyTextureToBuffer,
-        CopyTextureToTexture,
-        Dispatch,
-        DispatchIndirect,
-        Draw,
-        DrawIndexed,
-        DrawIndirect,
-        DrawIndexedIndirect,
-        EndComputePass,
-        EndOcclusionQuery,
-        EndRenderPass,
-        ExecuteBundles,
-        InsertDebugMarker,
-        PopDebugGroup,
-        PushDebugGroup,
-        ResolveQuerySet,
-        SetComputePipeline,
-        SetRenderPipeline,
-        SetStencilReference,
-        SetViewport,
-        SetScissorRect,
-        SetBlendConstant,
-        SetBindGroup,
-        SetIndexBuffer,
-        SetVertexBuffer,
-        WriteBuffer,
-        WriteTimestamp,
-    };
+enum class Command {
+    BeginComputePass,
+    BeginOcclusionQuery,
+    BeginRenderPass,
+    ClearBuffer,
+    CopyBufferToBuffer,
+    CopyBufferToTexture,
+    CopyTextureToBuffer,
+    CopyTextureToTexture,
+    Dispatch,
+    DispatchIndirect,
+    Draw,
+    DrawIndexed,
+    DrawIndirect,
+    DrawIndexedIndirect,
+    EndComputePass,
+    EndOcclusionQuery,
+    EndRenderPass,
+    ExecuteBundles,
+    InsertDebugMarker,
+    PopDebugGroup,
+    PushDebugGroup,
+    ResolveQuerySet,
+    SetComputePipeline,
+    SetRenderPipeline,
+    SetStencilReference,
+    SetViewport,
+    SetScissorRect,
+    SetBlendConstant,
+    SetBindGroup,
+    SetIndexBuffer,
+    SetVertexBuffer,
+    WriteBuffer,
+    WriteTimestamp,
+};
 
-    struct TimestampWrite {
-        Ref<QuerySetBase> querySet;
-        uint32_t queryIndex;
-    };
+struct TimestampWrite {
+    Ref<QuerySetBase> querySet;
+    uint32_t queryIndex;
+};
 
-    struct BeginComputePassCmd {
-        std::vector<TimestampWrite> timestampWrites;
-    };
+struct BeginComputePassCmd {
+    std::vector<TimestampWrite> timestampWrites;
+};
 
-    struct BeginOcclusionQueryCmd {
-        Ref<QuerySetBase> querySet;
-        uint32_t queryIndex;
-    };
+struct BeginOcclusionQueryCmd {
+    Ref<QuerySetBase> querySet;
+    uint32_t queryIndex;
+};
 
-    struct RenderPassColorAttachmentInfo {
-        Ref<TextureViewBase> view;
-        Ref<TextureViewBase> resolveTarget;
-        wgpu::LoadOp loadOp;
-        wgpu::StoreOp storeOp;
-        dawn::native::Color clearColor;
-    };
+struct RenderPassColorAttachmentInfo {
+    Ref<TextureViewBase> view;
+    Ref<TextureViewBase> resolveTarget;
+    wgpu::LoadOp loadOp;
+    wgpu::StoreOp storeOp;
+    dawn::native::Color clearColor;
+};
 
-    struct RenderPassDepthStencilAttachmentInfo {
-        Ref<TextureViewBase> view;
-        wgpu::LoadOp depthLoadOp;
-        wgpu::StoreOp depthStoreOp;
-        wgpu::LoadOp stencilLoadOp;
-        wgpu::StoreOp stencilStoreOp;
-        float clearDepth;
-        uint32_t clearStencil;
-        bool depthReadOnly;
-        bool stencilReadOnly;
-    };
+struct RenderPassDepthStencilAttachmentInfo {
+    Ref<TextureViewBase> view;
+    wgpu::LoadOp depthLoadOp;
+    wgpu::StoreOp depthStoreOp;
+    wgpu::LoadOp stencilLoadOp;
+    wgpu::StoreOp stencilStoreOp;
+    float clearDepth;
+    uint32_t clearStencil;
+    bool depthReadOnly;
+    bool stencilReadOnly;
+};
 
-    struct BeginRenderPassCmd {
-        Ref<AttachmentState> attachmentState;
-        ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
-            colorAttachments;
-        RenderPassDepthStencilAttachmentInfo depthStencilAttachment;
+struct BeginRenderPassCmd {
+    Ref<AttachmentState> attachmentState;
+    ityp::array<ColorAttachmentIndex, RenderPassColorAttachmentInfo, kMaxColorAttachments>
+        colorAttachments;
+    RenderPassDepthStencilAttachmentInfo depthStencilAttachment;
 
-        // Cache the width and height of all attachments for convenience
-        uint32_t width;
-        uint32_t height;
+    // Cache the width and height of all attachments for convenience
+    uint32_t width;
+    uint32_t height;
 
-        Ref<QuerySetBase> occlusionQuerySet;
-        std::vector<TimestampWrite> timestampWrites;
-    };
+    Ref<QuerySetBase> occlusionQuerySet;
+    std::vector<TimestampWrite> timestampWrites;
+};
 
-    struct BufferCopy {
-        Ref<BufferBase> buffer;
-        uint64_t offset;
-        uint32_t bytesPerRow;
-        uint32_t rowsPerImage;
-    };
+struct BufferCopy {
+    Ref<BufferBase> buffer;
+    uint64_t offset;
+    uint32_t bytesPerRow;
+    uint32_t rowsPerImage;
+};
 
-    struct TextureCopy {
-        Ref<TextureBase> texture;
-        uint32_t mipLevel;
-        Origin3D origin;  // Texels / array layer
-        Aspect aspect;
-    };
+struct TextureCopy {
+    Ref<TextureBase> texture;
+    uint32_t mipLevel;
+    Origin3D origin;  // Texels / array layer
+    Aspect aspect;
+};
 
-    struct CopyBufferToBufferCmd {
-        Ref<BufferBase> source;
-        uint64_t sourceOffset;
-        Ref<BufferBase> destination;
-        uint64_t destinationOffset;
-        uint64_t size;
-    };
+struct CopyBufferToBufferCmd {
+    Ref<BufferBase> source;
+    uint64_t sourceOffset;
+    Ref<BufferBase> destination;
+    uint64_t destinationOffset;
+    uint64_t size;
+};
 
-    struct CopyBufferToTextureCmd {
-        BufferCopy source;
-        TextureCopy destination;
-        Extent3D copySize;  // Texels
-    };
+struct CopyBufferToTextureCmd {
+    BufferCopy source;
+    TextureCopy destination;
+    Extent3D copySize;  // Texels
+};
 
-    struct CopyTextureToBufferCmd {
-        TextureCopy source;
-        BufferCopy destination;
-        Extent3D copySize;  // Texels
-    };
+struct CopyTextureToBufferCmd {
+    TextureCopy source;
+    BufferCopy destination;
+    Extent3D copySize;  // Texels
+};
 
-    struct CopyTextureToTextureCmd {
-        TextureCopy source;
-        TextureCopy destination;
-        Extent3D copySize;  // Texels
-    };
+struct CopyTextureToTextureCmd {
+    TextureCopy source;
+    TextureCopy destination;
+    Extent3D copySize;  // Texels
+};
 
-    struct DispatchCmd {
-        uint32_t x;
-        uint32_t y;
-        uint32_t z;
-    };
+struct DispatchCmd {
+    uint32_t x;
+    uint32_t y;
+    uint32_t z;
+};
 
-    struct DispatchIndirectCmd {
-        Ref<BufferBase> indirectBuffer;
-        uint64_t indirectOffset;
-    };
+struct DispatchIndirectCmd {
+    Ref<BufferBase> indirectBuffer;
+    uint64_t indirectOffset;
+};
 
-    struct DrawCmd {
-        uint32_t vertexCount;
-        uint32_t instanceCount;
-        uint32_t firstVertex;
-        uint32_t firstInstance;
-    };
+struct DrawCmd {
+    uint32_t vertexCount;
+    uint32_t instanceCount;
+    uint32_t firstVertex;
+    uint32_t firstInstance;
+};
 
-    struct DrawIndexedCmd {
-        uint32_t indexCount;
-        uint32_t instanceCount;
-        uint32_t firstIndex;
-        int32_t baseVertex;
-        uint32_t firstInstance;
-    };
+struct DrawIndexedCmd {
+    uint32_t indexCount;
+    uint32_t instanceCount;
+    uint32_t firstIndex;
+    int32_t baseVertex;
+    uint32_t firstInstance;
+};
 
-    struct DrawIndirectCmd {
-        Ref<BufferBase> indirectBuffer;
-        uint64_t indirectOffset;
-    };
+struct DrawIndirectCmd {
+    Ref<BufferBase> indirectBuffer;
+    uint64_t indirectOffset;
+};
 
-    struct DrawIndexedIndirectCmd : DrawIndirectCmd {};
+struct DrawIndexedIndirectCmd : DrawIndirectCmd {};
 
-    struct EndComputePassCmd {
-        std::vector<TimestampWrite> timestampWrites;
-    };
+struct EndComputePassCmd {
+    std::vector<TimestampWrite> timestampWrites;
+};
 
-    struct EndOcclusionQueryCmd {
-        Ref<QuerySetBase> querySet;
-        uint32_t queryIndex;
-    };
+struct EndOcclusionQueryCmd {
+    Ref<QuerySetBase> querySet;
+    uint32_t queryIndex;
+};
 
-    struct EndRenderPassCmd {
-        std::vector<TimestampWrite> timestampWrites;
-    };
+struct EndRenderPassCmd {
+    std::vector<TimestampWrite> timestampWrites;
+};
 
-    struct ExecuteBundlesCmd {
-        uint32_t count;
-    };
+struct ExecuteBundlesCmd {
+    uint32_t count;
+};
 
-    struct ClearBufferCmd {
-        Ref<BufferBase> buffer;
-        uint64_t offset;
-        uint64_t size;
-    };
+struct ClearBufferCmd {
+    Ref<BufferBase> buffer;
+    uint64_t offset;
+    uint64_t size;
+};
 
-    struct InsertDebugMarkerCmd {
-        uint32_t length;
-    };
+struct InsertDebugMarkerCmd {
+    uint32_t length;
+};
 
-    struct PopDebugGroupCmd {};
+struct PopDebugGroupCmd {};
 
-    struct PushDebugGroupCmd {
-        uint32_t length;
-    };
+struct PushDebugGroupCmd {
+    uint32_t length;
+};
 
-    struct ResolveQuerySetCmd {
-        Ref<QuerySetBase> querySet;
-        uint32_t firstQuery;
-        uint32_t queryCount;
-        Ref<BufferBase> destination;
-        uint64_t destinationOffset;
-    };
+struct ResolveQuerySetCmd {
+    Ref<QuerySetBase> querySet;
+    uint32_t firstQuery;
+    uint32_t queryCount;
+    Ref<BufferBase> destination;
+    uint64_t destinationOffset;
+};
 
-    struct SetComputePipelineCmd {
-        Ref<ComputePipelineBase> pipeline;
-    };
+struct SetComputePipelineCmd {
+    Ref<ComputePipelineBase> pipeline;
+};
 
-    struct SetRenderPipelineCmd {
-        Ref<RenderPipelineBase> pipeline;
-    };
+struct SetRenderPipelineCmd {
+    Ref<RenderPipelineBase> pipeline;
+};
 
-    struct SetStencilReferenceCmd {
-        uint32_t reference;
-    };
+struct SetStencilReferenceCmd {
+    uint32_t reference;
+};
 
-    struct SetViewportCmd {
-        float x, y, width, height, minDepth, maxDepth;
-    };
+struct SetViewportCmd {
+    float x, y, width, height, minDepth, maxDepth;
+};
 
-    struct SetScissorRectCmd {
-        uint32_t x, y, width, height;
-    };
+struct SetScissorRectCmd {
+    uint32_t x, y, width, height;
+};
 
-    struct SetBlendConstantCmd {
-        Color color;
-    };
+struct SetBlendConstantCmd {
+    Color color;
+};
 
-    struct SetBindGroupCmd {
-        BindGroupIndex index;
-        Ref<BindGroupBase> group;
-        uint32_t dynamicOffsetCount;
-    };
+struct SetBindGroupCmd {
+    BindGroupIndex index;
+    Ref<BindGroupBase> group;
+    uint32_t dynamicOffsetCount;
+};
 
-    struct SetIndexBufferCmd {
-        Ref<BufferBase> buffer;
-        wgpu::IndexFormat format;
-        uint64_t offset;
-        uint64_t size;
-    };
+struct SetIndexBufferCmd {
+    Ref<BufferBase> buffer;
+    wgpu::IndexFormat format;
+    uint64_t offset;
+    uint64_t size;
+};
 
-    struct SetVertexBufferCmd {
-        VertexBufferSlot slot;
-        Ref<BufferBase> buffer;
-        uint64_t offset;
-        uint64_t size;
-    };
+struct SetVertexBufferCmd {
+    VertexBufferSlot slot;
+    Ref<BufferBase> buffer;
+    uint64_t offset;
+    uint64_t size;
+};
 
-    struct WriteBufferCmd {
-        Ref<BufferBase> buffer;
-        uint64_t offset;
-        uint64_t size;
-    };
+struct WriteBufferCmd {
+    Ref<BufferBase> buffer;
+    uint64_t offset;
+    uint64_t size;
+};
 
-    struct WriteTimestampCmd {
-        Ref<QuerySetBase> querySet;
-        uint32_t queryIndex;
-    };
+struct WriteTimestampCmd {
+    Ref<QuerySetBase> querySet;
+    uint32_t queryIndex;
+};
 
-    // This needs to be called before the CommandIterator is freed so that the Ref<> present in
-    // the commands have a chance to run their destructor and remove internal references.
-    class CommandIterator;
-    void FreeCommands(CommandIterator* commands);
+// This needs to be called before the CommandIterator is freed so that the Ref<> present in
+// the commands have a chance to run their destructor and remove internal references.
+class CommandIterator;
+void FreeCommands(CommandIterator* commands);
 
-    // Helper function to allow skipping over a command when it is unimplemented, while still
-    // consuming the correct amount of data from the command iterator.
-    void SkipCommand(CommandIterator* commands, Command type);
+// Helper function to allow skipping over a command when it is unimplemented, while still
+// consuming the correct amount of data from the command iterator.
+void SkipCommand(CommandIterator* commands, Command type);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CompilationMessages.cpp b/src/dawn/native/CompilationMessages.cpp
index ec9c245..0eae1a6 100644
--- a/src/dawn/native/CompilationMessages.cpp
+++ b/src/dawn/native/CompilationMessages.cpp
@@ -21,181 +21,181 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
-            switch (severity) {
-                case tint::diag::Severity::Note:
-                    return WGPUCompilationMessageType_Info;
-                case tint::diag::Severity::Warning:
-                    return WGPUCompilationMessageType_Warning;
-                default:
-                    return WGPUCompilationMessageType_Error;
+WGPUCompilationMessageType tintSeverityToMessageType(tint::diag::Severity severity) {
+    switch (severity) {
+        case tint::diag::Severity::Note:
+            return WGPUCompilationMessageType_Info;
+        case tint::diag::Severity::Warning:
+            return WGPUCompilationMessageType_Warning;
+        default:
+            return WGPUCompilationMessageType_Error;
+    }
+}
+
+}  // anonymous namespace
+
+OwnedCompilationMessages::OwnedCompilationMessages() {
+    mCompilationInfo.nextInChain = 0;
+    mCompilationInfo.messageCount = 0;
+    mCompilationInfo.messages = nullptr;
+}
+
+void OwnedCompilationMessages::AddMessageForTesting(std::string message,
+                                                    wgpu::CompilationMessageType type,
+                                                    uint64_t lineNum,
+                                                    uint64_t linePos,
+                                                    uint64_t offset,
+                                                    uint64_t length) {
+    // Cannot add messages after GetCompilationInfo has been called.
+    ASSERT(mCompilationInfo.messages == nullptr);
+
+    mMessageStrings.push_back(message);
+    mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type), lineNum,
+                         linePos, offset, length});
+}
+
+void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
+    // Cannot add messages after GetCompilationInfo has been called.
+    ASSERT(mCompilationInfo.messages == nullptr);
+
+    // Tint line and column values are 1-based.
+    uint64_t lineNum = diagnostic.source.range.begin.line;
+    uint64_t linePos = diagnostic.source.range.begin.column;
+    // The offset is 0-based.
+    uint64_t offset = 0;
+    uint64_t length = 0;
+
+    if (lineNum && linePos && diagnostic.source.file) {
+        const auto& lines = diagnostic.source.file->content.lines;
+        size_t i = 0;
+        // To find the offset of the message position, loop through each of the first lineNum-1
+        // lines and add it's length (+1 to account for the line break) to the offset.
+        for (; i < lineNum - 1; ++i) {
+            offset += lines[i].length() + 1;
+        }
+
+        // If the end line is on a different line from the beginning line, add the length of the
+        // lines in between to the ending offset.
+        uint64_t endLineNum = diagnostic.source.range.end.line;
+        uint64_t endLinePos = diagnostic.source.range.end.column;
+
+        // If the range has a valid start but the end it not specified, clamp it to the start.
+        if (endLineNum == 0 || endLinePos == 0) {
+            endLineNum = lineNum;
+            endLinePos = linePos;
+        }
+
+        // Negative ranges aren't allowed
+        ASSERT(endLineNum >= lineNum);
+
+        uint64_t endOffset = offset;
+        for (; i < endLineNum - 1; ++i) {
+            endOffset += lines[i].length() + 1;
+        }
+
+        // Add the line positions to the offset and endOffset to get their final positions
+        // within the code string.
+        offset += linePos - 1;
+        endOffset += endLinePos - 1;
+
+        // Negative ranges aren't allowed
+        ASSERT(endOffset >= offset);
+
+        // The length of the message is the difference between the starting offset and the
+        // ending offset.
+        length = endOffset - offset;
+    }
+
+    if (diagnostic.code) {
+        mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
+    } else {
+        mMessageStrings.push_back(diagnostic.message);
+    }
+
+    mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity), lineNum,
+                         linePos, offset, length});
+}
+
+void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
+    // Cannot add messages after GetCompilationInfo has been called.
+    ASSERT(mCompilationInfo.messages == nullptr);
+
+    for (const auto& diag : diagnostics) {
+        AddMessage(diag);
+    }
+
+    AddFormattedTintMessages(diagnostics);
+}
+
+void OwnedCompilationMessages::ClearMessages() {
+    // Cannot clear messages after GetCompilationInfo has been called.
+    ASSERT(mCompilationInfo.messages == nullptr);
+
+    mMessageStrings.clear();
+    mMessages.clear();
+}
+
+const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
+    mCompilationInfo.messageCount = mMessages.size();
+    mCompilationInfo.messages = mMessages.data();
+
+    // Ensure every message points at the correct message string. Cannot do this earlier, since
+    // vector reallocations may move the pointers around.
+    for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
+        WGPUCompilationMessage& message = mMessages[i];
+        std::string& messageString = mMessageStrings[i];
+        message.message = messageString.c_str();
+    }
+
+    return &mCompilationInfo;
+}
+
+const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
+    return mFormattedTintMessages;
+}
+
+void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
+    tint::diag::List messageList;
+    size_t warningCount = 0;
+    size_t errorCount = 0;
+    for (auto& diag : diagnostics) {
+        switch (diag.severity) {
+            case (tint::diag::Severity::Fatal):
+            case (tint::diag::Severity::Error):
+            case (tint::diag::Severity::InternalCompilerError): {
+                errorCount++;
+                messageList.add(tint::diag::Diagnostic(diag));
+                break;
             }
-        }
-
-    }  // anonymous namespace
-
-    OwnedCompilationMessages::OwnedCompilationMessages() {
-        mCompilationInfo.nextInChain = 0;
-        mCompilationInfo.messageCount = 0;
-        mCompilationInfo.messages = nullptr;
-    }
-
-    void OwnedCompilationMessages::AddMessageForTesting(std::string message,
-                                                        wgpu::CompilationMessageType type,
-                                                        uint64_t lineNum,
-                                                        uint64_t linePos,
-                                                        uint64_t offset,
-                                                        uint64_t length) {
-        // Cannot add messages after GetCompilationInfo has been called.
-        ASSERT(mCompilationInfo.messages == nullptr);
-
-        mMessageStrings.push_back(message);
-        mMessages.push_back({nullptr, nullptr, static_cast<WGPUCompilationMessageType>(type),
-                             lineNum, linePos, offset, length});
-    }
-
-    void OwnedCompilationMessages::AddMessage(const tint::diag::Diagnostic& diagnostic) {
-        // Cannot add messages after GetCompilationInfo has been called.
-        ASSERT(mCompilationInfo.messages == nullptr);
-
-        // Tint line and column values are 1-based.
-        uint64_t lineNum = diagnostic.source.range.begin.line;
-        uint64_t linePos = diagnostic.source.range.begin.column;
-        // The offset is 0-based.
-        uint64_t offset = 0;
-        uint64_t length = 0;
-
-        if (lineNum && linePos && diagnostic.source.file) {
-            const auto& lines = diagnostic.source.file->content.lines;
-            size_t i = 0;
-            // To find the offset of the message position, loop through each of the first lineNum-1
-            // lines and add it's length (+1 to account for the line break) to the offset.
-            for (; i < lineNum - 1; ++i) {
-                offset += lines[i].length() + 1;
+            case (tint::diag::Severity::Warning): {
+                warningCount++;
+                messageList.add(tint::diag::Diagnostic(diag));
+                break;
             }
-
-            // If the end line is on a different line from the beginning line, add the length of the
-            // lines in between to the ending offset.
-            uint64_t endLineNum = diagnostic.source.range.end.line;
-            uint64_t endLinePos = diagnostic.source.range.end.column;
-
-            // If the range has a valid start but the end it not specified, clamp it to the start.
-            if (endLineNum == 0 || endLinePos == 0) {
-                endLineNum = lineNum;
-                endLinePos = linePos;
-            }
-
-            // Negative ranges aren't allowed
-            ASSERT(endLineNum >= lineNum);
-
-            uint64_t endOffset = offset;
-            for (; i < endLineNum - 1; ++i) {
-                endOffset += lines[i].length() + 1;
-            }
-
-            // Add the line positions to the offset and endOffset to get their final positions
-            // within the code string.
-            offset += linePos - 1;
-            endOffset += endLinePos - 1;
-
-            // Negative ranges aren't allowed
-            ASSERT(endOffset >= offset);
-
-            // The length of the message is the difference between the starting offset and the
-            // ending offset.
-            length = endOffset - offset;
+            default:
+                break;
         }
-
-        if (diagnostic.code) {
-            mMessageStrings.push_back(std::string(diagnostic.code) + ": " + diagnostic.message);
-        } else {
-            mMessageStrings.push_back(diagnostic.message);
-        }
-
-        mMessages.push_back({nullptr, nullptr, tintSeverityToMessageType(diagnostic.severity),
-                             lineNum, linePos, offset, length});
     }
-
-    void OwnedCompilationMessages::AddMessages(const tint::diag::List& diagnostics) {
-        // Cannot add messages after GetCompilationInfo has been called.
-        ASSERT(mCompilationInfo.messages == nullptr);
-
-        for (const auto& diag : diagnostics) {
-            AddMessage(diag);
-        }
-
-        AddFormattedTintMessages(diagnostics);
+    if (errorCount == 0 && warningCount == 0) {
+        return;
     }
-
-    void OwnedCompilationMessages::ClearMessages() {
-        // Cannot clear messages after GetCompilationInfo has been called.
-        ASSERT(mCompilationInfo.messages == nullptr);
-
-        mMessageStrings.clear();
-        mMessages.clear();
-    }
-
-    const WGPUCompilationInfo* OwnedCompilationMessages::GetCompilationInfo() {
-        mCompilationInfo.messageCount = mMessages.size();
-        mCompilationInfo.messages = mMessages.data();
-
-        // Ensure every message points at the correct message string. Cannot do this earlier, since
-        // vector reallocations may move the pointers around.
-        for (size_t i = 0; i < mCompilationInfo.messageCount; ++i) {
-            WGPUCompilationMessage& message = mMessages[i];
-            std::string& messageString = mMessageStrings[i];
-            message.message = messageString.c_str();
-        }
-
-        return &mCompilationInfo;
-    }
-
-    const std::vector<std::string>& OwnedCompilationMessages::GetFormattedTintMessages() {
-        return mFormattedTintMessages;
-    }
-
-    void OwnedCompilationMessages::AddFormattedTintMessages(const tint::diag::List& diagnostics) {
-        tint::diag::List messageList;
-        size_t warningCount = 0;
-        size_t errorCount = 0;
-        for (auto& diag : diagnostics) {
-            switch (diag.severity) {
-                case (tint::diag::Severity::Fatal):
-                case (tint::diag::Severity::Error):
-                case (tint::diag::Severity::InternalCompilerError): {
-                    errorCount++;
-                    messageList.add(tint::diag::Diagnostic(diag));
-                    break;
-                }
-                case (tint::diag::Severity::Warning): {
-                    warningCount++;
-                    messageList.add(tint::diag::Diagnostic(diag));
-                    break;
-                }
-                default:
-                    break;
-            }
-        }
-        if (errorCount == 0 && warningCount == 0) {
-            return;
-        }
-        tint::diag::Formatter::Style style;
-        style.print_newline_at_end = false;
-        std::ostringstream t;
-        if (errorCount > 0) {
-            t << errorCount << " error(s) ";
-            if (warningCount > 0) {
-                t << "and ";
-            }
-        }
+    tint::diag::Formatter::Style style;
+    style.print_newline_at_end = false;
+    std::ostringstream t;
+    if (errorCount > 0) {
+        t << errorCount << " error(s) ";
         if (warningCount > 0) {
-            t << warningCount << " warning(s) ";
+            t << "and ";
         }
-        t << "generated while compiling the shader:" << std::endl
-          << tint::diag::Formatter{style}.format(messageList);
-        mFormattedTintMessages.push_back(t.str());
     }
+    if (warningCount > 0) {
+        t << warningCount << " warning(s) ";
+    }
+    t << "generated while compiling the shader:" << std::endl
+      << tint::diag::Formatter{style}.format(messageList);
+    mFormattedTintMessages.push_back(t.str());
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/CompilationMessages.h b/src/dawn/native/CompilationMessages.h
index e8c7dbc..188d1ca 100644
--- a/src/dawn/native/CompilationMessages.h
+++ b/src/dawn/native/CompilationMessages.h
@@ -23,39 +23,39 @@
 #include "dawn/common/NonCopyable.h"
 
 namespace tint::diag {
-    class Diagnostic;
-    class List;
+class Diagnostic;
+class List;
 }  // namespace tint::diag
 
 namespace dawn::native {
 
-    class OwnedCompilationMessages : public NonCopyable {
-      public:
-        OwnedCompilationMessages();
-        ~OwnedCompilationMessages() = default;
+class OwnedCompilationMessages : public NonCopyable {
+  public:
+    OwnedCompilationMessages();
+    ~OwnedCompilationMessages() = default;
 
-        void AddMessageForTesting(
-            std::string message,
-            wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
-            uint64_t lineNum = 0,
-            uint64_t linePos = 0,
-            uint64_t offset = 0,
-            uint64_t length = 0);
-        void AddMessages(const tint::diag::List& diagnostics);
-        void ClearMessages();
+    void AddMessageForTesting(
+        std::string message,
+        wgpu::CompilationMessageType type = wgpu::CompilationMessageType::Info,
+        uint64_t lineNum = 0,
+        uint64_t linePos = 0,
+        uint64_t offset = 0,
+        uint64_t length = 0);
+    void AddMessages(const tint::diag::List& diagnostics);
+    void ClearMessages();
 
-        const WGPUCompilationInfo* GetCompilationInfo();
-        const std::vector<std::string>& GetFormattedTintMessages();
+    const WGPUCompilationInfo* GetCompilationInfo();
+    const std::vector<std::string>& GetFormattedTintMessages();
 
-      private:
-        void AddMessage(const tint::diag::Diagnostic& diagnostic);
-        void AddFormattedTintMessages(const tint::diag::List& diagnostics);
+  private:
+    void AddMessage(const tint::diag::Diagnostic& diagnostic);
+    void AddFormattedTintMessages(const tint::diag::List& diagnostics);
 
-        WGPUCompilationInfo mCompilationInfo;
-        std::vector<std::string> mMessageStrings;
-        std::vector<WGPUCompilationMessage> mMessages;
-        std::vector<std::string> mFormattedTintMessages;
-    };
+    WGPUCompilationInfo mCompilationInfo;
+    std::vector<std::string> mMessageStrings;
+    std::vector<WGPUCompilationMessage> mMessages;
+    std::vector<std::string> mFormattedTintMessages;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ComputePassEncoder.cpp b/src/dawn/native/ComputePassEncoder.cpp
index fce217d..aeaa875 100644
--- a/src/dawn/native/ComputePassEncoder.cpp
+++ b/src/dawn/native/ComputePassEncoder.cpp
@@ -30,21 +30,21 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        ResultOrError<ComputePipelineBase*> GetOrCreateIndirectDispatchValidationPipeline(
-            DeviceBase* device) {
-            InternalPipelineStore* store = device->GetInternalPipelineStore();
+ResultOrError<ComputePipelineBase*> GetOrCreateIndirectDispatchValidationPipeline(
+    DeviceBase* device) {
+    InternalPipelineStore* store = device->GetInternalPipelineStore();
 
-            if (store->dispatchIndirectValidationPipeline != nullptr) {
-                return store->dispatchIndirectValidationPipeline.Get();
-            }
+    if (store->dispatchIndirectValidationPipeline != nullptr) {
+        return store->dispatchIndirectValidationPipeline.Get();
+    }
 
-            // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this
-            // shader in various failure modes.
-            // Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable.
-            Ref<ShaderModuleBase> shaderModule;
-            DAWN_TRY_ASSIGN(shaderModule, utils::CreateShaderModule(device, R"(
+    // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this
+    // shader in various failure modes.
+    // Type 'bool' cannot be used in storage class 'uniform' as it is non-host-shareable.
+    Ref<ShaderModuleBase> shaderModule;
+    DAWN_TRY_ASSIGN(shaderModule, utils::CreateShaderModule(device, R"(
                 struct UniformParams {
                     maxComputeWorkgroupsPerDimension: u32;
                     clientOffsetInU32: u32;
@@ -81,421 +81,412 @@
                 }
             )"));
 
-            Ref<BindGroupLayoutBase> bindGroupLayout;
-            DAWN_TRY_ASSIGN(
-                bindGroupLayout,
-                utils::MakeBindGroupLayout(
-                    device,
-                    {
-                        {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
-                        {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
-                        {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
-                    },
-                    /* allowInternalBinding */ true));
+    Ref<BindGroupLayoutBase> bindGroupLayout;
+    DAWN_TRY_ASSIGN(bindGroupLayout,
+                    utils::MakeBindGroupLayout(
+                        device,
+                        {
+                            {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                            {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+                            {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                        },
+                        /* allowInternalBinding */ true));
 
-            Ref<PipelineLayoutBase> pipelineLayout;
-            DAWN_TRY_ASSIGN(pipelineLayout,
-                            utils::MakeBasicPipelineLayout(device, bindGroupLayout));
+    Ref<PipelineLayoutBase> pipelineLayout;
+    DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));
 
-            ComputePipelineDescriptor computePipelineDescriptor = {};
-            computePipelineDescriptor.layout = pipelineLayout.Get();
-            computePipelineDescriptor.compute.module = shaderModule.Get();
-            computePipelineDescriptor.compute.entryPoint = "main";
+    ComputePipelineDescriptor computePipelineDescriptor = {};
+    computePipelineDescriptor.layout = pipelineLayout.Get();
+    computePipelineDescriptor.compute.module = shaderModule.Get();
+    computePipelineDescriptor.compute.entryPoint = "main";
 
-            DAWN_TRY_ASSIGN(store->dispatchIndirectValidationPipeline,
-                            device->CreateComputePipeline(&computePipelineDescriptor));
+    DAWN_TRY_ASSIGN(store->dispatchIndirectValidationPipeline,
+                    device->CreateComputePipeline(&computePipelineDescriptor));
 
-            return store->dispatchIndirectValidationPipeline.Get();
-        }
+    return store->dispatchIndirectValidationPipeline.Get();
+}
 
-    }  // namespace
+}  // namespace
 
-    ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
-                                           const ComputePassDescriptor* descriptor,
-                                           CommandEncoder* commandEncoder,
-                                           EncodingContext* encodingContext,
-                                           std::vector<TimestampWrite> timestampWritesAtEnd)
-        : ProgrammableEncoder(device, descriptor->label, encodingContext),
-          mCommandEncoder(commandEncoder),
-          mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
-        TrackInDevice();
-    }
+ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+                                       const ComputePassDescriptor* descriptor,
+                                       CommandEncoder* commandEncoder,
+                                       EncodingContext* encodingContext,
+                                       std::vector<TimestampWrite> timestampWritesAtEnd)
+    : ProgrammableEncoder(device, descriptor->label, encodingContext),
+      mCommandEncoder(commandEncoder),
+      mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
+    TrackInDevice();
+}
 
-    // static
-    Ref<ComputePassEncoder> ComputePassEncoder::Create(
-        DeviceBase* device,
-        const ComputePassDescriptor* descriptor,
-        CommandEncoder* commandEncoder,
-        EncodingContext* encodingContext,
-        std::vector<TimestampWrite> timestampWritesAtEnd) {
-        return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder,
-                                                 encodingContext, std::move(timestampWritesAtEnd)));
-    }
+// static
+Ref<ComputePassEncoder> ComputePassEncoder::Create(
+    DeviceBase* device,
+    const ComputePassDescriptor* descriptor,
+    CommandEncoder* commandEncoder,
+    EncodingContext* encodingContext,
+    std::vector<TimestampWrite> timestampWritesAtEnd) {
+    return AcquireRef(new ComputePassEncoder(device, descriptor, commandEncoder, encodingContext,
+                                             std::move(timestampWritesAtEnd)));
+}
 
-    ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
-                                           CommandEncoder* commandEncoder,
-                                           EncodingContext* encodingContext,
-                                           ErrorTag errorTag)
-        : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
-    }
+ComputePassEncoder::ComputePassEncoder(DeviceBase* device,
+                                       CommandEncoder* commandEncoder,
+                                       EncodingContext* encodingContext,
+                                       ErrorTag errorTag)
+    : ProgrammableEncoder(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {}
 
-    // static
-    Ref<ComputePassEncoder> ComputePassEncoder::MakeError(DeviceBase* device,
-                                                          CommandEncoder* commandEncoder,
-                                                          EncodingContext* encodingContext) {
-        return AcquireRef(
-            new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
-    }
+// static
+Ref<ComputePassEncoder> ComputePassEncoder::MakeError(DeviceBase* device,
+                                                      CommandEncoder* commandEncoder,
+                                                      EncodingContext* encodingContext) {
+    return AcquireRef(
+        new ComputePassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
+}
 
-    void ComputePassEncoder::DestroyImpl() {
-        // Ensure that the pass has exited. This is done for passes only since validation requires
-        // they exit before destruction while bundles do not.
-        mEncodingContext->EnsurePassExited(this);
-    }
+void ComputePassEncoder::DestroyImpl() {
+    // Ensure that the pass has exited. This is done for passes only since validation requires
+    // they exit before destruction while bundles do not.
+    mEncodingContext->EnsurePassExited(this);
+}
 
-    ObjectType ComputePassEncoder::GetType() const {
-        return ObjectType::ComputePassEncoder;
-    }
+ObjectType ComputePassEncoder::GetType() const {
+    return ObjectType::ComputePassEncoder;
+}
 
-    void ComputePassEncoder::APIEnd() {
-        if (mEncodingContext->TryEncode(
-                this,
-                [&](CommandAllocator* allocator) -> MaybeError {
-                    if (IsValidationEnabled()) {
-                        DAWN_TRY(ValidateProgrammableEncoderEnd());
-                    }
-
-                    EndComputePassCmd* cmd =
-                        allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
-                    // The query availability has already been updated at the beginning of compute
-                    // pass, and no need to do update here.
-                    cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
-
-                    return {};
-                },
-                "encoding %s.End().", this)) {
-            mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
-        }
-    }
-
-    void ComputePassEncoder::APIEndPass() {
-        GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
-        APIEnd();
-    }
-
-    void ComputePassEncoder::APIDispatch(uint32_t workgroupCountX,
-                                         uint32_t workgroupCountY,
-                                         uint32_t workgroupCountZ) {
-        GetDevice()->EmitDeprecationWarning(
-            "dispatch() has been deprecated. Use dispatchWorkgroups() instead.");
-        APIDispatchWorkgroups(workgroupCountX, workgroupCountY, workgroupCountZ);
-    }
-
-    void ComputePassEncoder::APIDispatchWorkgroups(uint32_t workgroupCountX,
-                                                   uint32_t workgroupCountY,
-                                                   uint32_t workgroupCountZ) {
-        mEncodingContext->TryEncode(
+void ComputePassEncoder::APIEnd() {
+    if (mEncodingContext->TryEncode(
             this,
             [&](CommandAllocator* allocator) -> MaybeError {
                 if (IsValidationEnabled()) {
-                    DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-
-                    uint32_t workgroupsPerDimension =
-                        GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
-
-                    DAWN_INVALID_IF(workgroupCountX > workgroupsPerDimension,
-                                    "Dispatch workgroup count X (%u) exceeds max compute "
-                                    "workgroups per dimension (%u).",
-                                    workgroupCountX, workgroupsPerDimension);
-
-                    DAWN_INVALID_IF(workgroupCountY > workgroupsPerDimension,
-                                    "Dispatch workgroup count Y (%u) exceeds max compute "
-                                    "workgroups per dimension (%u).",
-                                    workgroupCountY, workgroupsPerDimension);
-
-                    DAWN_INVALID_IF(workgroupCountZ > workgroupsPerDimension,
-                                    "Dispatch workgroup count Z (%u) exceeds max compute "
-                                    "workgroups per dimension (%u).",
-                                    workgroupCountZ, workgroupsPerDimension);
+                    DAWN_TRY(ValidateProgrammableEncoderEnd());
                 }
 
-                // Record the synchronization scope for Dispatch, which is just the current
-                // bindgroups.
-                AddDispatchSyncScope();
-
-                DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
-                dispatch->x = workgroupCountX;
-                dispatch->y = workgroupCountY;
-                dispatch->z = workgroupCountZ;
+                EndComputePassCmd* cmd =
+                    allocator->Allocate<EndComputePassCmd>(Command::EndComputePass);
+                // The query availability has already been updated at the beginning of compute
+                // pass, and no need to do update here.
+                cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
 
                 return {};
             },
-            "encoding %s.DispatchWorkgroups(%u, %u, %u).", this, workgroupCountX, workgroupCountY,
-            workgroupCountZ);
+            "encoding %s.End().", this)) {
+        mEncodingContext->ExitComputePass(this, mUsageTracker.AcquireResourceUsage());
+    }
+}
+
+void ComputePassEncoder::APIEndPass() {
+    GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+    APIEnd();
+}
+
+void ComputePassEncoder::APIDispatch(uint32_t workgroupCountX,
+                                     uint32_t workgroupCountY,
+                                     uint32_t workgroupCountZ) {
+    GetDevice()->EmitDeprecationWarning(
+        "dispatch() has been deprecated. Use dispatchWorkgroups() instead.");
+    APIDispatchWorkgroups(workgroupCountX, workgroupCountY, workgroupCountZ);
+}
+
+void ComputePassEncoder::APIDispatchWorkgroups(uint32_t workgroupCountX,
+                                               uint32_t workgroupCountY,
+                                               uint32_t workgroupCountZ) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+                uint32_t workgroupsPerDimension =
+                    GetDevice()->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+
+                DAWN_INVALID_IF(workgroupCountX > workgroupsPerDimension,
+                                "Dispatch workgroup count X (%u) exceeds max compute "
+                                "workgroups per dimension (%u).",
+                                workgroupCountX, workgroupsPerDimension);
+
+                DAWN_INVALID_IF(workgroupCountY > workgroupsPerDimension,
+                                "Dispatch workgroup count Y (%u) exceeds max compute "
+                                "workgroups per dimension (%u).",
+                                workgroupCountY, workgroupsPerDimension);
+
+                DAWN_INVALID_IF(workgroupCountZ > workgroupsPerDimension,
+                                "Dispatch workgroup count Z (%u) exceeds max compute "
+                                "workgroups per dimension (%u).",
+                                workgroupCountZ, workgroupsPerDimension);
+            }
+
+            // Record the synchronization scope for Dispatch, which is just the current
+            // bindgroups.
+            AddDispatchSyncScope();
+
+            DispatchCmd* dispatch = allocator->Allocate<DispatchCmd>(Command::Dispatch);
+            dispatch->x = workgroupCountX;
+            dispatch->y = workgroupCountY;
+            dispatch->z = workgroupCountZ;
+
+            return {};
+        },
+        "encoding %s.DispatchWorkgroups(%u, %u, %u).", this, workgroupCountX, workgroupCountY,
+        workgroupCountZ);
+}
+
+ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
+ComputePassEncoder::TransformIndirectDispatchBuffer(Ref<BufferBase> indirectBuffer,
+                                                    uint64_t indirectOffset) {
+    DeviceBase* device = GetDevice();
+
+    const bool shouldDuplicateNumWorkgroups =
+        device->ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+            mCommandBufferState.GetComputePipeline());
+    if (!IsValidationEnabled() && !shouldDuplicateNumWorkgroups) {
+        return std::make_pair(indirectBuffer, indirectOffset);
     }
 
-    ResultOrError<std::pair<Ref<BufferBase>, uint64_t>>
-    ComputePassEncoder::TransformIndirectDispatchBuffer(Ref<BufferBase> indirectBuffer,
-                                                        uint64_t indirectOffset) {
-        DeviceBase* device = GetDevice();
+    // Save the previous command buffer state so it can be restored after the
+    // validation inserts additional commands.
+    CommandBufferStateTracker previousState = mCommandBufferState;
 
-        const bool shouldDuplicateNumWorkgroups =
-            device->ShouldDuplicateNumWorkgroupsForDispatchIndirect(
-                mCommandBufferState.GetComputePipeline());
-        if (!IsValidationEnabled() && !shouldDuplicateNumWorkgroups) {
-            return std::make_pair(indirectBuffer, indirectOffset);
-        }
+    auto* const store = device->GetInternalPipelineStore();
 
-        // Save the previous command buffer state so it can be restored after the
-        // validation inserts additional commands.
-        CommandBufferStateTracker previousState = mCommandBufferState;
+    Ref<ComputePipelineBase> validationPipeline;
+    DAWN_TRY_ASSIGN(validationPipeline, GetOrCreateIndirectDispatchValidationPipeline(device));
 
-        auto* const store = device->GetInternalPipelineStore();
+    Ref<BindGroupLayoutBase> layout;
+    DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
 
-        Ref<ComputePipelineBase> validationPipeline;
-        DAWN_TRY_ASSIGN(validationPipeline, GetOrCreateIndirectDispatchValidationPipeline(device));
+    uint32_t storageBufferOffsetAlignment = device->GetLimits().v1.minStorageBufferOffsetAlignment;
 
-        Ref<BindGroupLayoutBase> layout;
-        DAWN_TRY_ASSIGN(layout, validationPipeline->GetBindGroupLayout(0));
+    // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
+    const uint32_t clientOffsetFromAlignedBoundary = indirectOffset % storageBufferOffsetAlignment;
+    const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
+    const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
 
-        uint32_t storageBufferOffsetAlignment =
-            device->GetLimits().v1.minStorageBufferOffsetAlignment;
+    // Let the size of the binding be the additional offset, plus the size.
+    const uint64_t clientIndirectBindingSize =
+        kDispatchIndirectSize + clientOffsetFromAlignedBoundary;
 
-        // Let the offset be the indirectOffset, aligned down to |storageBufferOffsetAlignment|.
-        const uint32_t clientOffsetFromAlignedBoundary =
-            indirectOffset % storageBufferOffsetAlignment;
-        const uint64_t clientOffsetAlignedDown = indirectOffset - clientOffsetFromAlignedBoundary;
-        const uint64_t clientIndirectBindingOffset = clientOffsetAlignedDown;
+    // Neither 'enableValidation' nor 'duplicateNumWorkgroups' can be declared as 'bool' as
+    // currently in WGSL type 'bool' cannot be used in storage class 'uniform' as 'it is
+    // non-host-shareable'.
+    struct UniformParams {
+        uint32_t maxComputeWorkgroupsPerDimension;
+        uint32_t clientOffsetInU32;
+        uint32_t enableValidation;
+        uint32_t duplicateNumWorkgroups;
+    };
 
-        // Let the size of the binding be the additional offset, plus the size.
-        const uint64_t clientIndirectBindingSize =
-            kDispatchIndirectSize + clientOffsetFromAlignedBoundary;
+    // Create a uniform buffer to hold parameters for the shader.
+    Ref<BufferBase> uniformBuffer;
+    {
+        UniformParams params;
+        params.maxComputeWorkgroupsPerDimension =
+            device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
+        params.clientOffsetInU32 = clientOffsetFromAlignedBoundary / sizeof(uint32_t);
+        params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
+        params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
 
-        // Neither 'enableValidation' nor 'duplicateNumWorkgroups' can be declared as 'bool' as
-        // currently in WGSL type 'bool' cannot be used in storage class 'uniform' as 'it is
-        // non-host-shareable'.
-        struct UniformParams {
-            uint32_t maxComputeWorkgroupsPerDimension;
-            uint32_t clientOffsetInU32;
-            uint32_t enableValidation;
-            uint32_t duplicateNumWorkgroups;
-        };
-
-        // Create a uniform buffer to hold parameters for the shader.
-        Ref<BufferBase> uniformBuffer;
-        {
-            UniformParams params;
-            params.maxComputeWorkgroupsPerDimension =
-                device->GetLimits().v1.maxComputeWorkgroupsPerDimension;
-            params.clientOffsetInU32 = clientOffsetFromAlignedBoundary / sizeof(uint32_t);
-            params.enableValidation = static_cast<uint32_t>(IsValidationEnabled());
-            params.duplicateNumWorkgroups = static_cast<uint32_t>(shouldDuplicateNumWorkgroups);
-
-            DAWN_TRY_ASSIGN(uniformBuffer, utils::CreateBufferFromData(
-                                               device, wgpu::BufferUsage::Uniform, {params}));
-        }
-
-        // Reserve space in the scratch buffer to hold the validated indirect params.
-        ScratchBuffer& scratchBuffer = store->scratchIndirectStorage;
-        const uint64_t scratchBufferSize =
-            shouldDuplicateNumWorkgroups ? 2 * kDispatchIndirectSize : kDispatchIndirectSize;
-        DAWN_TRY(scratchBuffer.EnsureCapacity(scratchBufferSize));
-        Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
-
-        Ref<BindGroupBase> validationBindGroup;
-        ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
-        DAWN_TRY_ASSIGN(validationBindGroup,
-                        utils::MakeBindGroup(device, layout,
-                                             {
-                                                 {0, uniformBuffer},
-                                                 {1, indirectBuffer, clientIndirectBindingOffset,
-                                                  clientIndirectBindingSize},
-                                                 {2, validatedIndirectBuffer, 0, scratchBufferSize},
-                                             }));
-
-        // Issue commands to validate the indirect buffer.
-        APISetPipeline(validationPipeline.Get());
-        APISetBindGroup(0, validationBindGroup.Get());
-        APIDispatchWorkgroups(1);
-
-        // Restore the state.
-        RestoreCommandBufferState(std::move(previousState));
-
-        // Return the new indirect buffer and indirect buffer offset.
-        return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
+        DAWN_TRY_ASSIGN(uniformBuffer,
+                        utils::CreateBufferFromData(device, wgpu::BufferUsage::Uniform, {params}));
     }
 
-    void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer,
-                                                 uint64_t indirectOffset) {
-        GetDevice()->EmitDeprecationWarning(
-            "dispatchIndirect() has been deprecated. Use dispatchWorkgroupsIndirect() instead.");
-        APIDispatchWorkgroupsIndirect(indirectBuffer, indirectOffset);
+    // Reserve space in the scratch buffer to hold the validated indirect params.
+    ScratchBuffer& scratchBuffer = store->scratchIndirectStorage;
+    const uint64_t scratchBufferSize =
+        shouldDuplicateNumWorkgroups ? 2 * kDispatchIndirectSize : kDispatchIndirectSize;
+    DAWN_TRY(scratchBuffer.EnsureCapacity(scratchBufferSize));
+    Ref<BufferBase> validatedIndirectBuffer = scratchBuffer.GetBuffer();
+
+    Ref<BindGroupBase> validationBindGroup;
+    ASSERT(indirectBuffer->GetUsage() & kInternalStorageBuffer);
+    DAWN_TRY_ASSIGN(validationBindGroup,
+                    utils::MakeBindGroup(device, layout,
+                                         {
+                                             {0, uniformBuffer},
+                                             {1, indirectBuffer, clientIndirectBindingOffset,
+                                              clientIndirectBindingSize},
+                                             {2, validatedIndirectBuffer, 0, scratchBufferSize},
+                                         }));
+
+    // Issue commands to validate the indirect buffer.
+    APISetPipeline(validationPipeline.Get());
+    APISetBindGroup(0, validationBindGroup.Get());
+    APIDispatchWorkgroups(1);
+
+    // Restore the state.
+    RestoreCommandBufferState(std::move(previousState));
+
+    // Return the new indirect buffer and indirect buffer offset.
+    return std::make_pair(std::move(validatedIndirectBuffer), uint64_t(0));
+}
+
+void ComputePassEncoder::APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+    GetDevice()->EmitDeprecationWarning(
+        "dispatchIndirect() has been deprecated. Use dispatchWorkgroupsIndirect() instead.");
+    APIDispatchWorkgroupsIndirect(indirectBuffer, indirectOffset);
+}
+
+void ComputePassEncoder::APIDispatchWorkgroupsIndirect(BufferBase* indirectBuffer,
+                                                       uint64_t indirectOffset) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+                DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+                DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
+
+                DAWN_INVALID_IF(indirectOffset % 4 != 0,
+                                "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+                DAWN_INVALID_IF(
+                    indirectOffset >= indirectBuffer->GetSize() ||
+                        indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize(),
+                    "Indirect offset (%u) and dispatch size (%u) exceeds the indirect buffer "
+                    "size (%u).",
+                    indirectOffset, kDispatchIndirectSize, indirectBuffer->GetSize());
+            }
+
+            SyncScopeUsageTracker scope;
+            scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+            mUsageTracker.AddReferencedBuffer(indirectBuffer);
+            // TODO(crbug.com/dawn/1166): If validation is enabled, adding |indirectBuffer|
+            // is needed for correct usage validation even though it will only be bound for
+            // storage. This will unecessarily transition the |indirectBuffer| in
+            // the backend.
+
+            Ref<BufferBase> indirectBufferRef = indirectBuffer;
+
+            // Get applied indirect buffer with necessary changes on the original indirect
+            // buffer. For example,
+            // - Validate each indirect dispatch with a single dispatch to copy the indirect
+            //   buffer params into a scratch buffer if they're valid, and otherwise zero them
+            //   out.
+            // - Duplicate all the indirect dispatch parameters to support @num_workgroups on
+            //   D3D12.
+            // - Directly return the original indirect dispatch buffer if we don't need any
+            //   transformations on it.
+            // We could consider moving the validation earlier in the pass after the last
+            // last point the indirect buffer was used with writable usage, as well as batch
+            // validation for multiple dispatches into one, but inserting commands at
+            // arbitrary points in the past is not possible right now.
+            DAWN_TRY_ASSIGN(std::tie(indirectBufferRef, indirectOffset),
+                            TransformIndirectDispatchBuffer(indirectBufferRef, indirectOffset));
+
+            // If we have created a new scratch dispatch indirect buffer in
+            // TransformIndirectDispatchBuffer(), we need to track it in mUsageTracker.
+            if (indirectBufferRef.Get() != indirectBuffer) {
+                // |indirectBufferRef| was replaced with a scratch buffer. Add it to the
+                // synchronization scope.
+                scope.BufferUsedAs(indirectBufferRef.Get(), wgpu::BufferUsage::Indirect);
+                mUsageTracker.AddReferencedBuffer(indirectBufferRef.Get());
+            }
+
+            AddDispatchSyncScope(std::move(scope));
+
+            DispatchIndirectCmd* dispatch =
+                allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
+            dispatch->indirectBuffer = std::move(indirectBufferRef);
+            dispatch->indirectOffset = indirectOffset;
+            return {};
+        },
+        "encoding %s.DispatchWorkgroupsIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+}
+
+void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+            }
+
+            mCommandBufferState.SetComputePipeline(pipeline);
+
+            SetComputePipelineCmd* cmd =
+                allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
+            cmd->pipeline = pipeline;
+
+            return {};
+        },
+        "encoding %s.SetPipeline(%s).", this, pipeline);
+}
+
+void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
+                                         BindGroupBase* group,
+                                         uint32_t dynamicOffsetCount,
+                                         const uint32_t* dynamicOffsets) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            BindGroupIndex groupIndex(groupIndexIn);
+
+            if (IsValidationEnabled()) {
+                DAWN_TRY(
+                    ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
+            }
+
+            mUsageTracker.AddResourcesReferencedByBindGroup(group);
+            RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+            mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+
+            return {};
+        },
+        "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
+        dynamicOffsetCount);
+}
+
+void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+            }
+
+            mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+
+            WriteTimestampCmd* cmd =
+                allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+            cmd->querySet = querySet;
+            cmd->queryIndex = queryIndex;
+
+            return {};
+        },
+        "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+}
+
+void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
+    PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
+    for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
     }
+    mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
+}
 
-    void ComputePassEncoder::APIDispatchWorkgroupsIndirect(BufferBase* indirectBuffer,
-                                                           uint64_t indirectOffset) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
-                    DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
-                    DAWN_TRY(mCommandBufferState.ValidateCanDispatch());
-
-                    DAWN_INVALID_IF(indirectOffset % 4 != 0,
-                                    "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
-                    DAWN_INVALID_IF(
-                        indirectOffset >= indirectBuffer->GetSize() ||
-                            indirectOffset + kDispatchIndirectSize > indirectBuffer->GetSize(),
-                        "Indirect offset (%u) and dispatch size (%u) exceeds the indirect buffer "
-                        "size (%u).",
-                        indirectOffset, kDispatchIndirectSize, indirectBuffer->GetSize());
-                }
-
-                SyncScopeUsageTracker scope;
-                scope.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-                mUsageTracker.AddReferencedBuffer(indirectBuffer);
-                // TODO(crbug.com/dawn/1166): If validation is enabled, adding |indirectBuffer|
-                // is needed for correct usage validation even though it will only be bound for
-                // storage. This will unecessarily transition the |indirectBuffer| in
-                // the backend.
-
-                Ref<BufferBase> indirectBufferRef = indirectBuffer;
-
-                // Get applied indirect buffer with necessary changes on the original indirect
-                // buffer. For example,
-                // - Validate each indirect dispatch with a single dispatch to copy the indirect
-                //   buffer params into a scratch buffer if they're valid, and otherwise zero them
-                //   out.
-                // - Duplicate all the indirect dispatch parameters to support @num_workgroups on
-                //   D3D12.
-                // - Directly return the original indirect dispatch buffer if we don't need any
-                //   transformations on it.
-                // We could consider moving the validation earlier in the pass after the last
-                // last point the indirect buffer was used with writable usage, as well as batch
-                // validation for multiple dispatches into one, but inserting commands at
-                // arbitrary points in the past is not possible right now.
-                DAWN_TRY_ASSIGN(std::tie(indirectBufferRef, indirectOffset),
-                                TransformIndirectDispatchBuffer(indirectBufferRef, indirectOffset));
-
-                // If we have created a new scratch dispatch indirect buffer in
-                // TransformIndirectDispatchBuffer(), we need to track it in mUsageTracker.
-                if (indirectBufferRef.Get() != indirectBuffer) {
-                    // |indirectBufferRef| was replaced with a scratch buffer. Add it to the
-                    // synchronization scope.
-                    scope.BufferUsedAs(indirectBufferRef.Get(), wgpu::BufferUsage::Indirect);
-                    mUsageTracker.AddReferencedBuffer(indirectBufferRef.Get());
-                }
-
-                AddDispatchSyncScope(std::move(scope));
-
-                DispatchIndirectCmd* dispatch =
-                    allocator->Allocate<DispatchIndirectCmd>(Command::DispatchIndirect);
-                dispatch->indirectBuffer = std::move(indirectBufferRef);
-                dispatch->indirectOffset = indirectOffset;
-                return {};
-            },
-            "encoding %s.DispatchWorkgroupsIndirect(%s, %u).", this, indirectBuffer,
-            indirectOffset);
+void ComputePassEncoder::RestoreCommandBufferState(CommandBufferStateTracker state) {
+    // Encode commands for the backend to restore the pipeline and bind groups.
+    if (state.HasPipeline()) {
+        APISetPipeline(state.GetComputePipeline());
     }
-
-    void ComputePassEncoder::APISetPipeline(ComputePipelineBase* pipeline) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(pipeline));
-                }
-
-                mCommandBufferState.SetComputePipeline(pipeline);
-
-                SetComputePipelineCmd* cmd =
-                    allocator->Allocate<SetComputePipelineCmd>(Command::SetComputePipeline);
-                cmd->pipeline = pipeline;
-
-                return {};
-            },
-            "encoding %s.SetPipeline(%s).", this, pipeline);
-    }
-
-    void ComputePassEncoder::APISetBindGroup(uint32_t groupIndexIn,
-                                             BindGroupBase* group,
-                                             uint32_t dynamicOffsetCount,
-                                             const uint32_t* dynamicOffsets) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                BindGroupIndex groupIndex(groupIndexIn);
-
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                                  dynamicOffsets));
-                }
-
-                mUsageTracker.AddResourcesReferencedByBindGroup(group);
-                RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
-                                   dynamicOffsets);
-                mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                                 dynamicOffsets);
-
-                return {};
-            },
-            "encoding %s.SetBindGroup(%u, %s, %u, ...).", this, groupIndexIn, group,
-            dynamicOffsetCount);
-    }
-
-    void ComputePassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
-                }
-
-                mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
-
-                WriteTimestampCmd* cmd =
-                    allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
-                cmd->querySet = querySet;
-                cmd->queryIndex = queryIndex;
-
-                return {};
-            },
-            "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
-    }
-
-    void ComputePassEncoder::AddDispatchSyncScope(SyncScopeUsageTracker scope) {
-        PipelineLayoutBase* layout = mCommandBufferState.GetPipelineLayout();
-        for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            scope.AddBindGroup(mCommandBufferState.GetBindGroup(i));
-        }
-        mUsageTracker.AddDispatch(scope.AcquireSyncScopeUsage());
-    }
-
-    void ComputePassEncoder::RestoreCommandBufferState(CommandBufferStateTracker state) {
-        // Encode commands for the backend to restore the pipeline and bind groups.
-        if (state.HasPipeline()) {
-            APISetPipeline(state.GetComputePipeline());
-        }
-        for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
-            BindGroupBase* bg = state.GetBindGroup(i);
-            if (bg != nullptr) {
-                const std::vector<uint32_t>& offsets = state.GetDynamicOffsets(i);
-                if (offsets.empty()) {
-                    APISetBindGroup(static_cast<uint32_t>(i), bg);
-                } else {
-                    APISetBindGroup(static_cast<uint32_t>(i), bg, offsets.size(), offsets.data());
-                }
+    for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+        BindGroupBase* bg = state.GetBindGroup(i);
+        if (bg != nullptr) {
+            const std::vector<uint32_t>& offsets = state.GetDynamicOffsets(i);
+            if (offsets.empty()) {
+                APISetBindGroup(static_cast<uint32_t>(i), bg);
+            } else {
+                APISetBindGroup(static_cast<uint32_t>(i), bg, offsets.size(), offsets.data());
             }
         }
-
-        // Restore the frontend state tracking information.
-        mCommandBufferState = std::move(state);
     }
 
-    CommandBufferStateTracker* ComputePassEncoder::GetCommandBufferStateTrackerForTesting() {
-        return &mCommandBufferState;
-    }
+    // Restore the frontend state tracking information.
+    mCommandBufferState = std::move(state);
+}
+
+CommandBufferStateTracker* ComputePassEncoder::GetCommandBufferStateTrackerForTesting() {
+    return &mCommandBufferState;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/ComputePassEncoder.h b/src/dawn/native/ComputePassEncoder.h
index 4d1a537..ad95096 100644
--- a/src/dawn/native/ComputePassEncoder.h
+++ b/src/dawn/native/ComputePassEncoder.h
@@ -26,81 +26,81 @@
 
 namespace dawn::native {
 
-    class SyncScopeUsageTracker;
+class SyncScopeUsageTracker;
 
-    class ComputePassEncoder final : public ProgrammableEncoder {
-      public:
-        static Ref<ComputePassEncoder> Create(DeviceBase* device,
-                                              const ComputePassDescriptor* descriptor,
-                                              CommandEncoder* commandEncoder,
-                                              EncodingContext* encodingContext,
-                                              std::vector<TimestampWrite> timestampWritesAtEnd);
-        static Ref<ComputePassEncoder> MakeError(DeviceBase* device,
-                                                 CommandEncoder* commandEncoder,
-                                                 EncodingContext* encodingContext);
+class ComputePassEncoder final : public ProgrammableEncoder {
+  public:
+    static Ref<ComputePassEncoder> Create(DeviceBase* device,
+                                          const ComputePassDescriptor* descriptor,
+                                          CommandEncoder* commandEncoder,
+                                          EncodingContext* encodingContext,
+                                          std::vector<TimestampWrite> timestampWritesAtEnd);
+    static Ref<ComputePassEncoder> MakeError(DeviceBase* device,
+                                             CommandEncoder* commandEncoder,
+                                             EncodingContext* encodingContext);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        void APIEnd();
-        void APIEndPass();  // TODO(dawn:1286): Remove after deprecation period.
+    void APIEnd();
+    void APIEndPass();  // TODO(dawn:1286): Remove after deprecation period.
 
-        void APIDispatchWorkgroups(uint32_t workgroupCountX,
-                                   uint32_t workgroupCountY = 1,
-                                   uint32_t workgroupCountZ = 1);
-        void APIDispatchWorkgroupsIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
-        void APISetPipeline(ComputePipelineBase* pipeline);
+    void APIDispatchWorkgroups(uint32_t workgroupCountX,
+                               uint32_t workgroupCountY = 1,
+                               uint32_t workgroupCountZ = 1);
+    void APIDispatchWorkgroupsIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+    void APISetPipeline(ComputePipelineBase* pipeline);
 
-        void APISetBindGroup(uint32_t groupIndex,
-                             BindGroupBase* group,
-                             uint32_t dynamicOffsetCount = 0,
-                             const uint32_t* dynamicOffsets = nullptr);
+    void APISetBindGroup(uint32_t groupIndex,
+                         BindGroupBase* group,
+                         uint32_t dynamicOffsetCount = 0,
+                         const uint32_t* dynamicOffsets = nullptr);
 
-        void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+    void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
 
-        CommandBufferStateTracker* GetCommandBufferStateTrackerForTesting();
-        void RestoreCommandBufferStateForTesting(CommandBufferStateTracker state) {
-            RestoreCommandBufferState(std::move(state));
-        }
+    CommandBufferStateTracker* GetCommandBufferStateTrackerForTesting();
+    void RestoreCommandBufferStateForTesting(CommandBufferStateTracker state) {
+        RestoreCommandBufferState(std::move(state));
+    }
 
-        // Deprecated
-        void APIDispatch(uint32_t workgroupCountX,
-                         uint32_t workgroupCountY = 1,
-                         uint32_t workgroupCountZ = 1);
-        void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+    // Deprecated
+    void APIDispatch(uint32_t workgroupCountX,
+                     uint32_t workgroupCountY = 1,
+                     uint32_t workgroupCountZ = 1);
+    void APIDispatchIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
 
-      protected:
-        ComputePassEncoder(DeviceBase* device,
-                           const ComputePassDescriptor* descriptor,
-                           CommandEncoder* commandEncoder,
-                           EncodingContext* encodingContext,
-                           std::vector<TimestampWrite> timestampWritesAtEnd);
-        ComputePassEncoder(DeviceBase* device,
-                           CommandEncoder* commandEncoder,
-                           EncodingContext* encodingContext,
-                           ErrorTag errorTag);
+  protected:
+    ComputePassEncoder(DeviceBase* device,
+                       const ComputePassDescriptor* descriptor,
+                       CommandEncoder* commandEncoder,
+                       EncodingContext* encodingContext,
+                       std::vector<TimestampWrite> timestampWritesAtEnd);
+    ComputePassEncoder(DeviceBase* device,
+                       CommandEncoder* commandEncoder,
+                       EncodingContext* encodingContext,
+                       ErrorTag errorTag);
 
-      private:
-        void DestroyImpl() override;
+  private:
+    void DestroyImpl() override;
 
-        ResultOrError<std::pair<Ref<BufferBase>, uint64_t>> TransformIndirectDispatchBuffer(
-            Ref<BufferBase> indirectBuffer,
-            uint64_t indirectOffset);
+    ResultOrError<std::pair<Ref<BufferBase>, uint64_t>> TransformIndirectDispatchBuffer(
+        Ref<BufferBase> indirectBuffer,
+        uint64_t indirectOffset);
 
-        void RestoreCommandBufferState(CommandBufferStateTracker state);
+    void RestoreCommandBufferState(CommandBufferStateTracker state);
 
-        CommandBufferStateTracker mCommandBufferState;
+    CommandBufferStateTracker mCommandBufferState;
 
-        // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
-        // records it in mUsageTracker.
-        void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
-        ComputePassResourceUsageTracker mUsageTracker;
+    // Adds the bindgroups used for the current dispatch to the SyncScopeResourceUsage and
+    // records it in mUsageTracker.
+    void AddDispatchSyncScope(SyncScopeUsageTracker scope = {});
+    ComputePassResourceUsageTracker mUsageTracker;
 
-        // For render and compute passes, the encoding context is borrowed from the command encoder.
-        // Keep a reference to the encoder to make sure the context isn't freed.
-        Ref<CommandEncoder> mCommandEncoder;
+    // For render and compute passes, the encoding context is borrowed from the command encoder.
+    // Keep a reference to the encoder to make sure the context isn't freed.
+    Ref<CommandEncoder> mCommandEncoder;
 
-        std::vector<TimestampWrite> mTimestampWritesAtEnd;
-    };
+    std::vector<TimestampWrite> mTimestampWritesAtEnd;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ComputePipeline.cpp b/src/dawn/native/ComputePipeline.cpp
index 7603220..b7893721 100644
--- a/src/dawn/native/ComputePipeline.cpp
+++ b/src/dawn/native/ComputePipeline.cpp
@@ -20,80 +20,78 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
-                                                 const ComputePipelineDescriptor* descriptor) {
-        if (descriptor->nextInChain != nullptr) {
-            return DAWN_FORMAT_VALIDATION_ERROR("nextInChain must be nullptr.");
+MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+                                             const ComputePipelineDescriptor* descriptor) {
+    if (descriptor->nextInChain != nullptr) {
+        return DAWN_FORMAT_VALIDATION_ERROR("nextInChain must be nullptr.");
+    }
+
+    if (descriptor->layout != nullptr) {
+        DAWN_TRY(device->ValidateObject(descriptor->layout));
+    }
+
+    return ValidateProgrammableStage(
+        device, descriptor->compute.module, descriptor->compute.entryPoint,
+        descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
+        SingleShaderStage::Compute);
+}
+
+// ComputePipelineBase
+
+ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
+                                         const ComputePipelineDescriptor* descriptor)
+    : PipelineBase(
+          device,
+          descriptor->layout,
+          descriptor->label,
+          {{SingleShaderStage::Compute, descriptor->compute.module, descriptor->compute.entryPoint,
+            descriptor->compute.constantCount, descriptor->compute.constants}}) {
+    SetContentHash(ComputeContentHash());
+    TrackInDevice();
+
+    // Initialize the cache key to include the cache type and device information.
+    GetCacheKey()->Record(CacheKey::Type::ComputePipeline, device->GetCacheKey());
+}
+
+ComputePipelineBase::ComputePipelineBase(DeviceBase* device) : PipelineBase(device) {
+    TrackInDevice();
+}
+
+ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : PipelineBase(device, tag) {}
+
+ComputePipelineBase::~ComputePipelineBase() = default;
+
+void ComputePipelineBase::DestroyImpl() {
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheComputePipeline(this);
+    }
+}
+
+// static
+ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
+    class ErrorComputePipeline final : public ComputePipelineBase {
+      public:
+        explicit ErrorComputePipeline(DeviceBase* device)
+            : ComputePipelineBase(device, ObjectBase::kError) {}
+
+        MaybeError Initialize() override {
+            UNREACHABLE();
+            return {};
         }
+    };
 
-        if (descriptor->layout != nullptr) {
-            DAWN_TRY(device->ValidateObject(descriptor->layout));
-        }
+    return new ErrorComputePipeline(device);
+}
 
-        return ValidateProgrammableStage(
-            device, descriptor->compute.module, descriptor->compute.entryPoint,
-            descriptor->compute.constantCount, descriptor->compute.constants, descriptor->layout,
-            SingleShaderStage::Compute);
-    }
+ObjectType ComputePipelineBase::GetType() const {
+    return ObjectType::ComputePipeline;
+}
 
-    // ComputePipelineBase
-
-    ComputePipelineBase::ComputePipelineBase(DeviceBase* device,
-                                             const ComputePipelineDescriptor* descriptor)
-        : PipelineBase(device,
-                       descriptor->layout,
-                       descriptor->label,
-                       {{SingleShaderStage::Compute, descriptor->compute.module,
-                         descriptor->compute.entryPoint, descriptor->compute.constantCount,
-                         descriptor->compute.constants}}) {
-        SetContentHash(ComputeContentHash());
-        TrackInDevice();
-
-        // Initialize the cache key to include the cache type and device information.
-        GetCacheKey()->Record(CacheKey::Type::ComputePipeline, device->GetCacheKey());
-    }
-
-    ComputePipelineBase::ComputePipelineBase(DeviceBase* device) : PipelineBase(device) {
-        TrackInDevice();
-    }
-
-    ComputePipelineBase::ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : PipelineBase(device, tag) {
-    }
-
-    ComputePipelineBase::~ComputePipelineBase() = default;
-
-    void ComputePipelineBase::DestroyImpl() {
-        if (IsCachedReference()) {
-            // Do not uncache the actual cached object if we are a blueprint.
-            GetDevice()->UncacheComputePipeline(this);
-        }
-    }
-
-    // static
-    ComputePipelineBase* ComputePipelineBase::MakeError(DeviceBase* device) {
-        class ErrorComputePipeline final : public ComputePipelineBase {
-          public:
-            explicit ErrorComputePipeline(DeviceBase* device)
-                : ComputePipelineBase(device, ObjectBase::kError) {
-            }
-
-            MaybeError Initialize() override {
-                UNREACHABLE();
-                return {};
-            }
-        };
-
-        return new ErrorComputePipeline(device);
-    }
-
-    ObjectType ComputePipelineBase::GetType() const {
-        return ObjectType::ComputePipeline;
-    }
-
-    bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
-                                                       const ComputePipelineBase* b) const {
-        return PipelineBase::EqualForCache(a, b);
-    }
+bool ComputePipelineBase::EqualityFunc::operator()(const ComputePipelineBase* a,
+                                                   const ComputePipelineBase* b) const {
+    return PipelineBase::EqualForCache(a, b);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/ComputePipeline.h b/src/dawn/native/ComputePipeline.h
index 257bd85..36bb34f 100644
--- a/src/dawn/native/ComputePipeline.h
+++ b/src/dawn/native/ComputePipeline.h
@@ -21,35 +21,35 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
-    struct EntryPointMetadata;
+class DeviceBase;
+struct EntryPointMetadata;
 
-    MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
-                                                 const ComputePipelineDescriptor* descriptor);
+MaybeError ValidateComputePipelineDescriptor(DeviceBase* device,
+                                             const ComputePipelineDescriptor* descriptor);
 
-    class ComputePipelineBase : public PipelineBase {
-      public:
-        ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
-        ~ComputePipelineBase() override;
+class ComputePipelineBase : public PipelineBase {
+  public:
+    ComputePipelineBase(DeviceBase* device, const ComputePipelineDescriptor* descriptor);
+    ~ComputePipelineBase() override;
 
-        static ComputePipelineBase* MakeError(DeviceBase* device);
+    static ComputePipelineBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        // Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
-        struct EqualityFunc {
-            bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
-        };
-
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit ComputePipelineBase(DeviceBase* device);
-        void DestroyImpl() override;
-
-      private:
-        ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    // Functors necessary for the unordered_set<ComputePipelineBase*>-based cache.
+    struct EqualityFunc {
+        bool operator()(const ComputePipelineBase* a, const ComputePipelineBase* b) const;
     };
 
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit ComputePipelineBase(DeviceBase* device);
+    void DestroyImpl() override;
+
+  private:
+    ComputePipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_COMPUTEPIPELINE_H_
diff --git a/src/dawn/native/CopyTextureForBrowserHelper.cpp b/src/dawn/native/CopyTextureForBrowserHelper.cpp
index caae7c3..5f04014 100644
--- a/src/dawn/native/CopyTextureForBrowserHelper.cpp
+++ b/src/dawn/native/CopyTextureForBrowserHelper.cpp
@@ -35,9 +35,9 @@
 #include "dawn/native/utils/WGPUHelpers.h"
 
 namespace dawn::native {
-    namespace {
+namespace {
 
-        static const char sCopyTextureForBrowserShader[] = R"(
+static const char sCopyTextureForBrowserShader[] = R"(
             struct GammaTransferParams {
                 G: f32;
                 A: f32;
@@ -195,411 +195,404 @@
             }
         )";
 
-        // Follow the same order of skcms_TransferFunction
-        // https://source.chromium.org/chromium/chromium/src/+/main:third_party/skia/include/third_party/skcms/skcms.h;l=46;
-        struct GammaTransferParams {
-            float G = 0.0;
-            float A = 0.0;
-            float B = 0.0;
-            float C = 0.0;
-            float D = 0.0;
-            float E = 0.0;
-            float F = 0.0;
-            uint32_t padding = 0;
-        };
+// Follow the same order of skcms_TransferFunction
+// https://source.chromium.org/chromium/chromium/src/+/main:third_party/skia/include/third_party/skcms/skcms.h;l=46;
+struct GammaTransferParams {
+    float G = 0.0;
+    float A = 0.0;
+    float B = 0.0;
+    float C = 0.0;
+    float D = 0.0;
+    float E = 0.0;
+    float F = 0.0;
+    uint32_t padding = 0;
+};
 
-        struct Uniform {
-            float scaleX;
-            float scaleY;
-            float offsetX;
-            float offsetY;
-            uint32_t stepsMask = 0;
-            const std::array<uint32_t, 3> padding = {};  // 12 bytes padding
-            std::array<float, 12> conversionMatrix = {};
-            GammaTransferParams gammaDecodingParams = {};
-            GammaTransferParams gammaEncodingParams = {};
-            GammaTransferParams gammaDecodingForDstSrgbParams = {};
-        };
-        static_assert(sizeof(Uniform) == 176);
+struct Uniform {
+    float scaleX;
+    float scaleY;
+    float offsetX;
+    float offsetY;
+    uint32_t stepsMask = 0;
+    const std::array<uint32_t, 3> padding = {};  // 12 bytes padding
+    std::array<float, 12> conversionMatrix = {};
+    GammaTransferParams gammaDecodingParams = {};
+    GammaTransferParams gammaEncodingParams = {};
+    GammaTransferParams gammaDecodingForDstSrgbParams = {};
+};
+static_assert(sizeof(Uniform) == 176);
 
-        // TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
-        // non-depth, non-stencil, non-compressed texture format pair copy.
-        MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
-                                                       const wgpu::TextureFormat dstFormat) {
-            switch (srcFormat) {
-                case wgpu::TextureFormat::BGRA8Unorm:
-                case wgpu::TextureFormat::RGBA8Unorm:
-                    break;
-                default:
-                    return DAWN_FORMAT_VALIDATION_ERROR(
-                        "Source texture format (%s) is not supported.", srcFormat);
-            }
+// TODO(crbug.com/dawn/856): Expand copyTextureForBrowser to support any
+// non-depth, non-stencil, non-compressed texture format pair copy.
+MaybeError ValidateCopyTextureFormatConversion(const wgpu::TextureFormat srcFormat,
+                                               const wgpu::TextureFormat dstFormat) {
+    switch (srcFormat) {
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::RGBA8Unorm:
+            break;
+        default:
+            return DAWN_FORMAT_VALIDATION_ERROR("Source texture format (%s) is not supported.",
+                                                srcFormat);
+    }
 
-            switch (dstFormat) {
-                case wgpu::TextureFormat::R8Unorm:
-                case wgpu::TextureFormat::R16Float:
-                case wgpu::TextureFormat::R32Float:
-                case wgpu::TextureFormat::RG8Unorm:
-                case wgpu::TextureFormat::RG16Float:
-                case wgpu::TextureFormat::RG32Float:
-                case wgpu::TextureFormat::RGBA8Unorm:
-                case wgpu::TextureFormat::RGBA8UnormSrgb:
-                case wgpu::TextureFormat::BGRA8Unorm:
-                case wgpu::TextureFormat::BGRA8UnormSrgb:
-                case wgpu::TextureFormat::RGB10A2Unorm:
-                case wgpu::TextureFormat::RGBA16Float:
-                case wgpu::TextureFormat::RGBA32Float:
-                    break;
-                default:
-                    return DAWN_FORMAT_VALIDATION_ERROR(
-                        "Destination texture format (%s) is not supported.", dstFormat);
-            }
+    switch (dstFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::RGBA32Float:
+            break;
+        default:
+            return DAWN_FORMAT_VALIDATION_ERROR("Destination texture format (%s) is not supported.",
+                                                dstFormat);
+    }
 
-            return {};
+    return {};
+}
+
+RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store, wgpu::TextureFormat dstFormat) {
+    auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
+    if (pipeline != store->copyTextureForBrowserPipelines.end()) {
+        return pipeline->second.Get();
+    }
+    return nullptr;
+}
+
+ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
+    DeviceBase* device,
+    wgpu::TextureFormat dstFormat) {
+    InternalPipelineStore* store = device->GetInternalPipelineStore();
+
+    if (GetCachedPipeline(store, dstFormat) == nullptr) {
+        // Create vertex shader module if not cached before.
+        if (store->copyTextureForBrowser == nullptr) {
+            DAWN_TRY_ASSIGN(store->copyTextureForBrowser,
+                            utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
         }
 
-        RenderPipelineBase* GetCachedPipeline(InternalPipelineStore* store,
-                                              wgpu::TextureFormat dstFormat) {
-            auto pipeline = store->copyTextureForBrowserPipelines.find(dstFormat);
-            if (pipeline != store->copyTextureForBrowserPipelines.end()) {
-                return pipeline->second.Get();
-            }
-            return nullptr;
-        }
+        ShaderModuleBase* shaderModule = store->copyTextureForBrowser.Get();
 
-        ResultOrError<RenderPipelineBase*> GetOrCreateCopyTextureForBrowserPipeline(
-            DeviceBase* device,
-            wgpu::TextureFormat dstFormat) {
-            InternalPipelineStore* store = device->GetInternalPipelineStore();
+        // Prepare vertex stage.
+        VertexState vertex = {};
+        vertex.module = shaderModule;
+        vertex.entryPoint = "vs_main";
 
-            if (GetCachedPipeline(store, dstFormat) == nullptr) {
-                // Create vertex shader module if not cached before.
-                if (store->copyTextureForBrowser == nullptr) {
-                    DAWN_TRY_ASSIGN(
-                        store->copyTextureForBrowser,
-                        utils::CreateShaderModule(device, sCopyTextureForBrowserShader));
-                }
+        // Prepare fragment stage.
+        FragmentState fragment = {};
+        fragment.module = shaderModule;
+        fragment.entryPoint = "fs_main";
 
-                ShaderModuleBase* shaderModule = store->copyTextureForBrowser.Get();
+        // Prepare color state.
+        ColorTargetState target = {};
+        target.format = dstFormat;
 
-                // Prepare vertex stage.
-                VertexState vertex = {};
-                vertex.module = shaderModule;
-                vertex.entryPoint = "vs_main";
+        // Create RenderPipeline.
+        RenderPipelineDescriptor renderPipelineDesc = {};
 
-                // Prepare frgament stage.
-                FragmentState fragment = {};
-                fragment.module = shaderModule;
-                fragment.entryPoint = "fs_main";
+        // Generate the layout based on shader modules.
+        renderPipelineDesc.layout = nullptr;
 
-                // Prepare color state.
-                ColorTargetState target = {};
-                target.format = dstFormat;
+        renderPipelineDesc.vertex = vertex;
+        renderPipelineDesc.fragment = &fragment;
 
-                // Create RenderPipeline.
-                RenderPipelineDescriptor renderPipelineDesc = {};
+        renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
 
-                // Generate the layout based on shader modules.
-                renderPipelineDesc.layout = nullptr;
+        fragment.targetCount = 1;
+        fragment.targets = &target;
 
-                renderPipelineDesc.vertex = vertex;
-                renderPipelineDesc.fragment = &fragment;
+        Ref<RenderPipelineBase> pipeline;
+        DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
+        store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
+    }
 
-                renderPipelineDesc.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+    return GetCachedPipeline(store, dstFormat);
+}
+}  // anonymous namespace
 
-                fragment.targetCount = 1;
-                fragment.targets = &target;
+MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+                                         const ImageCopyTexture* source,
+                                         const ImageCopyTexture* destination,
+                                         const Extent3D* copySize,
+                                         const CopyTextureForBrowserOptions* options) {
+    DAWN_TRY(device->ValidateObject(source->texture));
+    DAWN_TRY(device->ValidateObject(destination->texture));
 
-                Ref<RenderPipelineBase> pipeline;
-                DAWN_TRY_ASSIGN(pipeline, device->CreateRenderPipeline(&renderPipelineDesc));
-                store->copyTextureForBrowserPipelines.insert({dstFormat, std::move(pipeline)});
-            }
+    DAWN_INVALID_IF(source->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+                    "Source texture %s is destroyed.", source->texture);
 
-            return GetCachedPipeline(store, dstFormat);
-        }
-    }  // anonymous namespace
+    DAWN_INVALID_IF(destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
+                    "Destination texture %s is destroyed.", destination->texture);
 
-    MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
-                                             const ImageCopyTexture* source,
-                                             const ImageCopyTexture* destination,
-                                             const Extent3D* copySize,
-                                             const CopyTextureForBrowserOptions* options) {
-        DAWN_TRY(device->ValidateObject(source->texture));
-        DAWN_TRY(device->ValidateObject(destination->texture));
+    DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
+                     "validating the ImageCopyTexture for the source");
+    DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
+                     "validating the ImageCopyTexture for the destination");
 
-        DAWN_INVALID_IF(source->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
-                        "Source texture %s is destroyed.", source->texture);
+    DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
+                     "validating that the copy fits in the source");
+    DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
+                     "validating that the copy fits in the destination");
 
-        DAWN_INVALID_IF(
-            destination->texture->GetTextureState() == TextureBase::TextureState::Destroyed,
-            "Destination texture %s is destroyed.", destination->texture);
+    DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
 
-        DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *source, *copySize),
-                         "validating the ImageCopyTexture for the source");
-        DAWN_TRY_CONTEXT(ValidateImageCopyTexture(device, *destination, *copySize),
-                         "validating the ImageCopyTexture for the destination");
+    DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).", source->origin.z);
+    DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1, "Copy is for more than one array layer (%u)",
+                    copySize->depthOrArrayLayers);
 
-        DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *source, *copySize),
-                         "validating that the copy fits in the source");
-        DAWN_TRY_CONTEXT(ValidateTextureCopyRange(device, *destination, *copySize),
-                         "validating that the copy fits in the destination");
+    DAWN_INVALID_IF(
+        source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
+        "The source texture sample count (%u) or the destination texture sample count (%u) is "
+        "not 1.",
+        source->texture->GetSampleCount(), destination->texture->GetSampleCount());
 
-        DAWN_TRY(ValidateTextureToTextureCopyCommonRestrictions(*source, *destination, *copySize));
+    DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
+                              UsageValidationMode::Default));
+    DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding,
+                              UsageValidationMode::Default));
 
-        DAWN_INVALID_IF(source->origin.z > 0, "Source has a non-zero z origin (%u).",
-                        source->origin.z);
-        DAWN_INVALID_IF(copySize->depthOrArrayLayers > 1,
-                        "Copy is for more than one array layer (%u)", copySize->depthOrArrayLayers);
+    DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
+                              UsageValidationMode::Default));
+    DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment,
+                              UsageValidationMode::Default));
 
-        DAWN_INVALID_IF(
-            source->texture->GetSampleCount() > 1 || destination->texture->GetSampleCount() > 1,
-            "The source texture sample count (%u) or the destination texture sample count (%u) is "
-            "not 1.",
-            source->texture->GetSampleCount(), destination->texture->GetSampleCount());
+    DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
+                                                 destination->texture->GetFormat().format));
 
-        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::CopySrc,
-                                  UsageValidationMode::Default));
-        DAWN_TRY(ValidateCanUseAs(source->texture, wgpu::TextureUsage::TextureBinding,
-                                  UsageValidationMode::Default));
+    DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
 
-        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::CopyDst,
-                                  UsageValidationMode::Default));
-        DAWN_TRY(ValidateCanUseAs(destination->texture, wgpu::TextureUsage::RenderAttachment,
-                                  UsageValidationMode::Default));
+    DAWN_TRY(ValidateAlphaMode(options->srcAlphaMode));
+    DAWN_TRY(ValidateAlphaMode(options->dstAlphaMode));
 
-        DAWN_TRY(ValidateCopyTextureFormatConversion(source->texture->GetFormat().format,
-                                                     destination->texture->GetFormat().format));
+    if (options->needsColorSpaceConversion) {
+        DAWN_INVALID_IF(options->srcTransferFunctionParameters == nullptr,
+                        "srcTransferFunctionParameters is nullptr when doing color conversion");
+        DAWN_INVALID_IF(options->conversionMatrix == nullptr,
+                        "conversionMatrix is nullptr when doing color conversion");
+        DAWN_INVALID_IF(options->dstTransferFunctionParameters == nullptr,
+                        "dstTransferFunctionParameters is nullptr when doing color conversion");
+    }
+    return {};
+}
 
-        DAWN_INVALID_IF(options->nextInChain != nullptr, "nextInChain must be nullptr");
+// Whether the format of dst texture of CopyTextureForBrowser() is srgb or non-srgb.
+bool IsSrgbDstFormat(wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+            return true;
+        default:
+            return false;
+    }
+}
 
-        DAWN_TRY(ValidateAlphaMode(options->srcAlphaMode));
-        DAWN_TRY(ValidateAlphaMode(options->dstAlphaMode));
+MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+                                   const ImageCopyTexture* source,
+                                   const ImageCopyTexture* destination,
+                                   const Extent3D* copySize,
+                                   const CopyTextureForBrowserOptions* options) {
+    // TODO(crbug.com/dawn/856): In D3D12 and Vulkan, compatible texture format can directly
+    // copy to each other. This can be a potential fast path.
 
-        if (options->needsColorSpaceConversion) {
-            DAWN_INVALID_IF(options->srcTransferFunctionParameters == nullptr,
-                            "srcTransferFunctionParameters is nullptr when doing color conversion");
-            DAWN_INVALID_IF(options->conversionMatrix == nullptr,
-                            "conversionMatrix is nullptr when doing color conversion");
-            DAWN_INVALID_IF(options->dstTransferFunctionParameters == nullptr,
-                            "dstTransferFunctionParameters is nullptr when doing color conversion");
-        }
+    // Noop copy
+    if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
         return {};
     }
 
-    // Whether the format of dst texture of CopyTextureForBrowser() is srgb or non-srgb.
-    bool IsSrgbDstFormat(wgpu::TextureFormat format) {
-        switch (format) {
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-                return true;
-            default:
-                return false;
+    bool isSrgbDstFormat = IsSrgbDstFormat(destination->texture->GetFormat().format);
+    RenderPipelineBase* pipeline;
+    DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
+                                  device, destination->texture->GetFormat().format));
+
+    // Prepare bind group layout.
+    Ref<BindGroupLayoutBase> layout;
+    DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+    Extent3D srcTextureSize = source->texture->GetSize();
+
+    // Prepare binding 0 resource: uniform buffer.
+    Uniform uniformData = {
+        copySize->width / static_cast<float>(srcTextureSize.width),
+        copySize->height / static_cast<float>(srcTextureSize.height),  // scale
+        source->origin.x / static_cast<float>(srcTextureSize.width),
+        source->origin.y / static_cast<float>(srcTextureSize.height)  // offset
+    };
+
+    // Handle flipY. FlipY here means we flip the source texture firstly and then
+    // do copy. This helps on the case which source texture is flipped and the copy
+    // need to unpack the flip.
+    if (options->flipY) {
+        uniformData.scaleY *= -1.0;
+        uniformData.offsetY += copySize->height / static_cast<float>(srcTextureSize.height);
+    }
+
+    uint32_t stepsMask = 0u;
+
+    // Steps to do color space conversion
+    // From https://skia.org/docs/user/color/
+    // - unpremultiply if the source color is premultiplied; Alpha is not involved in color
+    // management, and we need to divide it out if it’s multiplied in.
+    // - linearize the source color using the source color space’s transfer function
+    // - convert those unpremultiplied, linear source colors to XYZ D50 gamut by multiplying by
+    // a 3x3 matrix.
+    // - convert those XYZ D50 colors to the destination gamut by multiplying by a 3x3 matrix.
+    // - encode that color using the inverse of the destination color space’s transfer function.
+    // - premultiply by alpha if the destination is premultiplied.
+    // The reason to choose XYZ D50 as intermediate color space:
+    // From http://www.brucelindbloom.com/index.html?WorkingSpaceInfo.html
+    // "Since the Lab TIFF specification, the ICC profile specification and
+    // Adobe Photoshop all use a D50"
+    constexpr uint32_t kUnpremultiplyStep = 0x01;
+    constexpr uint32_t kDecodeToLinearStep = 0x02;
+    constexpr uint32_t kConvertToDstGamutStep = 0x04;
+    constexpr uint32_t kEncodeToGammaStep = 0x08;
+    constexpr uint32_t kPremultiplyStep = 0x10;
+    constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
+
+    if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
+        if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
+            stepsMask |= kUnpremultiplyStep;
         }
     }
 
-    MaybeError DoCopyTextureForBrowser(DeviceBase* device,
-                                       const ImageCopyTexture* source,
-                                       const ImageCopyTexture* destination,
-                                       const Extent3D* copySize,
-                                       const CopyTextureForBrowserOptions* options) {
-        // TODO(crbug.com/dawn/856): In D3D12 and Vulkan, compatible texture format can directly
-        // copy to each other. This can be a potential fast path.
+    if (options->needsColorSpaceConversion) {
+        stepsMask |= kDecodeToLinearStep;
+        const float* decodingParams = options->srcTransferFunctionParameters;
 
-        // Noop copy
-        if (copySize->width == 0 || copySize->height == 0 || copySize->depthOrArrayLayers == 0) {
-            return {};
-        }
+        uniformData.gammaDecodingParams = {decodingParams[0], decodingParams[1], decodingParams[2],
+                                           decodingParams[3], decodingParams[4], decodingParams[5],
+                                           decodingParams[6]};
 
-        bool isSrgbDstFormat = IsSrgbDstFormat(destination->texture->GetFormat().format);
-        RenderPipelineBase* pipeline;
-        DAWN_TRY_ASSIGN(pipeline, GetOrCreateCopyTextureForBrowserPipeline(
-                                      device, destination->texture->GetFormat().format));
+        stepsMask |= kConvertToDstGamutStep;
+        const float* matrix = options->conversionMatrix;
+        uniformData.conversionMatrix = {{
+            matrix[0],
+            matrix[1],
+            matrix[2],
+            0.0,
+            matrix[3],
+            matrix[4],
+            matrix[5],
+            0.0,
+            matrix[6],
+            matrix[7],
+            matrix[8],
+            0.0,
+        }};
 
-        // Prepare bind group layout.
-        Ref<BindGroupLayoutBase> layout;
-        DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+        stepsMask |= kEncodeToGammaStep;
+        const float* encodingParams = options->dstTransferFunctionParameters;
 
-        Extent3D srcTextureSize = source->texture->GetSize();
-
-        // Prepare binding 0 resource: uniform buffer.
-        Uniform uniformData = {
-            copySize->width / static_cast<float>(srcTextureSize.width),
-            copySize->height / static_cast<float>(srcTextureSize.height),  // scale
-            source->origin.x / static_cast<float>(srcTextureSize.width),
-            source->origin.y / static_cast<float>(srcTextureSize.height)  // offset
-        };
-
-        // Handle flipY. FlipY here means we flip the source texture firstly and then
-        // do copy. This helps on the case which source texture is flipped and the copy
-        // need to unpack the flip.
-        if (options->flipY) {
-            uniformData.scaleY *= -1.0;
-            uniformData.offsetY += copySize->height / static_cast<float>(srcTextureSize.height);
-        }
-
-        uint32_t stepsMask = 0u;
-
-        // Steps to do color space conversion
-        // From https://skia.org/docs/user/color/
-        // - unpremultiply if the source color is premultiplied; Alpha is not involved in color
-        // management, and we need to divide it out if it’s multiplied in.
-        // - linearize the source color using the source color space’s transfer function
-        // - convert those unpremultiplied, linear source colors to XYZ D50 gamut by multiplying by
-        // a 3x3 matrix.
-        // - convert those XYZ D50 colors to the destination gamut by multiplying by a 3x3 matrix.
-        // - encode that color using the inverse of the destination color space’s transfer function.
-        // - premultiply by alpha if the destination is premultiplied.
-        // The reason to choose XYZ D50 as intermediate color space:
-        // From http://www.brucelindbloom.com/index.html?WorkingSpaceInfo.html
-        // "Since the Lab TIFF specification, the ICC profile specification and
-        // Adobe Photoshop all use a D50"
-        constexpr uint32_t kUnpremultiplyStep = 0x01;
-        constexpr uint32_t kDecodeToLinearStep = 0x02;
-        constexpr uint32_t kConvertToDstGamutStep = 0x04;
-        constexpr uint32_t kEncodeToGammaStep = 0x08;
-        constexpr uint32_t kPremultiplyStep = 0x10;
-        constexpr uint32_t kDecodeForSrgbDstFormat = 0x20;
-
-        if (options->srcAlphaMode == wgpu::AlphaMode::Premultiplied) {
-            if (options->needsColorSpaceConversion ||
-                options->srcAlphaMode != options->dstAlphaMode) {
-                stepsMask |= kUnpremultiplyStep;
-            }
-        }
-
-        if (options->needsColorSpaceConversion) {
-            stepsMask |= kDecodeToLinearStep;
-            const float* decodingParams = options->srcTransferFunctionParameters;
-
-            uniformData.gammaDecodingParams = {
-                decodingParams[0], decodingParams[1], decodingParams[2], decodingParams[3],
-                decodingParams[4], decodingParams[5], decodingParams[6]};
-
-            stepsMask |= kConvertToDstGamutStep;
-            const float* matrix = options->conversionMatrix;
-            uniformData.conversionMatrix = {{
-                matrix[0],
-                matrix[1],
-                matrix[2],
-                0.0,
-                matrix[3],
-                matrix[4],
-                matrix[5],
-                0.0,
-                matrix[6],
-                matrix[7],
-                matrix[8],
-                0.0,
-            }};
-
-            stepsMask |= kEncodeToGammaStep;
-            const float* encodingParams = options->dstTransferFunctionParameters;
-
-            uniformData.gammaEncodingParams = {
-                encodingParams[0], encodingParams[1], encodingParams[2], encodingParams[3],
-                encodingParams[4], encodingParams[5], encodingParams[6]};
-        }
-
-        if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
-            if (options->needsColorSpaceConversion ||
-                options->srcAlphaMode != options->dstAlphaMode) {
-                stepsMask |= kPremultiplyStep;
-            }
-        }
-
-        // Copy to *-srgb texture should keep the bytes exactly the same as copy
-        // to non-srgb texture. Add an extra decode-to-linear step so that after the
-        // sampler of *-srgb format texture applying encoding, the bytes keeps the same
-        // as non-srgb format texture.
-        // NOTE: CopyTextureForBrowser() doesn't need to accept *-srgb format texture as
-        // source input. But above operation also valid for *-srgb format texture input and
-        // non-srgb format dst texture.
-        // TODO(crbug.com/dawn/1195): Reinterpret to non-srgb texture view on *-srgb texture
-        // and use it as render attachment when possible.
-        // TODO(crbug.com/dawn/1195): Opt the condition for this extra step. It is possible to
-        // bypass this extra step in some cases.
-        if (isSrgbDstFormat) {
-            stepsMask |= kDecodeForSrgbDstFormat;
-            // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
-            // mathematics. Order: {G, A, B, C, D, E, F, }
-            uniformData.gammaDecodingForDstSrgbParams = {
-                2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0};
-        }
-
-        uniformData.stepsMask = stepsMask;
-
-        Ref<BufferBase> uniformBuffer;
-        DAWN_TRY_ASSIGN(
-            uniformBuffer,
-            utils::CreateBufferFromData(
-                device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform, {uniformData}));
-
-        // Prepare binding 1 resource: sampler
-        // Use default configuration, filterMode set to Nearest for min and mag.
-        SamplerDescriptor samplerDesc = {};
-        Ref<SamplerBase> sampler;
-        DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
-
-        // Prepare binding 2 resource: sampled texture
-        TextureViewDescriptor srcTextureViewDesc = {};
-        srcTextureViewDesc.baseMipLevel = source->mipLevel;
-        srcTextureViewDesc.mipLevelCount = 1;
-        srcTextureViewDesc.arrayLayerCount = 1;
-        Ref<TextureViewBase> srcTextureView;
-        DAWN_TRY_ASSIGN(srcTextureView,
-                        device->CreateTextureView(source->texture, &srcTextureViewDesc));
-
-        // Create bind group after all binding entries are set.
-        Ref<BindGroupBase> bindGroup;
-        DAWN_TRY_ASSIGN(bindGroup, utils::MakeBindGroup(
-                                       device, layout,
-                                       {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
-
-        // Create command encoder.
-        Ref<CommandEncoder> encoder;
-        DAWN_TRY_ASSIGN(encoder, device->CreateCommandEncoder());
-
-        // Prepare dst texture view as color Attachment.
-        TextureViewDescriptor dstTextureViewDesc;
-        dstTextureViewDesc.baseMipLevel = destination->mipLevel;
-        dstTextureViewDesc.mipLevelCount = 1;
-        dstTextureViewDesc.baseArrayLayer = destination->origin.z;
-        dstTextureViewDesc.arrayLayerCount = 1;
-        Ref<TextureViewBase> dstView;
-
-        DAWN_TRY_ASSIGN(dstView,
-                        device->CreateTextureView(destination->texture, &dstTextureViewDesc));
-        // Prepare render pass color attachment descriptor.
-        RenderPassColorAttachment colorAttachmentDesc;
-
-        colorAttachmentDesc.view = dstView.Get();
-        colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
-        colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
-        colorAttachmentDesc.clearValue = {0.0, 0.0, 0.0, 1.0};
-
-        // Create render pass.
-        RenderPassDescriptor renderPassDesc;
-        renderPassDesc.colorAttachmentCount = 1;
-        renderPassDesc.colorAttachments = &colorAttachmentDesc;
-        Ref<RenderPassEncoder> passEncoder = encoder->BeginRenderPass(&renderPassDesc);
-
-        // Start pipeline  and encode commands to complete
-        // the copy from src texture to dst texture with transformation.
-        passEncoder->APISetPipeline(pipeline);
-        passEncoder->APISetBindGroup(0, bindGroup.Get());
-        passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
-                                    copySize->height, 0.0, 1.0);
-        passEncoder->APIDraw(3);
-        passEncoder->APIEnd();
-
-        // Finsh encoding.
-        Ref<CommandBufferBase> commandBuffer;
-        DAWN_TRY_ASSIGN(commandBuffer, encoder->Finish());
-        CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
-
-        // Submit command buffer.
-        device->GetQueue()->APISubmit(1, &submitCommandBuffer);
-        return {};
+        uniformData.gammaEncodingParams = {encodingParams[0], encodingParams[1], encodingParams[2],
+                                           encodingParams[3], encodingParams[4], encodingParams[5],
+                                           encodingParams[6]};
     }
 
+    if (options->dstAlphaMode == wgpu::AlphaMode::Premultiplied) {
+        if (options->needsColorSpaceConversion || options->srcAlphaMode != options->dstAlphaMode) {
+            stepsMask |= kPremultiplyStep;
+        }
+    }
+
+    // Copy to *-srgb texture should keep the bytes exactly the same as copy
+    // to non-srgb texture. Add an extra decode-to-linear step so that after the
+    // sampler of *-srgb format texture applying encoding, the bytes stay the same
+    // as non-srgb format texture.
+    // NOTE: CopyTextureForBrowser() doesn't need to accept *-srgb format texture as
+    // source input. But the above operation is also valid for *-srgb format texture input and
+    // non-srgb format dst texture.
+    // TODO(crbug.com/dawn/1195): Reinterpret to non-srgb texture view on *-srgb texture
+    // and use it as render attachment when possible.
+    // TODO(crbug.com/dawn/1195): Opt the condition for this extra step. It is possible to
+    // bypass this extra step in some cases.
+    if (isSrgbDstFormat) {
+        stepsMask |= kDecodeForSrgbDstFormat;
+        // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
+        // mathematics. Order: {G, A, B, C, D, E, F, }
+        uniformData.gammaDecodingForDstSrgbParams = {
+            2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0};
+    }
+
+    uniformData.stepsMask = stepsMask;
+
+    Ref<BufferBase> uniformBuffer;
+    DAWN_TRY_ASSIGN(
+        uniformBuffer,
+        utils::CreateBufferFromData(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Uniform,
+                                    {uniformData}));
+
+    // Prepare binding 1 resource: sampler
+    // Use default configuration, filterMode set to Nearest for min and mag.
+    SamplerDescriptor samplerDesc = {};
+    Ref<SamplerBase> sampler;
+    DAWN_TRY_ASSIGN(sampler, device->CreateSampler(&samplerDesc));
+
+    // Prepare binding 2 resource: sampled texture
+    TextureViewDescriptor srcTextureViewDesc = {};
+    srcTextureViewDesc.baseMipLevel = source->mipLevel;
+    srcTextureViewDesc.mipLevelCount = 1;
+    srcTextureViewDesc.arrayLayerCount = 1;
+    Ref<TextureViewBase> srcTextureView;
+    DAWN_TRY_ASSIGN(srcTextureView,
+                    device->CreateTextureView(source->texture, &srcTextureViewDesc));
+
+    // Create bind group after all binding entries are set.
+    Ref<BindGroupBase> bindGroup;
+    DAWN_TRY_ASSIGN(bindGroup,
+                    utils::MakeBindGroup(device, layout,
+                                         {{0, uniformBuffer}, {1, sampler}, {2, srcTextureView}}));
+
+    // Create command encoder.
+    Ref<CommandEncoder> encoder;
+    DAWN_TRY_ASSIGN(encoder, device->CreateCommandEncoder());
+
+    // Prepare dst texture view as color Attachment.
+    TextureViewDescriptor dstTextureViewDesc;
+    dstTextureViewDesc.baseMipLevel = destination->mipLevel;
+    dstTextureViewDesc.mipLevelCount = 1;
+    dstTextureViewDesc.baseArrayLayer = destination->origin.z;
+    dstTextureViewDesc.arrayLayerCount = 1;
+    Ref<TextureViewBase> dstView;
+
+    DAWN_TRY_ASSIGN(dstView, device->CreateTextureView(destination->texture, &dstTextureViewDesc));
+    // Prepare render pass color attachment descriptor.
+    RenderPassColorAttachment colorAttachmentDesc;
+
+    colorAttachmentDesc.view = dstView.Get();
+    colorAttachmentDesc.loadOp = wgpu::LoadOp::Load;
+    colorAttachmentDesc.storeOp = wgpu::StoreOp::Store;
+    colorAttachmentDesc.clearValue = {0.0, 0.0, 0.0, 1.0};
+
+    // Create render pass.
+    RenderPassDescriptor renderPassDesc;
+    renderPassDesc.colorAttachmentCount = 1;
+    renderPassDesc.colorAttachments = &colorAttachmentDesc;
+    Ref<RenderPassEncoder> passEncoder = encoder->BeginRenderPass(&renderPassDesc);
+
+    // Start the pipeline and encode commands to complete
+    // the copy from src texture to dst texture with transformation.
+    passEncoder->APISetPipeline(pipeline);
+    passEncoder->APISetBindGroup(0, bindGroup.Get());
+    passEncoder->APISetViewport(destination->origin.x, destination->origin.y, copySize->width,
+                                copySize->height, 0.0, 1.0);
+    passEncoder->APIDraw(3);
+    passEncoder->APIEnd();
+
+    // Finish encoding.
+    Ref<CommandBufferBase> commandBuffer;
+    DAWN_TRY_ASSIGN(commandBuffer, encoder->Finish());
+    CommandBufferBase* submitCommandBuffer = commandBuffer.Get();
+
+    // Submit command buffer.
+    device->GetQueue()->APISubmit(1, &submitCommandBuffer);
+    return {};
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/CopyTextureForBrowserHelper.h b/src/dawn/native/CopyTextureForBrowserHelper.h
index 86e3135..0e427ba 100644
--- a/src/dawn/native/CopyTextureForBrowserHelper.h
+++ b/src/dawn/native/CopyTextureForBrowserHelper.h
@@ -19,22 +19,22 @@
 #include "dawn/native/ObjectBase.h"
 
 namespace dawn::native {
-    class DeviceBase;
-    struct Extent3D;
-    struct ImageCopyTexture;
-    struct CopyTextureForBrowserOptions;
+class DeviceBase;
+struct Extent3D;
+struct ImageCopyTexture;
+struct CopyTextureForBrowserOptions;
 
-    MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
-                                             const ImageCopyTexture* source,
-                                             const ImageCopyTexture* destination,
-                                             const Extent3D* copySize,
-                                             const CopyTextureForBrowserOptions* options);
+MaybeError ValidateCopyTextureForBrowser(DeviceBase* device,
+                                         const ImageCopyTexture* source,
+                                         const ImageCopyTexture* destination,
+                                         const Extent3D* copySize,
+                                         const CopyTextureForBrowserOptions* options);
 
-    MaybeError DoCopyTextureForBrowser(DeviceBase* device,
-                                       const ImageCopyTexture* source,
-                                       const ImageCopyTexture* destination,
-                                       const Extent3D* copySize,
-                                       const CopyTextureForBrowserOptions* options);
+MaybeError DoCopyTextureForBrowser(DeviceBase* device,
+                                   const ImageCopyTexture* source,
+                                   const ImageCopyTexture* destination,
+                                   const Extent3D* copySize,
+                                   const CopyTextureForBrowserOptions* options);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/CreatePipelineAsyncTask.cpp b/src/dawn/native/CreatePipelineAsyncTask.cpp
index 8a91ccd..9ecd048 100644
--- a/src/dawn/native/CreatePipelineAsyncTask.cpp
+++ b/src/dawn/native/CreatePipelineAsyncTask.cpp
@@ -26,183 +26,175 @@
 
 namespace dawn::native {
 
-    CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(
-        std::string errorMessage,
-        void* userdata)
-        : mErrorMessage(errorMessage), mUserData(userdata) {
+CreatePipelineAsyncCallbackTaskBase::CreatePipelineAsyncCallbackTaskBase(std::string errorMessage,
+                                                                         void* userdata)
+    : mErrorMessage(errorMessage), mUserData(userdata) {}
+
+CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
+    Ref<ComputePipelineBase> pipeline,
+    std::string errorMessage,
+    WGPUCreateComputePipelineAsyncCallback callback,
+    void* userdata)
+    : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+      mPipeline(std::move(pipeline)),
+      mCreateComputePipelineAsyncCallback(callback) {}
+
+void CreateComputePipelineAsyncCallbackTask::Finish() {
+    ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+    if (mPipeline.Get() != nullptr) {
+        mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+                                            ToAPI(mPipeline.Detach()), "", mUserData);
+    } else {
+        mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+                                            mErrorMessage.c_str(), mUserData);
+    }
+}
+
+void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
+    ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+    mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                                        "Device destroyed before callback", mUserData);
+}
+
+void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
+    ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+
+    mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                                        "Device lost before callback", mUserData);
+}
+
+CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
+    Ref<RenderPipelineBase> pipeline,
+    std::string errorMessage,
+    WGPUCreateRenderPipelineAsyncCallback callback,
+    void* userdata)
+    : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
+      mPipeline(std::move(pipeline)),
+      mCreateRenderPipelineAsyncCallback(callback) {}
+
+void CreateRenderPipelineAsyncCallbackTask::Finish() {
+    ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+    if (mPipeline.Get() != nullptr) {
+        mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
+                                           ToAPI(mPipeline.Detach()), "", mUserData);
+    } else {
+        mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
+                                           mErrorMessage.c_str(), mUserData);
+    }
+}
+
+void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
+    ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+    mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                                       "Device destroyed before callback", mUserData);
+}
+
+void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
+    ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
+
+    mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                                       "Device lost before callback", mUserData);
+}
+
+CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
+    Ref<ComputePipelineBase> nonInitializedComputePipeline,
+    WGPUCreateComputePipelineAsyncCallback callback,
+    void* userdata)
+    : mComputePipeline(std::move(nonInitializedComputePipeline)),
+      mCallback(callback),
+      mUserdata(userdata) {
+    ASSERT(mComputePipeline != nullptr);
+}
+
+void CreateComputePipelineAsyncTask::Run() {
+    const char* eventLabel = utils::GetLabelForTrace(mComputePipeline->GetLabel().c_str());
+
+    DeviceBase* device = mComputePipeline->GetDevice();
+    TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
+                          "CreateComputePipelineAsyncTask::RunAsync", this, "label", eventLabel);
+    TRACE_EVENT1(device->GetPlatform(), General, "CreateComputePipelineAsyncTask::Run", "label",
+                 eventLabel);
+
+    MaybeError maybeError = mComputePipeline->Initialize();
+    std::string errorMessage;
+    if (maybeError.IsError()) {
+        mComputePipeline = nullptr;
+        errorMessage = maybeError.AcquireError()->GetMessage();
     }
 
-    CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask(
-        Ref<ComputePipelineBase> pipeline,
-        std::string errorMessage,
-        WGPUCreateComputePipelineAsyncCallback callback,
-        void* userdata)
-        : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
-          mPipeline(std::move(pipeline)),
-          mCreateComputePipelineAsyncCallback(callback) {
+    device->AddComputePipelineAsyncCallbackTask(mComputePipeline, errorMessage, mCallback,
+                                                mUserdata);
+}
+
+void CreateComputePipelineAsyncTask::RunAsync(
+    std::unique_ptr<CreateComputePipelineAsyncTask> task) {
+    DeviceBase* device = task->mComputePipeline->GetDevice();
+
+    const char* eventLabel = utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
+
+    // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
+    // since C++14:
+    // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+    auto asyncTask = [taskPtr = task.release()] {
+        std::unique_ptr<CreateComputePipelineAsyncTask> innerTaskPtr(taskPtr);
+        innerTaskPtr->Run();
+    };
+
+    TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+                            "CreateComputePipelineAsyncTask::RunAsync", task.get(), "label",
+                            eventLabel);
+    device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+}
+
+CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
+    Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+    WGPUCreateRenderPipelineAsyncCallback callback,
+    void* userdata)
+    : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
+      mCallback(callback),
+      mUserdata(userdata) {
+    ASSERT(mRenderPipeline != nullptr);
+}
+
+void CreateRenderPipelineAsyncTask::Run() {
+    const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
+
+    DeviceBase* device = mRenderPipeline->GetDevice();
+    TRACE_EVENT_FLOW_END1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::RunAsync",
+                          this, "label", eventLabel);
+    TRACE_EVENT1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::Run", "label",
+                 eventLabel);
+
+    MaybeError maybeError = mRenderPipeline->Initialize();
+    std::string errorMessage;
+    if (maybeError.IsError()) {
+        mRenderPipeline = nullptr;
+        errorMessage = maybeError.AcquireError()->GetMessage();
     }
 
-    void CreateComputePipelineAsyncCallbackTask::Finish() {
-        ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+    device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback, mUserdata);
+}
 
-        if (mPipeline.Get() != nullptr) {
-            mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
-                                                ToAPI(mPipeline.Detach()), "", mUserData);
-        } else {
-            mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
-                                                mErrorMessage.c_str(), mUserData);
-        }
-    }
+void CreateRenderPipelineAsyncTask::RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
+    DeviceBase* device = task->mRenderPipeline->GetDevice();
 
-    void CreateComputePipelineAsyncCallbackTask::HandleShutDown() {
-        ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
+    const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
 
-        mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
-                                            "Device destroyed before callback", mUserData);
-    }
+    // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
+    // since C++14:
+    // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
+    auto asyncTask = [taskPtr = task.release()] {
+        std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
+        innerTaskPtr->Run();
+    };
 
-    void CreateComputePipelineAsyncCallbackTask::HandleDeviceLoss() {
-        ASSERT(mCreateComputePipelineAsyncCallback != nullptr);
-
-        mCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
-                                            "Device lost before callback", mUserData);
-    }
-
-    CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask(
-        Ref<RenderPipelineBase> pipeline,
-        std::string errorMessage,
-        WGPUCreateRenderPipelineAsyncCallback callback,
-        void* userdata)
-        : CreatePipelineAsyncCallbackTaskBase(errorMessage, userdata),
-          mPipeline(std::move(pipeline)),
-          mCreateRenderPipelineAsyncCallback(callback) {
-    }
-
-    void CreateRenderPipelineAsyncCallbackTask::Finish() {
-        ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
-        if (mPipeline.Get() != nullptr) {
-            mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Success,
-                                               ToAPI(mPipeline.Detach()), "", mUserData);
-        } else {
-            mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_Error, nullptr,
-                                               mErrorMessage.c_str(), mUserData);
-        }
-    }
-
-    void CreateRenderPipelineAsyncCallbackTask::HandleShutDown() {
-        ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
-        mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
-                                           "Device destroyed before callback", mUserData);
-    }
-
-    void CreateRenderPipelineAsyncCallbackTask::HandleDeviceLoss() {
-        ASSERT(mCreateRenderPipelineAsyncCallback != nullptr);
-
-        mCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
-                                           "Device lost before callback", mUserData);
-    }
-
-    CreateComputePipelineAsyncTask::CreateComputePipelineAsyncTask(
-        Ref<ComputePipelineBase> nonInitializedComputePipeline,
-        WGPUCreateComputePipelineAsyncCallback callback,
-        void* userdata)
-        : mComputePipeline(std::move(nonInitializedComputePipeline)),
-          mCallback(callback),
-          mUserdata(userdata) {
-        ASSERT(mComputePipeline != nullptr);
-    }
-
-    void CreateComputePipelineAsyncTask::Run() {
-        const char* eventLabel = utils::GetLabelForTrace(mComputePipeline->GetLabel().c_str());
-
-        DeviceBase* device = mComputePipeline->GetDevice();
-        TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
-                              "CreateComputePipelineAsyncTask::RunAsync", this, "label",
-                              eventLabel);
-        TRACE_EVENT1(device->GetPlatform(), General, "CreateComputePipelineAsyncTask::Run", "label",
-                     eventLabel);
-
-        MaybeError maybeError = mComputePipeline->Initialize();
-        std::string errorMessage;
-        if (maybeError.IsError()) {
-            mComputePipeline = nullptr;
-            errorMessage = maybeError.AcquireError()->GetMessage();
-        }
-
-        device->AddComputePipelineAsyncCallbackTask(mComputePipeline, errorMessage, mCallback,
-                                                    mUserdata);
-    }
-
-    void CreateComputePipelineAsyncTask::RunAsync(
-        std::unique_ptr<CreateComputePipelineAsyncTask> task) {
-        DeviceBase* device = task->mComputePipeline->GetDevice();
-
-        const char* eventLabel =
-            utils::GetLabelForTrace(task->mComputePipeline->GetLabel().c_str());
-
-        // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
-        // since C++14:
-        // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
-        auto asyncTask = [taskPtr = task.release()] {
-            std::unique_ptr<CreateComputePipelineAsyncTask> innnerTaskPtr(taskPtr);
-            innnerTaskPtr->Run();
-        };
-
-        TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
-                                "CreateComputePipelineAsyncTask::RunAsync", task.get(), "label",
-                                eventLabel);
-        device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
-    }
-
-    CreateRenderPipelineAsyncTask::CreateRenderPipelineAsyncTask(
-        Ref<RenderPipelineBase> nonInitializedRenderPipeline,
-        WGPUCreateRenderPipelineAsyncCallback callback,
-        void* userdata)
-        : mRenderPipeline(std::move(nonInitializedRenderPipeline)),
-          mCallback(callback),
-          mUserdata(userdata) {
-        ASSERT(mRenderPipeline != nullptr);
-    }
-
-    void CreateRenderPipelineAsyncTask::Run() {
-        const char* eventLabel = utils::GetLabelForTrace(mRenderPipeline->GetLabel().c_str());
-
-        DeviceBase* device = mRenderPipeline->GetDevice();
-        TRACE_EVENT_FLOW_END1(device->GetPlatform(), General,
-                              "CreateRenderPipelineAsyncTask::RunAsync", this, "label", eventLabel);
-        TRACE_EVENT1(device->GetPlatform(), General, "CreateRenderPipelineAsyncTask::Run", "label",
-                     eventLabel);
-
-        MaybeError maybeError = mRenderPipeline->Initialize();
-        std::string errorMessage;
-        if (maybeError.IsError()) {
-            mRenderPipeline = nullptr;
-            errorMessage = maybeError.AcquireError()->GetMessage();
-        }
-
-        device->AddRenderPipelineAsyncCallbackTask(mRenderPipeline, errorMessage, mCallback,
-                                                   mUserdata);
-    }
-
-    void CreateRenderPipelineAsyncTask::RunAsync(
-        std::unique_ptr<CreateRenderPipelineAsyncTask> task) {
-        DeviceBase* device = task->mRenderPipeline->GetDevice();
-
-        const char* eventLabel = utils::GetLabelForTrace(task->mRenderPipeline->GetLabel().c_str());
-
-        // Using "taskPtr = std::move(task)" causes compilation error while it should be supported
-        // since C++14:
-        // https://docs.microsoft.com/en-us/cpp/cpp/lambda-expressions-in-cpp?view=msvc-160
-        auto asyncTask = [taskPtr = task.release()] {
-            std::unique_ptr<CreateRenderPipelineAsyncTask> innerTaskPtr(taskPtr);
-            innerTaskPtr->Run();
-        };
-
-        TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
-                                "CreateRenderPipelineAsyncTask::RunAsync", task.get(), "label",
-                                eventLabel);
-        device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
-    }
+    TRACE_EVENT_FLOW_BEGIN1(device->GetPlatform(), General,
+                            "CreateRenderPipelineAsyncTask::RunAsync", task.get(), "label",
+                            eventLabel);
+    device->GetAsyncTaskManager()->PostTask(std::move(asyncTask));
+}
 }  // namespace dawn::native
diff --git a/src/dawn/native/CreatePipelineAsyncTask.h b/src/dawn/native/CreatePipelineAsyncTask.h
index e6cfe27..cd98154 100644
--- a/src/dawn/native/CreatePipelineAsyncTask.h
+++ b/src/dawn/native/CreatePipelineAsyncTask.h
@@ -25,86 +25,86 @@
 
 namespace dawn::native {
 
-    class ComputePipelineBase;
-    class DeviceBase;
-    class PipelineLayoutBase;
-    class RenderPipelineBase;
-    class ShaderModuleBase;
-    struct FlatComputePipelineDescriptor;
+class ComputePipelineBase;
+class DeviceBase;
+class PipelineLayoutBase;
+class RenderPipelineBase;
+class ShaderModuleBase;
+struct FlatComputePipelineDescriptor;
 
-    struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
-        CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
+struct CreatePipelineAsyncCallbackTaskBase : CallbackTask {
+    CreatePipelineAsyncCallbackTaskBase(std::string errorMessage, void* userData);
 
-      protected:
-        std::string mErrorMessage;
-        void* mUserData;
-    };
+  protected:
+    std::string mErrorMessage;
+    void* mUserData;
+};
 
-    struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
-        CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
-                                               std::string errorMessage,
-                                               WGPUCreateComputePipelineAsyncCallback callback,
-                                               void* userdata);
+struct CreateComputePipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+    CreateComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+                                           std::string errorMessage,
+                                           WGPUCreateComputePipelineAsyncCallback callback,
+                                           void* userdata);
 
-        void Finish() override;
-        void HandleShutDown() final;
-        void HandleDeviceLoss() final;
+    void Finish() override;
+    void HandleShutDown() final;
+    void HandleDeviceLoss() final;
 
-      protected:
-        Ref<ComputePipelineBase> mPipeline;
-        WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
-    };
+  protected:
+    Ref<ComputePipelineBase> mPipeline;
+    WGPUCreateComputePipelineAsyncCallback mCreateComputePipelineAsyncCallback;
+};
 
-    struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
-        CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
-                                              std::string errorMessage,
-                                              WGPUCreateRenderPipelineAsyncCallback callback,
-                                              void* userdata);
+struct CreateRenderPipelineAsyncCallbackTask : CreatePipelineAsyncCallbackTaskBase {
+    CreateRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+                                          std::string errorMessage,
+                                          WGPUCreateRenderPipelineAsyncCallback callback,
+                                          void* userdata);
 
-        void Finish() override;
-        void HandleShutDown() final;
-        void HandleDeviceLoss() final;
+    void Finish() override;
+    void HandleShutDown() final;
+    void HandleDeviceLoss() final;
 
-      protected:
-        Ref<RenderPipelineBase> mPipeline;
-        WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
-    };
+  protected:
+    Ref<RenderPipelineBase> mPipeline;
+    WGPUCreateRenderPipelineAsyncCallback mCreateRenderPipelineAsyncCallback;
+};
 
-    // CreateComputePipelineAsyncTask defines all the inputs and outputs of
-    // CreateComputePipelineAsync() tasks, which are the same among all the backends.
-    class CreateComputePipelineAsyncTask {
-      public:
-        CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
-                                       WGPUCreateComputePipelineAsyncCallback callback,
-                                       void* userdata);
+// CreateComputePipelineAsyncTask defines all the inputs and outputs of
+// CreateComputePipelineAsync() tasks, which are the same among all the backends.
+class CreateComputePipelineAsyncTask {
+  public:
+    CreateComputePipelineAsyncTask(Ref<ComputePipelineBase> nonInitializedComputePipeline,
+                                   WGPUCreateComputePipelineAsyncCallback callback,
+                                   void* userdata);
 
-        void Run();
+    void Run();
 
-        static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
+    static void RunAsync(std::unique_ptr<CreateComputePipelineAsyncTask> task);
 
-      private:
-        Ref<ComputePipelineBase> mComputePipeline;
-        WGPUCreateComputePipelineAsyncCallback mCallback;
-        void* mUserdata;
-    };
+  private:
+    Ref<ComputePipelineBase> mComputePipeline;
+    WGPUCreateComputePipelineAsyncCallback mCallback;
+    void* mUserdata;
+};
 
-    // CreateRenderPipelineAsyncTask defines all the inputs and outputs of
-    // CreateRenderPipelineAsync() tasks, which are the same among all the backends.
-    class CreateRenderPipelineAsyncTask {
-      public:
-        CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
-                                      WGPUCreateRenderPipelineAsyncCallback callback,
-                                      void* userdata);
+// CreateRenderPipelineAsyncTask defines all the inputs and outputs of
+// CreateRenderPipelineAsync() tasks, which are the same among all the backends.
+class CreateRenderPipelineAsyncTask {
+  public:
+    CreateRenderPipelineAsyncTask(Ref<RenderPipelineBase> nonInitializedRenderPipeline,
+                                  WGPUCreateRenderPipelineAsyncCallback callback,
+                                  void* userdata);
 
-        void Run();
+    void Run();
 
-        static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
+    static void RunAsync(std::unique_ptr<CreateRenderPipelineAsyncTask> task);
 
-      private:
-        Ref<RenderPipelineBase> mRenderPipeline;
-        WGPUCreateRenderPipelineAsyncCallback mCallback;
-        void* mUserdata;
-    };
+  private:
+    Ref<RenderPipelineBase> mRenderPipeline;
+    WGPUCreateRenderPipelineAsyncCallback mCallback;
+    void* mUserdata;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/DawnNative.cpp b/src/dawn/native/DawnNative.cpp
index 4787bbe..e17c054 100644
--- a/src/dawn/native/DawnNative.cpp
+++ b/src/dawn/native/DawnNative.cpp
@@ -28,289 +28,280 @@
 
 namespace dawn::native {
 
-    namespace {
-        struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
-            explicit ComboDeprecatedDawnDeviceDescriptor(
-                const DawnDeviceDescriptor* deviceDescriptor) {
-                dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
-                                      "WGPUDeviceDescriptor instead.";
+namespace {
+struct ComboDeprecatedDawnDeviceDescriptor : DeviceDescriptor {
+    explicit ComboDeprecatedDawnDeviceDescriptor(const DawnDeviceDescriptor* deviceDescriptor) {
+        dawn::WarningLog() << "DawnDeviceDescriptor is deprecated. Please use "
+                              "WGPUDeviceDescriptor instead.";
 
-                DeviceDescriptor* desc = this;
+        DeviceDescriptor* desc = this;
 
-                if (deviceDescriptor != nullptr) {
-                    desc->nextInChain = &mTogglesDesc;
-                    mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
-                    mTogglesDesc.forceEnabledTogglesCount =
-                        deviceDescriptor->forceEnabledToggles.size();
-                    mTogglesDesc.forceDisabledToggles =
-                        deviceDescriptor->forceDisabledToggles.data();
-                    mTogglesDesc.forceDisabledTogglesCount =
-                        deviceDescriptor->forceDisabledToggles.size();
+        if (deviceDescriptor != nullptr) {
+            desc->nextInChain = &mTogglesDesc;
+            mTogglesDesc.forceEnabledToggles = deviceDescriptor->forceEnabledToggles.data();
+            mTogglesDesc.forceEnabledTogglesCount = deviceDescriptor->forceEnabledToggles.size();
+            mTogglesDesc.forceDisabledToggles = deviceDescriptor->forceDisabledToggles.data();
+            mTogglesDesc.forceDisabledTogglesCount = deviceDescriptor->forceDisabledToggles.size();
 
-                    desc->requiredLimits =
-                        reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
+            desc->requiredLimits =
+                reinterpret_cast<const RequiredLimits*>(deviceDescriptor->requiredLimits);
 
-                    FeaturesInfo featuresInfo;
-                    for (const char* featureStr : deviceDescriptor->requiredFeatures) {
-                        mRequiredFeatures.push_back(featuresInfo.FeatureNameToAPIEnum(featureStr));
-                    }
-                    desc->requiredFeatures = mRequiredFeatures.data();
-                    desc->requiredFeaturesCount = mRequiredFeatures.size();
-                }
+            FeaturesInfo featuresInfo;
+            for (const char* featureStr : deviceDescriptor->requiredFeatures) {
+                mRequiredFeatures.push_back(featuresInfo.FeatureNameToAPIEnum(featureStr));
             }
-
-            DawnTogglesDeviceDescriptor mTogglesDesc = {};
-            std::vector<wgpu::FeatureName> mRequiredFeatures = {};
-        };
-    }  // namespace
-
-    const DawnProcTable& GetProcsAutogen();
-
-    const DawnProcTable& GetProcs() {
-        return GetProcsAutogen();
+            desc->requiredFeatures = mRequiredFeatures.data();
+            desc->requiredFeaturesCount = mRequiredFeatures.size();
+        }
     }
 
-    std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
-        return FromAPI(device)->GetTogglesUsed();
+    DawnTogglesDeviceDescriptor mTogglesDesc = {};
+    std::vector<wgpu::FeatureName> mRequiredFeatures = {};
+};
+}  // namespace
+
+const DawnProcTable& GetProcsAutogen();
+
+const DawnProcTable& GetProcs() {
+    return GetProcsAutogen();
+}
+
+std::vector<const char*> GetTogglesUsed(WGPUDevice device) {
+    return FromAPI(device)->GetTogglesUsed();
+}
+
+// Adapter
+
+Adapter::Adapter() = default;
+
+Adapter::Adapter(AdapterBase* impl) : mImpl(impl) {
+    if (mImpl != nullptr) {
+        mImpl->Reference();
     }
+}
 
-    // Adapter
+Adapter::~Adapter() {
+    if (mImpl != nullptr) {
+        mImpl->Release();
+    }
+    mImpl = nullptr;
+}
 
-    Adapter::Adapter() = default;
+Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {}
 
-    Adapter::Adapter(AdapterBase* impl) : mImpl(impl) {
-        if (mImpl != nullptr) {
+Adapter& Adapter::operator=(const Adapter& other) {
+    if (this != &other) {
+        if (mImpl) {
+            mImpl->Release();
+        }
+        mImpl = other.mImpl;
+        if (mImpl) {
             mImpl->Reference();
         }
     }
+    return *this;
+}
 
-    Adapter::~Adapter() {
-        if (mImpl != nullptr) {
-            mImpl->Release();
-        }
+void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
+    GetProperties(reinterpret_cast<WGPUAdapterProperties*>(properties));
+}
+
+void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+    mImpl->APIGetProperties(FromAPI(properties));
+}
+
+WGPUAdapter Adapter::Get() const {
+    return ToAPI(mImpl);
+}
+
+std::vector<const char*> Adapter::GetSupportedFeatures() const {
+    FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
+    return supportedFeaturesSet.GetEnabledFeatureNames();
+}
+
+WGPUDeviceProperties Adapter::GetAdapterProperties() const {
+    return mImpl->GetAdapterProperties();
+}
+
+bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+    return mImpl->GetLimits(FromAPI(limits));
+}
+
+void Adapter::SetUseTieredLimits(bool useTieredLimits) {
+    mImpl->SetUseTieredLimits(useTieredLimits);
+}
+
+bool Adapter::SupportsExternalImages() const {
+    return mImpl->SupportsExternalImages();
+}
+
+Adapter::operator bool() const {
+    return mImpl != nullptr;
+}
+
+WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
+    ComboDeprecatedDawnDeviceDescriptor desc(deviceDescriptor);
+    return ToAPI(mImpl->APICreateDevice(&desc));
+}
+
+WGPUDevice Adapter::CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor) {
+    return CreateDevice(reinterpret_cast<const WGPUDeviceDescriptor*>(deviceDescriptor));
+}
+
+WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor) {
+    return ToAPI(mImpl->APICreateDevice(FromAPI(deviceDescriptor)));
+}
+
+void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
+                            WGPURequestDeviceCallback callback,
+                            void* userdata) {
+    ComboDeprecatedDawnDeviceDescriptor desc(descriptor);
+    mImpl->APIRequestDevice(&desc, callback, userdata);
+}
+
+void Adapter::RequestDevice(const wgpu::DeviceDescriptor* descriptor,
+                            WGPURequestDeviceCallback callback,
+                            void* userdata) {
+    mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+                            userdata);
+}
+
+void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                            WGPURequestDeviceCallback callback,
+                            void* userdata) {
+    mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
+                            userdata);
+}
+
+void Adapter::ResetInternalDeviceForTesting() {
+    mImpl->ResetInternalDeviceForTesting();
+}
+
+// AdapterDiscoverOptionsBase
+
+AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
+    : backendType(type) {}
+
+// Instance
+
+Instance::Instance(const WGPUInstanceDescriptor* desc)
+    : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {}
+
+Instance::~Instance() {
+    if (mImpl != nullptr) {
+        mImpl->Release();
         mImpl = nullptr;
     }
+}
 
-    Adapter::Adapter(const Adapter& other) : Adapter(other.mImpl) {
+void Instance::DiscoverDefaultAdapters() {
+    mImpl->DiscoverDefaultAdapters();
+}
+
+bool Instance::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+    return mImpl->DiscoverAdapters(options);
+}
+
+std::vector<Adapter> Instance::GetAdapters() const {
+    // Adapters are owned by mImpl so it is safe to return non RAII pointers to them
+    std::vector<Adapter> adapters;
+    for (const Ref<AdapterBase>& adapter : mImpl->GetAdapters()) {
+        adapters.push_back(Adapter(adapter.Get()));
     }
+    return adapters;
+}
 
-    Adapter& Adapter::operator=(const Adapter& other) {
-        if (this != &other) {
-            if (mImpl) {
-                mImpl->Release();
-            }
-            mImpl = other.mImpl;
-            if (mImpl) {
-                mImpl->Reference();
-            }
-        }
-        return *this;
+const ToggleInfo* Instance::GetToggleInfo(const char* toggleName) {
+    return mImpl->GetToggleInfo(toggleName);
+}
+
+const FeatureInfo* Instance::GetFeatureInfo(WGPUFeatureName feature) {
+    return mImpl->GetFeatureInfo(static_cast<wgpu::FeatureName>(feature));
+}
+
+void Instance::EnableBackendValidation(bool enableBackendValidation) {
+    if (enableBackendValidation) {
+        mImpl->SetBackendValidationLevel(BackendValidationLevel::Full);
     }
+}
 
-    void Adapter::GetProperties(wgpu::AdapterProperties* properties) const {
-        GetProperties(reinterpret_cast<WGPUAdapterProperties*>(properties));
-    }
+void Instance::SetBackendValidationLevel(BackendValidationLevel level) {
+    mImpl->SetBackendValidationLevel(level);
+}
 
-    void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
-        mImpl->APIGetProperties(FromAPI(properties));
-    }
+void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+    mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
+}
 
-    WGPUAdapter Adapter::Get() const {
-        return ToAPI(mImpl);
-    }
+// TODO(dawn:1374) Deprecate this once it is passed via the descriptor.
+void Instance::SetPlatform(dawn::platform::Platform* platform) {
+    mImpl->SetPlatform(platform);
+}
 
-    std::vector<const char*> Adapter::GetSupportedFeatures() const {
-        FeaturesSet supportedFeaturesSet = mImpl->GetSupportedFeatures();
-        return supportedFeaturesSet.GetEnabledFeatureNames();
-    }
+WGPUInstance Instance::Get() const {
+    return ToAPI(mImpl);
+}
 
-    WGPUDeviceProperties Adapter::GetAdapterProperties() const {
-        return mImpl->GetAdapterProperties();
-    }
+size_t GetLazyClearCountForTesting(WGPUDevice device) {
+    return FromAPI(device)->GetLazyClearCountForTesting();
+}
 
-    bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
-        return mImpl->GetLimits(FromAPI(limits));
-    }
+size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
+    return FromAPI(device)->GetDeprecationWarningCountForTesting();
+}
 
-    void Adapter::SetUseTieredLimits(bool useTieredLimits) {
-        mImpl->SetUseTieredLimits(useTieredLimits);
-    }
+bool IsTextureSubresourceInitialized(WGPUTexture texture,
+                                     uint32_t baseMipLevel,
+                                     uint32_t levelCount,
+                                     uint32_t baseArrayLayer,
+                                     uint32_t layerCount,
+                                     WGPUTextureAspect cAspect) {
+    TextureBase* textureBase = FromAPI(texture);
 
-    bool Adapter::SupportsExternalImages() const {
-        return mImpl->SupportsExternalImages();
-    }
+    Aspect aspect =
+        ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
+    SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
+    return textureBase->IsSubresourceContentInitialized(range);
+}
 
-    Adapter::operator bool() const {
-        return mImpl != nullptr;
-    }
+std::vector<const char*> GetProcMapNamesForTestingInternal();
 
-    WGPUDevice Adapter::CreateDevice(const DawnDeviceDescriptor* deviceDescriptor) {
-        ComboDeprecatedDawnDeviceDescriptor desc(deviceDescriptor);
-        return ToAPI(mImpl->APICreateDevice(&desc));
-    }
+std::vector<const char*> GetProcMapNamesForTesting() {
+    return GetProcMapNamesForTestingInternal();
+}
 
-    WGPUDevice Adapter::CreateDevice(const wgpu::DeviceDescriptor* deviceDescriptor) {
-        return CreateDevice(reinterpret_cast<const WGPUDeviceDescriptor*>(deviceDescriptor));
-    }
+DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
+    return FromAPI(device)->APITick();
+}
 
-    WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor* deviceDescriptor) {
-        return ToAPI(mImpl->APICreateDevice(FromAPI(deviceDescriptor)));
-    }
+// ExternalImageDescriptor
 
-    void Adapter::RequestDevice(const DawnDeviceDescriptor* descriptor,
-                                WGPURequestDeviceCallback callback,
-                                void* userdata) {
-        ComboDeprecatedDawnDeviceDescriptor desc(descriptor);
-        mImpl->APIRequestDevice(&desc, callback, userdata);
-    }
+ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {}
 
-    void Adapter::RequestDevice(const wgpu::DeviceDescriptor* descriptor,
-                                WGPURequestDeviceCallback callback,
-                                void* userdata) {
-        mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
-                                userdata);
-    }
+ExternalImageType ExternalImageDescriptor::GetType() const {
+    return mType;
+}
 
-    void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
-                                WGPURequestDeviceCallback callback,
-                                void* userdata) {
-        mImpl->APIRequestDevice(reinterpret_cast<const DeviceDescriptor*>(descriptor), callback,
-                                userdata);
-    }
+// ExternalImageExportInfo
 
-    void Adapter::ResetInternalDeviceForTesting() {
-        mImpl->ResetInternalDeviceForTesting();
-    }
+ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {}
 
-    // AdapterDiscoverOptionsBase
+ExternalImageType ExternalImageExportInfo::GetType() const {
+    return mType;
+}
 
-    AdapterDiscoveryOptionsBase::AdapterDiscoveryOptionsBase(WGPUBackendType type)
-        : backendType(type) {
-    }
+const char* GetObjectLabelForTesting(void* objectHandle) {
+    ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
+    return object->GetLabel().c_str();
+}
 
-    // Instance
+uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
+    return FromAPI(buffer)->GetAllocatedSize();
+}
 
-    Instance::Instance(const WGPUInstanceDescriptor* desc)
-        : mImpl(APICreateInstance(reinterpret_cast<const InstanceDescriptor*>(desc))) {
-    }
-
-    Instance::~Instance() {
-        if (mImpl != nullptr) {
-            mImpl->Release();
-            mImpl = nullptr;
-        }
-    }
-
-    void Instance::DiscoverDefaultAdapters() {
-        mImpl->DiscoverDefaultAdapters();
-    }
-
-    bool Instance::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
-        return mImpl->DiscoverAdapters(options);
-    }
-
-    std::vector<Adapter> Instance::GetAdapters() const {
-        // Adapters are owned by mImpl so it is safe to return non RAII pointers to them
-        std::vector<Adapter> adapters;
-        for (const Ref<AdapterBase>& adapter : mImpl->GetAdapters()) {
-            adapters.push_back(Adapter(adapter.Get()));
-        }
-        return adapters;
-    }
-
-    const ToggleInfo* Instance::GetToggleInfo(const char* toggleName) {
-        return mImpl->GetToggleInfo(toggleName);
-    }
-
-    const FeatureInfo* Instance::GetFeatureInfo(WGPUFeatureName feature) {
-        return mImpl->GetFeatureInfo(static_cast<wgpu::FeatureName>(feature));
-    }
-
-    void Instance::EnableBackendValidation(bool enableBackendValidation) {
-        if (enableBackendValidation) {
-            mImpl->SetBackendValidationLevel(BackendValidationLevel::Full);
-        }
-    }
-
-    void Instance::SetBackendValidationLevel(BackendValidationLevel level) {
-        mImpl->SetBackendValidationLevel(level);
-    }
-
-    void Instance::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
-        mImpl->EnableBeginCaptureOnStartup(beginCaptureOnStartup);
-    }
-
-    // TODO(dawn:1374) Deprecate this once it is passed via the descriptor.
-    void Instance::SetPlatform(dawn::platform::Platform* platform) {
-        mImpl->SetPlatform(platform);
-    }
-
-    WGPUInstance Instance::Get() const {
-        return ToAPI(mImpl);
-    }
-
-    size_t GetLazyClearCountForTesting(WGPUDevice device) {
-        return FromAPI(device)->GetLazyClearCountForTesting();
-    }
-
-    size_t GetDeprecationWarningCountForTesting(WGPUDevice device) {
-        return FromAPI(device)->GetDeprecationWarningCountForTesting();
-    }
-
-    bool IsTextureSubresourceInitialized(WGPUTexture texture,
-                                         uint32_t baseMipLevel,
-                                         uint32_t levelCount,
-                                         uint32_t baseArrayLayer,
-                                         uint32_t layerCount,
-                                         WGPUTextureAspect cAspect) {
-        TextureBase* textureBase = FromAPI(texture);
-
-        Aspect aspect =
-            ConvertAspect(textureBase->GetFormat(), static_cast<wgpu::TextureAspect>(cAspect));
-        SubresourceRange range(aspect, {baseArrayLayer, layerCount}, {baseMipLevel, levelCount});
-        return textureBase->IsSubresourceContentInitialized(range);
-    }
-
-    std::vector<const char*> GetProcMapNamesForTestingInternal();
-
-    std::vector<const char*> GetProcMapNamesForTesting() {
-        return GetProcMapNamesForTestingInternal();
-    }
-
-    DAWN_NATIVE_EXPORT bool DeviceTick(WGPUDevice device) {
-        return FromAPI(device)->APITick();
-    }
-
-    // ExternalImageDescriptor
-
-    ExternalImageDescriptor::ExternalImageDescriptor(ExternalImageType type) : mType(type) {
-    }
-
-    ExternalImageType ExternalImageDescriptor::GetType() const {
-        return mType;
-    }
-
-    // ExternalImageExportInfo
-
-    ExternalImageExportInfo::ExternalImageExportInfo(ExternalImageType type) : mType(type) {
-    }
-
-    ExternalImageType ExternalImageExportInfo::GetType() const {
-        return mType;
-    }
-
-    const char* GetObjectLabelForTesting(void* objectHandle) {
-        ApiObjectBase* object = reinterpret_cast<ApiObjectBase*>(objectHandle);
-        return object->GetLabel().c_str();
-    }
-
-    uint64_t GetAllocatedSizeForTesting(WGPUBuffer buffer) {
-        return FromAPI(buffer)->GetAllocatedSize();
-    }
-
-    bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
-        bool excludePipelineCompatibiltyToken = true;
-        return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
-    }
+bool BindGroupLayoutBindingsEqualForTesting(WGPUBindGroupLayout a, WGPUBindGroupLayout b) {
+    bool excludePipelineCompatibiltyToken = true;
+    return FromAPI(a)->IsLayoutEqual(FromAPI(b), excludePipelineCompatibiltyToken);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Device.cpp b/src/dawn/native/Device.cpp
index b8048c3..fed23bf 100644
--- a/src/dawn/native/Device.cpp
+++ b/src/dawn/native/Device.cpp
@@ -55,249 +55,243 @@
 
 namespace dawn::native {
 
-    // DeviceBase sub-structures
+// DeviceBase sub-structures
 
-    // The caches are unordered_sets of pointers with special hash and compare functions
-    // to compare the value of the objects, instead of the pointers.
-    template <typename Object>
-    using ContentLessObjectCache =
-        std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
+// The caches are unordered_sets of pointers with special hash and compare functions
+// to compare the value of the objects, instead of the pointers.
+template <typename Object>
+using ContentLessObjectCache =
+    std::unordered_set<Object*, typename Object::HashFunc, typename Object::EqualityFunc>;
 
-    struct DeviceBase::Caches {
-        ~Caches() {
-            ASSERT(attachmentStates.empty());
-            ASSERT(bindGroupLayouts.empty());
-            ASSERT(computePipelines.empty());
-            ASSERT(pipelineLayouts.empty());
-            ASSERT(renderPipelines.empty());
-            ASSERT(samplers.empty());
-            ASSERT(shaderModules.empty());
-        }
-
-        ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
-        ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
-        ContentLessObjectCache<ComputePipelineBase> computePipelines;
-        ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
-        ContentLessObjectCache<RenderPipelineBase> renderPipelines;
-        ContentLessObjectCache<SamplerBase> samplers;
-        ContentLessObjectCache<ShaderModuleBase> shaderModules;
-    };
-
-    struct DeviceBase::DeprecationWarnings {
-        std::unordered_set<std::string> emitted;
-        size_t count = 0;
-    };
-
-    namespace {
-        struct LoggingCallbackTask : CallbackTask {
-          public:
-            LoggingCallbackTask() = delete;
-            LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
-                                WGPULoggingType loggingType,
-                                const char* message,
-                                void* userdata)
-                : mCallback(loggingCallback),
-                  mLoggingType(loggingType),
-                  mMessage(message),
-                  mUserdata(userdata) {
-                // Since the Finish() will be called in uncertain future in which time the message
-                // may already disposed, we must keep a local copy in the CallbackTask.
-            }
-
-            void Finish() override {
-                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
-            }
-
-            void HandleShutDown() override {
-                // Do the logging anyway
-                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
-            }
-
-            void HandleDeviceLoss() override {
-                mCallback(mLoggingType, mMessage.c_str(), mUserdata);
-            }
-
-          private:
-            // As all deferred callback tasks will be triggered before modifying the registered
-            // callback or shutting down, we are ensured that callback function and userdata pointer
-            // stored in tasks is valid when triggered.
-            wgpu::LoggingCallback mCallback;
-            WGPULoggingType mLoggingType;
-            std::string mMessage;
-            void* mUserdata;
-        };
-
-        ResultOrError<Ref<PipelineLayoutBase>>
-        ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
-            DeviceBase* device,
-            const ComputePipelineDescriptor& descriptor,
-            ComputePipelineDescriptor* outDescriptor) {
-            Ref<PipelineLayoutBase> layoutRef;
-            *outDescriptor = descriptor;
-
-            if (outDescriptor->layout == nullptr) {
-                DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
-                                               device, {{
-                                                           SingleShaderStage::Compute,
-                                                           outDescriptor->compute.module,
-                                                           outDescriptor->compute.entryPoint,
-                                                           outDescriptor->compute.constantCount,
-                                                           outDescriptor->compute.constants,
-                                                       }}));
-                outDescriptor->layout = layoutRef.Get();
-            }
-
-            return layoutRef;
-        }
-
-        ResultOrError<Ref<PipelineLayoutBase>>
-        ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
-            DeviceBase* device,
-            const RenderPipelineDescriptor& descriptor,
-            RenderPipelineDescriptor* outDescriptor) {
-            Ref<PipelineLayoutBase> layoutRef;
-            *outDescriptor = descriptor;
-
-            if (descriptor.layout == nullptr) {
-                // Ref will keep the pipeline layout alive until the end of the function where
-                // the pipeline will take another reference.
-                DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
-                                               device, GetRenderStagesAndSetPlaceholderShader(
-                                                           device, &descriptor)));
-                outDescriptor->layout = layoutRef.Get();
-            }
-
-            return layoutRef;
-        }
-
-    }  // anonymous namespace
-
-    // DeviceBase
-
-    DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
-        : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
-        ASSERT(descriptor != nullptr);
-
-        AdapterProperties adapterProperties;
-        adapter->APIGetProperties(&adapterProperties);
-
-        const DawnTogglesDeviceDescriptor* togglesDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &togglesDesc);
-        if (togglesDesc != nullptr) {
-            ApplyToggleOverrides(togglesDesc);
-        }
-        ApplyFeatures(descriptor);
-
-        DawnCacheDeviceDescriptor defaultCacheDesc = {};
-        const DawnCacheDeviceDescriptor* cacheDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &cacheDesc);
-        if (cacheDesc == nullptr) {
-            cacheDesc = &defaultCacheDesc;
-        }
-
-        if (descriptor->requiredLimits != nullptr) {
-            mLimits.v1 = ReifyDefaultLimits(descriptor->requiredLimits->limits);
-        } else {
-            GetDefaultLimits(&mLimits.v1);
-        }
-
-        mFormatTable = BuildFormatTable(this);
-        SetDefaultToggles();
-
-        if (descriptor->label != nullptr && strlen(descriptor->label) != 0) {
-            mLabel = descriptor->label;
-        }
-
-        // Record the cache key from the properties. Note that currently, if a new extension
-        // descriptor is added (and probably handled here), the cache key recording needs to be
-        // updated.
-        mDeviceCacheKey.Record(adapterProperties, mEnabledFeatures.featuresBitSet,
-                               mEnabledToggles.toggleBitset, cacheDesc);
+struct DeviceBase::Caches {
+    ~Caches() {
+        ASSERT(attachmentStates.empty());
+        ASSERT(bindGroupLayouts.empty());
+        ASSERT(computePipelines.empty());
+        ASSERT(pipelineLayouts.empty());
+        ASSERT(renderPipelines.empty());
+        ASSERT(samplers.empty());
+        ASSERT(shaderModules.empty());
     }
 
-    DeviceBase::DeviceBase() : mState(State::Alive) {
-        mCaches = std::make_unique<DeviceBase::Caches>();
+    ContentLessObjectCache<AttachmentStateBlueprint> attachmentStates;
+    ContentLessObjectCache<BindGroupLayoutBase> bindGroupLayouts;
+    ContentLessObjectCache<ComputePipelineBase> computePipelines;
+    ContentLessObjectCache<PipelineLayoutBase> pipelineLayouts;
+    ContentLessObjectCache<RenderPipelineBase> renderPipelines;
+    ContentLessObjectCache<SamplerBase> samplers;
+    ContentLessObjectCache<ShaderModuleBase> shaderModules;
+};
+
+struct DeviceBase::DeprecationWarnings {
+    std::unordered_set<std::string> emitted;
+    size_t count = 0;
+};
+
+namespace {
+struct LoggingCallbackTask : CallbackTask {
+  public:
+    LoggingCallbackTask() = delete;
+    LoggingCallbackTask(wgpu::LoggingCallback loggingCallback,
+                        WGPULoggingType loggingType,
+                        const char* message,
+                        void* userdata)
+        : mCallback(loggingCallback),
+          mLoggingType(loggingType),
+          mMessage(message),
+          mUserdata(userdata) {
+        // Since the Finish() will be called in uncertain future in which time the message
+        // may already disposed, we must keep a local copy in the CallbackTask.
     }
 
-    DeviceBase::~DeviceBase() {
-        // We need to explicitly release the Queue before we complete the destructor so that the
-        // Queue does not get destroyed after the Device.
-        mQueue = nullptr;
+    void Finish() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }
+
+    void HandleShutDown() override {
+        // Do the logging anyway
+        mCallback(mLoggingType, mMessage.c_str(), mUserdata);
     }
 
-    MaybeError DeviceBase::Initialize(Ref<QueueBase> defaultQueue) {
-        mQueue = std::move(defaultQueue);
+    void HandleDeviceLoss() override { mCallback(mLoggingType, mMessage.c_str(), mUserdata); }
+
+  private:
+    // As all deferred callback tasks will be triggered before modifying the registered
+    // callback or shutting down, we are ensured that callback function and userdata pointer
+    // stored in tasks is valid when triggered.
+    wgpu::LoggingCallback mCallback;
+    WGPULoggingType mLoggingType;
+    std::string mMessage;
+    void* mUserdata;
+};
+
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+    DeviceBase* device,
+    const ComputePipelineDescriptor& descriptor,
+    ComputePipelineDescriptor* outDescriptor) {
+    Ref<PipelineLayoutBase> layoutRef;
+    *outDescriptor = descriptor;
+
+    if (outDescriptor->layout == nullptr) {
+        DAWN_TRY_ASSIGN(layoutRef, PipelineLayoutBase::CreateDefault(
+                                       device, {{
+                                                   SingleShaderStage::Compute,
+                                                   outDescriptor->compute.module,
+                                                   outDescriptor->compute.entryPoint,
+                                                   outDescriptor->compute.constantCount,
+                                                   outDescriptor->compute.constants,
+                                               }}));
+        outDescriptor->layout = layoutRef.Get();
+    }
+
+    return layoutRef;
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+    DeviceBase* device,
+    const RenderPipelineDescriptor& descriptor,
+    RenderPipelineDescriptor* outDescriptor) {
+    Ref<PipelineLayoutBase> layoutRef;
+    *outDescriptor = descriptor;
+
+    if (descriptor.layout == nullptr) {
+        // Ref will keep the pipeline layout alive until the end of the function where
+        // the pipeline will take another reference.
+        DAWN_TRY_ASSIGN(layoutRef,
+                        PipelineLayoutBase::CreateDefault(
+                            device, GetRenderStagesAndSetPlaceholderShader(device, &descriptor)));
+        outDescriptor->layout = layoutRef.Get();
+    }
+
+    return layoutRef;
+}
+
+}  // anonymous namespace
+
+// DeviceBase
+
+DeviceBase::DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor)
+    : mInstance(adapter->GetInstance()), mAdapter(adapter), mNextPipelineCompatibilityToken(1) {
+    ASSERT(descriptor != nullptr);
+
+    AdapterProperties adapterProperties;
+    adapter->APIGetProperties(&adapterProperties);
+
+    const DawnTogglesDeviceDescriptor* togglesDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &togglesDesc);
+    if (togglesDesc != nullptr) {
+        ApplyToggleOverrides(togglesDesc);
+    }
+    ApplyFeatures(descriptor);
+
+    DawnCacheDeviceDescriptor defaultCacheDesc = {};
+    const DawnCacheDeviceDescriptor* cacheDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &cacheDesc);
+    if (cacheDesc == nullptr) {
+        cacheDesc = &defaultCacheDesc;
+    }
+
+    if (descriptor->requiredLimits != nullptr) {
+        mLimits.v1 = ReifyDefaultLimits(descriptor->requiredLimits->limits);
+    } else {
+        GetDefaultLimits(&mLimits.v1);
+    }
+
+    mFormatTable = BuildFormatTable(this);
+    SetDefaultToggles();
+
+    if (descriptor->label != nullptr && strlen(descriptor->label) != 0) {
+        mLabel = descriptor->label;
+    }
+
+    // Record the cache key from the properties. Note that currently, if a new extension
+    // descriptor is added (and probably handled here), the cache key recording needs to be
+    // updated.
+    mDeviceCacheKey.Record(adapterProperties, mEnabledFeatures.featuresBitSet,
+                           mEnabledToggles.toggleBitset, cacheDesc);
+}
+
+DeviceBase::DeviceBase() : mState(State::Alive) {
+    mCaches = std::make_unique<DeviceBase::Caches>();
+}
+
+DeviceBase::~DeviceBase() {
+    // We need to explicitly release the Queue before we complete the destructor so that the
+    // Queue does not get destroyed after the Device.
+    mQueue = nullptr;
+}
+
+MaybeError DeviceBase::Initialize(Ref<QueueBase> defaultQueue) {
+    mQueue = std::move(defaultQueue);
 
 #if defined(DAWN_ENABLE_ASSERTS)
-        mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
-            static bool calledOnce = false;
-            if (!calledOnce) {
-                calledOnce = true;
-                dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
-                                      "probably not intended. If you really want to ignore errors "
-                                      "and suppress this message, set the callback to null.";
-            }
-        };
+    mUncapturedErrorCallback = [](WGPUErrorType, char const*, void*) {
+        static bool calledOnce = false;
+        if (!calledOnce) {
+            calledOnce = true;
+            dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+                                  "probably not intended. If you really want to ignore errors "
+                                  "and suppress this message, set the callback to null.";
+        }
+    };
 
-        mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
-            static bool calledOnce = false;
-            if (!calledOnce) {
-                calledOnce = true;
-                dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
-                                      "intended. If you really want to ignore device lost "
-                                      "and suppress this message, set the callback to null.";
-            }
-        };
+    mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+        static bool calledOnce = false;
+        if (!calledOnce) {
+            calledOnce = true;
+            dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+                                  "intended. If you really want to ignore device lost "
+                                  "and suppress this message, set the callback to null.";
+        }
+    };
 #endif  // DAWN_ENABLE_ASSERTS
 
-        mCaches = std::make_unique<DeviceBase::Caches>();
-        mErrorScopeStack = std::make_unique<ErrorScopeStack>();
-        mDynamicUploader = std::make_unique<DynamicUploader>(this);
-        mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
-        mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
-        mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
+    mCaches = std::make_unique<DeviceBase::Caches>();
+    mErrorScopeStack = std::make_unique<ErrorScopeStack>();
+    mDynamicUploader = std::make_unique<DynamicUploader>(this);
+    mCallbackTaskManager = std::make_unique<CallbackTaskManager>();
+    mDeprecationWarnings = std::make_unique<DeprecationWarnings>();
+    mInternalPipelineStore = std::make_unique<InternalPipelineStore>(this);
 
-        ASSERT(GetPlatform() != nullptr);
-        mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
-        mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
+    ASSERT(GetPlatform() != nullptr);
+    mWorkerTaskPool = GetPlatform()->CreateWorkerTaskPool();
+    mAsyncTaskManager = std::make_unique<AsyncTaskManager>(mWorkerTaskPool.get());
 
-        // Starting from now the backend can start doing reentrant calls so the device is marked as
-        // alive.
-        mState = State::Alive;
+    // Starting from now the backend can start doing reentrant calls so the device is marked as
+    // alive.
+    mState = State::Alive;
 
-        DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
+    DAWN_TRY_ASSIGN(mEmptyBindGroupLayout, CreateEmptyBindGroupLayout());
 
-        // If placeholder fragment shader module is needed, initialize it
-        if (IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
-            // The empty fragment shader, used as a work around for vertex-only render pipeline
-            constexpr char kEmptyFragmentShader[] = R"(
+    // If placeholder fragment shader module is needed, initialize it
+    if (IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
+        // The empty fragment shader, used as a workaround for vertex-only render pipeline
+        constexpr char kEmptyFragmentShader[] = R"(
                 @stage(fragment) fn fs_empty_main() {}
             )";
-            ShaderModuleDescriptor descriptor;
-            ShaderModuleWGSLDescriptor wgslDesc;
-            wgslDesc.source = kEmptyFragmentShader;
-            descriptor.nextInChain = &wgslDesc;
+        ShaderModuleDescriptor descriptor;
+        ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = kEmptyFragmentShader;
+        descriptor.nextInChain = &wgslDesc;
 
-            DAWN_TRY_ASSIGN(mInternalPipelineStore->placeholderFragmentShader,
-                            CreateShaderModule(&descriptor));
-        }
-
-        return {};
+        DAWN_TRY_ASSIGN(mInternalPipelineStore->placeholderFragmentShader,
+                        CreateShaderModule(&descriptor));
     }
 
-    void DeviceBase::DestroyObjects() {
-        // List of object types in reverse "dependency" order so we can iterate and delete the
-        // objects safely. We define dependent here such that if B has a ref to A, then B depends on
-        // A. We therefore try to destroy B before destroying A. Note that this only considers the
-        // immediate frontend dependencies, while backend objects could add complications and extra
-        // dependencies.
-        //
-        // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
-        // since AttachmentStates are cached by the device, objects that hold references to
-        // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
-        // can destroy the frontend cache.
+    return {};
+}
 
-        // clang-format off
+void DeviceBase::DestroyObjects() {
+    // List of object types in reverse "dependency" order so we can iterate and delete the
+    // objects safely. We define dependent here such that if B has a ref to A, then B depends on
+    // A. We therefore try to destroy B before destroying A. Note that this only considers the
+    // immediate frontend dependencies, while backend objects could add complications and extra
+    // dependencies.
+    //
+    // Note that AttachmentState is not an ApiObject so it cannot be eagerly destroyed. However,
+    // since AttachmentStates are cached by the device, objects that hold references to
+    // AttachmentStates should make sure to un-ref them in their Destroy operation so that we
+    // can destroy the frontend cache.
+
+    // clang-format off
         static constexpr std::array<ObjectType, 19> kObjectTypeDependencyOrder = {
             ObjectType::ComputePassEncoder,
             ObjectType::RenderPassEncoder,
@@ -319,1518 +313,1502 @@
             ObjectType::Sampler,
             ObjectType::Buffer,
         };
-        // clang-format on
+    // clang-format on
 
-        // We first move all objects out from the tracking list into a separate list so that we can
-        // avoid locking the same mutex twice. We can then iterate across the separate list to call
-        // the actual destroy function.
-        LinkedList<ApiObjectBase> objects;
-        for (ObjectType type : kObjectTypeDependencyOrder) {
-            ApiObjectList& objList = mObjectLists[type];
-            const std::lock_guard<std::mutex> lock(objList.mutex);
-            objList.objects.MoveInto(&objects);
+    // We first move all objects out from the tracking list into a separate list so that we can
+    // avoid locking the same mutex twice. We can then iterate across the separate list to call
+    // the actual destroy function.
+    LinkedList<ApiObjectBase> objects;
+    for (ObjectType type : kObjectTypeDependencyOrder) {
+        ApiObjectList& objList = mObjectLists[type];
+        const std::lock_guard<std::mutex> lock(objList.mutex);
+        objList.objects.MoveInto(&objects);
+    }
+    while (!objects.empty()) {
+        // The destroy call should also remove the object from the list.
+        objects.head()->value()->Destroy();
+    }
+}
+
+void DeviceBase::Destroy() {
+    // Skip if we are already destroyed.
+    if (mState == State::Destroyed) {
+        return;
+    }
+
+    // Skip handling device facilities if they haven't even been created (or failed doing so)
+    if (mState != State::BeingCreated) {
+        // The device is being destroyed so it will be lost, call the application callback.
+        if (mDeviceLostCallback != nullptr) {
+            mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
+                                mDeviceLostUserdata);
+            mDeviceLostCallback = nullptr;
         }
-        while (!objects.empty()) {
-            // The destroy call should also remove the object from the list.
-            objects.head()->value()->Destroy();
+
+        // Call all the callbacks immediately as the device is about to shut down.
+        // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+        mAsyncTaskManager->WaitAllPendingTasks();
+        auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+        for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+            callbackTask->HandleShutDown();
         }
     }
 
-    void DeviceBase::Destroy() {
-        // Skip if we are already destroyed.
-        if (mState == State::Destroyed) {
-            return;
-        }
+    // Disconnect the device, depending on which state we are currently in.
+    switch (mState) {
+        case State::BeingCreated:
+            // The GPU timeline was never started so we don't have to wait.
+            break;
 
-        // Skip handling device facilities if they haven't even been created (or failed doing so)
-        if (mState != State::BeingCreated) {
-            // The device is being destroyed so it will be lost, call the application callback.
-            if (mDeviceLostCallback != nullptr) {
-                mDeviceLostCallback(WGPUDeviceLostReason_Destroyed, "Device was destroyed.",
-                                    mDeviceLostUserdata);
-                mDeviceLostCallback = nullptr;
-            }
-
-            // Call all the callbacks immediately as the device is about to shut down.
-            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
-            mAsyncTaskManager->WaitAllPendingTasks();
-            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
-            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
-                callbackTask->HandleShutDown();
-            }
-        }
-
-        // Disconnect the device, depending on which state we are currently in.
-        switch (mState) {
-            case State::BeingCreated:
-                // The GPU timeline was never started so we don't have to wait.
-                break;
-
-            case State::Alive:
-                // Alive is the only state which can have GPU work happening. Wait for all of it to
-                // complete before proceeding with destruction.
-                // Ignore errors so that we can continue with destruction
-                IgnoreErrors(WaitForIdleForDestruction());
-                AssumeCommandsComplete();
-                break;
-
-            case State::BeingDisconnected:
-                // Getting disconnected is a transient state happening in a single API call so there
-                // is always an external reference keeping the Device alive, which means the
-                // destructor cannot run while BeingDisconnected.
-                UNREACHABLE();
-                break;
-
-            case State::Disconnected:
-                break;
-
-            case State::Destroyed:
-                // If we are already destroyed we should've skipped this work entirely.
-                UNREACHABLE();
-                break;
-        }
-        ASSERT(mCompletedSerial == mLastSubmittedSerial);
-        ASSERT(mFutureSerial <= mCompletedSerial);
-
-        if (mState != State::BeingCreated) {
-            // The GPU timeline is finished.
-            // Finish destroying all objects owned by the device and tick the queue-related tasks
-            // since they should be complete. This must be done before DestroyImpl() it may
-            // relinquish resources that will be freed by backends in the DestroyImpl() call.
-            DestroyObjects();
-            mQueue->Tick(GetCompletedCommandSerial());
-            // Call TickImpl once last time to clean up resources
+        case State::Alive:
+            // Alive is the only state which can have GPU work happening. Wait for all of it to
+            // complete before proceeding with destruction.
             // Ignore errors so that we can continue with destruction
-            IgnoreErrors(TickImpl());
-        }
+            IgnoreErrors(WaitForIdleForDestruction());
+            AssumeCommandsComplete();
+            break;
 
-        // At this point GPU operations are always finished, so we are in the disconnected state.
-        // Note that currently this state change is required because some of the backend
-        // implementations of DestroyImpl checks that we are disconnected before doing work.
+        case State::BeingDisconnected:
+            // Getting disconnected is a transient state happening in a single API call so there
+            // is always an external reference keeping the Device alive, which means the
+            // destructor cannot run while BeingDisconnected.
+            UNREACHABLE();
+            break;
+
+        case State::Disconnected:
+            break;
+
+        case State::Destroyed:
+            // If we are already destroyed we should've skipped this work entirely.
+            UNREACHABLE();
+            break;
+    }
+    ASSERT(mCompletedSerial == mLastSubmittedSerial);
+    ASSERT(mFutureSerial <= mCompletedSerial);
+
+    if (mState != State::BeingCreated) {
+        // The GPU timeline is finished.
+        // Finish destroying all objects owned by the device and tick the queue-related tasks
+        // since they should be complete. This must be done before DestroyImpl() as it may
+        // relinquish resources that will be freed by backends in the DestroyImpl() call.
+        DestroyObjects();
+        mQueue->Tick(GetCompletedCommandSerial());
+        // Call TickImpl once last time to clean up resources
+        // Ignore errors so that we can continue with destruction
+        IgnoreErrors(TickImpl());
+    }
+
+    // At this point GPU operations are always finished, so we are in the disconnected state.
+    // Note that currently this state change is required because some of the backend
+    // implementations of DestroyImpl check that we are disconnected before doing work.
+    mState = State::Disconnected;
+
+    mDynamicUploader = nullptr;
+    mCallbackTaskManager = nullptr;
+    mAsyncTaskManager = nullptr;
+    mEmptyBindGroupLayout = nullptr;
+    mInternalPipelineStore = nullptr;
+    mExternalTexturePlaceholderView = nullptr;
+
+    AssumeCommandsComplete();
+
+    // Now that the GPU timeline is empty, destroy the backend device.
+    DestroyImpl();
+
+    mCaches = nullptr;
+    mState = State::Destroyed;
+}
+
+void DeviceBase::APIDestroy() {
+    Destroy();
+}
+
+void DeviceBase::HandleError(InternalErrorType type, const char* message) {
+    if (type == InternalErrorType::DeviceLost) {
         mState = State::Disconnected;
 
-        mDynamicUploader = nullptr;
-        mCallbackTaskManager = nullptr;
-        mAsyncTaskManager = nullptr;
-        mEmptyBindGroupLayout = nullptr;
-        mInternalPipelineStore = nullptr;
-        mExternalTexturePlaceholderView = nullptr;
-
-        AssumeCommandsComplete();
-
-        // Now that the GPU timeline is empty, destroy the backend device.
-        DestroyImpl();
-
-        mCaches = nullptr;
-        mState = State::Destroyed;
-    }
-
-    void DeviceBase::APIDestroy() {
-        Destroy();
-    }
-
-    void DeviceBase::HandleError(InternalErrorType type, const char* message) {
-        if (type == InternalErrorType::DeviceLost) {
-            mState = State::Disconnected;
-
-            // If the ErrorInjector is enabled, then the device loss might be fake and the device
-            // still be executing commands. Force a wait for idle in this case, with State being
-            // Disconnected so we can detect this case in WaitForIdleForDestruction.
-            if (ErrorInjectorEnabled()) {
-                IgnoreErrors(WaitForIdleForDestruction());
-            }
-
-            // A real device lost happened. Set the state to disconnected as the device cannot be
-            // used. Also tags all commands as completed since the device stopped running.
-            AssumeCommandsComplete();
-        } else if (type == InternalErrorType::Internal) {
-            // If we receive an internal error, assume the backend can't recover and proceed with
-            // device destruction. We first wait for all previous commands to be completed so that
-            // backend objects can be freed immediately, before handling the loss.
-
-            // Move away from the Alive state so that the application cannot use this device
-            // anymore.
-            // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
-            // threads in a multithreaded scenario?
-            mState = State::BeingDisconnected;
-
-            // Ignore errors so that we can continue with destruction
-            // Assume all commands are complete after WaitForIdleForDestruction (because they were)
+        // If the ErrorInjector is enabled, then the device loss might be fake and the device
+        // still be executing commands. Force a wait for idle in this case, with State being
+        // Disconnected so we can detect this case in WaitForIdleForDestruction.
+        if (ErrorInjectorEnabled()) {
             IgnoreErrors(WaitForIdleForDestruction());
-            IgnoreErrors(TickImpl());
-            AssumeCommandsComplete();
-            ASSERT(mFutureSerial <= mCompletedSerial);
-            mState = State::Disconnected;
-
-            // Now everything is as if the device was lost.
-            type = InternalErrorType::DeviceLost;
         }
 
-        if (type == InternalErrorType::DeviceLost) {
-            // The device was lost, call the application callback.
-            if (mDeviceLostCallback != nullptr) {
-                mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
-                mDeviceLostCallback = nullptr;
-            }
+        // A real device lost happened. Set the state to disconnected as the device cannot be
+        // used. Also tags all commands as completed since the device stopped running.
+        AssumeCommandsComplete();
+    } else if (type == InternalErrorType::Internal) {
+        // If we receive an internal error, assume the backend can't recover and proceed with
+        // device destruction. We first wait for all previous commands to be completed so that
+        // backend objects can be freed immediately, before handling the loss.
 
-            mQueue->HandleDeviceLoss();
+        // Move away from the Alive state so that the application cannot use this device
+        // anymore.
+        // TODO(crbug.com/dawn/831): Do we need atomics for this to become visible to other
+        // threads in a multithreaded scenario?
+        mState = State::BeingDisconnected;
 
-            // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
-            mAsyncTaskManager->WaitAllPendingTasks();
-            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
-            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
-                callbackTask->HandleDeviceLoss();
-            }
+        // Ignore errors so that we can continue with destruction
+        // Assume all commands are complete after WaitForIdleForDestruction (because they were)
+        IgnoreErrors(WaitForIdleForDestruction());
+        IgnoreErrors(TickImpl());
+        AssumeCommandsComplete();
+        ASSERT(mFutureSerial <= mCompletedSerial);
+        mState = State::Disconnected;
 
-            // Still forward device loss errors to the error scopes so they all reject.
-            mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
-        } else {
-            // Pass the error to the error scope stack and call the uncaptured error callback
-            // if it isn't handled. DeviceLost is not handled here because it should be
-            // handled by the lost callback.
-            bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
-            if (!captured && mUncapturedErrorCallback != nullptr) {
-                mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
-                                         mUncapturedErrorUserdata);
-            }
-        }
+        // Now everything is as if the device was lost.
+        type = InternalErrorType::DeviceLost;
     }
 
-    void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
-        ASSERT(error != nullptr);
-        AppendDebugLayerMessages(error.get());
-        HandleError(error->GetType(), error->GetFormattedMessage().c_str());
-    }
+    if (type == InternalErrorType::DeviceLost) {
+        // The device was lost, call the application callback.
+        if (mDeviceLostCallback != nullptr) {
+            mDeviceLostCallback(WGPUDeviceLostReason_Undefined, message, mDeviceLostUserdata);
+            mDeviceLostCallback = nullptr;
+        }
 
-    void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
-        // The registered callback function and userdata pointer are stored and used by deferred
-        // callback tasks, and after setting a different callback (especially in the case of
-        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
-        // callback tasks to guarantee we are never going to use the previous callback after
-        // this call.
-        if (IsLost()) {
-            return;
-        }
-        FlushCallbackTaskQueue();
-        mLoggingCallback = callback;
-        mLoggingUserdata = userdata;
-    }
+        mQueue->HandleDeviceLoss();
 
-    void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
-        // The registered callback function and userdata pointer are stored and used by deferred
-        // callback tasks, and after setting a different callback (especially in the case of
-        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
-        // callback tasks to guarantee we are never going to use the previous callback after
-        // this call.
-        if (IsLost()) {
-            return;
+        // TODO(crbug.com/dawn/826): Cancel the tasks that are in flight if possible.
+        mAsyncTaskManager->WaitAllPendingTasks();
+        auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+        for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+            callbackTask->HandleDeviceLoss();
         }
-        FlushCallbackTaskQueue();
-        mUncapturedErrorCallback = callback;
-        mUncapturedErrorUserdata = userdata;
-    }
 
-    void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
-        // The registered callback function and userdata pointer are stored and used by deferred
-        // callback tasks, and after setting a different callback (especially in the case of
-        // resetting) the resources pointed by such pointer may be freed. Flush all deferred
-        // callback tasks to guarantee we are never going to use the previous callback after
-        // this call.
-        if (IsLost()) {
-            return;
+        // Still forward device loss errors to the error scopes so they all reject.
+        mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+    } else {
+        // Pass the error to the error scope stack and call the uncaptured error callback
+        // if it isn't handled. DeviceLost is not handled here because it should be
+        // handled by the lost callback.
+        bool captured = mErrorScopeStack->HandleError(ToWGPUErrorType(type), message);
+        if (!captured && mUncapturedErrorCallback != nullptr) {
+            mUncapturedErrorCallback(static_cast<WGPUErrorType>(ToWGPUErrorType(type)), message,
+                                     mUncapturedErrorUserdata);
         }
-        FlushCallbackTaskQueue();
-        mDeviceLostCallback = callback;
-        mDeviceLostUserdata = userdata;
     }
+}
 
-    void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
-        if (ConsumedError(ValidateErrorFilter(filter))) {
-            return;
-        }
-        mErrorScopeStack->Push(filter);
+void DeviceBase::ConsumeError(std::unique_ptr<ErrorData> error) {
+    ASSERT(error != nullptr);
+    AppendDebugLayerMessages(error.get());
+    HandleError(error->GetType(), error->GetFormattedMessage().c_str());
+}
+
+void DeviceBase::APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata) {
+    // The registered callback function and userdata pointer are stored and used by deferred
+    // callback tasks, and after setting a different callback (especially in the case of
+    // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+    // callback tasks to guarantee we are never going to use the previous callback after
+    // this call.
+    if (IsLost()) {
+        return;
     }
+    FlushCallbackTaskQueue();
+    mLoggingCallback = callback;
+    mLoggingUserdata = userdata;
+}
 
-    bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
-        // TODO(crbug.com/dawn/1324) Remove return and make function void when users are updated.
-        bool returnValue = true;
-        if (callback == nullptr) {
-            static wgpu::ErrorCallback defaultCallback = [](WGPUErrorType, char const*, void*) {};
-            callback = defaultCallback;
-        }
-        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-        if (IsLost()) {
-            callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
-            return returnValue;
-        }
-        if (mErrorScopeStack->Empty()) {
-            callback(WGPUErrorType_Unknown, "No error scopes to pop", userdata);
-            return returnValue;
-        }
-        ErrorScope scope = mErrorScopeStack->Pop();
-        callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(),
-                 userdata);
+void DeviceBase::APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata) {
+    // The registered callback function and userdata pointer are stored and used by deferred
+    // callback tasks, and after setting a different callback (especially in the case of
+    // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+    // callback tasks to guarantee we are never going to use the previous callback after
+    // this call.
+    if (IsLost()) {
+        return;
+    }
+    FlushCallbackTaskQueue();
+    mUncapturedErrorCallback = callback;
+    mUncapturedErrorUserdata = userdata;
+}
+
+void DeviceBase::APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata) {
+    // The registered callback function and userdata pointer are stored and used by deferred
+    // callback tasks, and after setting a different callback (especially in the case of
+    // resetting) the resources pointed by such pointer may be freed. Flush all deferred
+    // callback tasks to guarantee we are never going to use the previous callback after
+    // this call.
+    if (IsLost()) {
+        return;
+    }
+    FlushCallbackTaskQueue();
+    mDeviceLostCallback = callback;
+    mDeviceLostUserdata = userdata;
+}
+
+void DeviceBase::APIPushErrorScope(wgpu::ErrorFilter filter) {
+    if (ConsumedError(ValidateErrorFilter(filter))) {
+        return;
+    }
+    mErrorScopeStack->Push(filter);
+}
+
+bool DeviceBase::APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata) {
+    // TODO(crbug.com/dawn/1324) Remove return and make function void when users are updated.
+    bool returnValue = true;
+    if (callback == nullptr) {
+        static wgpu::ErrorCallback defaultCallback = [](WGPUErrorType, char const*, void*) {};
+        callback = defaultCallback;
+    }
+    // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+    if (IsLost()) {
+        callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
         return returnValue;
     }
+    if (mErrorScopeStack->Empty()) {
+        callback(WGPUErrorType_Unknown, "No error scopes to pop", userdata);
+        return returnValue;
+    }
+    ErrorScope scope = mErrorScopeStack->Pop();
+    callback(static_cast<WGPUErrorType>(scope.GetErrorType()), scope.GetErrorMessage(), userdata);
+    return returnValue;
+}
 
-    BlobCache* DeviceBase::GetBlobCache() {
-        return mInstance->GetBlobCache();
+BlobCache* DeviceBase::GetBlobCache() {
+    return mInstance->GetBlobCache();
+}
+
+MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
+    ASSERT(object != nullptr);
+    DAWN_INVALID_IF(object->GetDevice() != this,
+                    "%s is associated with %s, and cannot be used with %s.", object,
+                    object->GetDevice(), this);
+
+    // TODO(dawn:563): Preserve labels for error objects.
+    DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);
+
+    return {};
+}
+
+MaybeError DeviceBase::ValidateIsAlive() const {
+    DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
+    return {};
+}
+
+void DeviceBase::APILoseForTesting() {
+    if (mState != State::Alive) {
+        return;
     }
 
-    MaybeError DeviceBase::ValidateObject(const ApiObjectBase* object) const {
-        ASSERT(object != nullptr);
-        DAWN_INVALID_IF(object->GetDevice() != this,
-                        "%s is associated with %s, and cannot be used with %s.", object,
-                        object->GetDevice(), this);
+    HandleError(InternalErrorType::Internal, "Device lost for testing");
+}
 
-        // TODO(dawn:563): Preserve labels for error objects.
-        DAWN_INVALID_IF(object->IsError(), "%s is invalid.", object);
+DeviceBase::State DeviceBase::GetState() const {
+    return mState;
+}
 
-        return {};
-    }
+bool DeviceBase::IsLost() const {
+    ASSERT(mState != State::BeingCreated);
+    return mState != State::Alive;
+}
 
-    MaybeError DeviceBase::ValidateIsAlive() const {
-        DAWN_INVALID_IF(mState != State::Alive, "%s is lost.", this);
-        return {};
-    }
+void DeviceBase::TrackObject(ApiObjectBase* object) {
+    ApiObjectList& objectList = mObjectLists[object->GetType()];
+    std::lock_guard<std::mutex> lock(objectList.mutex);
+    object->InsertBefore(objectList.objects.head());
+}
 
-    void DeviceBase::APILoseForTesting() {
-        if (mState != State::Alive) {
-            return;
-        }
+std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
+    return &mObjectLists[type].mutex;
+}
 
-        HandleError(InternalErrorType::Internal, "Device lost for testing");
-    }
+AdapterBase* DeviceBase::GetAdapter() const {
+    return mAdapter;
+}
 
-    DeviceBase::State DeviceBase::GetState() const {
-        return mState;
-    }
+dawn::platform::Platform* DeviceBase::GetPlatform() const {
+    return GetAdapter()->GetInstance()->GetPlatform();
+}
 
-    bool DeviceBase::IsLost() const {
-        ASSERT(mState != State::BeingCreated);
-        return mState != State::Alive;
-    }
+ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
+    return mCompletedSerial;
+}
 
-    void DeviceBase::TrackObject(ApiObjectBase* object) {
-        ApiObjectList& objectList = mObjectLists[object->GetType()];
-        std::lock_guard<std::mutex> lock(objectList.mutex);
-        object->InsertBefore(objectList.objects.head());
-    }
+ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
+    return mLastSubmittedSerial;
+}
 
-    std::mutex* DeviceBase::GetObjectListMutex(ObjectType type) {
-        return &mObjectLists[type].mutex;
-    }
+ExecutionSerial DeviceBase::GetFutureSerial() const {
+    return mFutureSerial;
+}
 
-    AdapterBase* DeviceBase::GetAdapter() const {
-        return mAdapter;
-    }
+InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
+    return mInternalPipelineStore.get();
+}
 
-    dawn::platform::Platform* DeviceBase::GetPlatform() const {
-        return GetAdapter()->GetInstance()->GetPlatform();
-    }
+void DeviceBase::IncrementLastSubmittedCommandSerial() {
+    mLastSubmittedSerial++;
+}
 
-    ExecutionSerial DeviceBase::GetCompletedCommandSerial() const {
-        return mCompletedSerial;
-    }
+void DeviceBase::AssumeCommandsComplete() {
+    ExecutionSerial maxSerial =
+        ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
+    mLastSubmittedSerial = maxSerial;
+    mCompletedSerial = maxSerial;
+}
 
-    ExecutionSerial DeviceBase::GetLastSubmittedCommandSerial() const {
-        return mLastSubmittedSerial;
-    }
-
-    ExecutionSerial DeviceBase::GetFutureSerial() const {
-        return mFutureSerial;
-    }
-
-    InternalPipelineStore* DeviceBase::GetInternalPipelineStore() {
-        return mInternalPipelineStore.get();
-    }
-
-    void DeviceBase::IncrementLastSubmittedCommandSerial() {
-        mLastSubmittedSerial++;
-    }
-
-    void DeviceBase::AssumeCommandsComplete() {
-        ExecutionSerial maxSerial =
-            ExecutionSerial(std::max(mLastSubmittedSerial + ExecutionSerial(1), mFutureSerial));
-        mLastSubmittedSerial = maxSerial;
-        mCompletedSerial = maxSerial;
-    }
-
-    bool DeviceBase::IsDeviceIdle() {
-        if (mAsyncTaskManager->HasPendingTasks()) {
-            return false;
-        }
-
-        ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
-        if (mCompletedSerial == maxSerial) {
-            return true;
-        }
+bool DeviceBase::IsDeviceIdle() {
+    if (mAsyncTaskManager->HasPendingTasks()) {
         return false;
     }
 
-    ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
-        return mLastSubmittedSerial + ExecutionSerial(1);
-    }
-
-    void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
-        if (serial > mFutureSerial) {
-            mFutureSerial = serial;
-        }
-    }
-
-    MaybeError DeviceBase::CheckPassedSerials() {
-        ExecutionSerial completedSerial;
-        DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
-
-        ASSERT(completedSerial <= mLastSubmittedSerial);
-        // completedSerial should not be less than mCompletedSerial unless it is 0.
-        // It can be 0 when there's no fences to check.
-        ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));
-
-        if (completedSerial > mCompletedSerial) {
-            mCompletedSerial = completedSerial;
-        }
-
-        return {};
-    }
-
-    ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
-        FormatIndex index = ComputeFormatIndex(format);
-        DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);
-
-        const Format* internalFormat = &mFormatTable[index];
-        DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);
-
-        return internalFormat;
-    }
-
-    const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
-        FormatIndex index = ComputeFormatIndex(format);
-        ASSERT(index < mFormatTable.size());
-        ASSERT(mFormatTable[index].isSupported);
-        return mFormatTable[index];
-    }
-
-    const Format& DeviceBase::GetValidInternalFormat(FormatIndex index) const {
-        ASSERT(index < mFormatTable.size());
-        ASSERT(mFormatTable[index].isSupported);
-        return mFormatTable[index];
-    }
-
-    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
-                                      ApiObjectBase::kUntrackedByDevice);
-
-        const size_t blueprintHash = blueprint.ComputeContentHash();
-        blueprint.SetContentHash(blueprintHash);
-
-        Ref<BindGroupLayoutBase> result;
-        auto iter = mCaches->bindGroupLayouts.find(&blueprint);
-        if (iter != mCaches->bindGroupLayouts.end()) {
-            result = *iter;
-        } else {
-            DAWN_TRY_ASSIGN(result,
-                            CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
-            result->SetIsCachedReference();
-            result->SetContentHash(blueprintHash);
-            mCaches->bindGroupLayouts.insert(result.Get());
-        }
-
-        return std::move(result);
-    }
-
-    void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
-        ASSERT(obj->IsCachedReference());
-        size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
-        ASSERT(removedCount == 1);
-    }
-
-    // Private function used at initialization
-    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
-        BindGroupLayoutDescriptor desc = {};
-        desc.entryCount = 0;
-        desc.entries = nullptr;
-
-        return GetOrCreateBindGroupLayout(&desc);
-    }
-
-    BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
-        ASSERT(mEmptyBindGroupLayout != nullptr);
-        return mEmptyBindGroupLayout.Get();
-    }
-
-    Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
-        ComputePipelineBase* uninitializedComputePipeline) {
-        Ref<ComputePipelineBase> cachedPipeline;
-        auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
-        if (iter != mCaches->computePipelines.end()) {
-            cachedPipeline = *iter;
-        }
-
-        return cachedPipeline;
-    }
-
-    Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
-        RenderPipelineBase* uninitializedRenderPipeline) {
-        Ref<RenderPipelineBase> cachedPipeline;
-        auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
-        if (iter != mCaches->renderPipelines.end()) {
-            cachedPipeline = *iter;
-        }
-        return cachedPipeline;
-    }
-
-    Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
-        Ref<ComputePipelineBase> computePipeline) {
-        auto [cachedPipeline, inserted] = mCaches->computePipelines.insert(computePipeline.Get());
-        if (inserted) {
-            computePipeline->SetIsCachedReference();
-            return computePipeline;
-        } else {
-            return *cachedPipeline;
-        }
-    }
-
-    Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
-        Ref<RenderPipelineBase> renderPipeline) {
-        auto [cachedPipeline, inserted] = mCaches->renderPipelines.insert(renderPipeline.Get());
-        if (inserted) {
-            renderPipeline->SetIsCachedReference();
-            return renderPipeline;
-        } else {
-            return *cachedPipeline;
-        }
-    }
-
-    void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
-        ASSERT(obj->IsCachedReference());
-        size_t removedCount = mCaches->computePipelines.erase(obj);
-        ASSERT(removedCount == 1);
-    }
-
-    ResultOrError<Ref<TextureViewBase>>
-    DeviceBase::GetOrCreatePlaceholderTextureViewForExternalTexture() {
-        if (!mExternalTexturePlaceholderView.Get()) {
-            Ref<TextureBase> externalTexturePlaceholder;
-            TextureDescriptor textureDesc;
-            textureDesc.dimension = wgpu::TextureDimension::e2D;
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            textureDesc.label = "Dawn_External_Texture_Placeholder_Texture";
-            textureDesc.size = {1, 1, 1};
-            textureDesc.usage = wgpu::TextureUsage::TextureBinding;
-
-            DAWN_TRY_ASSIGN(externalTexturePlaceholder, CreateTexture(&textureDesc));
-
-            TextureViewDescriptor textureViewDesc;
-            textureViewDesc.arrayLayerCount = 1;
-            textureViewDesc.aspect = wgpu::TextureAspect::All;
-            textureViewDesc.baseArrayLayer = 0;
-            textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
-            textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            textureViewDesc.label = "Dawn_External_Texture_Placeholder_Texture_View";
-            textureViewDesc.mipLevelCount = 1;
-
-            DAWN_TRY_ASSIGN(mExternalTexturePlaceholderView,
-                            CreateTextureView(externalTexturePlaceholder.Get(), &textureViewDesc));
-        }
-
-        return mExternalTexturePlaceholderView;
-    }
-
-    ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
-        const PipelineLayoutDescriptor* descriptor) {
-        PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
-
-        const size_t blueprintHash = blueprint.ComputeContentHash();
-        blueprint.SetContentHash(blueprintHash);
-
-        Ref<PipelineLayoutBase> result;
-        auto iter = mCaches->pipelineLayouts.find(&blueprint);
-        if (iter != mCaches->pipelineLayouts.end()) {
-            result = *iter;
-        } else {
-            DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
-            result->SetIsCachedReference();
-            result->SetContentHash(blueprintHash);
-            mCaches->pipelineLayouts.insert(result.Get());
-        }
-
-        return std::move(result);
-    }
-
-    void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
-        ASSERT(obj->IsCachedReference());
-        size_t removedCount = mCaches->pipelineLayouts.erase(obj);
-        ASSERT(removedCount == 1);
-    }
-
-    void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
-        ASSERT(obj->IsCachedReference());
-        size_t removedCount = mCaches->renderPipelines.erase(obj);
-        ASSERT(removedCount == 1);
-    }
-
-    ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
-        const SamplerDescriptor* descriptor) {
-        SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
-
-        const size_t blueprintHash = blueprint.ComputeContentHash();
-        blueprint.SetContentHash(blueprintHash);
-
-        Ref<SamplerBase> result;
-        auto iter = mCaches->samplers.find(&blueprint);
-        if (iter != mCaches->samplers.end()) {
-            result = *iter;
-        } else {
-            DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
-            result->SetIsCachedReference();
-            result->SetContentHash(blueprintHash);
-            mCaches->samplers.insert(result.Get());
-        }
-
-        return std::move(result);
-    }
-
-    void DeviceBase::UncacheSampler(SamplerBase* obj) {
-        ASSERT(obj->IsCachedReference());
-        size_t removedCount = mCaches->samplers.erase(obj);
-        ASSERT(removedCount == 1);
-    }
-
-    ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
-        const ShaderModuleDescriptor* descriptor,
-        ShaderModuleParseResult* parseResult,
-        OwnedCompilationMessages* compilationMessages) {
-        ASSERT(parseResult != nullptr);
-
-        ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
-
-        const size_t blueprintHash = blueprint.ComputeContentHash();
-        blueprint.SetContentHash(blueprintHash);
-
-        Ref<ShaderModuleBase> result;
-        auto iter = mCaches->shaderModules.find(&blueprint);
-        if (iter != mCaches->shaderModules.end()) {
-            result = *iter;
-        } else {
-            if (!parseResult->HasParsedShader()) {
-                // We skip the parse on creation if validation isn't enabled which let's us quickly
-                // lookup in the cache without validating and parsing. We need the parsed module
-                // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
-                // we can consider splitting it if additional validation is added.
-                ASSERT(!IsValidationEnabled());
-                DAWN_TRY(ValidateShaderModuleDescriptor(this, descriptor, parseResult,
-                                                        compilationMessages));
-            }
-            DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
-            result->SetIsCachedReference();
-            result->SetContentHash(blueprintHash);
-            mCaches->shaderModules.insert(result.Get());
-        }
-
-        return std::move(result);
-    }
-
-    void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
-        ASSERT(obj->IsCachedReference());
-        size_t removedCount = mCaches->shaderModules.erase(obj);
-        ASSERT(removedCount == 1);
-    }
-
-    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
-        AttachmentStateBlueprint* blueprint) {
-        auto iter = mCaches->attachmentStates.find(blueprint);
-        if (iter != mCaches->attachmentStates.end()) {
-            return static_cast<AttachmentState*>(*iter);
-        }
-
-        Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
-        attachmentState->SetIsCachedReference();
-        attachmentState->SetContentHash(attachmentState->ComputeContentHash());
-        mCaches->attachmentStates.insert(attachmentState.Get());
-        return attachmentState;
-    }
-
-    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
-        const RenderBundleEncoderDescriptor* descriptor) {
-        AttachmentStateBlueprint blueprint(descriptor);
-        return GetOrCreateAttachmentState(&blueprint);
-    }
-
-    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
-        const RenderPipelineDescriptor* descriptor) {
-        AttachmentStateBlueprint blueprint(descriptor);
-        return GetOrCreateAttachmentState(&blueprint);
-    }
-
-    Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
-        const RenderPassDescriptor* descriptor) {
-        AttachmentStateBlueprint blueprint(descriptor);
-        return GetOrCreateAttachmentState(&blueprint);
-    }
-
-    void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
-        ASSERT(obj->IsCachedReference());
-        size_t removedCount = mCaches->attachmentStates.erase(obj);
-        ASSERT(removedCount == 1);
-    }
-
-    // Object creation API methods
-
-    BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
-        Ref<BindGroupBase> result;
-        if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).",
-                          this, descriptor)) {
-            return BindGroupBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
-        const BindGroupLayoutDescriptor* descriptor) {
-        Ref<BindGroupLayoutBase> result;
-        if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
-                          "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
-            return BindGroupLayoutBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
-        Ref<BufferBase> result = nullptr;
-        if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
-                          descriptor)) {
-            ASSERT(result == nullptr);
-            return BufferBase::MakeError(this, descriptor);
-        }
-        return result.Detach();
-    }
-    CommandEncoder* DeviceBase::APICreateCommandEncoder(
-        const CommandEncoderDescriptor* descriptor) {
-        Ref<CommandEncoder> result;
-        if (ConsumedError(CreateCommandEncoder(descriptor), &result,
-                          "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {
-            return CommandEncoder::MakeError(this);
-        }
-        return result.Detach();
-    }
-    ComputePipelineBase* DeviceBase::APICreateComputePipeline(
-        const ComputePipelineDescriptor* descriptor) {
-        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
-                     utils::GetLabelForTrace(descriptor->label));
-
-        Ref<ComputePipelineBase> result;
-        if (ConsumedError(CreateComputePipeline(descriptor), &result,
-                          "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
-            return ComputePipelineBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
-                                                   WGPUCreateComputePipelineAsyncCallback callback,
-                                                   void* userdata) {
-        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
-                     utils::GetLabelForTrace(descriptor->label));
-
-        MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
-
-        // Call the callback directly when a validation error has been found in the front-end
-        // validations. If there is no error, then CreateComputePipelineAsync will call the
-        // callback.
-        if (maybeResult.IsError()) {
-            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
-                     userdata);
-        }
-    }
-    PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
-        const PipelineLayoutDescriptor* descriptor) {
-        Ref<PipelineLayoutBase> result;
-        if (ConsumedError(CreatePipelineLayout(descriptor), &result,
-                          "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
-            return PipelineLayoutBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
-        Ref<QuerySetBase> result;
-        if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).",
-                          this, descriptor)) {
-            return QuerySetBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
-        Ref<SamplerBase> result;
-        if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
-                          descriptor)) {
-            return SamplerBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
-                                                  WGPUCreateRenderPipelineAsyncCallback callback,
-                                                  void* userdata) {
-        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
-                     utils::GetLabelForTrace(descriptor->label));
-        // TODO(dawn:563): Add validation error context.
-        MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
-
-        // Call the callback directly when a validation error has been found in the front-end
-        // validations. If there is no error, then CreateRenderPipelineAsync will call the
-        // callback.
-        if (maybeResult.IsError()) {
-            std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
-                     userdata);
-        }
-    }
-    RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
-        const RenderBundleEncoderDescriptor* descriptor) {
-        Ref<RenderBundleEncoder> result;
-        if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
-                          "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
-            return RenderBundleEncoder::MakeError(this);
-        }
-        return result.Detach();
-    }
-    RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
-        const RenderPipelineDescriptor* descriptor) {
-        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
-                     utils::GetLabelForTrace(descriptor->label));
-
-        Ref<RenderPipelineBase> result;
-        if (ConsumedError(CreateRenderPipeline(descriptor), &result,
-                          "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
-            return RenderPipelineBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
-        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
-                     utils::GetLabelForTrace(descriptor->label));
-
-        Ref<ShaderModuleBase> result;
-        std::unique_ptr<OwnedCompilationMessages> compilationMessages(
-            std::make_unique<OwnedCompilationMessages>());
-        if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
-                          "calling %s.CreateShaderModule(%s).", this, descriptor)) {
-            DAWN_ASSERT(result == nullptr);
-            result = ShaderModuleBase::MakeError(this);
-        }
-        // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
-        // after all other operations are finished successfully.
-        result->InjectCompilationMessages(std::move(compilationMessages));
-
-        return result.Detach();
-    }
-    SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
-                                                  const SwapChainDescriptor* descriptor) {
-        Ref<SwapChainBase> result;
-        if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
-                          "calling %s.CreateSwapChain(%s).", this, descriptor)) {
-            return SwapChainBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-    TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
-        Ref<TextureBase> result;
-        if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
-                          descriptor)) {
-            return TextureBase::MakeError(this);
-        }
-        return result.Detach();
-    }
-
-    // For Dawn Wire
-
-    BufferBase* DeviceBase::APICreateErrorBuffer() {
-        BufferDescriptor desc = {};
-        return BufferBase::MakeError(this, &desc);
-    }
-
-    // Other Device API methods
-
-    // Returns true if future ticking is needed.
-    bool DeviceBase::APITick() {
-        if (IsLost() || ConsumedError(Tick())) {
-            return false;
-        }
-
-        TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APITick::IsDeviceIdle", "isDeviceIdle",
-                     IsDeviceIdle());
-
-        return !IsDeviceIdle();
-    }
-
-    MaybeError DeviceBase::Tick() {
-        DAWN_TRY(ValidateIsAlive());
-
-        // to avoid overly ticking, we only want to tick when:
-        // 1. the last submitted serial has moved beyond the completed serial
-        // 2. or the completed serial has not reached the future serial set by the trackers
-        if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
-            DAWN_TRY(CheckPassedSerials());
-            DAWN_TRY(TickImpl());
-
-            // There is no GPU work in flight, we need to move the serials forward so that
-            // so that CPU operations waiting on GPU completion can know they don't have to wait.
-            // AssumeCommandsComplete will assign the max serial we must tick to in order to
-            // fire the awaiting callbacks.
-            if (mCompletedSerial == mLastSubmittedSerial) {
-                AssumeCommandsComplete();
-            }
-
-            // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
-            // tick the dynamic uploader before the backend resource allocators. This would allow
-            // reclaiming resources one tick earlier.
-            mDynamicUploader->Deallocate(mCompletedSerial);
-            mQueue->Tick(mCompletedSerial);
-        }
-
-        // We have to check callback tasks in every Tick because it is not related to any global
-        // serials.
-        FlushCallbackTaskQueue();
-
-        return {};
-    }
-
-    QueueBase* DeviceBase::APIGetQueue() {
-        // Backends gave the primary queue during initialization.
-        ASSERT(mQueue != nullptr);
-
-        // Returns a new reference to the queue.
-        mQueue->Reference();
-        return mQueue.Get();
-    }
-
-    ExternalTextureBase* DeviceBase::APICreateExternalTexture(
-        const ExternalTextureDescriptor* descriptor) {
-        Ref<ExternalTextureBase> result = nullptr;
-        if (ConsumedError(CreateExternalTextureImpl(descriptor), &result,
-                          "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
-            return ExternalTextureBase::MakeError(this);
-        }
-
-        return result.Detach();
-    }
-
-    void DeviceBase::ApplyFeatures(const DeviceDescriptor* deviceDescriptor) {
-        ASSERT(deviceDescriptor);
-        ASSERT(GetAdapter()->SupportsAllRequiredFeatures(
-            {deviceDescriptor->requiredFeatures, deviceDescriptor->requiredFeaturesCount}));
-
-        for (uint32_t i = 0; i < deviceDescriptor->requiredFeaturesCount; ++i) {
-            mEnabledFeatures.EnableFeature(deviceDescriptor->requiredFeatures[i]);
-        }
-    }
-
-    bool DeviceBase::IsFeatureEnabled(Feature feature) const {
-        return mEnabledFeatures.IsEnabled(feature);
-    }
-
-    bool DeviceBase::IsValidationEnabled() const {
-        return !IsToggleEnabled(Toggle::SkipValidation);
-    }
-
-    bool DeviceBase::IsRobustnessEnabled() const {
-        return !IsToggleEnabled(Toggle::DisableRobustness);
-    }
-
-    size_t DeviceBase::GetLazyClearCountForTesting() {
-        return mLazyClearCountForTesting;
-    }
-
-    void DeviceBase::IncrementLazyClearCountForTesting() {
-        ++mLazyClearCountForTesting;
-    }
-
-    size_t DeviceBase::GetDeprecationWarningCountForTesting() {
-        return mDeprecationWarnings->count;
-    }
-
-    void DeviceBase::EmitDeprecationWarning(const char* warning) {
-        mDeprecationWarnings->count++;
-        if (mDeprecationWarnings->emitted.insert(warning).second) {
-            dawn::WarningLog() << warning;
-        }
-    }
-
-    void DeviceBase::EmitLog(const char* message) {
-        this->EmitLog(WGPULoggingType_Info, message);
-    }
-
-    void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
-        if (mLoggingCallback != nullptr) {
-            // Use the thread-safe CallbackTaskManager routine
-            std::unique_ptr<LoggingCallbackTask> callbackTask =
-                std::make_unique<LoggingCallbackTask>(mLoggingCallback, loggingType, message,
-                                                      mLoggingUserdata);
-            mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
-        }
-    }
-
-    bool DeviceBase::APIGetLimits(SupportedLimits* limits) const {
-        ASSERT(limits != nullptr);
-        if (limits->nextInChain != nullptr) {
-            return false;
-        }
-        limits->limits = mLimits.v1;
+    ExecutionSerial maxSerial = std::max(mLastSubmittedSerial, mFutureSerial);
+    if (mCompletedSerial == maxSerial) {
         return true;
     }
+    return false;
+}
 
-    bool DeviceBase::APIHasFeature(wgpu::FeatureName feature) const {
-        return mEnabledFeatures.IsEnabled(feature);
+ExecutionSerial DeviceBase::GetPendingCommandSerial() const {
+    return mLastSubmittedSerial + ExecutionSerial(1);
+}
+
+void DeviceBase::AddFutureSerial(ExecutionSerial serial) {
+    if (serial > mFutureSerial) {
+        mFutureSerial = serial;
+    }
+}
+
+MaybeError DeviceBase::CheckPassedSerials() {
+    ExecutionSerial completedSerial;
+    DAWN_TRY_ASSIGN(completedSerial, CheckAndUpdateCompletedSerials());
+
+    ASSERT(completedSerial <= mLastSubmittedSerial);
+    // completedSerial should not be less than mCompletedSerial unless it is 0.
+    // It can be 0 when there's no fences to check.
+    ASSERT(completedSerial >= mCompletedSerial || completedSerial == ExecutionSerial(0));
+
+    if (completedSerial > mCompletedSerial) {
+        mCompletedSerial = completedSerial;
     }
 
-    size_t DeviceBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
-        return mEnabledFeatures.EnumerateFeatures(features);
+    return {};
+}
+
+ResultOrError<const Format*> DeviceBase::GetInternalFormat(wgpu::TextureFormat format) const {
+    FormatIndex index = ComputeFormatIndex(format);
+    DAWN_INVALID_IF(index >= mFormatTable.size(), "Unknown texture format %s.", format);
+
+    const Format* internalFormat = &mFormatTable[index];
+    DAWN_INVALID_IF(!internalFormat->isSupported, "Unsupported texture format %s.", format);
+
+    return internalFormat;
+}
+
+const Format& DeviceBase::GetValidInternalFormat(wgpu::TextureFormat format) const {
+    FormatIndex index = ComputeFormatIndex(format);
+    ASSERT(index < mFormatTable.size());
+    ASSERT(mFormatTable[index].isSupported);
+    return mFormatTable[index];
+}
+
+const Format& DeviceBase::GetValidInternalFormat(FormatIndex index) const {
+    ASSERT(index < mFormatTable.size());
+    ASSERT(mFormatTable[index].isSupported);
+    return mFormatTable[index];
+}
+
+ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::GetOrCreateBindGroupLayout(
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    BindGroupLayoutBase blueprint(this, descriptor, pipelineCompatibilityToken,
+                                  ApiObjectBase::kUntrackedByDevice);
+
+    const size_t blueprintHash = blueprint.ComputeContentHash();
+    blueprint.SetContentHash(blueprintHash);
+
+    Ref<BindGroupLayoutBase> result;
+    auto iter = mCaches->bindGroupLayouts.find(&blueprint);
+    if (iter != mCaches->bindGroupLayouts.end()) {
+        result = *iter;
+    } else {
+        DAWN_TRY_ASSIGN(result, CreateBindGroupLayoutImpl(descriptor, pipelineCompatibilityToken));
+        result->SetIsCachedReference();
+        result->SetContentHash(blueprintHash);
+        mCaches->bindGroupLayouts.insert(result.Get());
     }
 
-    void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
-        if (ConsumedError(ValidateErrorType(type))) {
-            return;
-        }
+    return std::move(result);
+}
 
-        // This method should only be used to make error scope reject. For DeviceLost there is the
-        // LoseForTesting function that can be used instead.
-        if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
-            HandleError(InternalErrorType::Validation,
-                        "Invalid injected error, must be Validation or OutOfMemory");
-            return;
-        }
+void DeviceBase::UncacheBindGroupLayout(BindGroupLayoutBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    size_t removedCount = mCaches->bindGroupLayouts.erase(obj);
+    ASSERT(removedCount == 1);
+}
 
-        HandleError(FromWGPUErrorType(type), message);
+// Private function used at initialization
+ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateEmptyBindGroupLayout() {
+    BindGroupLayoutDescriptor desc = {};
+    desc.entryCount = 0;
+    desc.entries = nullptr;
+
+    return GetOrCreateBindGroupLayout(&desc);
+}
+
+BindGroupLayoutBase* DeviceBase::GetEmptyBindGroupLayout() {
+    ASSERT(mEmptyBindGroupLayout != nullptr);
+    return mEmptyBindGroupLayout.Get();
+}
+
+Ref<ComputePipelineBase> DeviceBase::GetCachedComputePipeline(
+    ComputePipelineBase* uninitializedComputePipeline) {
+    Ref<ComputePipelineBase> cachedPipeline;
+    auto iter = mCaches->computePipelines.find(uninitializedComputePipeline);
+    if (iter != mCaches->computePipelines.end()) {
+        cachedPipeline = *iter;
     }
 
-    QueueBase* DeviceBase::GetQueue() const {
-        return mQueue.Get();
+    return cachedPipeline;
+}
+
+Ref<RenderPipelineBase> DeviceBase::GetCachedRenderPipeline(
+    RenderPipelineBase* uninitializedRenderPipeline) {
+    Ref<RenderPipelineBase> cachedPipeline;
+    auto iter = mCaches->renderPipelines.find(uninitializedRenderPipeline);
+    if (iter != mCaches->renderPipelines.end()) {
+        cachedPipeline = *iter;
+    }
+    return cachedPipeline;
+}
+
+Ref<ComputePipelineBase> DeviceBase::AddOrGetCachedComputePipeline(
+    Ref<ComputePipelineBase> computePipeline) {
+    auto [cachedPipeline, inserted] = mCaches->computePipelines.insert(computePipeline.Get());
+    if (inserted) {
+        computePipeline->SetIsCachedReference();
+        return computePipeline;
+    } else {
+        return *cachedPipeline;
+    }
+}
+
+Ref<RenderPipelineBase> DeviceBase::AddOrGetCachedRenderPipeline(
+    Ref<RenderPipelineBase> renderPipeline) {
+    auto [cachedPipeline, inserted] = mCaches->renderPipelines.insert(renderPipeline.Get());
+    if (inserted) {
+        renderPipeline->SetIsCachedReference();
+        return renderPipeline;
+    } else {
+        return *cachedPipeline;
+    }
+}
+
+void DeviceBase::UncacheComputePipeline(ComputePipelineBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    size_t removedCount = mCaches->computePipelines.erase(obj);
+    ASSERT(removedCount == 1);
+}
+
+ResultOrError<Ref<TextureViewBase>>
+DeviceBase::GetOrCreatePlaceholderTextureViewForExternalTexture() {
+    if (!mExternalTexturePlaceholderView.Get()) {
+        Ref<TextureBase> externalTexturePlaceholder;
+        TextureDescriptor textureDesc;
+        textureDesc.dimension = wgpu::TextureDimension::e2D;
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        textureDesc.label = "Dawn_External_Texture_Placeholder_Texture";
+        textureDesc.size = {1, 1, 1};
+        textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+
+        DAWN_TRY_ASSIGN(externalTexturePlaceholder, CreateTexture(&textureDesc));
+
+        TextureViewDescriptor textureViewDesc;
+        textureViewDesc.arrayLayerCount = 1;
+        textureViewDesc.aspect = wgpu::TextureAspect::All;
+        textureViewDesc.baseArrayLayer = 0;
+        textureViewDesc.dimension = wgpu::TextureViewDimension::e2D;
+        textureViewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        textureViewDesc.label = "Dawn_External_Texture_Placeholder_Texture_View";
+        textureViewDesc.mipLevelCount = 1;
+
+        DAWN_TRY_ASSIGN(mExternalTexturePlaceholderView,
+                        CreateTextureView(externalTexturePlaceholder.Get(), &textureViewDesc));
     }
 
-    // Implementation details of object creation
+    return mExternalTexturePlaceholderView;
+}
 
-    ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
-        const BindGroupDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor),
-                             "validating %s against %s", descriptor, descriptor->layout);
-        }
-        return CreateBindGroupImpl(descriptor);
+ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::GetOrCreatePipelineLayout(
+    const PipelineLayoutDescriptor* descriptor) {
+    PipelineLayoutBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+    const size_t blueprintHash = blueprint.ComputeContentHash();
+    blueprint.SetContentHash(blueprintHash);
+
+    Ref<PipelineLayoutBase> result;
+    auto iter = mCaches->pipelineLayouts.find(&blueprint);
+    if (iter != mCaches->pipelineLayouts.end()) {
+        result = *iter;
+    } else {
+        DAWN_TRY_ASSIGN(result, CreatePipelineLayoutImpl(descriptor));
+        result->SetIsCachedReference();
+        result->SetContentHash(blueprintHash);
+        mCaches->pipelineLayouts.insert(result.Get());
     }
 
-    ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
-        const BindGroupLayoutDescriptor* descriptor,
-        bool allowInternalBinding) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(
-                ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
-                "validating %s", descriptor);
-        }
-        return GetOrCreateBindGroupLayout(descriptor);
+    return std::move(result);
+}
+
+void DeviceBase::UncachePipelineLayout(PipelineLayoutBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    size_t removedCount = mCaches->pipelineLayouts.erase(obj);
+    ASSERT(removedCount == 1);
+}
+
+void DeviceBase::UncacheRenderPipeline(RenderPipelineBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    size_t removedCount = mCaches->renderPipelines.erase(obj);
+    ASSERT(removedCount == 1);
+}
+
+ResultOrError<Ref<SamplerBase>> DeviceBase::GetOrCreateSampler(
+    const SamplerDescriptor* descriptor) {
+    SamplerBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+    const size_t blueprintHash = blueprint.ComputeContentHash();
+    blueprint.SetContentHash(blueprintHash);
+
+    Ref<SamplerBase> result;
+    auto iter = mCaches->samplers.find(&blueprint);
+    if (iter != mCaches->samplers.end()) {
+        result = *iter;
+    } else {
+        DAWN_TRY_ASSIGN(result, CreateSamplerImpl(descriptor));
+        result->SetIsCachedReference();
+        result->SetContentHash(blueprintHash);
+        mCaches->samplers.insert(result.Get());
     }
 
-    ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s",
-                             descriptor);
+    return std::move(result);
+}
+
+void DeviceBase::UncacheSampler(SamplerBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    size_t removedCount = mCaches->samplers.erase(obj);
+    ASSERT(removedCount == 1);
+}
+
+ResultOrError<Ref<ShaderModuleBase>> DeviceBase::GetOrCreateShaderModule(
+    const ShaderModuleDescriptor* descriptor,
+    ShaderModuleParseResult* parseResult,
+    OwnedCompilationMessages* compilationMessages) {
+    ASSERT(parseResult != nullptr);
+
+    ShaderModuleBase blueprint(this, descriptor, ApiObjectBase::kUntrackedByDevice);
+
+    const size_t blueprintHash = blueprint.ComputeContentHash();
+    blueprint.SetContentHash(blueprintHash);
+
+    Ref<ShaderModuleBase> result;
+    auto iter = mCaches->shaderModules.find(&blueprint);
+    if (iter != mCaches->shaderModules.end()) {
+        result = *iter;
+    } else {
+        if (!parseResult->HasParsedShader()) {
+            // We skip the parse on creation if validation isn't enabled which lets us quickly
+            // lookup in the cache without validating and parsing. We need the parsed module
+            // now, so call validate. Most of |ValidateShaderModuleDescriptor| is parsing, but
+            // we can consider splitting it if additional validation is added.
+            ASSERT(!IsValidationEnabled());
+            DAWN_TRY(
+                ValidateShaderModuleDescriptor(this, descriptor, parseResult, compilationMessages));
         }
-
-        Ref<BufferBase> buffer;
-        DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
-
-        if (descriptor->mappedAtCreation) {
-            DAWN_TRY(buffer->MapAtCreation());
-        }
-
-        return std::move(buffer);
+        DAWN_TRY_ASSIGN(result, CreateShaderModuleImpl(descriptor, parseResult));
+        result->SetIsCachedReference();
+        result->SetContentHash(blueprintHash);
+        mCaches->shaderModules.insert(result.Get());
     }
 
-    ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
-        const ComputePipelineDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
-        }
+    return std::move(result);
+}
 
-        // Ref will keep the pipeline layout alive until the end of the function where
-        // the pipeline will take another reference.
-        Ref<PipelineLayoutBase> layoutRef;
-        ComputePipelineDescriptor appliedDescriptor;
-        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
-                                       this, *descriptor, &appliedDescriptor));
+void DeviceBase::UncacheShaderModule(ShaderModuleBase* obj) {
+    ASSERT(obj->IsCachedReference());
+    size_t removedCount = mCaches->shaderModules.erase(obj);
+    ASSERT(removedCount == 1);
+}
 
-        Ref<ComputePipelineBase> uninitializedComputePipeline =
-            CreateUninitializedComputePipelineImpl(&appliedDescriptor);
-        Ref<ComputePipelineBase> cachedComputePipeline =
-            GetCachedComputePipeline(uninitializedComputePipeline.Get());
-        if (cachedComputePipeline.Get() != nullptr) {
-            return cachedComputePipeline;
-        }
-
-        DAWN_TRY(uninitializedComputePipeline->Initialize());
-        return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint) {
+    auto iter = mCaches->attachmentStates.find(blueprint);
+    if (iter != mCaches->attachmentStates.end()) {
+        return static_cast<AttachmentState*>(*iter);
     }
 
-    ResultOrError<Ref<CommandEncoder>> DeviceBase::CreateCommandEncoder(
-        const CommandEncoderDescriptor* descriptor) {
-        const CommandEncoderDescriptor defaultDescriptor = {};
-        if (descriptor == nullptr) {
-            descriptor = &defaultDescriptor;
-        }
+    Ref<AttachmentState> attachmentState = AcquireRef(new AttachmentState(this, *blueprint));
+    attachmentState->SetIsCachedReference();
+    attachmentState->SetContentHash(attachmentState->ComputeContentHash());
+    mCaches->attachmentStates.insert(attachmentState.Get());
+    return attachmentState;
+}
 
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY(ValidateCommandEncoderDescriptor(this, descriptor));
-        }
-        return CommandEncoder::Create(this, descriptor);
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+    const RenderBundleEncoderDescriptor* descriptor) {
+    AttachmentStateBlueprint blueprint(descriptor);
+    return GetOrCreateAttachmentState(&blueprint);
+}
+
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+    const RenderPipelineDescriptor* descriptor) {
+    AttachmentStateBlueprint blueprint(descriptor);
+    return GetOrCreateAttachmentState(&blueprint);
+}
+
+Ref<AttachmentState> DeviceBase::GetOrCreateAttachmentState(
+    const RenderPassDescriptor* descriptor) {
+    AttachmentStateBlueprint blueprint(descriptor);
+    return GetOrCreateAttachmentState(&blueprint);
+}
+
+void DeviceBase::UncacheAttachmentState(AttachmentState* obj) {
+    ASSERT(obj->IsCachedReference());
+    size_t removedCount = mCaches->attachmentStates.erase(obj);
+    ASSERT(removedCount == 1);
+}
+
+// Object creation API methods
+
+BindGroupBase* DeviceBase::APICreateBindGroup(const BindGroupDescriptor* descriptor) {
+    Ref<BindGroupBase> result;
+    if (ConsumedError(CreateBindGroup(descriptor), &result, "calling %s.CreateBindGroup(%s).", this,
+                      descriptor)) {
+        return BindGroupBase::MakeError(this);
+    }
+    return result.Detach();
+}
+BindGroupLayoutBase* DeviceBase::APICreateBindGroupLayout(
+    const BindGroupLayoutDescriptor* descriptor) {
+    Ref<BindGroupLayoutBase> result;
+    if (ConsumedError(CreateBindGroupLayout(descriptor), &result,
+                      "calling %s.CreateBindGroupLayout(%s).", this, descriptor)) {
+        return BindGroupLayoutBase::MakeError(this);
+    }
+    return result.Detach();
+}
+BufferBase* DeviceBase::APICreateBuffer(const BufferDescriptor* descriptor) {
+    Ref<BufferBase> result = nullptr;
+    if (ConsumedError(CreateBuffer(descriptor), &result, "calling %s.CreateBuffer(%s).", this,
+                      descriptor)) {
+        ASSERT(result == nullptr);
+        return BufferBase::MakeError(this, descriptor);
+    }
+    return result.Detach();
+}
+CommandEncoder* DeviceBase::APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor) {
+    Ref<CommandEncoder> result;
+    if (ConsumedError(CreateCommandEncoder(descriptor), &result,
+                      "calling %s.CreateCommandEncoder(%s).", this, descriptor)) {
+        return CommandEncoder::MakeError(this);
+    }
+    return result.Detach();
+}
+ComputePipelineBase* DeviceBase::APICreateComputePipeline(
+    const ComputePipelineDescriptor* descriptor) {
+    TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipeline", "label",
+                 utils::GetLabelForTrace(descriptor->label));
+
+    Ref<ComputePipelineBase> result;
+    if (ConsumedError(CreateComputePipeline(descriptor), &result,
+                      "calling %s.CreateComputePipeline(%s).", this, descriptor)) {
+        return ComputePipelineBase::MakeError(this);
+    }
+    return result.Detach();
+}
+void DeviceBase::APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                               WGPUCreateComputePipelineAsyncCallback callback,
+                                               void* userdata) {
+    TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateComputePipelineAsync", "label",
+                 utils::GetLabelForTrace(descriptor->label));
+
+    MaybeError maybeResult = CreateComputePipelineAsync(descriptor, callback, userdata);
+
+    // Call the callback directly when a validation error has been found in the front-end
+    // validations. If there is no error, then CreateComputePipelineAsync will call the
+    // callback.
+    if (maybeResult.IsError()) {
+        std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+                 userdata);
+    }
+}
+PipelineLayoutBase* DeviceBase::APICreatePipelineLayout(
+    const PipelineLayoutDescriptor* descriptor) {
+    Ref<PipelineLayoutBase> result;
+    if (ConsumedError(CreatePipelineLayout(descriptor), &result,
+                      "calling %s.CreatePipelineLayout(%s).", this, descriptor)) {
+        return PipelineLayoutBase::MakeError(this);
+    }
+    return result.Detach();
+}
+QuerySetBase* DeviceBase::APICreateQuerySet(const QuerySetDescriptor* descriptor) {
+    Ref<QuerySetBase> result;
+    if (ConsumedError(CreateQuerySet(descriptor), &result, "calling %s.CreateQuerySet(%s).", this,
+                      descriptor)) {
+        return QuerySetBase::MakeError(this);
+    }
+    return result.Detach();
+}
+SamplerBase* DeviceBase::APICreateSampler(const SamplerDescriptor* descriptor) {
+    Ref<SamplerBase> result;
+    if (ConsumedError(CreateSampler(descriptor), &result, "calling %s.CreateSampler(%s).", this,
+                      descriptor)) {
+        return SamplerBase::MakeError(this);
+    }
+    return result.Detach();
+}
+void DeviceBase::APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                              WGPUCreateRenderPipelineAsyncCallback callback,
+                                              void* userdata) {
+    TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipelineAsync", "label",
+                 utils::GetLabelForTrace(descriptor->label));
+    // TODO(dawn:563): Add validation error context.
+    MaybeError maybeResult = CreateRenderPipelineAsync(descriptor, callback, userdata);
+
+    // Call the callback directly when a validation error has been found in the front-end
+    // validations. If there is no error, then CreateRenderPipelineAsync will call the
+    // callback.
+    if (maybeResult.IsError()) {
+        std::unique_ptr<ErrorData> error = maybeResult.AcquireError();
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        callback(WGPUCreatePipelineAsyncStatus_Error, nullptr, error->GetMessage().c_str(),
+                 userdata);
+    }
+}
+RenderBundleEncoder* DeviceBase::APICreateRenderBundleEncoder(
+    const RenderBundleEncoderDescriptor* descriptor) {
+    Ref<RenderBundleEncoder> result;
+    if (ConsumedError(CreateRenderBundleEncoder(descriptor), &result,
+                      "calling %s.CreateRenderBundleEncoder(%s).", this, descriptor)) {
+        return RenderBundleEncoder::MakeError(this);
+    }
+    return result.Detach();
+}
+RenderPipelineBase* DeviceBase::APICreateRenderPipeline(
+    const RenderPipelineDescriptor* descriptor) {
+    TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateRenderPipeline", "label",
+                 utils::GetLabelForTrace(descriptor->label));
+
+    Ref<RenderPipelineBase> result;
+    if (ConsumedError(CreateRenderPipeline(descriptor), &result,
+                      "calling %s.CreateRenderPipeline(%s).", this, descriptor)) {
+        return RenderPipelineBase::MakeError(this);
+    }
+    return result.Detach();
+}
+ShaderModuleBase* DeviceBase::APICreateShaderModule(const ShaderModuleDescriptor* descriptor) {
+    TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APICreateShaderModule", "label",
+                 utils::GetLabelForTrace(descriptor->label));
+
+    Ref<ShaderModuleBase> result;
+    std::unique_ptr<OwnedCompilationMessages> compilationMessages(
+        std::make_unique<OwnedCompilationMessages>());
+    if (ConsumedError(CreateShaderModule(descriptor, compilationMessages.get()), &result,
+                      "calling %s.CreateShaderModule(%s).", this, descriptor)) {
+        DAWN_ASSERT(result == nullptr);
+        result = ShaderModuleBase::MakeError(this);
+    }
+    // Move compilation messages into ShaderModuleBase and emit tint errors and warnings
+    // after all other operations are finished successfully.
+    result->InjectCompilationMessages(std::move(compilationMessages));
+
+    return result.Detach();
+}
+SwapChainBase* DeviceBase::APICreateSwapChain(Surface* surface,
+                                              const SwapChainDescriptor* descriptor) {
+    Ref<SwapChainBase> result;
+    if (ConsumedError(CreateSwapChain(surface, descriptor), &result,
+                      "calling %s.CreateSwapChain(%s).", this, descriptor)) {
+        return SwapChainBase::MakeError(this);
+    }
+    return result.Detach();
+}
+TextureBase* DeviceBase::APICreateTexture(const TextureDescriptor* descriptor) {
+    Ref<TextureBase> result;
+    if (ConsumedError(CreateTexture(descriptor), &result, "calling %s.CreateTexture(%s).", this,
+                      descriptor)) {
+        return TextureBase::MakeError(this);
+    }
+    return result.Detach();
+}
+
+// For Dawn Wire
+
+BufferBase* DeviceBase::APICreateErrorBuffer() {
+    BufferDescriptor desc = {};
+    return BufferBase::MakeError(this, &desc);
+}
+
+// Other Device API methods
+
+// Returns true if future ticking is needed.
+bool DeviceBase::APITick() {
+    if (IsLost() || ConsumedError(Tick())) {
+        return false;
     }
 
-    MaybeError DeviceBase::CreateComputePipelineAsync(
-        const ComputePipelineDescriptor* descriptor,
-        WGPUCreateComputePipelineAsyncCallback callback,
-        void* userdata) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
+    TRACE_EVENT1(GetPlatform(), General, "DeviceBase::APITick::IsDeviceIdle", "isDeviceIdle",
+                 IsDeviceIdle());
+
+    return !IsDeviceIdle();
+}
+
+MaybeError DeviceBase::Tick() {
+    DAWN_TRY(ValidateIsAlive());
+
+    // To avoid excessive ticking, we only want to tick when:
+    // 1. the last submitted serial has moved beyond the completed serial
+    // 2. or the completed serial has not reached the future serial set by the trackers
+    if (mLastSubmittedSerial > mCompletedSerial || mCompletedSerial < mFutureSerial) {
+        DAWN_TRY(CheckPassedSerials());
+        DAWN_TRY(TickImpl());
+
+        // There is no GPU work in flight, we need to move the serials forward so that
+        // CPU operations waiting on GPU completion can know they don't have to wait.
+        // AssumeCommandsComplete will assign the max serial we must tick to in order to
+        // fire the awaiting callbacks.
+        if (mCompletedSerial == mLastSubmittedSerial) {
+            AssumeCommandsComplete();
         }
 
-        Ref<PipelineLayoutBase> layoutRef;
-        ComputePipelineDescriptor appliedDescriptor;
-        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
-                                       this, *descriptor, &appliedDescriptor));
-
-        Ref<ComputePipelineBase> uninitializedComputePipeline =
-            CreateUninitializedComputePipelineImpl(&appliedDescriptor);
-
-        // Call the callback directly when we can get a cached compute pipeline object.
-        Ref<ComputePipelineBase> cachedComputePipeline =
-            GetCachedComputePipeline(uninitializedComputePipeline.Get());
-        if (cachedComputePipeline.Get() != nullptr) {
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()),
-                     "", userdata);
-        } else {
-            // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
-            // where the pipeline object may be initialized asynchronously and the result will be
-            // saved to mCreatePipelineAsyncTracker.
-            InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
-                                               userdata);
-        }
-
-        return {};
+        // TODO(crbug.com/dawn/833): decouple TickImpl from updating the serial so that we can
+        // tick the dynamic uploader before the backend resource allocators. This would allow
+        // reclaiming resources one tick earlier.
+        mDynamicUploader->Deallocate(mCompletedSerial);
+        mQueue->Tick(mCompletedSerial);
     }
 
-    // This function is overwritten with the async version on the backends that supports
-    //  initializing compute pipelines asynchronously.
-    void DeviceBase::InitializeComputePipelineAsyncImpl(
-        Ref<ComputePipelineBase> computePipeline,
-        WGPUCreateComputePipelineAsyncCallback callback,
-        void* userdata) {
-        Ref<ComputePipelineBase> result;
-        std::string errorMessage;
+    // We have to check callback tasks in every Tick because it is not related to any global
+    // serials.
+    FlushCallbackTaskQueue();
 
-        MaybeError maybeError = computePipeline->Initialize();
-        if (maybeError.IsError()) {
-            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
-            errorMessage = error->GetMessage();
-        } else {
-            result = AddOrGetCachedComputePipeline(std::move(computePipeline));
-        }
+    return {};
+}
 
-        std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
-            std::make_unique<CreateComputePipelineAsyncCallbackTask>(
-                std::move(result), errorMessage, callback, userdata);
+QueueBase* DeviceBase::APIGetQueue() {
+    // Backends gave the primary queue during initialization.
+    ASSERT(mQueue != nullptr);
+
+    // Returns a new reference to the queue.
+    mQueue->Reference();
+    return mQueue.Get();
+}
+
+ExternalTextureBase* DeviceBase::APICreateExternalTexture(
+    const ExternalTextureDescriptor* descriptor) {
+    Ref<ExternalTextureBase> result = nullptr;
+    if (ConsumedError(CreateExternalTextureImpl(descriptor), &result,
+                      "calling %s.CreateExternalTexture(%s).", this, descriptor)) {
+        return ExternalTextureBase::MakeError(this);
+    }
+
+    return result.Detach();
+}
+
+void DeviceBase::ApplyFeatures(const DeviceDescriptor* deviceDescriptor) {
+    ASSERT(deviceDescriptor);
+    ASSERT(GetAdapter()->SupportsAllRequiredFeatures(
+        {deviceDescriptor->requiredFeatures, deviceDescriptor->requiredFeaturesCount}));
+
+    for (uint32_t i = 0; i < deviceDescriptor->requiredFeaturesCount; ++i) {
+        mEnabledFeatures.EnableFeature(deviceDescriptor->requiredFeatures[i]);
+    }
+}
+
+bool DeviceBase::IsFeatureEnabled(Feature feature) const {
+    return mEnabledFeatures.IsEnabled(feature);
+}
+
+bool DeviceBase::IsValidationEnabled() const {
+    return !IsToggleEnabled(Toggle::SkipValidation);
+}
+
+bool DeviceBase::IsRobustnessEnabled() const {
+    return !IsToggleEnabled(Toggle::DisableRobustness);
+}
+
+size_t DeviceBase::GetLazyClearCountForTesting() {
+    return mLazyClearCountForTesting;
+}
+
+void DeviceBase::IncrementLazyClearCountForTesting() {
+    ++mLazyClearCountForTesting;
+}
+
+size_t DeviceBase::GetDeprecationWarningCountForTesting() {
+    return mDeprecationWarnings->count;
+}
+
+void DeviceBase::EmitDeprecationWarning(const char* warning) {
+    mDeprecationWarnings->count++;
+    if (mDeprecationWarnings->emitted.insert(warning).second) {
+        dawn::WarningLog() << warning;
+    }
+}
+
+void DeviceBase::EmitLog(const char* message) {
+    this->EmitLog(WGPULoggingType_Info, message);
+}
+
+void DeviceBase::EmitLog(WGPULoggingType loggingType, const char* message) {
+    if (mLoggingCallback != nullptr) {
+        // Use the thread-safe CallbackTaskManager routine
+        std::unique_ptr<LoggingCallbackTask> callbackTask = std::make_unique<LoggingCallbackTask>(
+            mLoggingCallback, loggingType, message, mLoggingUserdata);
         mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
     }
+}
 
-    // This function is overwritten with the async version on the backends
-    // that supports initializing render pipeline asynchronously
-    void DeviceBase::InitializeRenderPipelineAsyncImpl(
-        Ref<RenderPipelineBase> renderPipeline,
-        WGPUCreateRenderPipelineAsyncCallback callback,
-        void* userdata) {
-        Ref<RenderPipelineBase> result;
-        std::string errorMessage;
+bool DeviceBase::APIGetLimits(SupportedLimits* limits) const {
+    ASSERT(limits != nullptr);
+    if (limits->nextInChain != nullptr) {
+        return false;
+    }
+    limits->limits = mLimits.v1;
+    return true;
+}
 
-        MaybeError maybeError = renderPipeline->Initialize();
-        if (maybeError.IsError()) {
-            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
-            errorMessage = error->GetMessage();
-        } else {
-            result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
-        }
+bool DeviceBase::APIHasFeature(wgpu::FeatureName feature) const {
+    return mEnabledFeatures.IsEnabled(feature);
+}
 
-        std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
-            std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
-                                                                    callback, userdata);
-        mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+size_t DeviceBase::APIEnumerateFeatures(wgpu::FeatureName* features) const {
+    return mEnabledFeatures.EnumerateFeatures(features);
+}
+
+void DeviceBase::APIInjectError(wgpu::ErrorType type, const char* message) {
+    if (ConsumedError(ValidateErrorType(type))) {
+        return;
     }
 
-    ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
-        const PipelineLayoutDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
-        }
-        return GetOrCreatePipelineLayout(descriptor);
+    // This method should only be used to make error scope reject. For DeviceLost there is the
+    // LoseForTesting function that can be used instead.
+    if (type != wgpu::ErrorType::Validation && type != wgpu::ErrorType::OutOfMemory) {
+        HandleError(InternalErrorType::Validation,
+                    "Invalid injected error, must be Validation or OutOfMemory");
+        return;
     }
 
-    ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureImpl(
-        const ExternalTextureDescriptor* descriptor) {
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
-                             descriptor);
-        }
+    HandleError(FromWGPUErrorType(type), message);
+}
 
-        return ExternalTextureBase::Create(this, descriptor);
+QueueBase* DeviceBase::GetQueue() const {
+    return mQueue.Get();
+}
+
+// Implementation details of object creation
+
+ResultOrError<Ref<BindGroupBase>> DeviceBase::CreateBindGroup(
+    const BindGroupDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateBindGroupDescriptor(this, descriptor), "validating %s against %s",
+                         descriptor, descriptor->layout);
+    }
+    return CreateBindGroupImpl(descriptor);
+}
+
+ResultOrError<Ref<BindGroupLayoutBase>> DeviceBase::CreateBindGroupLayout(
+    const BindGroupLayoutDescriptor* descriptor,
+    bool allowInternalBinding) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(this, descriptor, allowInternalBinding),
+                         "validating %s", descriptor);
+    }
+    return GetOrCreateBindGroupLayout(descriptor);
+}
+
+ResultOrError<Ref<BufferBase>> DeviceBase::CreateBuffer(const BufferDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateBufferDescriptor(this, descriptor), "validating %s", descriptor);
     }
 
-    ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(
-        const QuerySetDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s",
-                             descriptor);
-        }
-        return CreateQuerySetImpl(descriptor);
+    Ref<BufferBase> buffer;
+    DAWN_TRY_ASSIGN(buffer, CreateBufferImpl(descriptor));
+
+    if (descriptor->mappedAtCreation) {
+        DAWN_TRY(buffer->MapAtCreation());
     }
 
-    ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
-        const RenderBundleEncoderDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
-        }
-        return RenderBundleEncoder::Create(this, descriptor);
+    return std::move(buffer);
+}
+
+ResultOrError<Ref<ComputePipelineBase>> DeviceBase::CreateComputePipeline(
+    const ComputePipelineDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
     }
 
-    ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
-        const RenderPipelineDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
-        }
+    // Ref will keep the pipeline layout alive until the end of the function where
+    // the pipeline will take another reference.
+    Ref<PipelineLayoutBase> layoutRef;
+    ComputePipelineDescriptor appliedDescriptor;
+    DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+                                   this, *descriptor, &appliedDescriptor));
 
-        // Ref will keep the pipeline layout alive until the end of the function where
-        // the pipeline will take another reference.
-        Ref<PipelineLayoutBase> layoutRef;
-        RenderPipelineDescriptor appliedDescriptor;
-        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
-                                       this, *descriptor, &appliedDescriptor));
-
-        Ref<RenderPipelineBase> uninitializedRenderPipeline =
-            CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
-
-        Ref<RenderPipelineBase> cachedRenderPipeline =
-            GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
-        if (cachedRenderPipeline != nullptr) {
-            return cachedRenderPipeline;
-        }
-
-        DAWN_TRY(uninitializedRenderPipeline->Initialize());
-        return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
+    Ref<ComputePipelineBase> uninitializedComputePipeline =
+        CreateUninitializedComputePipelineImpl(&appliedDescriptor);
+    Ref<ComputePipelineBase> cachedComputePipeline =
+        GetCachedComputePipeline(uninitializedComputePipeline.Get());
+    if (cachedComputePipeline.Get() != nullptr) {
+        return cachedComputePipeline;
     }
 
-    MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
-                                                     WGPUCreateRenderPipelineAsyncCallback callback,
-                                                     void* userdata) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
-        }
+    DAWN_TRY(uninitializedComputePipeline->Initialize());
+    return AddOrGetCachedComputePipeline(std::move(uninitializedComputePipeline));
+}
 
-        // Ref will keep the pipeline layout alive until the end of the function where
-        // the pipeline will take another reference.
-        Ref<PipelineLayoutBase> layoutRef;
-        RenderPipelineDescriptor appliedDescriptor;
-        DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
-                                       this, *descriptor, &appliedDescriptor));
-
-        Ref<RenderPipelineBase> uninitializedRenderPipeline =
-            CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
-
-        // Call the callback directly when we can get a cached render pipeline object.
-        Ref<RenderPipelineBase> cachedRenderPipeline =
-            GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
-        if (cachedRenderPipeline != nullptr) {
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()),
-                     "", userdata);
-        } else {
-            // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
-            // where the pipeline object may be initialized asynchronously and the result will be
-            // saved to mCreatePipelineAsyncTracker.
-            InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
-                                              userdata);
-        }
-
-        return {};
+ResultOrError<Ref<CommandEncoder>> DeviceBase::CreateCommandEncoder(
+    const CommandEncoderDescriptor* descriptor) {
+    const CommandEncoderDescriptor defaultDescriptor = {};
+    if (descriptor == nullptr) {
+        descriptor = &defaultDescriptor;
     }
 
-    ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
-        const SamplerDescriptor defaultDescriptor = {};
-        DAWN_TRY(ValidateIsAlive());
-        descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s",
-                             descriptor);
-        }
-        return GetOrCreateSampler(descriptor);
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY(ValidateCommandEncoderDescriptor(this, descriptor));
+    }
+    return CommandEncoder::Create(this, descriptor);
+}
+
+MaybeError DeviceBase::CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                                  WGPUCreateComputePipelineAsyncCallback callback,
+                                                  void* userdata) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY(ValidateComputePipelineDescriptor(this, descriptor));
     }
 
-    ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
-        const ShaderModuleDescriptor* descriptor,
-        OwnedCompilationMessages* compilationMessages) {
-        DAWN_TRY(ValidateIsAlive());
+    Ref<PipelineLayoutBase> layoutRef;
+    ComputePipelineDescriptor appliedDescriptor;
+    DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetComputePipelineDescriptorWithDefaults(
+                                   this, *descriptor, &appliedDescriptor));
 
-        // CreateShaderModule can be called from inside dawn_native. If that's the case handle the
-        // error directly in Dawn and no compilationMessages held in the shader module. It is ok as
-        // long as dawn_native don't use the compilationMessages of these internal shader modules.
-        ShaderModuleParseResult parseResult;
+    Ref<ComputePipelineBase> uninitializedComputePipeline =
+        CreateUninitializedComputePipelineImpl(&appliedDescriptor);
 
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(
-                ValidateShaderModuleDescriptor(this, descriptor, &parseResult, compilationMessages),
-                "validating %s", descriptor);
+    // Call the callback directly when we can get a cached compute pipeline object.
+    Ref<ComputePipelineBase> cachedComputePipeline =
+        GetCachedComputePipeline(uninitializedComputePipeline.Get());
+    if (cachedComputePipeline.Get() != nullptr) {
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedComputePipeline.Detach()), "",
+                 userdata);
+    } else {
+        // Otherwise we will create the pipeline object in InitializeComputePipelineAsyncImpl(),
+        // where the pipeline object may be initialized asynchronously and the result will be
+        // saved to mCreatePipelineAsyncTracker.
+        InitializeComputePipelineAsyncImpl(std::move(uninitializedComputePipeline), callback,
+                                           userdata);
+    }
+
+    return {};
+}
+
+// This function is overwritten with the async version on the backends that supports
+//  initializing compute pipelines asynchronously.
+void DeviceBase::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                                    void* userdata) {
+    Ref<ComputePipelineBase> result;
+    std::string errorMessage;
+
+    MaybeError maybeError = computePipeline->Initialize();
+    if (maybeError.IsError()) {
+        std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+        errorMessage = error->GetMessage();
+    } else {
+        result = AddOrGetCachedComputePipeline(std::move(computePipeline));
+    }
+
+    std::unique_ptr<CreateComputePipelineAsyncCallbackTask> callbackTask =
+        std::make_unique<CreateComputePipelineAsyncCallbackTask>(std::move(result), errorMessage,
+                                                                 callback, userdata);
+    mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+}
+
+// This function is overwritten with the async version on the backends
+// that supports initializing render pipeline asynchronously
+void DeviceBase::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                                   WGPUCreateRenderPipelineAsyncCallback callback,
+                                                   void* userdata) {
+    Ref<RenderPipelineBase> result;
+    std::string errorMessage;
+
+    MaybeError maybeError = renderPipeline->Initialize();
+    if (maybeError.IsError()) {
+        std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+        errorMessage = error->GetMessage();
+    } else {
+        result = AddOrGetCachedRenderPipeline(std::move(renderPipeline));
+    }
+
+    std::unique_ptr<CreateRenderPipelineAsyncCallbackTask> callbackTask =
+        std::make_unique<CreateRenderPipelineAsyncCallbackTask>(std::move(result), errorMessage,
+                                                                callback, userdata);
+    mCallbackTaskManager->AddCallbackTask(std::move(callbackTask));
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> DeviceBase::CreatePipelineLayout(
+    const PipelineLayoutDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY(ValidatePipelineLayoutDescriptor(this, descriptor));
+    }
+    return GetOrCreatePipelineLayout(descriptor);
+}
+
+ResultOrError<Ref<ExternalTextureBase>> DeviceBase::CreateExternalTextureImpl(
+    const ExternalTextureDescriptor* descriptor) {
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateExternalTextureDescriptor(this, descriptor), "validating %s",
+                         descriptor);
+    }
+
+    return ExternalTextureBase::Create(this, descriptor);
+}
+
+ResultOrError<Ref<QuerySetBase>> DeviceBase::CreateQuerySet(const QuerySetDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateQuerySetDescriptor(this, descriptor), "validating %s", descriptor);
+    }
+    return CreateQuerySetImpl(descriptor);
+}
+
+ResultOrError<Ref<RenderBundleEncoder>> DeviceBase::CreateRenderBundleEncoder(
+    const RenderBundleEncoderDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY(ValidateRenderBundleEncoderDescriptor(this, descriptor));
+    }
+    return RenderBundleEncoder::Create(this, descriptor);
+}
+
+ResultOrError<Ref<RenderPipelineBase>> DeviceBase::CreateRenderPipeline(
+    const RenderPipelineDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+    }
+
+    // Ref will keep the pipeline layout alive until the end of the function where
+    // the pipeline will take another reference.
+    Ref<PipelineLayoutBase> layoutRef;
+    RenderPipelineDescriptor appliedDescriptor;
+    DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+                                   this, *descriptor, &appliedDescriptor));
+
+    Ref<RenderPipelineBase> uninitializedRenderPipeline =
+        CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+    Ref<RenderPipelineBase> cachedRenderPipeline =
+        GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+    if (cachedRenderPipeline != nullptr) {
+        return cachedRenderPipeline;
+    }
+
+    DAWN_TRY(uninitializedRenderPipeline->Initialize());
+    return AddOrGetCachedRenderPipeline(std::move(uninitializedRenderPipeline));
+}
+
+MaybeError DeviceBase::CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                                 WGPUCreateRenderPipelineAsyncCallback callback,
+                                                 void* userdata) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY(ValidateRenderPipelineDescriptor(this, descriptor));
+    }
+
+    // Ref will keep the pipeline layout alive until the end of the function where
+    // the pipeline will take another reference.
+    Ref<PipelineLayoutBase> layoutRef;
+    RenderPipelineDescriptor appliedDescriptor;
+    DAWN_TRY_ASSIGN(layoutRef, ValidateLayoutAndGetRenderPipelineDescriptorWithDefaults(
+                                   this, *descriptor, &appliedDescriptor));
+
+    Ref<RenderPipelineBase> uninitializedRenderPipeline =
+        CreateUninitializedRenderPipelineImpl(&appliedDescriptor);
+
+    // Call the callback directly when we can get a cached render pipeline object.
+    Ref<RenderPipelineBase> cachedRenderPipeline =
+        GetCachedRenderPipeline(uninitializedRenderPipeline.Get());
+    if (cachedRenderPipeline != nullptr) {
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        callback(WGPUCreatePipelineAsyncStatus_Success, ToAPI(cachedRenderPipeline.Detach()), "",
+                 userdata);
+    } else {
+        // Otherwise we will create the pipeline object in InitializeRenderPipelineAsyncImpl(),
+        // where the pipeline object may be initialized asynchronously and the result will be
+        // saved to mCreatePipelineAsyncTracker.
+        InitializeRenderPipelineAsyncImpl(std::move(uninitializedRenderPipeline), callback,
+                                          userdata);
+    }
+
+    return {};
+}
+
+ResultOrError<Ref<SamplerBase>> DeviceBase::CreateSampler(const SamplerDescriptor* descriptor) {
+    const SamplerDescriptor defaultDescriptor = {};
+    DAWN_TRY(ValidateIsAlive());
+    descriptor = descriptor != nullptr ? descriptor : &defaultDescriptor;
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateSamplerDescriptor(this, descriptor), "validating %s", descriptor);
+    }
+    return GetOrCreateSampler(descriptor);
+}
+
+ResultOrError<Ref<ShaderModuleBase>> DeviceBase::CreateShaderModule(
+    const ShaderModuleDescriptor* descriptor,
+    OwnedCompilationMessages* compilationMessages) {
+    DAWN_TRY(ValidateIsAlive());
+
+    // CreateShaderModule can be called from inside dawn_native. If that's the case handle the
+    // error directly in Dawn and no compilationMessages held in the shader module. It is ok as
+    // long as dawn_native don't use the compilationMessages of these internal shader modules.
+    ShaderModuleParseResult parseResult;
+
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(
+            ValidateShaderModuleDescriptor(this, descriptor, &parseResult, compilationMessages),
+            "validating %s", descriptor);
+    }
+
+    return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
+}
+
+ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
+    Surface* surface,
+    const SwapChainDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor), "validating %s",
+                         descriptor);
+    }
+
+    // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
+    if (surface == nullptr) {
+        return CreateSwapChainImpl(descriptor);
+    } else {
+        ASSERT(descriptor->implementation == 0);
+
+        NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
+        ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
+            CreateSwapChainImpl(surface, previousSwapChain, descriptor);
+
+        if (previousSwapChain != nullptr) {
+            previousSwapChain->DetachFromSurface();
         }
 
-        return GetOrCreateShaderModule(descriptor, &parseResult, compilationMessages);
+        Ref<NewSwapChainBase> newSwapChain;
+        DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
+
+        newSwapChain->SetIsAttached();
+        surface->SetAttachedSwapChain(newSwapChain.Get());
+        return newSwapChain;
     }
+}
 
-    ResultOrError<Ref<SwapChainBase>> DeviceBase::CreateSwapChain(
-        Surface* surface,
-        const SwapChainDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateSwapChainDescriptor(this, surface, descriptor),
-                             "validating %s", descriptor);
-        }
-
-        // TODO(dawn:269): Remove this code path once implementation-based swapchains are removed.
-        if (surface == nullptr) {
-            return CreateSwapChainImpl(descriptor);
-        } else {
-            ASSERT(descriptor->implementation == 0);
-
-            NewSwapChainBase* previousSwapChain = surface->GetAttachedSwapChain();
-            ResultOrError<Ref<NewSwapChainBase>> maybeNewSwapChain =
-                CreateSwapChainImpl(surface, previousSwapChain, descriptor);
-
-            if (previousSwapChain != nullptr) {
-                previousSwapChain->DetachFromSurface();
-            }
-
-            Ref<NewSwapChainBase> newSwapChain;
-            DAWN_TRY_ASSIGN(newSwapChain, std::move(maybeNewSwapChain));
-
-            newSwapChain->SetIsAttached();
-            surface->SetAttachedSwapChain(newSwapChain.Get());
-            return newSwapChain;
-        }
+ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.", descriptor);
     }
+    return CreateTextureImpl(descriptor);
+}
 
-    ResultOrError<Ref<TextureBase>> DeviceBase::CreateTexture(const TextureDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateTextureDescriptor(this, descriptor), "validating %s.",
-                             descriptor);
-        }
-        return CreateTextureImpl(descriptor);
+ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
+    TextureBase* texture,
+    const TextureViewDescriptor* descriptor) {
+    DAWN_TRY(ValidateIsAlive());
+    DAWN_TRY(ValidateObject(texture));
+
+    TextureViewDescriptor desc;
+    DAWN_TRY_ASSIGN(desc, GetTextureViewDescriptorWithDefaults(texture, descriptor));
+
+    if (IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
+                         "validating %s against %s.", &desc, texture);
     }
+    return CreateTextureViewImpl(texture, &desc);
+}
 
-    ResultOrError<Ref<TextureViewBase>> DeviceBase::CreateTextureView(
-        TextureBase* texture,
-        const TextureViewDescriptor* descriptor) {
-        DAWN_TRY(ValidateIsAlive());
-        DAWN_TRY(ValidateObject(texture));
+// Other implementation details
 
-        TextureViewDescriptor desc;
-        DAWN_TRY_ASSIGN(desc, GetTextureViewDescriptorWithDefaults(texture, descriptor));
+DynamicUploader* DeviceBase::GetDynamicUploader() const {
+    return mDynamicUploader.get();
+}
 
-        if (IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(ValidateTextureViewDescriptor(this, texture, &desc),
-                             "validating %s against %s.", &desc, texture);
-        }
-        return CreateTextureViewImpl(texture, &desc);
-    }
+// The Toggle device facility
 
-    // Other implementation details
+std::vector<const char*> DeviceBase::GetTogglesUsed() const {
+    return mEnabledToggles.GetContainedToggleNames();
+}
 
-    DynamicUploader* DeviceBase::GetDynamicUploader() const {
-        return mDynamicUploader.get();
-    }
+bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
+    return mEnabledToggles.Has(toggle);
+}
 
-    // The Toggle device facility
-
-    std::vector<const char*> DeviceBase::GetTogglesUsed() const {
-        return mEnabledToggles.GetContainedToggleNames();
-    }
-
-    bool DeviceBase::IsToggleEnabled(Toggle toggle) const {
-        return mEnabledToggles.Has(toggle);
-    }
-
-    void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
-        if (!mOverridenToggles.Has(toggle)) {
-            mEnabledToggles.Set(toggle, isEnabled);
-        }
-    }
-
-    void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
-        if (mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
-            dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
-                               << isEnabled << " when it was overriden to be " << !isEnabled;
-        }
+void DeviceBase::SetToggle(Toggle toggle, bool isEnabled) {
+    if (!mOverridenToggles.Has(toggle)) {
         mEnabledToggles.Set(toggle, isEnabled);
     }
+}
 
-    void DeviceBase::SetDefaultToggles() {
-        SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
-        SetToggle(Toggle::DisallowUnsafeAPIs, true);
+void DeviceBase::ForceSetToggle(Toggle toggle, bool isEnabled) {
+    if (mOverridenToggles.Has(toggle) && mEnabledToggles.Has(toggle) != isEnabled) {
+        dawn::WarningLog() << "Forcing toggle \"" << ToggleEnumToName(toggle) << "\" to "
+                           << isEnabled << " when it was overriden to be " << !isEnabled;
     }
+    mEnabledToggles.Set(toggle, isEnabled);
+}
 
-    void DeviceBase::ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor) {
-        ASSERT(togglesDescriptor != nullptr);
+void DeviceBase::SetDefaultToggles() {
+    SetToggle(Toggle::LazyClearResourceOnFirstUse, true);
+    SetToggle(Toggle::DisallowUnsafeAPIs, true);
+}
 
-        for (uint32_t i = 0; i < togglesDescriptor->forceEnabledTogglesCount; ++i) {
-            Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
-                togglesDescriptor->forceEnabledToggles[i]);
-            if (toggle != Toggle::InvalidEnum) {
-                mEnabledToggles.Set(toggle, true);
-                mOverridenToggles.Set(toggle, true);
-            }
-        }
-        for (uint32_t i = 0; i < togglesDescriptor->forceDisabledTogglesCount; ++i) {
-            Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
-                togglesDescriptor->forceDisabledToggles[i]);
-            if (toggle != Toggle::InvalidEnum) {
-                mEnabledToggles.Set(toggle, false);
-                mOverridenToggles.Set(toggle, true);
-            }
+void DeviceBase::ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor) {
+    ASSERT(togglesDescriptor != nullptr);
+
+    for (uint32_t i = 0; i < togglesDescriptor->forceEnabledTogglesCount; ++i) {
+        Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+            togglesDescriptor->forceEnabledToggles[i]);
+        if (toggle != Toggle::InvalidEnum) {
+            mEnabledToggles.Set(toggle, true);
+            mOverridenToggles.Set(toggle, true);
         }
     }
-
-    void DeviceBase::FlushCallbackTaskQueue() {
-        if (!mCallbackTaskManager->IsEmpty()) {
-            // If a user calls Queue::Submit inside the callback, then the device will be ticked,
-            // which in turns ticks the tracker, causing reentrance and dead lock here. To prevent
-            // such reentrant call, we remove all the callback tasks from mCallbackTaskManager,
-            // update mCallbackTaskManager, then call all the callbacks.
-            auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
-            for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
-                callbackTask->Finish();
-            }
+    for (uint32_t i = 0; i < togglesDescriptor->forceDisabledTogglesCount; ++i) {
+        Toggle toggle = GetAdapter()->GetInstance()->ToggleNameToEnum(
+            togglesDescriptor->forceDisabledToggles[i]);
+        if (toggle != Toggle::InvalidEnum) {
+            mEnabledToggles.Set(toggle, false);
+            mOverridenToggles.Set(toggle, true);
         }
     }
+}
 
-    const CombinedLimits& DeviceBase::GetLimits() const {
-        return mLimits;
+void DeviceBase::FlushCallbackTaskQueue() {
+    if (!mCallbackTaskManager->IsEmpty()) {
+        // If a user calls Queue::Submit inside the callback, then the device will be ticked,
+        // which in turns ticks the tracker, causing reentrance and dead lock here. To prevent
+        // such reentrant call, we remove all the callback tasks from mCallbackTaskManager,
+        // update mCallbackTaskManager, then call all the callbacks.
+        auto callbackTasks = mCallbackTaskManager->AcquireCallbackTasks();
+        for (std::unique_ptr<CallbackTask>& callbackTask : callbackTasks) {
+            callbackTask->Finish();
+        }
     }
+}
 
-    AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
-        return mAsyncTaskManager.get();
-    }
+const CombinedLimits& DeviceBase::GetLimits() const {
+    return mLimits;
+}
 
-    CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
-        return mCallbackTaskManager.get();
-    }
+AsyncTaskManager* DeviceBase::GetAsyncTaskManager() const {
+    return mAsyncTaskManager.get();
+}
 
-    dawn::platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
-        return mWorkerTaskPool.get();
-    }
+CallbackTaskManager* DeviceBase::GetCallbackTaskManager() const {
+    return mCallbackTaskManager.get();
+}
 
-    void DeviceBase::AddComputePipelineAsyncCallbackTask(
-        Ref<ComputePipelineBase> pipeline,
-        std::string errorMessage,
-        WGPUCreateComputePipelineAsyncCallback callback,
-        void* userdata) {
-        // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
-        // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
-        struct CreateComputePipelineAsyncWaitableCallbackTask final
-            : CreateComputePipelineAsyncCallbackTask {
-            using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
-            void Finish() final {
-                // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
-                // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
-                // thread-safe.
-                if (mPipeline.Get() != nullptr) {
-                    mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
-                }
+dawn::platform::WorkerTaskPool* DeviceBase::GetWorkerTaskPool() const {
+    return mWorkerTaskPool.get();
+}
 
-                CreateComputePipelineAsyncCallbackTask::Finish();
+void DeviceBase::AddComputePipelineAsyncCallbackTask(
+    Ref<ComputePipelineBase> pipeline,
+    std::string errorMessage,
+    WGPUCreateComputePipelineAsyncCallback callback,
+    void* userdata) {
+    // CreateComputePipelineAsyncWaitableCallbackTask is declared as an internal class as it
+    // needs to call the private member function DeviceBase::AddOrGetCachedComputePipeline().
+    struct CreateComputePipelineAsyncWaitableCallbackTask final
+        : CreateComputePipelineAsyncCallbackTask {
+        using CreateComputePipelineAsyncCallbackTask::CreateComputePipelineAsyncCallbackTask;
+        void Finish() final {
+            // TODO(dawn:529): call AddOrGetCachedComputePipeline() asynchronously in
+            // CreateComputePipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+            // thread-safe.
+            if (mPipeline.Get() != nullptr) {
+                mPipeline = mPipeline->GetDevice()->AddOrGetCachedComputePipeline(mPipeline);
             }
-        };
 
-        mCallbackTaskManager->AddCallbackTask(
-            std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
-                std::move(pipeline), errorMessage, callback, userdata));
-    }
+            CreateComputePipelineAsyncCallbackTask::Finish();
+        }
+    };
 
-    void DeviceBase::AddRenderPipelineAsyncCallbackTask(
-        Ref<RenderPipelineBase> pipeline,
-        std::string errorMessage,
-        WGPUCreateRenderPipelineAsyncCallback callback,
-        void* userdata) {
-        // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
-        // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
-        struct CreateRenderPipelineAsyncWaitableCallbackTask final
-            : CreateRenderPipelineAsyncCallbackTask {
-            using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
+    mCallbackTaskManager->AddCallbackTask(
+        std::make_unique<CreateComputePipelineAsyncWaitableCallbackTask>(
+            std::move(pipeline), errorMessage, callback, userdata));
+}
 
-            void Finish() final {
-                // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
-                // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
-                // thread-safe.
-                if (mPipeline.Get() != nullptr) {
-                    mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
-                }
+void DeviceBase::AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+                                                    std::string errorMessage,
+                                                    WGPUCreateRenderPipelineAsyncCallback callback,
+                                                    void* userdata) {
+    // CreateRenderPipelineAsyncWaitableCallbackTask is declared as an internal class as it
+    // needs to call the private member function DeviceBase::AddOrGetCachedRenderPipeline().
+    struct CreateRenderPipelineAsyncWaitableCallbackTask final
+        : CreateRenderPipelineAsyncCallbackTask {
+        using CreateRenderPipelineAsyncCallbackTask::CreateRenderPipelineAsyncCallbackTask;
 
-                CreateRenderPipelineAsyncCallbackTask::Finish();
+        void Finish() final {
+            // TODO(dawn:529): call AddOrGetCachedRenderPipeline() asynchronously in
+            // CreateRenderPipelineAsyncTaskImpl::Run() when the front-end pipeline cache is
+            // thread-safe.
+            if (mPipeline.Get() != nullptr) {
+                mPipeline = mPipeline->GetDevice()->AddOrGetCachedRenderPipeline(mPipeline);
             }
-        };
 
-        mCallbackTaskManager->AddCallbackTask(
-            std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
-                std::move(pipeline), errorMessage, callback, userdata));
-    }
+            CreateRenderPipelineAsyncCallbackTask::Finish();
+        }
+    };
 
-    PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
-        return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
-    }
+    mCallbackTaskManager->AddCallbackTask(
+        std::make_unique<CreateRenderPipelineAsyncWaitableCallbackTask>(
+            std::move(pipeline), errorMessage, callback, userdata));
+}
 
-    const CacheKey& DeviceBase::GetCacheKey() const {
-        return mDeviceCacheKey;
-    }
+PipelineCompatibilityToken DeviceBase::GetNextPipelineCompatibilityToken() {
+    return PipelineCompatibilityToken(mNextPipelineCompatibilityToken++);
+}
 
-    const std::string& DeviceBase::GetLabel() const {
-        return mLabel;
-    }
+const CacheKey& DeviceBase::GetCacheKey() const {
+    return mDeviceCacheKey;
+}
 
-    void DeviceBase::APISetLabel(const char* label) {
-        mLabel = label;
-        SetLabelImpl();
-    }
+const std::string& DeviceBase::GetLabel() const {
+    return mLabel;
+}
 
-    void DeviceBase::SetLabelImpl() {
-    }
+void DeviceBase::APISetLabel(const char* label) {
+    mLabel = label;
+    SetLabelImpl();
+}
 
-    bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
-        ComputePipelineBase* computePipeline) const {
-        return false;
-    }
+void DeviceBase::SetLabelImpl() {}
 
-    bool DeviceBase::MayRequireDuplicationOfIndirectParameters() const {
-        return false;
-    }
+bool DeviceBase::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+    ComputePipelineBase* computePipeline) const {
+    return false;
+}
 
-    bool DeviceBase::ShouldDuplicateParametersForDrawIndirect(
-        const RenderPipelineBase* renderPipelineBase) const {
-        return false;
-    }
+bool DeviceBase::MayRequireDuplicationOfIndirectParameters() const {
+    return false;
+}
+
+bool DeviceBase::ShouldDuplicateParametersForDrawIndirect(
+    const RenderPipelineBase* renderPipelineBase) const {
+    return false;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Device.h b/src/dawn/native/Device.h
index 3f53461..9501bbb 100644
--- a/src/dawn/native/Device.h
+++ b/src/dawn/native/Device.h
@@ -38,523 +38,515 @@
 #include "dawn/native/dawn_platform.h"
 
 namespace dawn::platform {
-    class WorkerTaskPool;
+class WorkerTaskPool;
 }  // namespace dawn::platform
 
 namespace dawn::native {
-    class AsyncTaskManager;
-    class AttachmentState;
-    class AttachmentStateBlueprint;
-    class BlobCache;
-    class CallbackTaskManager;
-    class DynamicUploader;
-    class ErrorScopeStack;
-    class OwnedCompilationMessages;
-    struct CallbackTask;
-    struct InternalPipelineStore;
-    struct ShaderModuleParseResult;
+class AsyncTaskManager;
+class AttachmentState;
+class AttachmentStateBlueprint;
+class BlobCache;
+class CallbackTaskManager;
+class DynamicUploader;
+class ErrorScopeStack;
+class OwnedCompilationMessages;
+struct CallbackTask;
+struct InternalPipelineStore;
+struct ShaderModuleParseResult;
 
-    class DeviceBase : public RefCounted {
-      public:
-        DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
-        virtual ~DeviceBase();
+class DeviceBase : public RefCounted {
+  public:
+    DeviceBase(AdapterBase* adapter, const DeviceDescriptor* descriptor);
+    virtual ~DeviceBase();
 
-        void HandleError(InternalErrorType type, const char* message);
+    void HandleError(InternalErrorType type, const char* message);
 
-        bool ConsumedError(MaybeError maybeError) {
-            if (DAWN_UNLIKELY(maybeError.IsError())) {
-                ConsumeError(maybeError.AcquireError());
-                return true;
-            }
-            return false;
+    bool ConsumedError(MaybeError maybeError) {
+        if (DAWN_UNLIKELY(maybeError.IsError())) {
+            ConsumeError(maybeError.AcquireError());
+            return true;
         }
+        return false;
+    }
 
-        template <typename T>
-        bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
-            if (DAWN_UNLIKELY(resultOrError.IsError())) {
-                ConsumeError(resultOrError.AcquireError());
-                return true;
-            }
-            *result = resultOrError.AcquireSuccess();
-            return false;
+    template <typename T>
+    bool ConsumedError(ResultOrError<T> resultOrError, T* result) {
+        if (DAWN_UNLIKELY(resultOrError.IsError())) {
+            ConsumeError(resultOrError.AcquireError());
+            return true;
         }
+        *result = resultOrError.AcquireSuccess();
+        return false;
+    }
 
-        template <typename... Args>
-        bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
-            if (DAWN_UNLIKELY(maybeError.IsError())) {
-                std::unique_ptr<ErrorData> error = maybeError.AcquireError();
-                if (error->GetType() == InternalErrorType::Validation) {
-                    std::string out;
-                    absl::UntypedFormatSpec format(formatStr);
-                    if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
-                        error->AppendContext(std::move(out));
-                    } else {
-                        error->AppendContext(
-                            absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
-                    }
+    template <typename... Args>
+    bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
+        if (DAWN_UNLIKELY(maybeError.IsError())) {
+            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+            if (error->GetType() == InternalErrorType::Validation) {
+                std::string out;
+                absl::UntypedFormatSpec format(formatStr);
+                if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+                    error->AppendContext(std::move(out));
+                } else {
+                    error->AppendContext(
+                        absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
                 }
-                ConsumeError(std::move(error));
-                return true;
             }
-            return false;
+            ConsumeError(std::move(error));
+            return true;
         }
+        return false;
+    }
 
-        template <typename T, typename... Args>
-        bool ConsumedError(ResultOrError<T> resultOrError,
-                           T* result,
-                           const char* formatStr,
-                           const Args&... args) {
-            if (DAWN_UNLIKELY(resultOrError.IsError())) {
-                std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
-                if (error->GetType() == InternalErrorType::Validation) {
-                    std::string out;
-                    absl::UntypedFormatSpec format(formatStr);
-                    if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
-                        error->AppendContext(std::move(out));
-                    } else {
-                        error->AppendContext(
-                            absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
-                    }
+    template <typename T, typename... Args>
+    bool ConsumedError(ResultOrError<T> resultOrError,
+                       T* result,
+                       const char* formatStr,
+                       const Args&... args) {
+        if (DAWN_UNLIKELY(resultOrError.IsError())) {
+            std::unique_ptr<ErrorData> error = resultOrError.AcquireError();
+            if (error->GetType() == InternalErrorType::Validation) {
+                std::string out;
+                absl::UntypedFormatSpec format(formatStr);
+                if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+                    error->AppendContext(std::move(out));
+                } else {
+                    error->AppendContext(
+                        absl::StrFormat("[Failed to format error: \"%s\"]", formatStr));
                 }
-                ConsumeError(std::move(error));
-                return true;
             }
-            *result = resultOrError.AcquireSuccess();
-            return false;
+            ConsumeError(std::move(error));
+            return true;
         }
+        *result = resultOrError.AcquireSuccess();
+        return false;
+    }
 
-        MaybeError ValidateObject(const ApiObjectBase* object) const;
+    MaybeError ValidateObject(const ApiObjectBase* object) const;
 
-        AdapterBase* GetAdapter() const;
-        dawn::platform::Platform* GetPlatform() const;
+    AdapterBase* GetAdapter() const;
+    dawn::platform::Platform* GetPlatform() const;
 
-        // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
-        // isn't a valid wgpu::TextureFormat or isn't supported by this device.
-        // The pointer returned has the same lifetime as the device.
-        ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
+    // Returns the Format corresponding to the wgpu::TextureFormat or an error if the format
+    // isn't a valid wgpu::TextureFormat or isn't supported by this device.
+    // The pointer returned has the same lifetime as the device.
+    ResultOrError<const Format*> GetInternalFormat(wgpu::TextureFormat format) const;
 
-        // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
-        // valid and supported.
-        // The reference returned has the same lifetime as the device.
-        const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
-        const Format& GetValidInternalFormat(FormatIndex formatIndex) const;
+    // Returns the Format corresponding to the wgpu::TextureFormat and assumes the format is
+    // valid and supported.
+    // The reference returned has the same lifetime as the device.
+    const Format& GetValidInternalFormat(wgpu::TextureFormat format) const;
+    const Format& GetValidInternalFormat(FormatIndex formatIndex) const;
 
-        virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
-            CommandEncoder* encoder,
-            const CommandBufferDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) = 0;
 
-        ExecutionSerial GetCompletedCommandSerial() const;
-        ExecutionSerial GetLastSubmittedCommandSerial() const;
-        ExecutionSerial GetFutureSerial() const;
-        ExecutionSerial GetPendingCommandSerial() const;
+    ExecutionSerial GetCompletedCommandSerial() const;
+    ExecutionSerial GetLastSubmittedCommandSerial() const;
+    ExecutionSerial GetFutureSerial() const;
+    ExecutionSerial GetPendingCommandSerial() const;
 
-        // Many Dawn objects are completely immutable once created which means that if two
-        // creations are given the same arguments, they can return the same object. Reusing
-        // objects will help make comparisons between objects by a single pointer comparison.
-        //
-        // Technically no object is immutable as they have a reference count, and an
-        // application with reference-counting issues could "see" that objects are reused.
-        // This is solved by automatic-reference counting, and also the fact that when using
-        // the client-server wire every creation will get a different proxy object, with a
-        // different reference count.
-        //
-        // When trying to create an object, we give both the descriptor and an example of what
-        // the created object will be, the "blueprint". The blueprint is just a FooBase object
-        // instead of a backend Foo object. If the blueprint doesn't match an object in the
-        // cache, then the descriptor is used to make a new object.
-        ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
-        void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
+    // Many Dawn objects are completely immutable once created which means that if two
+    // creations are given the same arguments, they can return the same object. Reusing
+    // objects will help make comparisons between objects by a single pointer comparison.
+    //
+    // Technically no object is immutable as they have a reference count, and an
+    // application with reference-counting issues could "see" that objects are reused.
+    // This is solved by automatic-reference counting, and also the fact that when using
+    // the client-server wire every creation will get a different proxy object, with a
+    // different reference count.
+    //
+    // When trying to create an object, we give both the descriptor and an example of what
+    // the created object will be, the "blueprint". The blueprint is just a FooBase object
+    // instead of a backend Foo object. If the blueprint doesn't match an object in the
+    // cache, then the descriptor is used to make a new object.
+    ResultOrError<Ref<BindGroupLayoutBase>> GetOrCreateBindGroupLayout(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+    void UncacheBindGroupLayout(BindGroupLayoutBase* obj);
 
-        BindGroupLayoutBase* GetEmptyBindGroupLayout();
+    BindGroupLayoutBase* GetEmptyBindGroupLayout();
 
-        void UncacheComputePipeline(ComputePipelineBase* obj);
+    void UncacheComputePipeline(ComputePipelineBase* obj);
 
-        ResultOrError<Ref<TextureViewBase>> GetOrCreatePlaceholderTextureViewForExternalTexture();
+    ResultOrError<Ref<TextureViewBase>> GetOrCreatePlaceholderTextureViewForExternalTexture();
 
-        ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
-            const PipelineLayoutDescriptor* descriptor);
-        void UncachePipelineLayout(PipelineLayoutBase* obj);
+    ResultOrError<Ref<PipelineLayoutBase>> GetOrCreatePipelineLayout(
+        const PipelineLayoutDescriptor* descriptor);
+    void UncachePipelineLayout(PipelineLayoutBase* obj);
 
-        void UncacheRenderPipeline(RenderPipelineBase* obj);
+    void UncacheRenderPipeline(RenderPipelineBase* obj);
 
-        ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
-        void UncacheSampler(SamplerBase* obj);
+    ResultOrError<Ref<SamplerBase>> GetOrCreateSampler(const SamplerDescriptor* descriptor);
+    void UncacheSampler(SamplerBase* obj);
 
-        ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
-            const ShaderModuleDescriptor* descriptor,
-            ShaderModuleParseResult* parseResult,
-            OwnedCompilationMessages* compilationMessages);
-        void UncacheShaderModule(ShaderModuleBase* obj);
+    ResultOrError<Ref<ShaderModuleBase>> GetOrCreateShaderModule(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult,
+        OwnedCompilationMessages* compilationMessages);
+    void UncacheShaderModule(ShaderModuleBase* obj);
 
-        Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint);
-        Ref<AttachmentState> GetOrCreateAttachmentState(
-            const RenderBundleEncoderDescriptor* descriptor);
-        Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
-        Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
-        void UncacheAttachmentState(AttachmentState* obj);
+    Ref<AttachmentState> GetOrCreateAttachmentState(AttachmentStateBlueprint* blueprint);
+    Ref<AttachmentState> GetOrCreateAttachmentState(
+        const RenderBundleEncoderDescriptor* descriptor);
+    Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPipelineDescriptor* descriptor);
+    Ref<AttachmentState> GetOrCreateAttachmentState(const RenderPassDescriptor* descriptor);
+    void UncacheAttachmentState(AttachmentState* obj);
 
-        // Object creation methods that be used in a reentrant manner.
-        ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
-        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
-            const BindGroupLayoutDescriptor* descriptor,
-            bool allowInternalBinding = false);
-        ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
-        ResultOrError<Ref<CommandEncoder>> CreateCommandEncoder(
-            const CommandEncoderDescriptor* descriptor = nullptr);
-        ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
-            const ComputePipelineDescriptor* descriptor);
-        MaybeError CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
-                                              WGPUCreateComputePipelineAsyncCallback callback,
-                                              void* userdata);
-
-        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
-            const PipelineLayoutDescriptor* descriptor);
-        ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
-        ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
-            const RenderBundleEncoderDescriptor* descriptor);
-        ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
-            const RenderPipelineDescriptor* descriptor);
-        MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
-                                             WGPUCreateRenderPipelineAsyncCallback callback,
-                                             void* userdata);
-        ResultOrError<Ref<SamplerBase>> CreateSampler(
-            const SamplerDescriptor* descriptor = nullptr);
-        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
-            const ShaderModuleDescriptor* descriptor,
-            OwnedCompilationMessages* compilationMessages = nullptr);
-        ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
-                                                          const SwapChainDescriptor* descriptor);
-        ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
-        ResultOrError<Ref<TextureViewBase>> CreateTextureView(
-            TextureBase* texture,
-            const TextureViewDescriptor* descriptor);
-
-        // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
-        BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
-        BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
-        BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
-        CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
-        ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
-        PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
-        QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
-        void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
-                                           WGPUCreateComputePipelineAsyncCallback callback,
-                                           void* userdata);
-        void APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
-                                          WGPUCreateRenderPipelineAsyncCallback callback,
+    // Object creation methods that can be used in a reentrant manner.
+    ResultOrError<Ref<BindGroupBase>> CreateBindGroup(const BindGroupDescriptor* descriptor);
+    ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayout(
+        const BindGroupLayoutDescriptor* descriptor,
+        bool allowInternalBinding = false);
+    ResultOrError<Ref<BufferBase>> CreateBuffer(const BufferDescriptor* descriptor);
+    ResultOrError<Ref<CommandEncoder>> CreateCommandEncoder(
+        const CommandEncoderDescriptor* descriptor = nullptr);
+    ResultOrError<Ref<ComputePipelineBase>> CreateComputePipeline(
+        const ComputePipelineDescriptor* descriptor);
+    MaybeError CreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                          WGPUCreateComputePipelineAsyncCallback callback,
                                           void* userdata);
-        RenderBundleEncoder* APICreateRenderBundleEncoder(
-            const RenderBundleEncoderDescriptor* descriptor);
-        RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
-        ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
-        SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
-        ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
-        SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
-        TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
 
-        InternalPipelineStore* GetInternalPipelineStore();
+    ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayout(
+        const PipelineLayoutDescriptor* descriptor);
+    ResultOrError<Ref<QuerySetBase>> CreateQuerySet(const QuerySetDescriptor* descriptor);
+    ResultOrError<Ref<RenderBundleEncoder>> CreateRenderBundleEncoder(
+        const RenderBundleEncoderDescriptor* descriptor);
+    ResultOrError<Ref<RenderPipelineBase>> CreateRenderPipeline(
+        const RenderPipelineDescriptor* descriptor);
+    MaybeError CreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                         WGPUCreateRenderPipelineAsyncCallback callback,
+                                         void* userdata);
+    ResultOrError<Ref<SamplerBase>> CreateSampler(const SamplerDescriptor* descriptor = nullptr);
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(
+        const ShaderModuleDescriptor* descriptor,
+        OwnedCompilationMessages* compilationMessages = nullptr);
+    ResultOrError<Ref<SwapChainBase>> CreateSwapChain(Surface* surface,
+                                                      const SwapChainDescriptor* descriptor);
+    ResultOrError<Ref<TextureBase>> CreateTexture(const TextureDescriptor* descriptor);
+    ResultOrError<Ref<TextureViewBase>> CreateTextureView(TextureBase* texture,
+                                                          const TextureViewDescriptor* descriptor);
 
-        // For Dawn Wire
-        BufferBase* APICreateErrorBuffer();
+    // Implementation of API object creation methods. DO NOT use them in a reentrant manner.
+    BindGroupBase* APICreateBindGroup(const BindGroupDescriptor* descriptor);
+    BindGroupLayoutBase* APICreateBindGroupLayout(const BindGroupLayoutDescriptor* descriptor);
+    BufferBase* APICreateBuffer(const BufferDescriptor* descriptor);
+    CommandEncoder* APICreateCommandEncoder(const CommandEncoderDescriptor* descriptor);
+    ComputePipelineBase* APICreateComputePipeline(const ComputePipelineDescriptor* descriptor);
+    PipelineLayoutBase* APICreatePipelineLayout(const PipelineLayoutDescriptor* descriptor);
+    QuerySetBase* APICreateQuerySet(const QuerySetDescriptor* descriptor);
+    void APICreateComputePipelineAsync(const ComputePipelineDescriptor* descriptor,
+                                       WGPUCreateComputePipelineAsyncCallback callback,
+                                       void* userdata);
+    void APICreateRenderPipelineAsync(const RenderPipelineDescriptor* descriptor,
+                                      WGPUCreateRenderPipelineAsyncCallback callback,
+                                      void* userdata);
+    RenderBundleEncoder* APICreateRenderBundleEncoder(
+        const RenderBundleEncoderDescriptor* descriptor);
+    RenderPipelineBase* APICreateRenderPipeline(const RenderPipelineDescriptor* descriptor);
+    ExternalTextureBase* APICreateExternalTexture(const ExternalTextureDescriptor* descriptor);
+    SamplerBase* APICreateSampler(const SamplerDescriptor* descriptor);
+    ShaderModuleBase* APICreateShaderModule(const ShaderModuleDescriptor* descriptor);
+    SwapChainBase* APICreateSwapChain(Surface* surface, const SwapChainDescriptor* descriptor);
+    TextureBase* APICreateTexture(const TextureDescriptor* descriptor);
 
-        QueueBase* APIGetQueue();
+    InternalPipelineStore* GetInternalPipelineStore();
 
-        bool APIGetLimits(SupportedLimits* limits) const;
-        bool APIHasFeature(wgpu::FeatureName feature) const;
-        size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
-        void APIInjectError(wgpu::ErrorType type, const char* message);
-        bool APITick();
+    // For Dawn Wire
+    BufferBase* APICreateErrorBuffer();
 
-        void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
-        void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
-        void APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata);
-        void APIPushErrorScope(wgpu::ErrorFilter filter);
-        bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
+    QueueBase* APIGetQueue();
 
-        MaybeError ValidateIsAlive() const;
+    bool APIGetLimits(SupportedLimits* limits) const;
+    bool APIHasFeature(wgpu::FeatureName feature) const;
+    size_t APIEnumerateFeatures(wgpu::FeatureName* features) const;
+    void APIInjectError(wgpu::ErrorType type, const char* message);
+    bool APITick();
 
-        BlobCache* GetBlobCache();
+    void APISetDeviceLostCallback(wgpu::DeviceLostCallback callback, void* userdata);
+    void APISetUncapturedErrorCallback(wgpu::ErrorCallback callback, void* userdata);
+    void APISetLoggingCallback(wgpu::LoggingCallback callback, void* userdata);
+    void APIPushErrorScope(wgpu::ErrorFilter filter);
+    bool APIPopErrorScope(wgpu::ErrorCallback callback, void* userdata);
 
-        virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(
-            size_t size) = 0;
-        virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
-                                                   uint64_t sourceOffset,
-                                                   BufferBase* destination,
-                                                   uint64_t destinationOffset,
-                                                   uint64_t size) = 0;
-        virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
-                                                    const TextureDataLayout& src,
-                                                    TextureCopy* dst,
-                                                    const Extent3D& copySizePixels) = 0;
+    MaybeError ValidateIsAlive() const;
 
-        DynamicUploader* GetDynamicUploader() const;
+    BlobCache* GetBlobCache();
 
-        // The device state which is a combination of creation state and loss state.
-        //
-        //   - BeingCreated: the device didn't finish creation yet and the frontend cannot be used
-        //     (both for the application calling WebGPU, or re-entrant calls). No work exists on
-        //     the GPU timeline.
-        //   - Alive: the device is usable and might have work happening on the GPU timeline.
-        //   - BeingDisconnected: the device is no longer usable because we are waiting for all
-        //     work on the GPU timeline to finish. (this is to make validation prevent the
-        //     application from adding more work during the transition from Available to
-        //     Disconnected)
-        //   - Disconnected: there is no longer work happening on the GPU timeline and the CPU data
-        //     structures can be safely destroyed without additional synchronization.
-        //   - Destroyed: the device is disconnected and resources have been reclaimed.
-        enum class State {
-            BeingCreated,
-            Alive,
-            BeingDisconnected,
-            Disconnected,
-            Destroyed,
-        };
-        State GetState() const;
-        bool IsLost() const;
-        void TrackObject(ApiObjectBase* object);
-        std::mutex* GetObjectListMutex(ObjectType type);
+    virtual ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) = 0;
+    virtual MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                               uint64_t sourceOffset,
+                                               BufferBase* destination,
+                                               uint64_t destinationOffset,
+                                               uint64_t size) = 0;
+    virtual MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                                const TextureDataLayout& src,
+                                                TextureCopy* dst,
+                                                const Extent3D& copySizePixels) = 0;
 
-        std::vector<const char*> GetTogglesUsed() const;
-        bool IsFeatureEnabled(Feature feature) const;
-        bool IsToggleEnabled(Toggle toggle) const;
-        bool IsValidationEnabled() const;
-        bool IsRobustnessEnabled() const;
-        size_t GetLazyClearCountForTesting();
-        void IncrementLazyClearCountForTesting();
-        size_t GetDeprecationWarningCountForTesting();
-        void EmitDeprecationWarning(const char* warning);
-        void EmitLog(const char* message);
-        void EmitLog(WGPULoggingType loggingType, const char* message);
-        void APILoseForTesting();
-        QueueBase* GetQueue() const;
+    DynamicUploader* GetDynamicUploader() const;
 
-        // AddFutureSerial is used to update the mFutureSerial with the max serial needed to be
-        // ticked in order to clean up all pending callback work or to execute asynchronous resource
-        // writes. It should be given the serial that a callback is tracked with, so that once that
-        // serial is completed, it can be resolved and cleaned up. This is so that when there is no
-        // gpu work (the last submitted serial has not moved beyond the completed serial), Tick can
-        // still check if we have pending work to take care of, rather than hanging and never
-        // reaching the serial the work will be executed on.
-        void AddFutureSerial(ExecutionSerial serial);
-        // Check for passed fences and set the new completed serial
-        MaybeError CheckPassedSerials();
-
-        MaybeError Tick();
-
-        // TODO(crbug.com/dawn/839): Organize the below backend-specific parameters into the struct
-        // BackendMetadata that we can query from the device.
-        virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
-        virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
-
-        virtual float GetTimestampPeriodInNS() const = 0;
-
-        virtual bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
-            ComputePipelineBase* computePipeline) const;
-
-        virtual bool MayRequireDuplicationOfIndirectParameters() const;
-
-        virtual bool ShouldDuplicateParametersForDrawIndirect(
-            const RenderPipelineBase* renderPipelineBase) const;
-
-        const CombinedLimits& GetLimits() const;
-
-        AsyncTaskManager* GetAsyncTaskManager() const;
-        CallbackTaskManager* GetCallbackTaskManager() const;
-        dawn::platform::WorkerTaskPool* GetWorkerTaskPool() const;
-
-        void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
-                                                 std::string errorMessage,
-                                                 WGPUCreateComputePipelineAsyncCallback callback,
-                                                 void* userdata);
-        void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
-                                                std::string errorMessage,
-                                                WGPUCreateRenderPipelineAsyncCallback callback,
-                                                void* userdata);
-
-        PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
-
-        const CacheKey& GetCacheKey() const;
-        const std::string& GetLabel() const;
-        void APISetLabel(const char* label);
-        void APIDestroy();
-
-        virtual void AppendDebugLayerMessages(ErrorData* error) {
-        }
-
-      protected:
-        // Constructor used only for mocking and testing.
-        DeviceBase();
-
-        void SetToggle(Toggle toggle, bool isEnabled);
-        void ForceSetToggle(Toggle toggle, bool isEnabled);
-
-        MaybeError Initialize(Ref<QueueBase> defaultQueue);
-        void DestroyObjects();
-        void Destroy();
-
-        // Incrememt mLastSubmittedSerial when we submit the next serial
-        void IncrementLastSubmittedCommandSerial();
-
-      private:
-        virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
-            const BindGroupDescriptor* descriptor) = 0;
-        virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
-        virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-            const BufferDescriptor* descriptor) = 0;
-        virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
-            const ExternalTextureDescriptor* descriptor);
-        virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
-            const PipelineLayoutDescriptor* descriptor) = 0;
-        virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
-            const QuerySetDescriptor* descriptor) = 0;
-        virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
-            const SamplerDescriptor* descriptor) = 0;
-        virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
-            const ShaderModuleDescriptor* descriptor,
-            ShaderModuleParseResult* parseResult) = 0;
-        virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
-            const SwapChainDescriptor* descriptor) = 0;
-        // Note that previousSwapChain may be nullptr, or come from a different backend.
-        virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
-            Surface* surface,
-            NewSwapChainBase* previousSwapChain,
-            const SwapChainDescriptor* descriptor) = 0;
-        virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
-            const TextureDescriptor* descriptor) = 0;
-        virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
-            TextureBase* texture,
-            const TextureViewDescriptor* descriptor) = 0;
-        virtual Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
-            const ComputePipelineDescriptor* descriptor) = 0;
-        virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
-            const RenderPipelineDescriptor* descriptor) = 0;
-        virtual void SetLabelImpl();
-
-        virtual MaybeError TickImpl() = 0;
-        void FlushCallbackTaskQueue();
-
-        ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
-
-        Ref<ComputePipelineBase> GetCachedComputePipeline(
-            ComputePipelineBase* uninitializedComputePipeline);
-        Ref<RenderPipelineBase> GetCachedRenderPipeline(
-            RenderPipelineBase* uninitializedRenderPipeline);
-        Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
-            Ref<ComputePipelineBase> computePipeline);
-        Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(
-            Ref<RenderPipelineBase> renderPipeline);
-        virtual void InitializeComputePipelineAsyncImpl(
-            Ref<ComputePipelineBase> computePipeline,
-            WGPUCreateComputePipelineAsyncCallback callback,
-            void* userdata);
-        virtual void InitializeRenderPipelineAsyncImpl(
-            Ref<RenderPipelineBase> renderPipeline,
-            WGPUCreateRenderPipelineAsyncCallback callback,
-            void* userdata);
-
-        void ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor);
-        void ApplyFeatures(const DeviceDescriptor* deviceDescriptor);
-
-        void SetDefaultToggles();
-
-        void ConsumeError(std::unique_ptr<ErrorData> error);
-
-        // Each backend should implement to check their passed fences if there are any and return a
-        // completed serial. Return 0 should indicate no fences to check.
-        virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
-        // During shut down of device, some operations might have been started since the last submit
-        // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
-        // make all commands look completed.
-        void AssumeCommandsComplete();
-        bool IsDeviceIdle();
-
-        // mCompletedSerial tracks the last completed command serial that the fence has returned.
-        // mLastSubmittedSerial tracks the last submitted command serial.
-        // During device removal, the serials could be artificially incremented
-        // to make it appear as if commands have been compeleted. They can also be artificially
-        // incremented when no work is being done in the GPU so CPU operations don't have to wait on
-        // stale serials.
-        // mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
-        // callbacks to fire
-        ExecutionSerial mCompletedSerial = ExecutionSerial(0);
-        ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
-        ExecutionSerial mFutureSerial = ExecutionSerial(0);
-
-        // DestroyImpl is used to clean up and release resources used by device, does not wait for
-        // GPU or check errors.
-        virtual void DestroyImpl() = 0;
-
-        // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
-        // destruction. This is only used when properly destructing the device. For a real
-        // device loss, this function doesn't need to be called since the driver already closed all
-        // resources.
-        virtual MaybeError WaitForIdleForDestruction() = 0;
-
-        wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
-        void* mUncapturedErrorUserdata = nullptr;
-
-        wgpu::LoggingCallback mLoggingCallback = nullptr;
-        void* mLoggingUserdata = nullptr;
-
-        wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
-        void* mDeviceLostUserdata = nullptr;
-
-        std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
-
-        // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
-        // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
-        // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
-        // Instance.
-        Ref<InstanceBase> mInstance;
-        AdapterBase* mAdapter = nullptr;
-
-        // The object caches aren't exposed in the header as they would require a lot of
-        // additional includes.
-        struct Caches;
-        std::unique_ptr<Caches> mCaches;
-
-        Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
-
-        Ref<TextureViewBase> mExternalTexturePlaceholderView;
-
-        std::unique_ptr<DynamicUploader> mDynamicUploader;
-        std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
-        Ref<QueueBase> mQueue;
-
-        struct DeprecationWarnings;
-        std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
-
-        State mState = State::BeingCreated;
-
-        // Encompasses the mutex and the actual list that contains all live objects "owned" by the
-        // device.
-        struct ApiObjectList {
-            std::mutex mutex;
-            LinkedList<ApiObjectBase> objects;
-        };
-        PerObjectType<ApiObjectList> mObjectLists;
-
-        FormatTable mFormatTable;
-
-        TogglesSet mEnabledToggles;
-        TogglesSet mOverridenToggles;
-        size_t mLazyClearCountForTesting = 0;
-        std::atomic_uint64_t mNextPipelineCompatibilityToken;
-
-        CombinedLimits mLimits;
-        FeaturesSet mEnabledFeatures;
-
-        std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
-
-        std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
-        std::unique_ptr<dawn::platform::WorkerTaskPool> mWorkerTaskPool;
-        std::string mLabel;
-        CacheKey mDeviceCacheKey;
+    // The device state which is a combination of creation state and loss state.
+    //
+    //   - BeingCreated: the device didn't finish creation yet and the frontend cannot be used
+    //     (both for the application calling WebGPU, or re-entrant calls). No work exists on
+    //     the GPU timeline.
+    //   - Alive: the device is usable and might have work happening on the GPU timeline.
+    //   - BeingDisconnected: the device is no longer usable because we are waiting for all
+    //     work on the GPU timeline to finish. (this is to make validation prevent the
+    //     application from adding more work during the transition from Available to
+    //     Disconnected)
+    //   - Disconnected: there is no longer work happening on the GPU timeline and the CPU data
+    //     structures can be safely destroyed without additional synchronization.
+    //   - Destroyed: the device is disconnected and resources have been reclaimed.
+    enum class State {
+        BeingCreated,
+        Alive,
+        BeingDisconnected,
+        Disconnected,
+        Destroyed,
     };
+    State GetState() const;
+    bool IsLost() const;
+    void TrackObject(ApiObjectBase* object);
+    std::mutex* GetObjectListMutex(ObjectType type);
+
+    std::vector<const char*> GetTogglesUsed() const;
+    bool IsFeatureEnabled(Feature feature) const;
+    bool IsToggleEnabled(Toggle toggle) const;
+    bool IsValidationEnabled() const;
+    bool IsRobustnessEnabled() const;
+    size_t GetLazyClearCountForTesting();
+    void IncrementLazyClearCountForTesting();
+    size_t GetDeprecationWarningCountForTesting();
+    void EmitDeprecationWarning(const char* warning);
+    void EmitLog(const char* message);
+    void EmitLog(WGPULoggingType loggingType, const char* message);
+    void APILoseForTesting();
+    QueueBase* GetQueue() const;
+
+    // AddFutureSerial is used to update the mFutureSerial with the max serial needed to be
+    // ticked in order to clean up all pending callback work or to execute asynchronous resource
+    // writes. It should be given the serial that a callback is tracked with, so that once that
+    // serial is completed, it can be resolved and cleaned up. This is so that when there is no
+    // gpu work (the last submitted serial has not moved beyond the completed serial), Tick can
+    // still check if we have pending work to take care of, rather than hanging and never
+    // reaching the serial the work will be executed on.
+    void AddFutureSerial(ExecutionSerial serial);
+    // Check for passed fences and set the new completed serial
+    MaybeError CheckPassedSerials();
+
+    MaybeError Tick();
+
+    // TODO(crbug.com/dawn/839): Organize the below backend-specific parameters into the struct
+    // BackendMetadata that we can query from the device.
+    virtual uint32_t GetOptimalBytesPerRowAlignment() const = 0;
+    virtual uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const = 0;
+
+    virtual float GetTimestampPeriodInNS() const = 0;
+
+    virtual bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+        ComputePipelineBase* computePipeline) const;
+
+    virtual bool MayRequireDuplicationOfIndirectParameters() const;
+
+    virtual bool ShouldDuplicateParametersForDrawIndirect(
+        const RenderPipelineBase* renderPipelineBase) const;
+
+    const CombinedLimits& GetLimits() const;
+
+    AsyncTaskManager* GetAsyncTaskManager() const;
+    CallbackTaskManager* GetCallbackTaskManager() const;
+    dawn::platform::WorkerTaskPool* GetWorkerTaskPool() const;
+
+    void AddComputePipelineAsyncCallbackTask(Ref<ComputePipelineBase> pipeline,
+                                             std::string errorMessage,
+                                             WGPUCreateComputePipelineAsyncCallback callback,
+                                             void* userdata);
+    void AddRenderPipelineAsyncCallbackTask(Ref<RenderPipelineBase> pipeline,
+                                            std::string errorMessage,
+                                            WGPUCreateRenderPipelineAsyncCallback callback,
+                                            void* userdata);
+
+    PipelineCompatibilityToken GetNextPipelineCompatibilityToken();
+
+    const CacheKey& GetCacheKey() const;
+    const std::string& GetLabel() const;
+    void APISetLabel(const char* label);
+    void APIDestroy();
+
+    virtual void AppendDebugLayerMessages(ErrorData* error) {}
+
+  protected:
+    // Constructor used only for mocking and testing.
+    DeviceBase();
+
+    void SetToggle(Toggle toggle, bool isEnabled);
+    void ForceSetToggle(Toggle toggle, bool isEnabled);
+
+    MaybeError Initialize(Ref<QueueBase> defaultQueue);
+    void DestroyObjects();
+    void Destroy();
+
+    // Increment mLastSubmittedSerial when we submit the next serial
+    void IncrementLastSubmittedCommandSerial();
+
+  private:
+    virtual ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) = 0;
+    virtual ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<ExternalTextureBase>> CreateExternalTextureImpl(
+        const ExternalTextureDescriptor* descriptor);
+    virtual ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
+        const SamplerDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) = 0;
+    virtual ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) = 0;
+    // Note that previousSwapChain may be nullptr, or come from a different backend.
+    virtual ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<TextureBase>> CreateTextureImpl(
+        const TextureDescriptor* descriptor) = 0;
+    virtual ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) = 0;
+    virtual Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) = 0;
+    virtual Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) = 0;
+    virtual void SetLabelImpl();
+
+    virtual MaybeError TickImpl() = 0;
+    void FlushCallbackTaskQueue();
+
+    ResultOrError<Ref<BindGroupLayoutBase>> CreateEmptyBindGroupLayout();
+
+    Ref<ComputePipelineBase> GetCachedComputePipeline(
+        ComputePipelineBase* uninitializedComputePipeline);
+    Ref<RenderPipelineBase> GetCachedRenderPipeline(
+        RenderPipelineBase* uninitializedRenderPipeline);
+    Ref<ComputePipelineBase> AddOrGetCachedComputePipeline(
+        Ref<ComputePipelineBase> computePipeline);
+    Ref<RenderPipelineBase> AddOrGetCachedRenderPipeline(Ref<RenderPipelineBase> renderPipeline);
+    virtual void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                                    void* userdata);
+    virtual void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                                   WGPUCreateRenderPipelineAsyncCallback callback,
+                                                   void* userdata);
+
+    void ApplyToggleOverrides(const DawnTogglesDeviceDescriptor* togglesDescriptor);
+    void ApplyFeatures(const DeviceDescriptor* deviceDescriptor);
+
+    void SetDefaultToggles();
+
+    void ConsumeError(std::unique_ptr<ErrorData> error);
+
+    // Each backend should implement to check their passed fences if there are any and return a
+    // completed serial. Return 0 should indicate no fences to check.
+    virtual ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() = 0;
+    // During shut down of device, some operations might have been started since the last submit
+    // and waiting on a serial that doesn't have a corresponding fence enqueued. Fake serials to
+    // make all commands look completed.
+    void AssumeCommandsComplete();
+    bool IsDeviceIdle();
+
+    // mCompletedSerial tracks the last completed command serial that the fence has returned.
+    // mLastSubmittedSerial tracks the last submitted command serial.
+    // During device removal, the serials could be artificially incremented
+    // to make it appear as if commands have been completed. They can also be artificially
+    // incremented when no work is being done in the GPU so CPU operations don't have to wait on
+    // stale serials.
+    // mFutureSerial tracks the largest serial we need to tick to for asynchronous commands or
+    // callbacks to fire
+    ExecutionSerial mCompletedSerial = ExecutionSerial(0);
+    ExecutionSerial mLastSubmittedSerial = ExecutionSerial(0);
+    ExecutionSerial mFutureSerial = ExecutionSerial(0);
+
+    // DestroyImpl is used to clean up and release resources used by device, does not wait for
+    // GPU or check errors.
+    virtual void DestroyImpl() = 0;
+
+    // WaitForIdleForDestruction waits for GPU to finish, checks errors and gets ready for
+    // destruction. This is only used when properly destructing the device. For a real
+    // device loss, this function doesn't need to be called since the driver already closed all
+    // resources.
+    virtual MaybeError WaitForIdleForDestruction() = 0;
+
+    wgpu::ErrorCallback mUncapturedErrorCallback = nullptr;
+    void* mUncapturedErrorUserdata = nullptr;
+
+    wgpu::LoggingCallback mLoggingCallback = nullptr;
+    void* mLoggingUserdata = nullptr;
+
+    wgpu::DeviceLostCallback mDeviceLostCallback = nullptr;
+    void* mDeviceLostUserdata = nullptr;
+
+    std::unique_ptr<ErrorScopeStack> mErrorScopeStack;
+
+    // The Device keeps a ref to the Instance so that any live Device keeps the Instance alive.
+    // The Instance shouldn't need to ref child objects so this shouldn't introduce ref cycles.
+    // The Device keeps a simple pointer to the Adapter because the Adapter is owned by the
+    // Instance.
+    Ref<InstanceBase> mInstance;
+    AdapterBase* mAdapter = nullptr;
+
+    // The object caches aren't exposed in the header as they would require a lot of
+    // additional includes.
+    struct Caches;
+    std::unique_ptr<Caches> mCaches;
+
+    Ref<BindGroupLayoutBase> mEmptyBindGroupLayout;
+
+    Ref<TextureViewBase> mExternalTexturePlaceholderView;
+
+    std::unique_ptr<DynamicUploader> mDynamicUploader;
+    std::unique_ptr<AsyncTaskManager> mAsyncTaskManager;
+    Ref<QueueBase> mQueue;
+
+    struct DeprecationWarnings;
+    std::unique_ptr<DeprecationWarnings> mDeprecationWarnings;
+
+    State mState = State::BeingCreated;
+
+    // Encompasses the mutex and the actual list that contains all live objects "owned" by the
+    // device.
+    struct ApiObjectList {
+        std::mutex mutex;
+        LinkedList<ApiObjectBase> objects;
+    };
+    PerObjectType<ApiObjectList> mObjectLists;
+
+    FormatTable mFormatTable;
+
+    TogglesSet mEnabledToggles;
+    TogglesSet mOverridenToggles;
+    size_t mLazyClearCountForTesting = 0;
+    std::atomic_uint64_t mNextPipelineCompatibilityToken;
+
+    CombinedLimits mLimits;
+    FeaturesSet mEnabledFeatures;
+
+    std::unique_ptr<InternalPipelineStore> mInternalPipelineStore;
+
+    std::unique_ptr<CallbackTaskManager> mCallbackTaskManager;
+    std::unique_ptr<dawn::platform::WorkerTaskPool> mWorkerTaskPool;
+    std::string mLabel;
+    CacheKey mDeviceCacheKey;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/DynamicUploader.cpp b/src/dawn/native/DynamicUploader.cpp
index e1c0b6c..bae374f 100644
--- a/src/dawn/native/DynamicUploader.cpp
+++ b/src/dawn/native/DynamicUploader.cpp
@@ -21,112 +21,109 @@
 
 namespace dawn::native {
 
-    DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
+DynamicUploader::DynamicUploader(DeviceBase* device) : mDevice(device) {
+    mRingBuffers.emplace_back(
+        std::unique_ptr<RingBuffer>(new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
+}
+
+void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
+    mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer), mDevice->GetPendingCommandSerial());
+}
+
+ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
+                                                              ExecutionSerial serial) {
+    // Disable further sub-allocation should the request be too large.
+    if (allocationSize > kRingBufferSize) {
+        std::unique_ptr<StagingBufferBase> stagingBuffer;
+        DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
+
+        UploadHandle uploadHandle;
+        uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
+        uploadHandle.stagingBuffer = stagingBuffer.get();
+
+        ReleaseStagingBuffer(std::move(stagingBuffer));
+        return uploadHandle;
+    }
+
+    // Note: Validation ensures size is already aligned.
+    // First-fit: find next smallest buffer large enough to satisfy the allocation request.
+    RingBuffer* targetRingBuffer = mRingBuffers.back().get();
+    for (auto& ringBuffer : mRingBuffers) {
+        const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
+        // Prevent overflow.
+        ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
+        const uint64_t remainingSize =
+            ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
+        if (allocationSize <= remainingSize) {
+            targetRingBuffer = ringBuffer.get();
+            break;
+        }
+    }
+
+    uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
+    if (targetRingBuffer != nullptr) {
+        startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
+    }
+
+    // Upon failure, append a newly created ring buffer to fulfill the
+    // request.
+    if (startOffset == RingBufferAllocator::kInvalidOffset) {
         mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
             new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
+
+        targetRingBuffer = mRingBuffers.back().get();
+        startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
     }
 
-    void DynamicUploader::ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer) {
-        mReleasedStagingBuffers.Enqueue(std::move(stagingBuffer),
-                                        mDevice->GetPendingCommandSerial());
+    ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
+
+    // Allocate the staging buffer backing the ringbuffer.
+    // Note: the first ringbuffer will be lazily created.
+    if (targetRingBuffer->mStagingBuffer == nullptr) {
+        std::unique_ptr<StagingBufferBase> stagingBuffer;
+        DAWN_TRY_ASSIGN(stagingBuffer,
+                        mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
+        targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
     }
 
-    ResultOrError<UploadHandle> DynamicUploader::AllocateInternal(uint64_t allocationSize,
-                                                                  ExecutionSerial serial) {
-        // Disable further sub-allocation should the request be too large.
-        if (allocationSize > kRingBufferSize) {
-            std::unique_ptr<StagingBufferBase> stagingBuffer;
-            DAWN_TRY_ASSIGN(stagingBuffer, mDevice->CreateStagingBuffer(allocationSize));
+    ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
 
-            UploadHandle uploadHandle;
-            uploadHandle.mappedBuffer = static_cast<uint8_t*>(stagingBuffer->GetMappedPointer());
-            uploadHandle.stagingBuffer = stagingBuffer.get();
+    UploadHandle uploadHandle;
+    uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
+    uploadHandle.mappedBuffer =
+        static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
+    uploadHandle.startOffset = startOffset;
 
-            ReleaseStagingBuffer(std::move(stagingBuffer));
-            return uploadHandle;
+    return uploadHandle;
+}
+
+void DynamicUploader::Deallocate(ExecutionSerial lastCompletedSerial) {
+    // Reclaim memory within the ring buffers by ticking (or removing requests no longer
+    // in-flight).
+    for (size_t i = 0; i < mRingBuffers.size(); ++i) {
+        mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
+
+        // Never erase the last buffer so as to prevent re-creating smaller buffers
+        // again. The last buffer is the largest.
+        if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
+            mRingBuffers.erase(mRingBuffers.begin() + i);
         }
-
-        // Note: Validation ensures size is already aligned.
-        // First-fit: find next smallest buffer large enough to satisfy the allocation request.
-        RingBuffer* targetRingBuffer = mRingBuffers.back().get();
-        for (auto& ringBuffer : mRingBuffers) {
-            const RingBufferAllocator& ringBufferAllocator = ringBuffer->mAllocator;
-            // Prevent overflow.
-            ASSERT(ringBufferAllocator.GetSize() >= ringBufferAllocator.GetUsedSize());
-            const uint64_t remainingSize =
-                ringBufferAllocator.GetSize() - ringBufferAllocator.GetUsedSize();
-            if (allocationSize <= remainingSize) {
-                targetRingBuffer = ringBuffer.get();
-                break;
-            }
-        }
-
-        uint64_t startOffset = RingBufferAllocator::kInvalidOffset;
-        if (targetRingBuffer != nullptr) {
-            startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
-        }
-
-        // Upon failure, append a newly created ring buffer to fulfill the
-        // request.
-        if (startOffset == RingBufferAllocator::kInvalidOffset) {
-            mRingBuffers.emplace_back(std::unique_ptr<RingBuffer>(
-                new RingBuffer{nullptr, RingBufferAllocator(kRingBufferSize)}));
-
-            targetRingBuffer = mRingBuffers.back().get();
-            startOffset = targetRingBuffer->mAllocator.Allocate(allocationSize, serial);
-        }
-
-        ASSERT(startOffset != RingBufferAllocator::kInvalidOffset);
-
-        // Allocate the staging buffer backing the ringbuffer.
-        // Note: the first ringbuffer will be lazily created.
-        if (targetRingBuffer->mStagingBuffer == nullptr) {
-            std::unique_ptr<StagingBufferBase> stagingBuffer;
-            DAWN_TRY_ASSIGN(stagingBuffer,
-                            mDevice->CreateStagingBuffer(targetRingBuffer->mAllocator.GetSize()));
-            targetRingBuffer->mStagingBuffer = std::move(stagingBuffer);
-        }
-
-        ASSERT(targetRingBuffer->mStagingBuffer != nullptr);
-
-        UploadHandle uploadHandle;
-        uploadHandle.stagingBuffer = targetRingBuffer->mStagingBuffer.get();
-        uploadHandle.mappedBuffer =
-            static_cast<uint8_t*>(uploadHandle.stagingBuffer->GetMappedPointer()) + startOffset;
-        uploadHandle.startOffset = startOffset;
-
-        return uploadHandle;
     }
+    mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
+}
 
-    void DynamicUploader::Deallocate(ExecutionSerial lastCompletedSerial) {
-        // Reclaim memory within the ring buffers by ticking (or removing requests no longer
-        // in-flight).
-        for (size_t i = 0; i < mRingBuffers.size(); ++i) {
-            mRingBuffers[i]->mAllocator.Deallocate(lastCompletedSerial);
-
-            // Never erase the last buffer as to prevent re-creating smaller buffers
-            // again. The last buffer is the largest.
-            if (mRingBuffers[i]->mAllocator.Empty() && i < mRingBuffers.size() - 1) {
-                mRingBuffers.erase(mRingBuffers.begin() + i);
-            }
-        }
-        mReleasedStagingBuffers.ClearUpTo(lastCompletedSerial);
-    }
-
-    // TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
-    // when it's not necessary.
-    ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
-                                                          ExecutionSerial serial,
-                                                          uint64_t offsetAlignment) {
-        ASSERT(offsetAlignment > 0);
-        UploadHandle uploadHandle;
-        DAWN_TRY_ASSIGN(uploadHandle,
-                        AllocateInternal(allocationSize + offsetAlignment - 1, serial));
-        uint64_t additionalOffset =
-            Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
-        uploadHandle.mappedBuffer =
-            static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
-        uploadHandle.startOffset += additionalOffset;
-        return uploadHandle;
-    }
+// TODO(dawn:512): Optimize this function so that it doesn't allocate additional memory
+// when it's not necessary.
+ResultOrError<UploadHandle> DynamicUploader::Allocate(uint64_t allocationSize,
+                                                      ExecutionSerial serial,
+                                                      uint64_t offsetAlignment) {
+    ASSERT(offsetAlignment > 0);
+    UploadHandle uploadHandle;
+    DAWN_TRY_ASSIGN(uploadHandle, AllocateInternal(allocationSize + offsetAlignment - 1, serial));
+    uint64_t additionalOffset =
+        Align(uploadHandle.startOffset, offsetAlignment) - uploadHandle.startOffset;
+    uploadHandle.mappedBuffer = static_cast<uint8_t*>(uploadHandle.mappedBuffer) + additionalOffset;
+    uploadHandle.startOffset += additionalOffset;
+    return uploadHandle;
+}
 }  // namespace dawn::native
diff --git a/src/dawn/native/DynamicUploader.h b/src/dawn/native/DynamicUploader.h
index 8148005..0317e8d 100644
--- a/src/dawn/native/DynamicUploader.h
+++ b/src/dawn/native/DynamicUploader.h
@@ -27,43 +27,42 @@
 // usage.
 namespace dawn::native {
 
-    struct UploadHandle {
-        uint8_t* mappedBuffer = nullptr;
-        uint64_t startOffset = 0;
-        StagingBufferBase* stagingBuffer = nullptr;
+struct UploadHandle {
+    uint8_t* mappedBuffer = nullptr;
+    uint64_t startOffset = 0;
+    StagingBufferBase* stagingBuffer = nullptr;
+};
+
+class DynamicUploader {
+  public:
+    explicit DynamicUploader(DeviceBase* device);
+    ~DynamicUploader() = default;
+
+    // We add functions to Release StagingBuffers to the DynamicUploader as there's
+    // currently no place to track the allocated staging buffers such that they're freed after
+    // pending commands are finished. This should be changed when better resource allocation is
+    // implemented.
+    void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
+
+    ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
+                                         ExecutionSerial serial,
+                                         uint64_t offsetAlignment);
+    void Deallocate(ExecutionSerial lastCompletedSerial);
+
+  private:
+    static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
+
+    struct RingBuffer {
+        std::unique_ptr<StagingBufferBase> mStagingBuffer;
+        RingBufferAllocator mAllocator;
     };
 
-    class DynamicUploader {
-      public:
-        explicit DynamicUploader(DeviceBase* device);
-        ~DynamicUploader() = default;
+    ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize, ExecutionSerial serial);
 
-        // We add functions to Release StagingBuffers to the DynamicUploader as there's
-        // currently no place to track the allocated staging buffers such that they're freed after
-        // pending commands are finished. This should be changed when better resource allocation is
-        // implemented.
-        void ReleaseStagingBuffer(std::unique_ptr<StagingBufferBase> stagingBuffer);
-
-        ResultOrError<UploadHandle> Allocate(uint64_t allocationSize,
-                                             ExecutionSerial serial,
-                                             uint64_t offsetAlignment);
-        void Deallocate(ExecutionSerial lastCompletedSerial);
-
-      private:
-        static constexpr uint64_t kRingBufferSize = 4 * 1024 * 1024;
-
-        struct RingBuffer {
-            std::unique_ptr<StagingBufferBase> mStagingBuffer;
-            RingBufferAllocator mAllocator;
-        };
-
-        ResultOrError<UploadHandle> AllocateInternal(uint64_t allocationSize,
-                                                     ExecutionSerial serial);
-
-        std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
-        SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
-        DeviceBase* mDevice;
-    };
+    std::vector<std::unique_ptr<RingBuffer>> mRingBuffers;
+    SerialQueue<ExecutionSerial, std::unique_ptr<StagingBufferBase>> mReleasedStagingBuffers;
+    DeviceBase* mDevice;
+};
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_DYNAMICUPLOADER_H_
diff --git a/src/dawn/native/EncodingContext.cpp b/src/dawn/native/EncodingContext.cpp
index 973cdec..1a91a8e 100644
--- a/src/dawn/native/EncodingContext.cpp
+++ b/src/dawn/native/EncodingContext.cpp
@@ -24,197 +24,194 @@
 
 namespace dawn::native {
 
-    EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
-        : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {
+EncodingContext::EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder)
+    : mDevice(device), mTopLevelEncoder(initialEncoder), mCurrentEncoder(initialEncoder) {}
+
+EncodingContext::~EncodingContext() {
+    Destroy();
+}
+
+void EncodingContext::Destroy() {
+    if (mDestroyed) {
+        return;
+    }
+    if (!mWereCommandsAcquired) {
+        FreeCommands(GetIterator());
+    }
+    // If we weren't already finished, then we want to handle an error here so that any calls
+    // to Finish after Destroy will return a meaningful error.
+    if (!IsFinished()) {
+        HandleError(DAWN_FORMAT_VALIDATION_ERROR("Destroyed encoder cannot be finished."));
+    }
+    mDestroyed = true;
+    mCurrentEncoder = nullptr;
+}
+
+CommandIterator EncodingContext::AcquireCommands() {
+    MoveToIterator();
+    ASSERT(!mWereCommandsAcquired);
+    mWereCommandsAcquired = true;
+    return std::move(mIterator);
+}
+
+CommandIterator* EncodingContext::GetIterator() {
+    MoveToIterator();
+    ASSERT(!mWereCommandsAcquired);
+    return &mIterator;
+}
+
+void EncodingContext::MoveToIterator() {
+    CommitCommands(std::move(mPendingCommands));
+    if (!mWasMovedToIterator) {
+        mIterator.AcquireCommandBlocks(std::move(mAllocators));
+        mWasMovedToIterator = true;
+    }
+}
+
+void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
+    // Append in reverse so that the most recently set debug group is printed first, like a
+    // call stack.
+    for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
+        error->AppendDebugGroup(*iter);
     }
 
-    EncodingContext::~EncodingContext() {
-        Destroy();
-    }
-
-    void EncodingContext::Destroy() {
-        if (mDestroyed) {
-            return;
+    if (!IsFinished()) {
+        // Encoding should only generate validation errors.
+        ASSERT(error->GetType() == InternalErrorType::Validation);
+        // If the encoding context is not finished, errors are deferred until
+        // Finish() is called.
+        if (mError == nullptr) {
+            mError = std::move(error);
         }
-        if (!mWereCommandsAcquired) {
-            FreeCommands(GetIterator());
-        }
-        // If we weren't already finished, then we want to handle an error here so that any calls
-        // to Finish after Destroy will return a meaningful error.
-        if (!IsFinished()) {
-            HandleError(DAWN_FORMAT_VALIDATION_ERROR("Destroyed encoder cannot be finished."));
-        }
-        mDestroyed = true;
-        mCurrentEncoder = nullptr;
+    } else {
+        mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
     }
+}
 
-    CommandIterator EncodingContext::AcquireCommands() {
-        MoveToIterator();
-        ASSERT(!mWereCommandsAcquired);
-        mWereCommandsAcquired = true;
-        return std::move(mIterator);
-    }
-
-    CommandIterator* EncodingContext::GetIterator() {
-        MoveToIterator();
-        ASSERT(!mWereCommandsAcquired);
-        return &mIterator;
-    }
-
-    void EncodingContext::MoveToIterator() {
+void EncodingContext::WillBeginRenderPass() {
+    ASSERT(mCurrentEncoder == mTopLevelEncoder);
+    if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
+        // When validation is enabled or indirect parameters require duplication, we are going
+        // to want to capture all commands encoded between and including BeginRenderPassCmd and
+        // EndRenderPassCmd, and defer their sequencing until after we have a chance to insert
+        // any necessary validation or duplication commands. To support this we commit any
+        // current commands now, so that the impending BeginRenderPassCmd starts in a fresh
+        // CommandAllocator.
         CommitCommands(std::move(mPendingCommands));
-        if (!mWasMovedToIterator) {
-            mIterator.AcquireCommandBlocks(std::move(mAllocators));
-            mWasMovedToIterator = true;
-        }
     }
+}
 
-    void EncodingContext::HandleError(std::unique_ptr<ErrorData> error) {
-        // Append in reverse so that the most recently set debug group is printed first, like a
-        // call stack.
-        for (auto iter = mDebugGroupLabels.rbegin(); iter != mDebugGroupLabels.rend(); ++iter) {
-            error->AppendDebugGroup(*iter);
-        }
+void EncodingContext::EnterPass(const ApiObjectBase* passEncoder) {
+    // Assert we're at the top level.
+    ASSERT(mCurrentEncoder == mTopLevelEncoder);
+    ASSERT(passEncoder != nullptr);
 
-        if (!IsFinished()) {
-            // Encoding should only generate validation errors.
-            ASSERT(error->GetType() == InternalErrorType::Validation);
-            // If the encoding context is not finished, errors are deferred until
-            // Finish() is called.
-            if (mError == nullptr) {
-                mError = std::move(error);
-            }
-        } else {
-            mDevice->HandleError(error->GetType(), error->GetFormattedMessage().c_str());
-        }
-    }
+    mCurrentEncoder = passEncoder;
+}
 
-    void EncodingContext::WillBeginRenderPass() {
-        ASSERT(mCurrentEncoder == mTopLevelEncoder);
-        if (mDevice->IsValidationEnabled() ||
-            mDevice->MayRequireDuplicationOfIndirectParameters()) {
-            // When validation is enabled or indirect parameters require duplication, we are going
-            // to want to capture all commands encoded between and including BeginRenderPassCmd and
-            // EndRenderPassCmd, and defer their sequencing util after we have a chance to insert
-            // any necessary validation or duplication commands. To support this we commit any
-            // current commands now, so that the impending BeginRenderPassCmd starts in a fresh
-            // CommandAllocator.
-            CommitCommands(std::move(mPendingCommands));
-        }
-    }
+MaybeError EncodingContext::ExitRenderPass(const ApiObjectBase* passEncoder,
+                                           RenderPassResourceUsageTracker usageTracker,
+                                           CommandEncoder* commandEncoder,
+                                           IndirectDrawMetadata indirectDrawMetadata) {
+    ASSERT(mCurrentEncoder != mTopLevelEncoder);
+    ASSERT(mCurrentEncoder == passEncoder);
 
-    void EncodingContext::EnterPass(const ApiObjectBase* passEncoder) {
-        // Assert we're at the top level.
-        ASSERT(mCurrentEncoder == mTopLevelEncoder);
-        ASSERT(passEncoder != nullptr);
+    mCurrentEncoder = mTopLevelEncoder;
 
-        mCurrentEncoder = passEncoder;
-    }
-
-    MaybeError EncodingContext::ExitRenderPass(const ApiObjectBase* passEncoder,
-                                               RenderPassResourceUsageTracker usageTracker,
-                                               CommandEncoder* commandEncoder,
-                                               IndirectDrawMetadata indirectDrawMetadata) {
-        ASSERT(mCurrentEncoder != mTopLevelEncoder);
-        ASSERT(mCurrentEncoder == passEncoder);
-
-        mCurrentEncoder = mTopLevelEncoder;
-
-        if (mDevice->IsValidationEnabled() ||
-            mDevice->MayRequireDuplicationOfIndirectParameters()) {
-            // With validation enabled, commands were committed just before BeginRenderPassCmd was
-            // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
-            // mPendingCommands contains only the commands from BeginRenderPassCmd to
-            // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
-            // the validation encoder a chance to insert its commands first.
-            CommandAllocator renderCommands = std::move(mPendingCommands);
-            DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
-                                                          &indirectDrawMetadata));
-            CommitCommands(std::move(mPendingCommands));
-            CommitCommands(std::move(renderCommands));
-        }
-
-        mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
-        return {};
-    }
-
-    void EncodingContext::ExitComputePass(const ApiObjectBase* passEncoder,
-                                          ComputePassResourceUsage usages) {
-        ASSERT(mCurrentEncoder != mTopLevelEncoder);
-        ASSERT(mCurrentEncoder == passEncoder);
-
-        mCurrentEncoder = mTopLevelEncoder;
-        mComputePassUsages.push_back(std::move(usages));
-    }
-
-    void EncodingContext::EnsurePassExited(const ApiObjectBase* passEncoder) {
-        if (mCurrentEncoder != mTopLevelEncoder && mCurrentEncoder == passEncoder) {
-            // The current pass encoder is being deleted. Implicitly end the pass with an error.
-            mCurrentEncoder = mTopLevelEncoder;
-            HandleError(DAWN_FORMAT_VALIDATION_ERROR(
-                "Command buffer recording ended before %s was ended.", passEncoder));
-        }
-    }
-
-    const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
-        ASSERT(!mWereRenderPassUsagesAcquired);
-        return mRenderPassUsages;
-    }
-
-    RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
-        ASSERT(!mWereRenderPassUsagesAcquired);
-        mWereRenderPassUsagesAcquired = true;
-        return std::move(mRenderPassUsages);
-    }
-
-    const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
-        ASSERT(!mWereComputePassUsagesAcquired);
-        return mComputePassUsages;
-    }
-
-    ComputePassUsages EncodingContext::AcquireComputePassUsages() {
-        ASSERT(!mWereComputePassUsagesAcquired);
-        mWereComputePassUsagesAcquired = true;
-        return std::move(mComputePassUsages);
-    }
-
-    void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
-        mDebugGroupLabels.emplace_back(groupLabel);
-    }
-
-    void EncodingContext::PopDebugGroupLabel() {
-        mDebugGroupLabels.pop_back();
-    }
-
-    MaybeError EncodingContext::Finish() {
-        DAWN_INVALID_IF(IsFinished(), "Command encoding already finished.");
-
-        const ApiObjectBase* currentEncoder = mCurrentEncoder;
-        const ApiObjectBase* topLevelEncoder = mTopLevelEncoder;
-
-        // Even if finish validation fails, it is now invalid to call any encoding commands,
-        // so we clear the encoders. Note: mTopLevelEncoder == nullptr is used as a flag for
-        // if Finish() has been called.
-        mCurrentEncoder = nullptr;
-        mTopLevelEncoder = nullptr;
+    if (mDevice->IsValidationEnabled() || mDevice->MayRequireDuplicationOfIndirectParameters()) {
+        // With validation enabled, commands were committed just before BeginRenderPassCmd was
+        // encoded by our RenderPassEncoder (see WillBeginRenderPass above). This means
+        // mPendingCommands contains only the commands from BeginRenderPassCmd to
+        // EndRenderPassCmd, inclusive. Now we swap out this allocator with a fresh one to give
+        // the validation encoder a chance to insert its commands first.
+        CommandAllocator renderCommands = std::move(mPendingCommands);
+        DAWN_TRY(EncodeIndirectDrawValidationCommands(mDevice, commandEncoder, &usageTracker,
+                                                      &indirectDrawMetadata));
         CommitCommands(std::move(mPendingCommands));
-
-        if (mError != nullptr) {
-            return std::move(mError);
-        }
-        DAWN_INVALID_IF(currentEncoder != topLevelEncoder,
-                        "Command buffer recording ended before %s was ended.", currentEncoder);
-        return {};
+        CommitCommands(std::move(renderCommands));
     }
 
-    void EncodingContext::CommitCommands(CommandAllocator allocator) {
-        if (!allocator.IsEmpty()) {
-            mAllocators.push_back(std::move(allocator));
-        }
-    }
+    mRenderPassUsages.push_back(usageTracker.AcquireResourceUsage());
+    return {};
+}
 
-    bool EncodingContext::IsFinished() const {
-        return mTopLevelEncoder == nullptr;
+void EncodingContext::ExitComputePass(const ApiObjectBase* passEncoder,
+                                      ComputePassResourceUsage usages) {
+    ASSERT(mCurrentEncoder != mTopLevelEncoder);
+    ASSERT(mCurrentEncoder == passEncoder);
+
+    mCurrentEncoder = mTopLevelEncoder;
+    mComputePassUsages.push_back(std::move(usages));
+}
+
+void EncodingContext::EnsurePassExited(const ApiObjectBase* passEncoder) {
+    if (mCurrentEncoder != mTopLevelEncoder && mCurrentEncoder == passEncoder) {
+        // The current pass encoder is being deleted. Implicitly end the pass with an error.
+        mCurrentEncoder = mTopLevelEncoder;
+        HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+            "Command buffer recording ended before %s was ended.", passEncoder));
     }
+}
+
+const RenderPassUsages& EncodingContext::GetRenderPassUsages() const {
+    ASSERT(!mWereRenderPassUsagesAcquired);
+    return mRenderPassUsages;
+}
+
+RenderPassUsages EncodingContext::AcquireRenderPassUsages() {
+    ASSERT(!mWereRenderPassUsagesAcquired);
+    mWereRenderPassUsagesAcquired = true;
+    return std::move(mRenderPassUsages);
+}
+
+const ComputePassUsages& EncodingContext::GetComputePassUsages() const {
+    ASSERT(!mWereComputePassUsagesAcquired);
+    return mComputePassUsages;
+}
+
+ComputePassUsages EncodingContext::AcquireComputePassUsages() {
+    ASSERT(!mWereComputePassUsagesAcquired);
+    mWereComputePassUsagesAcquired = true;
+    return std::move(mComputePassUsages);
+}
+
+void EncodingContext::PushDebugGroupLabel(const char* groupLabel) {
+    mDebugGroupLabels.emplace_back(groupLabel);
+}
+
+void EncodingContext::PopDebugGroupLabel() {
+    mDebugGroupLabels.pop_back();
+}
+
+MaybeError EncodingContext::Finish() {
+    DAWN_INVALID_IF(IsFinished(), "Command encoding already finished.");
+
+    const ApiObjectBase* currentEncoder = mCurrentEncoder;
+    const ApiObjectBase* topLevelEncoder = mTopLevelEncoder;
+
+    // Even if finish validation fails, it is now invalid to call any encoding commands,
+    // so we clear the encoders. Note: mTopLevelEncoder == nullptr is used as a flag for
+    // if Finish() has been called.
+    mCurrentEncoder = nullptr;
+    mTopLevelEncoder = nullptr;
+    CommitCommands(std::move(mPendingCommands));
+
+    if (mError != nullptr) {
+        return std::move(mError);
+    }
+    DAWN_INVALID_IF(currentEncoder != topLevelEncoder,
+                    "Command buffer recording ended before %s was ended.", currentEncoder);
+    return {};
+}
+
+void EncodingContext::CommitCommands(CommandAllocator allocator) {
+    if (!allocator.IsEmpty()) {
+        mAllocators.push_back(std::move(allocator));
+    }
+}
+
+bool EncodingContext::IsFinished() const {
+    return mTopLevelEncoder == nullptr;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/EncodingContext.h b/src/dawn/native/EncodingContext.h
index 5b79aa3..360790a 100644
--- a/src/dawn/native/EncodingContext.h
+++ b/src/dawn/native/EncodingContext.h
@@ -29,156 +29,153 @@
 
 namespace dawn::native {
 
-    class CommandEncoder;
-    class DeviceBase;
-    class ApiObjectBase;
+class CommandEncoder;
+class DeviceBase;
+class ApiObjectBase;
 
-    // Base class for allocating/iterating commands.
-    // It performs error tracking as well as encoding state for render/compute passes.
-    class EncodingContext {
-      public:
-        EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder);
-        ~EncodingContext();
+// Base class for allocating/iterating commands.
+// It performs error tracking as well as encoding state for render/compute passes.
+class EncodingContext {
+  public:
+    EncodingContext(DeviceBase* device, const ApiObjectBase* initialEncoder);
+    ~EncodingContext();
 
-        // Marks the encoding context as destroyed so that any future encodes will fail, and all
-        // encoded commands are released.
-        void Destroy();
+    // Marks the encoding context as destroyed so that any future encodes will fail, and all
+    // encoded commands are released.
+    void Destroy();
 
-        CommandIterator AcquireCommands();
-        CommandIterator* GetIterator();
+    CommandIterator AcquireCommands();
+    CommandIterator* GetIterator();
 
-        // Functions to handle encoder errors
-        void HandleError(std::unique_ptr<ErrorData> error);
+    // Functions to handle encoder errors
+    void HandleError(std::unique_ptr<ErrorData> error);
 
-        inline bool ConsumedError(MaybeError maybeError) {
-            if (DAWN_UNLIKELY(maybeError.IsError())) {
-                HandleError(maybeError.AcquireError());
-                return true;
-            }
-            return false;
-        }
-
-        template <typename... Args>
-        inline bool ConsumedError(MaybeError maybeError,
-                                  const char* formatStr,
-                                  const Args&... args) {
-            if (DAWN_UNLIKELY(maybeError.IsError())) {
-                std::unique_ptr<ErrorData> error = maybeError.AcquireError();
-                if (error->GetType() == InternalErrorType::Validation) {
-                    std::string out;
-                    absl::UntypedFormatSpec format(formatStr);
-                    if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
-                        error->AppendContext(std::move(out));
-                    } else {
-                        error->AppendContext(absl::StrFormat(
-                            "[Failed to format error message: \"%s\"].", formatStr));
-                    }
-                }
-                HandleError(std::move(error));
-                return true;
-            }
-            return false;
-        }
-
-        inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
-            if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
-                if (mDestroyed) {
-                    HandleError(
-                        DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
-                } else if (mCurrentEncoder != mTopLevelEncoder) {
-                    // The top level encoder was used when a pass encoder was current.
-                    HandleError(DAWN_FORMAT_VALIDATION_ERROR(
-                        "Command cannot be recorded while %s is active.", mCurrentEncoder));
-                } else {
-                    HandleError(DAWN_FORMAT_VALIDATION_ERROR(
-                        "Recording in an error or already ended %s.", encoder));
-                }
-                return false;
-            }
+    inline bool ConsumedError(MaybeError maybeError) {
+        if (DAWN_UNLIKELY(maybeError.IsError())) {
+            HandleError(maybeError.AcquireError());
             return true;
         }
+        return false;
+    }
 
-        template <typename EncodeFunction>
-        inline bool TryEncode(const ApiObjectBase* encoder, EncodeFunction&& encodeFunction) {
-            if (!CheckCurrentEncoder(encoder)) {
-                return false;
+    template <typename... Args>
+    inline bool ConsumedError(MaybeError maybeError, const char* formatStr, const Args&... args) {
+        if (DAWN_UNLIKELY(maybeError.IsError())) {
+            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+            if (error->GetType() == InternalErrorType::Validation) {
+                std::string out;
+                absl::UntypedFormatSpec format(formatStr);
+                if (absl::FormatUntyped(&out, format, {absl::FormatArg(args)...})) {
+                    error->AppendContext(std::move(out));
+                } else {
+                    error->AppendContext(
+                        absl::StrFormat("[Failed to format error message: \"%s\"].", formatStr));
+                }
             }
-            ASSERT(!mWasMovedToIterator);
-            return !ConsumedError(encodeFunction(&mPendingCommands));
+            HandleError(std::move(error));
+            return true;
         }
+        return false;
+    }
 
-        template <typename EncodeFunction, typename... Args>
-        inline bool TryEncode(const ApiObjectBase* encoder,
-                              EncodeFunction&& encodeFunction,
-                              const char* formatStr,
-                              const Args&... args) {
-            if (!CheckCurrentEncoder(encoder)) {
-                return false;
+    inline bool CheckCurrentEncoder(const ApiObjectBase* encoder) {
+        if (DAWN_UNLIKELY(encoder != mCurrentEncoder)) {
+            if (mDestroyed) {
+                HandleError(DAWN_FORMAT_VALIDATION_ERROR("Recording in a destroyed %s.", encoder));
+            } else if (mCurrentEncoder != mTopLevelEncoder) {
+                // The top level encoder was used when a pass encoder was current.
+                HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                    "Command cannot be recorded while %s is active.", mCurrentEncoder));
+            } else {
+                HandleError(DAWN_FORMAT_VALIDATION_ERROR(
+                    "Recording in an error or already ended %s.", encoder));
             }
-            ASSERT(!mWasMovedToIterator);
-            return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
+            return false;
         }
+        return true;
+    }
 
-        // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
-        // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
-        // failed validation before the BeginRenderPassCmd could be encoded.
-        void WillBeginRenderPass();
+    template <typename EncodeFunction>
+    inline bool TryEncode(const ApiObjectBase* encoder, EncodeFunction&& encodeFunction) {
+        if (!CheckCurrentEncoder(encoder)) {
+            return false;
+        }
+        ASSERT(!mWasMovedToIterator);
+        return !ConsumedError(encodeFunction(&mPendingCommands));
+    }
 
-        // Functions to set current encoder state
-        void EnterPass(const ApiObjectBase* passEncoder);
-        MaybeError ExitRenderPass(const ApiObjectBase* passEncoder,
-                                  RenderPassResourceUsageTracker usageTracker,
-                                  CommandEncoder* commandEncoder,
-                                  IndirectDrawMetadata indirectDrawMetadata);
-        void ExitComputePass(const ApiObjectBase* passEncoder, ComputePassResourceUsage usages);
-        MaybeError Finish();
+    template <typename EncodeFunction, typename... Args>
+    inline bool TryEncode(const ApiObjectBase* encoder,
+                          EncodeFunction&& encodeFunction,
+                          const char* formatStr,
+                          const Args&... args) {
+        if (!CheckCurrentEncoder(encoder)) {
+            return false;
+        }
+        ASSERT(!mWasMovedToIterator);
+        return !ConsumedError(encodeFunction(&mPendingCommands), formatStr, args...);
+    }
 
-        // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the
-        // mCurrentEncoder.
-        void EnsurePassExited(const ApiObjectBase* passEncoder);
+    // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this
+    // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup
+    // failed validation before the BeginRenderPassCmd could be encoded.
+    void WillBeginRenderPass();
 
-        const RenderPassUsages& GetRenderPassUsages() const;
-        const ComputePassUsages& GetComputePassUsages() const;
-        RenderPassUsages AcquireRenderPassUsages();
-        ComputePassUsages AcquireComputePassUsages();
+    // Functions to set current encoder state
+    void EnterPass(const ApiObjectBase* passEncoder);
+    MaybeError ExitRenderPass(const ApiObjectBase* passEncoder,
+                              RenderPassResourceUsageTracker usageTracker,
+                              CommandEncoder* commandEncoder,
+                              IndirectDrawMetadata indirectDrawMetadata);
+    void ExitComputePass(const ApiObjectBase* passEncoder, ComputePassResourceUsage usages);
+    MaybeError Finish();
 
-        void PushDebugGroupLabel(const char* groupLabel);
-        void PopDebugGroupLabel();
+    // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the
+    // mCurrentEncoder.
+    void EnsurePassExited(const ApiObjectBase* passEncoder);
 
-      private:
-        void CommitCommands(CommandAllocator allocator);
+    const RenderPassUsages& GetRenderPassUsages() const;
+    const ComputePassUsages& GetComputePassUsages() const;
+    RenderPassUsages AcquireRenderPassUsages();
+    ComputePassUsages AcquireComputePassUsages();
 
-        bool IsFinished() const;
-        void MoveToIterator();
+    void PushDebugGroupLabel(const char* groupLabel);
+    void PopDebugGroupLabel();
 
-        DeviceBase* mDevice;
+  private:
+    void CommitCommands(CommandAllocator allocator);
 
-        // There can only be two levels of encoders. Top-level and render/compute pass.
-        // The top level encoder is the encoder the EncodingContext is created with.
-        // It doubles as flag to check if encoding has been Finished.
-        const ApiObjectBase* mTopLevelEncoder;
-        // The current encoder must be the same as the encoder provided to TryEncode,
-        // otherwise an error is produced. It may be nullptr if the EncodingContext is an error.
-        // The current encoder changes with Enter/ExitPass which should be called by
-        // CommandEncoder::Begin/EndPass.
-        const ApiObjectBase* mCurrentEncoder;
+    bool IsFinished() const;
+    void MoveToIterator();
 
-        RenderPassUsages mRenderPassUsages;
-        bool mWereRenderPassUsagesAcquired = false;
-        ComputePassUsages mComputePassUsages;
-        bool mWereComputePassUsagesAcquired = false;
+    DeviceBase* mDevice;
 
-        CommandAllocator mPendingCommands;
+    // There can only be two levels of encoders. Top-level and render/compute pass.
+    // The top level encoder is the encoder the EncodingContext is created with.
+    // It doubles as flag to check if encoding has been Finished.
+    const ApiObjectBase* mTopLevelEncoder;
+    // The current encoder must be the same as the encoder provided to TryEncode,
+    // otherwise an error is produced. It may be nullptr if the EncodingContext is an error.
+    // The current encoder changes with Enter/ExitPass which should be called by
+    // CommandEncoder::Begin/EndPass.
+    const ApiObjectBase* mCurrentEncoder;
 
-        std::vector<CommandAllocator> mAllocators;
-        CommandIterator mIterator;
-        bool mWasMovedToIterator = false;
-        bool mWereCommandsAcquired = false;
-        bool mDestroyed = false;
+    RenderPassUsages mRenderPassUsages;
+    bool mWereRenderPassUsagesAcquired = false;
+    ComputePassUsages mComputePassUsages;
+    bool mWereComputePassUsagesAcquired = false;
 
-        std::unique_ptr<ErrorData> mError;
-        std::vector<std::string> mDebugGroupLabels;
-    };
+    CommandAllocator mPendingCommands;
+
+    std::vector<CommandAllocator> mAllocators;
+    CommandIterator mIterator;
+    bool mWasMovedToIterator = false;
+    bool mWereCommandsAcquired = false;
+    bool mDestroyed = false;
+
+    std::unique_ptr<ErrorData> mError;
+    std::vector<std::string> mDebugGroupLabels;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/EnumClassBitmasks.h b/src/dawn/native/EnumClassBitmasks.h
index 51453cd..1410155 100644
--- a/src/dawn/native/EnumClassBitmasks.h
+++ b/src/dawn/native/EnumClassBitmasks.h
@@ -19,20 +19,20 @@
 
 namespace dawn::native {
 
-    // EnumClassBitmmasks is a helper in the dawn:: namespace.
-    // Re-export it in the dawn_native namespace.
-    DAWN_IMPORT_BITMASK_OPERATORS
+// EnumClassBitmasks is a helper in the dawn:: namespace.
+// Re-export it in the dawn_native namespace.
+DAWN_IMPORT_BITMASK_OPERATORS
 
-    // Specify this for usage with EnumMaskIterator
-    template <typename T>
-    struct EnumBitmaskSize {
-        static constexpr unsigned value = 0;
-    };
+// Specify this for usage with EnumMaskIterator
+template <typename T>
+struct EnumBitmaskSize {
+    static constexpr unsigned value = 0;
+};
 
-    template <typename T>
-    constexpr bool HasOneBit(T value) {
-        return HasZeroOrOneBits(value) && value != T(0);
-    }
+template <typename T>
+constexpr bool HasOneBit(T value) {
+    return HasZeroOrOneBits(value) && value != T(0);
+}
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/EnumMaskIterator.h b/src/dawn/native/EnumMaskIterator.h
index ec23285..1b0e89a 100644
--- a/src/dawn/native/EnumMaskIterator.h
+++ b/src/dawn/native/EnumMaskIterator.h
@@ -20,63 +20,54 @@
 
 namespace dawn::native {
 
-    template <typename T>
-    class EnumMaskIterator final {
-        static constexpr size_t N = EnumBitmaskSize<T>::value;
-        static_assert(N > 0);
+template <typename T>
+class EnumMaskIterator final {
+    static constexpr size_t N = EnumBitmaskSize<T>::value;
+    static_assert(N > 0);
 
-        using U = std::underlying_type_t<T>;
+    using U = std::underlying_type_t<T>;
 
+  public:
+    explicit EnumMaskIterator(const T& mask)
+        : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
+        // If you hit this ASSERT it means that you forgot to update EnumBitmaskSize<T>::value;
+        ASSERT(U(mask) == 0 || Log2(uint64_t(U(mask))) < N);
+    }
+
+    class Iterator final {
       public:
-        explicit EnumMaskIterator(const T& mask)
-            : mBitSetIterator(std::bitset<N>(static_cast<U>(mask))) {
-            // If you hit this ASSERT it means that you forgot to update EnumBitmaskSize<T>::value;
-            ASSERT(U(mask) == 0 || Log2(uint64_t(U(mask))) < N);
+        explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {}
+
+        Iterator& operator++() {
+            ++mIter;
+            return *this;
         }
 
-        class Iterator final {
-          public:
-            explicit Iterator(const typename BitSetIterator<N, U>::Iterator& iter) : mIter(iter) {
-            }
+        bool operator==(const Iterator& other) const { return mIter == other.mIter; }
 
-            Iterator& operator++() {
-                ++mIter;
-                return *this;
-            }
+        bool operator!=(const Iterator& other) const { return mIter != other.mIter; }
 
-            bool operator==(const Iterator& other) const {
-                return mIter == other.mIter;
-            }
-
-            bool operator!=(const Iterator& other) const {
-                return mIter != other.mIter;
-            }
-
-            T operator*() const {
-                U value = *mIter;
-                return static_cast<T>(U(1) << value);
-            }
-
-          private:
-            typename BitSetIterator<N, U>::Iterator mIter;
-        };
-
-        Iterator begin() const {
-            return Iterator(mBitSetIterator.begin());
-        }
-
-        Iterator end() const {
-            return Iterator(mBitSetIterator.end());
+        T operator*() const {
+            U value = *mIter;
+            return static_cast<T>(U(1) << value);
         }
 
       private:
-        BitSetIterator<N, U> mBitSetIterator;
+        typename BitSetIterator<N, U>::Iterator mIter;
     };
 
-    template <typename T>
-    EnumMaskIterator<T> IterateEnumMask(const T& mask) {
-        return EnumMaskIterator<T>(mask);
-    }
+    Iterator begin() const { return Iterator(mBitSetIterator.begin()); }
+
+    Iterator end() const { return Iterator(mBitSetIterator.end()); }
+
+  private:
+    BitSetIterator<N, U> mBitSetIterator;
+};
+
+template <typename T>
+EnumMaskIterator<T> IterateEnumMask(const T& mask) {
+    return EnumMaskIterator<T>(mask);
+}
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Error.cpp b/src/dawn/native/Error.cpp
index d524a32..2d06da2 100644
--- a/src/dawn/native/Error.cpp
+++ b/src/dawn/native/Error.cpp
@@ -19,46 +19,46 @@
 
 namespace dawn::native {
 
-    void IgnoreErrors(MaybeError maybeError) {
-        if (maybeError.IsError()) {
-            std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
-            // During shutdown and destruction, device lost errors can be ignored.
-            // We can also ignore other unexpected internal errors on shut down and treat it as
-            // device lost so that we can continue with destruction.
-            ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
-                   errorData->GetType() == InternalErrorType::Internal);
-        }
+void IgnoreErrors(MaybeError maybeError) {
+    if (maybeError.IsError()) {
+        std::unique_ptr<ErrorData> errorData = maybeError.AcquireError();
+        // During shutdown and destruction, device lost errors can be ignored.
+        // We can also ignore other unexpected internal errors on shutdown and treat it as
+        // device lost so that we can continue with destruction.
+        ASSERT(errorData->GetType() == InternalErrorType::DeviceLost ||
+               errorData->GetType() == InternalErrorType::Internal);
     }
+}
 
-    wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
-        switch (type) {
-            case InternalErrorType::Validation:
-                return wgpu::ErrorType::Validation;
-            case InternalErrorType::OutOfMemory:
-                return wgpu::ErrorType::OutOfMemory;
+wgpu::ErrorType ToWGPUErrorType(InternalErrorType type) {
+    switch (type) {
+        case InternalErrorType::Validation:
+            return wgpu::ErrorType::Validation;
+        case InternalErrorType::OutOfMemory:
+            return wgpu::ErrorType::OutOfMemory;
 
-            // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
-            // the device at the API level to be lost, so treat it like a DeviceLost error.
-            case InternalErrorType::Internal:
-            case InternalErrorType::DeviceLost:
-                return wgpu::ErrorType::DeviceLost;
+        // There is no equivalent of Internal errors in the WebGPU API. Internal errors cause
+        // the device at the API level to be lost, so treat it like a DeviceLost error.
+        case InternalErrorType::Internal:
+        case InternalErrorType::DeviceLost:
+            return wgpu::ErrorType::DeviceLost;
 
-            default:
-                return wgpu::ErrorType::Unknown;
-        }
+        default:
+            return wgpu::ErrorType::Unknown;
     }
+}
 
-    InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
-        switch (type) {
-            case wgpu::ErrorType::Validation:
-                return InternalErrorType::Validation;
-            case wgpu::ErrorType::OutOfMemory:
-                return InternalErrorType::OutOfMemory;
-            case wgpu::ErrorType::DeviceLost:
-                return InternalErrorType::DeviceLost;
-            default:
-                return InternalErrorType::Internal;
-        }
+InternalErrorType FromWGPUErrorType(wgpu::ErrorType type) {
+    switch (type) {
+        case wgpu::ErrorType::Validation:
+            return InternalErrorType::Validation;
+        case wgpu::ErrorType::OutOfMemory:
+            return InternalErrorType::OutOfMemory;
+        case wgpu::ErrorType::DeviceLost:
+            return InternalErrorType::DeviceLost;
+        default:
+            return InternalErrorType::Internal;
     }
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Error.h b/src/dawn/native/Error.h
index 9ab3ee2..0b71644 100644
--- a/src/dawn/native/Error.h
+++ b/src/dawn/native/Error.h
@@ -26,47 +26,47 @@
 
 namespace dawn::native {
 
-    enum class InternalErrorType : uint32_t { Validation, DeviceLost, Internal, OutOfMemory };
+enum class InternalErrorType : uint32_t { Validation, DeviceLost, Internal, OutOfMemory };
 
-    // MaybeError and ResultOrError are meant to be used as return value for function that are not
-    // expected to, but might fail. The handling of error is potentially much slower than successes.
-    using MaybeError = Result<void, ErrorData>;
+// MaybeError and ResultOrError are meant to be used as return values for functions that are not
+// expected to, but might, fail. The handling of errors is potentially much slower than successes.
+using MaybeError = Result<void, ErrorData>;
 
-    template <typename T>
-    using ResultOrError = Result<T, ErrorData>;
+template <typename T>
+using ResultOrError = Result<T, ErrorData>;
 
-    // Returning a success is done like so:
-    //   return {}; // for Error
-    //   return SomethingOfTypeT; // for ResultOrError<T>
-    //
-    // Returning an error is done via:
-    //   return DAWN_MAKE_ERROR(errorType, "My error message");
-    //
-    // but shorthand version for specific error types are preferred:
-    //   return DAWN_VALIDATION_ERROR("My error message");
-    //
-    // There are different types of errors that should be used for different purpose:
-    //
-    //   - Validation: these are errors that show the user did something bad, which causes the
-    //     whole call to be a no-op. It's most commonly found in the frontend but there can be some
-    //     backend specific validation in non-conformant backends too.
-    //
-    //   - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
-    //     This is similar to validation errors in that the call becomes a no-op and returns an
-    //     error object, but is reported separated from validation to the user.
-    //
-    //   - Device loss: the backend driver reported that the GPU has been lost, which means all
-    //     previous commands magically disappeared and the only thing left to do is clean up.
-    //     Note: Device loss should be used rarely and in most case you want to use Internal
-    //     instead.
-    //
-    //   - Internal: something happened that the backend didn't expect, and it doesn't know
-    //     how to recover from that situation. This causes the device to be lost, but is separate
-    //     from device loss, because the GPU execution is still happening so we need to clean up
-    //     more gracefully.
-    //
-    //   - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
-    //     more clarity.
+// Returning a success is done like so:
+//   return {}; // for Error
+//   return SomethingOfTypeT; // for ResultOrError<T>
+//
+// Returning an error is done via:
+//   return DAWN_MAKE_ERROR(errorType, "My error message");
+//
+// but shorthand version for specific error types are preferred:
+//   return DAWN_VALIDATION_ERROR("My error message");
+//
+// There are different types of errors that should be used for different purposes:
+//
+//   - Validation: these are errors that show the user did something bad, which causes the
+//     whole call to be a no-op. It's most commonly found in the frontend but there can be some
+//     backend specific validation in non-conformant backends too.
+//
+//   - Out of memory: creation of a Buffer or Texture failed because there isn't enough memory.
+//     This is similar to validation errors in that the call becomes a no-op and returns an
+//     error object, but is reported separately from validation to the user.
+//
+//   - Device loss: the backend driver reported that the GPU has been lost, which means all
+//     previous commands magically disappeared and the only thing left to do is clean up.
+//     Note: Device loss should be used rarely and in most cases you want to use Internal
+//     instead.
+//
+//   - Internal: something happened that the backend didn't expect, and it doesn't know
+//     how to recover from that situation. This causes the device to be lost, but is separate
+//     from device loss, because the GPU execution is still happening so we need to clean up
+//     more gracefully.
+//
+//   - Unimplemented: same as Internal except it puts "unimplemented" in the error message for
+//     more clarity.
 
 #define DAWN_MAKE_ERROR(TYPE, MESSAGE) \
     ::dawn::native::ErrorData::Create(TYPE, MESSAGE, __FILE__, __func__, __LINE__)
@@ -108,9 +108,9 @@
 #define DAWN_CONCAT2(x, y) DAWN_CONCAT1(x, y)
 #define DAWN_LOCAL_VAR DAWN_CONCAT2(_localVar, __LINE__)
 
-    // When Errors aren't handled explicitly, calls to functions returning errors should be
-    // wrapped in an DAWN_TRY. It will return the error if any, otherwise keep executing
-    // the current function.
+// When Errors aren't handled explicitly, calls to functions returning errors should be
+// wrapped in a DAWN_TRY. It will return the error if any, otherwise keep executing
+// the current function.
 #define DAWN_TRY(EXPR) DAWN_TRY_WITH_CLEANUP(EXPR, {})
 
 #define DAWN_TRY_CONTEXT(EXPR, ...) \
@@ -129,39 +129,39 @@
     for (;;)                                                                                  \
     break
 
-    // DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
-    // any, to VAR.
+// DAWN_TRY_ASSIGN is the same as DAWN_TRY for ResultOrError and assigns the success value, if
+// any, to VAR.
 #define DAWN_TRY_ASSIGN(VAR, EXPR) DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {})
 #define DAWN_TRY_ASSIGN_CONTEXT(VAR, EXPR, ...) \
     DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, { error->AppendContext(absl::StrFormat(__VA_ARGS__)); })
 
-    // Argument helpers are used to determine which macro implementations should be called when
-    // overloading with different number of variables.
+// Argument helpers are used to determine which macro implementations should be called when
+// overloading with different number of variables.
 #define DAWN_ERROR_UNIMPLEMENTED_MACRO_(...) UNREACHABLE()
 #define DAWN_ERROR_GET_5TH_ARG_HELPER_(_1, _2, _3, _4, NAME, ...) NAME
 #define DAWN_ERROR_GET_5TH_ARG_(args) DAWN_ERROR_GET_5TH_ARG_HELPER_ args
 
-    // DAWN_TRY_ASSIGN_WITH_CLEANUP is overloaded with 2 version so that users can override the
-    // return value of the macro when necessary. This is particularly useful if the function
-    // calling the macro may want to return void instead of the error, i.e. in a test where we may
-    // just want to assert and fail if the assign cannot go through. In both the cleanup and return
-    // clauses, users can use the `error` variable to access the pointer to the acquired error.
-    //
-    // Example usages:
-    //     3 Argument Case:
-    //          Result res;
-    //          DAWN_TRY_ASSIGN_WITH_CLEANUP(
-    //              res, GetResultOrErrorFunction(), { AddAdditionalErrorInformation(error.get()); }
-    //          );
-    //
-    //     4 Argument Case:
-    //          bool FunctionThatReturnsBool() {
-    //              DAWN_TRY_ASSIGN_WITH_CLEANUP(
-    //                  res, GetResultOrErrorFunction(),
-    //                  { AddAdditionalErrorInformation(error.get()); },
-    //                  false
-    //              );
-    //          }
+// DAWN_TRY_ASSIGN_WITH_CLEANUP is overloaded with 2 versions so that users can override the
+// return value of the macro when necessary. This is particularly useful if the function
+// calling the macro may want to return void instead of the error, i.e. in a test where we may
+// just want to assert and fail if the assign cannot go through. In both the cleanup and return
+// clauses, users can use the `error` variable to access the pointer to the acquired error.
+//
+// Example usages:
+//     3 Argument Case:
+//          Result res;
+//          DAWN_TRY_ASSIGN_WITH_CLEANUP(
+//              res, GetResultOrErrorFunction(), { AddAdditionalErrorInformation(error.get()); }
+//          );
+//
+//     4 Argument Case:
+//          bool FunctionThatReturnsBool() {
+//              DAWN_TRY_ASSIGN_WITH_CLEANUP(
+//                  res, GetResultOrErrorFunction(),
+//                  { AddAdditionalErrorInformation(error.get()); },
+//                  false
+//              );
+//          }
 #define DAWN_TRY_ASSIGN_WITH_CLEANUP(...)                                       \
     DAWN_ERROR_GET_5TH_ARG_((__VA_ARGS__, DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_4_, \
                              DAWN_TRY_ASSIGN_WITH_CLEANUP_IMPL_3_,              \
@@ -185,11 +185,11 @@
     for (;;)                                                                  \
     break
 
-    // Assert that errors are device loss so that we can continue with destruction
-    void IgnoreErrors(MaybeError maybeError);
+// Assert that errors are device loss so that we can continue with destruction
+void IgnoreErrors(MaybeError maybeError);
 
-    wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
-    InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
+wgpu::ErrorType ToWGPUErrorType(InternalErrorType type);
+InternalErrorType FromWGPUErrorType(wgpu::ErrorType type);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ErrorData.cpp b/src/dawn/native/ErrorData.cpp
index b23640c..951200b 100644
--- a/src/dawn/native/ErrorData.cpp
+++ b/src/dawn/native/ErrorData.cpp
@@ -22,99 +22,98 @@
 
 namespace dawn::native {
 
-    std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
-                                                 std::string message,
-                                                 const char* file,
-                                                 const char* function,
-                                                 int line) {
-        std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
-        error->AppendBacktrace(file, function, line);
-        return error;
-    }
+std::unique_ptr<ErrorData> ErrorData::Create(InternalErrorType type,
+                                             std::string message,
+                                             const char* file,
+                                             const char* function,
+                                             int line) {
+    std::unique_ptr<ErrorData> error = std::make_unique<ErrorData>(type, message);
+    error->AppendBacktrace(file, function, line);
+    return error;
+}
 
-    ErrorData::ErrorData(InternalErrorType type, std::string message)
-        : mType(type), mMessage(std::move(message)) {
-    }
+ErrorData::ErrorData(InternalErrorType type, std::string message)
+    : mType(type), mMessage(std::move(message)) {}
 
-    void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
-        BacktraceRecord record;
-        record.file = file;
-        record.function = function;
-        record.line = line;
+void ErrorData::AppendBacktrace(const char* file, const char* function, int line) {
+    BacktraceRecord record;
+    record.file = file;
+    record.function = function;
+    record.line = line;
 
-        mBacktrace.push_back(std::move(record));
-    }
+    mBacktrace.push_back(std::move(record));
+}
 
-    void ErrorData::AppendContext(std::string context) {
-        mContexts.push_back(std::move(context));
-    }
+void ErrorData::AppendContext(std::string context) {
+    mContexts.push_back(std::move(context));
+}
 
-    void ErrorData::AppendDebugGroup(std::string label) {
-        mDebugGroups.push_back(std::move(label));
-    }
+void ErrorData::AppendDebugGroup(std::string label) {
+    mDebugGroups.push_back(std::move(label));
+}
 
-    void ErrorData::AppendBackendMessage(std::string message) {
-        mBackendMessages.push_back(std::move(message));
-    }
+void ErrorData::AppendBackendMessage(std::string message) {
+    mBackendMessages.push_back(std::move(message));
+}
 
-    InternalErrorType ErrorData::GetType() const {
-        return mType;
-    }
+InternalErrorType ErrorData::GetType() const {
+    return mType;
+}
 
-    const std::string& ErrorData::GetMessage() const {
-        return mMessage;
-    }
+const std::string& ErrorData::GetMessage() const {
+    return mMessage;
+}
 
-    const std::vector<ErrorData::BacktraceRecord>& ErrorData::GetBacktrace() const {
-        return mBacktrace;
-    }
+const std::vector<ErrorData::BacktraceRecord>& ErrorData::GetBacktrace() const {
+    return mBacktrace;
+}
 
-    const std::vector<std::string>& ErrorData::GetContexts() const {
-        return mContexts;
-    }
+const std::vector<std::string>& ErrorData::GetContexts() const {
+    return mContexts;
+}
 
-    const std::vector<std::string>& ErrorData::GetDebugGroups() const {
-        return mDebugGroups;
-    }
+const std::vector<std::string>& ErrorData::GetDebugGroups() const {
+    return mDebugGroups;
+}
 
-    const std::vector<std::string>& ErrorData::GetBackendMessages() const {
-        return mBackendMessages;
-    }
+const std::vector<std::string>& ErrorData::GetBackendMessages() const {
+    return mBackendMessages;
+}
 
-    std::string ErrorData::GetFormattedMessage() const {
-        std::ostringstream ss;
-        ss << mMessage << "\n";
+std::string ErrorData::GetFormattedMessage() const {
+    std::ostringstream ss;
+    ss << mMessage << "\n";
 
-        if (!mContexts.empty()) {
-            for (auto context : mContexts) {
-                ss << " - While " << context << "\n";
-            }
+    if (!mContexts.empty()) {
+        for (auto context : mContexts) {
+            ss << " - While " << context << "\n";
         }
-
-        // For non-validation errors, or errors that lack a context include the
-        // stack trace for debugging purposes.
-        if (mContexts.empty() || mType != InternalErrorType::Validation) {
-            for (const auto& callsite : mBacktrace) {
-                ss << "    at " << callsite.function << " (" << callsite.file << ":"
-                   << callsite.line << ")\n";
-            }
-        }
-
-        if (!mDebugGroups.empty()) {
-            ss << "\nDebug group stack:\n";
-            for (auto label : mDebugGroups) {
-                ss << " > \"" << label << "\"\n";
-            }
-        }
-
-        if (!mBackendMessages.empty()) {
-            ss << "\nBackend messages:\n";
-            for (auto message : mBackendMessages) {
-                ss << " * " << message << "\n";
-            }
-        }
-
-        return ss.str();
     }
 
+    // For non-validation errors, or errors that lack a context include the
+    // stack trace for debugging purposes.
+    if (mContexts.empty() || mType != InternalErrorType::Validation) {
+        for (const auto& callsite : mBacktrace) {
+            ss << "    at " << callsite.function << " (" << callsite.file << ":" << callsite.line
+               << ")\n";
+        }
+    }
+
+    if (!mDebugGroups.empty()) {
+        ss << "\nDebug group stack:\n";
+        for (auto label : mDebugGroups) {
+            ss << " > \"" << label << "\"\n";
+        }
+    }
+
+    if (!mBackendMessages.empty()) {
+        ss << "\nBackend messages:\n";
+        for (auto message : mBackendMessages) {
+            ss << " * " << message << "\n";
+        }
+    }
+
+    return ss.str();
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/ErrorData.h b/src/dawn/native/ErrorData.h
index bc45012..30e33c6 100644
--- a/src/dawn/native/ErrorData.h
+++ b/src/dawn/native/ErrorData.h
@@ -23,50 +23,52 @@
 #include "dawn/common/Compiler.h"
 
 namespace wgpu {
-    enum class ErrorType : uint32_t;
+enum class ErrorType : uint32_t;
 }
 
 namespace dawn {
-    using ErrorType = wgpu::ErrorType;
+using ErrorType = wgpu::ErrorType;
 }
 
 namespace dawn::native {
-    enum class InternalErrorType : uint32_t;
+enum class InternalErrorType : uint32_t;
 
-    class [[nodiscard]] ErrorData {
-      public:
-        [[nodiscard]] static std::unique_ptr<ErrorData> Create(
-            InternalErrorType type, std::string message, const char* file, const char* function,
-            int line);
-        ErrorData(InternalErrorType type, std::string message);
+class [[nodiscard]] ErrorData {
+  public:
+    [[nodiscard]] static std::unique_ptr<ErrorData> Create(InternalErrorType type,
+                                                           std::string message,
+                                                           const char* file,
+                                                           const char* function,
+                                                           int line);
+    ErrorData(InternalErrorType type, std::string message);
 
-        struct BacktraceRecord {
-            const char* file;
-            const char* function;
-            int line;
-        };
-        void AppendBacktrace(const char* file, const char* function, int line);
-        void AppendContext(std::string context);
-        void AppendDebugGroup(std::string label);
-        void AppendBackendMessage(std::string message);
-
-        InternalErrorType GetType() const;
-        const std::string& GetMessage() const;
-        const std::vector<BacktraceRecord>& GetBacktrace() const;
-        const std::vector<std::string>& GetContexts() const;
-        const std::vector<std::string>& GetDebugGroups() const;
-        const std::vector<std::string>& GetBackendMessages() const;
-
-        std::string GetFormattedMessage() const;
-
-      private:
-        InternalErrorType mType;
-        std::string mMessage;
-        std::vector<BacktraceRecord> mBacktrace;
-        std::vector<std::string> mContexts;
-        std::vector<std::string> mDebugGroups;
-        std::vector<std::string> mBackendMessages;
+    struct BacktraceRecord {
+        const char* file;
+        const char* function;
+        int line;
     };
+    void AppendBacktrace(const char* file, const char* function, int line);
+    void AppendContext(std::string context);
+    void AppendDebugGroup(std::string label);
+    void AppendBackendMessage(std::string message);
+
+    InternalErrorType GetType() const;
+    const std::string& GetMessage() const;
+    const std::vector<BacktraceRecord>& GetBacktrace() const;
+    const std::vector<std::string>& GetContexts() const;
+    const std::vector<std::string>& GetDebugGroups() const;
+    const std::vector<std::string>& GetBackendMessages() const;
+
+    std::string GetFormattedMessage() const;
+
+  private:
+    InternalErrorType mType;
+    std::string mMessage;
+    std::vector<BacktraceRecord> mBacktrace;
+    std::vector<std::string> mContexts;
+    std::vector<std::string> mDebugGroups;
+    std::vector<std::string> mBackendMessages;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ErrorInjector.cpp b/src/dawn/native/ErrorInjector.cpp
index af87498..5942f30 100644
--- a/src/dawn/native/ErrorInjector.cpp
+++ b/src/dawn/native/ErrorInjector.cpp
@@ -19,52 +19,52 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        bool sIsEnabled = false;
-        uint64_t sNextIndex = 0;
-        uint64_t sInjectedFailureIndex = 0;
-        bool sHasPendingInjectedError = false;
+bool sIsEnabled = false;
+uint64_t sNextIndex = 0;
+uint64_t sInjectedFailureIndex = 0;
+bool sHasPendingInjectedError = false;
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    void EnableErrorInjector() {
-        sIsEnabled = true;
-    }
+void EnableErrorInjector() {
+    sIsEnabled = true;
+}
 
-    void DisableErrorInjector() {
-        sIsEnabled = false;
-    }
+void DisableErrorInjector() {
+    sIsEnabled = false;
+}
 
-    void ClearErrorInjector() {
-        sNextIndex = 0;
+void ClearErrorInjector() {
+    sNextIndex = 0;
+    sHasPendingInjectedError = false;
+}
+
+bool ErrorInjectorEnabled() {
+    return sIsEnabled;
+}
+
+uint64_t AcquireErrorInjectorCallCount() {
+    uint64_t count = sNextIndex;
+    ClearErrorInjector();
+    return count;
+}
+
+bool ShouldInjectError() {
+    uint64_t index = sNextIndex++;
+    if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
         sHasPendingInjectedError = false;
+        return true;
     }
+    return false;
+}
 
-    bool ErrorInjectorEnabled() {
-        return sIsEnabled;
-    }
-
-    uint64_t AcquireErrorInjectorCallCount() {
-        uint64_t count = sNextIndex;
-        ClearErrorInjector();
-        return count;
-    }
-
-    bool ShouldInjectError() {
-        uint64_t index = sNextIndex++;
-        if (sHasPendingInjectedError && index == sInjectedFailureIndex) {
-            sHasPendingInjectedError = false;
-            return true;
-        }
-        return false;
-    }
-
-    void InjectErrorAt(uint64_t index) {
-        // Only one error can be injected at a time.
-        ASSERT(!sHasPendingInjectedError);
-        sInjectedFailureIndex = index;
-        sHasPendingInjectedError = true;
-    }
+void InjectErrorAt(uint64_t index) {
+    // Only one error can be injected at a time.
+    ASSERT(!sHasPendingInjectedError);
+    sInjectedFailureIndex = index;
+    sHasPendingInjectedError = true;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/ErrorInjector.h b/src/dawn/native/ErrorInjector.h
index 02cbea2..a65d80b 100644
--- a/src/dawn/native/ErrorInjector.h
+++ b/src/dawn/native/ErrorInjector.h
@@ -20,48 +20,48 @@
 
 namespace dawn::native {
 
-    template <typename ErrorType>
-    struct InjectedErrorResult {
-        ErrorType error;
-        bool injected;
-    };
+template <typename ErrorType>
+struct InjectedErrorResult {
+    ErrorType error;
+    bool injected;
+};
 
-    bool ErrorInjectorEnabled();
+bool ErrorInjectorEnabled();
 
-    bool ShouldInjectError();
+bool ShouldInjectError();
 
-    template <typename ErrorType>
-    InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
-        return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
+template <typename ErrorType>
+InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType) {
+    return InjectedErrorResult<ErrorType>{errorType, ShouldInjectError()};
+}
+
+template <typename ErrorType, typename... ErrorTypes>
+InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
+    if (ShouldInjectError()) {
+        return InjectedErrorResult<ErrorType>{errorType, true};
     }
-
-    template <typename ErrorType, typename... ErrorTypes>
-    InjectedErrorResult<ErrorType> MaybeInjectError(ErrorType errorType, ErrorTypes... errorTypes) {
-        if (ShouldInjectError()) {
-            return InjectedErrorResult<ErrorType>{errorType, true};
-        }
-        return MaybeInjectError(errorTypes...);
-    }
+    return MaybeInjectError(errorTypes...);
+}
 
 }  // namespace dawn::native
 
 #if defined(DAWN_ENABLE_ERROR_INJECTION)
 
-#    define INJECT_ERROR_OR_RUN(stmt, ...)                                                   \
-        [&]() {                                                                              \
-            if (DAWN_UNLIKELY(::dawn::native::ErrorInjectorEnabled())) {                     \
-                /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
-                auto injectedError = ::dawn::native::MaybeInjectError(__VA_ARGS__);          \
-                if (injectedError.injected) {                                                \
-                    return injectedError.error;                                              \
-                }                                                                            \
-            }                                                                                \
-            return (stmt);                                                                   \
-        }()
+#define INJECT_ERROR_OR_RUN(stmt, ...)                                                   \
+    [&]() {                                                                              \
+        if (DAWN_UNLIKELY(::dawn::native::ErrorInjectorEnabled())) {                     \
+            /* Only used for testing and fuzzing, so it's okay if this is deoptimized */ \
+            auto injectedError = ::dawn::native::MaybeInjectError(__VA_ARGS__);          \
+            if (injectedError.injected) {                                                \
+                return injectedError.error;                                              \
+            }                                                                            \
+        }                                                                                \
+        return (stmt);                                                                   \
+    }()
 
 #else
 
-#    define INJECT_ERROR_OR_RUN(stmt, ...) stmt
+#define INJECT_ERROR_OR_RUN(stmt, ...) stmt
 
 #endif
 
diff --git a/src/dawn/native/ErrorScope.cpp b/src/dawn/native/ErrorScope.cpp
index 78a6a49..fe40d2d 100644
--- a/src/dawn/native/ErrorScope.cpp
+++ b/src/dawn/native/ErrorScope.cpp
@@ -20,75 +20,74 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
-            switch (filter) {
-                case wgpu::ErrorFilter::Validation:
-                    return wgpu::ErrorType::Validation;
-                case wgpu::ErrorFilter::OutOfMemory:
-                    return wgpu::ErrorType::OutOfMemory;
-            }
-            UNREACHABLE();
+wgpu::ErrorType ErrorFilterToErrorType(wgpu::ErrorFilter filter) {
+    switch (filter) {
+        case wgpu::ErrorFilter::Validation:
+            return wgpu::ErrorType::Validation;
+        case wgpu::ErrorFilter::OutOfMemory:
+            return wgpu::ErrorType::OutOfMemory;
+    }
+    UNREACHABLE();
+}
+
+}  // namespace
+
+ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
+    : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {}
+
+wgpu::ErrorType ErrorScope::GetErrorType() const {
+    return mCapturedError;
+}
+
+const char* ErrorScope::GetErrorMessage() const {
+    return mErrorMessage.c_str();
+}
+
+void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
+    mScopes.push_back(ErrorScope(filter));
+}
+
+ErrorScope ErrorScopeStack::Pop() {
+    ASSERT(!mScopes.empty());
+    ErrorScope scope = std::move(mScopes.back());
+    mScopes.pop_back();
+    return scope;
+}
+
+bool ErrorScopeStack::Empty() const {
+    return mScopes.empty();
+}
+
+bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
+    for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
+        if (it->mMatchedErrorType != type) {
+            // Error filter does not match. Move on to the next scope.
+            continue;
         }
 
-    }  // namespace
+        // Filter matches.
+        // Record the error if the scope doesn't have one yet.
+        if (it->mCapturedError == wgpu::ErrorType::NoError) {
+            it->mCapturedError = type;
+            it->mErrorMessage = message;
+        }
 
-    ErrorScope::ErrorScope(wgpu::ErrorFilter errorFilter)
-        : mMatchedErrorType(ErrorFilterToErrorType(errorFilter)) {
-    }
-
-    wgpu::ErrorType ErrorScope::GetErrorType() const {
-        return mCapturedError;
-    }
-
-    const char* ErrorScope::GetErrorMessage() const {
-        return mErrorMessage.c_str();
-    }
-
-    void ErrorScopeStack::Push(wgpu::ErrorFilter filter) {
-        mScopes.push_back(ErrorScope(filter));
-    }
-
-    ErrorScope ErrorScopeStack::Pop() {
-        ASSERT(!mScopes.empty());
-        ErrorScope scope = std::move(mScopes.back());
-        mScopes.pop_back();
-        return scope;
-    }
-
-    bool ErrorScopeStack::Empty() const {
-        return mScopes.empty();
-    }
-
-    bool ErrorScopeStack::HandleError(wgpu::ErrorType type, const char* message) {
-        for (auto it = mScopes.rbegin(); it != mScopes.rend(); ++it) {
-            if (it->mMatchedErrorType != type) {
-                // Error filter does not match. Move on to the next scope.
-                continue;
-            }
-
-            // Filter matches.
-            // Record the error if the scope doesn't have one yet.
-            if (it->mCapturedError == wgpu::ErrorType::NoError) {
+        if (type == wgpu::ErrorType::DeviceLost) {
+            if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
+                // DeviceLost overrides any other error that is not a DeviceLost.
                 it->mCapturedError = type;
                 it->mErrorMessage = message;
             }
-
-            if (type == wgpu::ErrorType::DeviceLost) {
-                if (it->mCapturedError != wgpu::ErrorType::DeviceLost) {
-                    // DeviceLost overrides any other error that is not a DeviceLost.
-                    it->mCapturedError = type;
-                    it->mErrorMessage = message;
-                }
-            } else {
-                // Errors that are not device lost are captured and stop propogating.
-                return true;
-            }
+        } else {
+            // Errors that are not device lost are captured and stop propogating.
+            return true;
         }
-
-        // The error was not captured.
-        return false;
     }
 
+    // The error was not captured.
+    return false;
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/ErrorScope.h b/src/dawn/native/ErrorScope.h
index f99c71d..3ffb510 100644
--- a/src/dawn/native/ErrorScope.h
+++ b/src/dawn/native/ErrorScope.h
@@ -22,35 +22,35 @@
 
 namespace dawn::native {
 
-    class ErrorScope {
-      public:
-        wgpu::ErrorType GetErrorType() const;
-        const char* GetErrorMessage() const;
+class ErrorScope {
+  public:
+    wgpu::ErrorType GetErrorType() const;
+    const char* GetErrorMessage() const;
 
-      private:
-        friend class ErrorScopeStack;
-        explicit ErrorScope(wgpu::ErrorFilter errorFilter);
+  private:
+    friend class ErrorScopeStack;
+    explicit ErrorScope(wgpu::ErrorFilter errorFilter);
 
-        wgpu::ErrorType mMatchedErrorType;
-        wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
-        std::string mErrorMessage = "";
-    };
+    wgpu::ErrorType mMatchedErrorType;
+    wgpu::ErrorType mCapturedError = wgpu::ErrorType::NoError;
+    std::string mErrorMessage = "";
+};
 
-    class ErrorScopeStack {
-      public:
-        void Push(wgpu::ErrorFilter errorFilter);
-        ErrorScope Pop();
+class ErrorScopeStack {
+  public:
+    void Push(wgpu::ErrorFilter errorFilter);
+    ErrorScope Pop();
 
-        bool Empty() const;
+    bool Empty() const;
 
-        // Pass an error to the scopes in the stack. Returns true if one of the scopes
-        // captured the error. Returns false if the error should be forwarded to the
-        // uncaptured error callback.
-        bool HandleError(wgpu::ErrorType type, const char* message);
+    // Pass an error to the scopes in the stack. Returns true if one of the scopes
+    // captured the error. Returns false if the error should be forwarded to the
+    // uncaptured error callback.
+    bool HandleError(wgpu::ErrorType type, const char* message);
 
-      private:
-        std::vector<ErrorScope> mScopes;
-    };
+  private:
+    std::vector<ErrorScope> mScopes;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ExternalTexture.cpp b/src/dawn/native/ExternalTexture.cpp
index f9e7044..1cbcb6f 100644
--- a/src/dawn/native/ExternalTexture.cpp
+++ b/src/dawn/native/ExternalTexture.cpp
@@ -26,205 +26,204 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView) {
+MaybeError ValidateExternalTexturePlane(const TextureViewBase* textureView) {
+    DAWN_INVALID_IF(
+        (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
+        "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
+        textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
+
+    DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
+                    "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
+                    textureView->GetDimension());
+
+    DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
+                    "The external texture plane (%s) mip level count (%u) is not 1.", textureView,
+                    textureView->GetLevelCount());
+
+    DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
+                    "The external texture plane (%s) sample count (%u) is not one.", textureView,
+                    textureView->GetTexture()->GetSampleCount());
+
+    return {};
+}
+
+MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+                                             const ExternalTextureDescriptor* descriptor) {
+    ASSERT(descriptor);
+    ASSERT(descriptor->plane0);
+
+    DAWN_TRY(device->ValidateObject(descriptor->plane0));
+
+    wgpu::TextureFormat plane0Format = descriptor->plane0->GetFormat().format;
+
+    if (descriptor->plane1) {
         DAWN_INVALID_IF(
-            (textureView->GetTexture()->GetUsage() & wgpu::TextureUsage::TextureBinding) == 0,
-            "The external texture plane (%s) usage (%s) doesn't include the required usage (%s)",
-            textureView, textureView->GetTexture()->GetUsage(), wgpu::TextureUsage::TextureBinding);
+            device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+            "Bi-planar external textures are disabled until the implementation is completed.");
 
-        DAWN_INVALID_IF(textureView->GetDimension() != wgpu::TextureViewDimension::e2D,
-                        "The external texture plane (%s) dimension (%s) is not 2D.", textureView,
-                        textureView->GetDimension());
+        DAWN_INVALID_IF(descriptor->colorSpace != wgpu::PredefinedColorSpace::Srgb,
+                        "The specified color space (%s) is not %s.", descriptor->colorSpace,
+                        wgpu::PredefinedColorSpace::Srgb);
 
-        DAWN_INVALID_IF(textureView->GetLevelCount() > 1,
-                        "The external texture plane (%s) mip level count (%u) is not 1.",
-                        textureView, textureView->GetLevelCount());
+        DAWN_TRY(device->ValidateObject(descriptor->plane1));
+        wgpu::TextureFormat plane1Format = descriptor->plane1->GetFormat().format;
 
-        DAWN_INVALID_IF(textureView->GetTexture()->GetSampleCount() != 1,
-                        "The external texture plane (%s) sample count (%u) is not one.",
-                        textureView, textureView->GetTexture()->GetSampleCount());
+        DAWN_INVALID_IF(plane0Format != wgpu::TextureFormat::R8Unorm,
+                        "The bi-planar external texture plane (%s) format (%s) is not %s.",
+                        descriptor->plane0, plane0Format, wgpu::TextureFormat::R8Unorm);
+        DAWN_INVALID_IF(plane1Format != wgpu::TextureFormat::RG8Unorm,
+                        "The bi-planar external texture plane (%s) format (%s) is not %s.",
+                        descriptor->plane1, plane1Format, wgpu::TextureFormat::RG8Unorm);
 
-        return {};
-    }
-
-    MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
-                                                 const ExternalTextureDescriptor* descriptor) {
-        ASSERT(descriptor);
-        ASSERT(descriptor->plane0);
-
-        DAWN_TRY(device->ValidateObject(descriptor->plane0));
-
-        wgpu::TextureFormat plane0Format = descriptor->plane0->GetFormat().format;
-
-        if (descriptor->plane1) {
-            DAWN_INVALID_IF(
-                device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
-                "Bi-planar external textures are disabled until the implementation is completed.");
-
-            DAWN_INVALID_IF(descriptor->colorSpace != wgpu::PredefinedColorSpace::Srgb,
-                            "The specified color space (%s) is not %s.", descriptor->colorSpace,
-                            wgpu::PredefinedColorSpace::Srgb);
-
-            DAWN_TRY(device->ValidateObject(descriptor->plane1));
-            wgpu::TextureFormat plane1Format = descriptor->plane1->GetFormat().format;
-
-            DAWN_INVALID_IF(plane0Format != wgpu::TextureFormat::R8Unorm,
-                            "The bi-planar external texture plane (%s) format (%s) is not %s.",
-                            descriptor->plane0, plane0Format, wgpu::TextureFormat::R8Unorm);
-            DAWN_INVALID_IF(plane1Format != wgpu::TextureFormat::RG8Unorm,
-                            "The bi-planar external texture plane (%s) format (%s) is not %s.",
-                            descriptor->plane1, plane1Format, wgpu::TextureFormat::RG8Unorm);
-
-            DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
-            DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane1));
-        } else {
-            switch (plane0Format) {
-                case wgpu::TextureFormat::RGBA8Unorm:
-                case wgpu::TextureFormat::BGRA8Unorm:
-                case wgpu::TextureFormat::RGBA16Float:
-                    DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
-                    break;
-                default:
-                    return DAWN_FORMAT_VALIDATION_ERROR(
-                        "The external texture plane (%s) format (%s) is not a supported format "
-                        "(%s, %s, %s).",
-                        descriptor->plane0, plane0Format, wgpu::TextureFormat::RGBA8Unorm,
-                        wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::RGBA16Float);
-            }
-        }
-
-        return {};
-    }
-
-    // static
-    ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
-        DeviceBase* device,
-        const ExternalTextureDescriptor* descriptor) {
-        Ref<ExternalTextureBase> externalTexture =
-            AcquireRef(new ExternalTextureBase(device, descriptor));
-        DAWN_TRY(externalTexture->Initialize(device, descriptor));
-        return std::move(externalTexture);
-    }
-
-    ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
-                                             const ExternalTextureDescriptor* descriptor)
-        : ApiObjectBase(device, descriptor->label), mState(ExternalTextureState::Alive) {
-        TrackInDevice();
-    }
-
-    ExternalTextureBase::ExternalTextureBase(DeviceBase* device)
-        : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
-        TrackInDevice();
-    }
-
-    ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
-
-    ExternalTextureBase::~ExternalTextureBase() = default;
-
-    MaybeError ExternalTextureBase::Initialize(DeviceBase* device,
-                                               const ExternalTextureDescriptor* descriptor) {
-        // Store any passed in TextureViews associated with individual planes.
-        mTextureViews[0] = descriptor->plane0;
-
-        if (descriptor->plane1) {
-            mTextureViews[1] = descriptor->plane1;
-        } else {
-            DAWN_TRY_ASSIGN(mTextureViews[1],
-                            device->GetOrCreatePlaceholderTextureViewForExternalTexture());
-        }
-
-        // We must create a buffer to store parameters needed by a shader that operates on this
-        // external texture.
-        BufferDescriptor bufferDesc;
-        bufferDesc.size = sizeof(ExternalTextureParams);
-        bufferDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
-        bufferDesc.label = "Dawn_External_Texture_Params_Buffer";
-
-        DAWN_TRY_ASSIGN(mParamsBuffer, device->CreateBuffer(&bufferDesc));
-
-        // Dawn & Tint's YUV-to-RGB conversion implementation is a simple 3x4 matrix multiplication
-        // using a standard conversion matrix. These matrices can be found in
-        // chromium/src/third_party/skia/src/core/SkYUVMath.cpp
-        ExternalTextureParams params;
-        params.numPlanes = descriptor->plane1 == nullptr ? 1 : 2;
-
-        // TODO(dawn:1082): Make this field configurable from outside of Dawn.
-        // Conversion matrix for BT.709 limited range. Columns 1, 2 and 3 are copied
-        // directly from the corresponding matrix in SkYUVMath.cpp. Column 4 is the range
-        // bias (for RGB) found in column 5 of the same SkYUVMath.cpp matrix.
-        params.yuvToRgbConversionMatrix = {1.164384f, 0.0f,       1.792741f,  -0.972945f,
-                                           1.164384f, -0.213249f, -0.532909f, 0.301483f,
-                                           1.164384f, 2.112402f,  0.0f,       -1.133402f};
-
-        // TODO(dawn:1082): Make this field configurable from outside of Dawn.
-        // Use an identity matrix when converting BT.709 to sRGB because they shared the
-        // same primaries.
-        params.gamutConversionMatrix = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f};
-
-        switch (descriptor->colorSpace) {
-            case wgpu::PredefinedColorSpace::Undefined:
-                // Undefined color space should eventually produce an error. For now, these
-                // constants will effectively perform no gamma correction so tests can continue
-                // passing.
-                params.gammaDecodingParams = {1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0};
-                params.gammaEncodingParams = {1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0};
+        DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
+        DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane1));
+    } else {
+        switch (plane0Format) {
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::BGRA8Unorm:
+            case wgpu::TextureFormat::RGBA16Float:
+                DAWN_TRY(ValidateExternalTexturePlane(descriptor->plane0));
                 break;
-            case wgpu::PredefinedColorSpace::Srgb:
-                // TODO(dawn:1082): Make this field configurable from outside of Dawn.
-                // These are the inverted parameters as specified by Rec. ITU-R BT.1886 for BT.709
-                params.gammaDecodingParams = {2.2, 1.0 / 1.099, 0.099 / 1.099, 1 / 4.5, 0.081,
-                                              0.0, 0.0};
-
-                // Constants for sRGB transfer function pulled from
-                // https://en.wikipedia.org/wiki/SRGB
-                params.gammaEncodingParams = {
-                    1 / 2.4, 1.137119 /*1.055^2.4*/, 0.0, 12.92, 0.0031308, -0.055, 0.0};
-                break;
+            default:
+                return DAWN_FORMAT_VALIDATION_ERROR(
+                    "The external texture plane (%s) format (%s) is not a supported format "
+                    "(%s, %s, %s).",
+                    descriptor->plane0, plane0Format, wgpu::TextureFormat::RGBA8Unorm,
+                    wgpu::TextureFormat::BGRA8Unorm, wgpu::TextureFormat::RGBA16Float);
         }
-
-        DAWN_TRY(device->GetQueue()->WriteBuffer(mParamsBuffer.Get(), 0, &params,
-                                                 sizeof(ExternalTextureParams)));
-
-        return {};
     }
 
-    const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>&
-    ExternalTextureBase::GetTextureViews() const {
-        return mTextureViews;
+    return {};
+}
+
+// static
+ResultOrError<Ref<ExternalTextureBase>> ExternalTextureBase::Create(
+    DeviceBase* device,
+    const ExternalTextureDescriptor* descriptor) {
+    Ref<ExternalTextureBase> externalTexture =
+        AcquireRef(new ExternalTextureBase(device, descriptor));
+    DAWN_TRY(externalTexture->Initialize(device, descriptor));
+    return std::move(externalTexture);
+}
+
+ExternalTextureBase::ExternalTextureBase(DeviceBase* device,
+                                         const ExternalTextureDescriptor* descriptor)
+    : ApiObjectBase(device, descriptor->label), mState(ExternalTextureState::Alive) {
+    TrackInDevice();
+}
+
+ExternalTextureBase::ExternalTextureBase(DeviceBase* device)
+    : ApiObjectBase(device, kLabelNotImplemented), mState(ExternalTextureState::Alive) {
+    TrackInDevice();
+}
+
+ExternalTextureBase::ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+ExternalTextureBase::~ExternalTextureBase() = default;
+
+MaybeError ExternalTextureBase::Initialize(DeviceBase* device,
+                                           const ExternalTextureDescriptor* descriptor) {
+    // Store any passed in TextureViews associated with individual planes.
+    mTextureViews[0] = descriptor->plane0;
+
+    if (descriptor->plane1) {
+        mTextureViews[1] = descriptor->plane1;
+    } else {
+        DAWN_TRY_ASSIGN(mTextureViews[1],
+                        device->GetOrCreatePlaceholderTextureViewForExternalTexture());
     }
 
-    MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
-        ASSERT(!IsError());
-        DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
-                        "Destroyed external texture %s is used in a submit.", this);
-        return {};
+    // We must create a buffer to store parameters needed by a shader that operates on this
+    // external texture.
+    BufferDescriptor bufferDesc;
+    bufferDesc.size = sizeof(ExternalTextureParams);
+    bufferDesc.usage = wgpu::BufferUsage::Uniform | wgpu::BufferUsage::CopyDst;
+    bufferDesc.label = "Dawn_External_Texture_Params_Buffer";
+
+    DAWN_TRY_ASSIGN(mParamsBuffer, device->CreateBuffer(&bufferDesc));
+
+    // Dawn & Tint's YUV-to-RGB conversion implementation is a simple 3x4 matrix multiplication
+    // using a standard conversion matrix. These matrices can be found in
+    // chromium/src/third_party/skia/src/core/SkYUVMath.cpp
+    ExternalTextureParams params;
+    params.numPlanes = descriptor->plane1 == nullptr ? 1 : 2;
+
+    // TODO(dawn:1082): Make this field configurable from outside of Dawn.
+    // Conversion matrix for BT.709 limited range. Columns 1, 2 and 3 are copied
+    // directly from the corresponding matrix in SkYUVMath.cpp. Column 4 is the range
+    // bias (for RGB) found in column 5 of the same SkYUVMath.cpp matrix.
+    params.yuvToRgbConversionMatrix = {1.164384f, 0.0f,       1.792741f,  -0.972945f,
+                                       1.164384f, -0.213249f, -0.532909f, 0.301483f,
+                                       1.164384f, 2.112402f,  0.0f,       -1.133402f};
+
+    // TODO(dawn:1082): Make this field configurable from outside of Dawn.
+    // Use an identity matrix when converting BT.709 to sRGB because they shared the
+    // same primaries.
+    params.gamutConversionMatrix = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f,
+                                    0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f};
+
+    switch (descriptor->colorSpace) {
+        case wgpu::PredefinedColorSpace::Undefined:
+            // Undefined color space should eventually produce an error. For now, these
+            // constants will effectively perform no gamma correction so tests can continue
+            // passing.
+            params.gammaDecodingParams = {1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0};
+            params.gammaEncodingParams = {1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0};
+            break;
+        case wgpu::PredefinedColorSpace::Srgb:
+            // TODO(dawn:1082): Make this field configurable from outside of Dawn.
+            // These are the inverted parameters as specified by Rec. ITU-R BT.1886 for BT.709
+            params.gammaDecodingParams = {2.2, 1.0 / 1.099, 0.099 / 1.099, 1 / 4.5, 0.081,
+                                          0.0, 0.0};
+
+            // Constants for sRGB transfer function pulled from
+            // https://en.wikipedia.org/wiki/SRGB
+            params.gammaEncodingParams = {
+                1 / 2.4, 1.137119 /*1.055^2.4*/, 0.0, 12.92, 0.0031308, -0.055, 0.0};
+            break;
     }
 
-    void ExternalTextureBase::APIDestroy() {
-        if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
-            return;
-        }
-        Destroy();
-    }
+    DAWN_TRY(device->GetQueue()->WriteBuffer(mParamsBuffer.Get(), 0, &params,
+                                             sizeof(ExternalTextureParams)));
 
-    void ExternalTextureBase::DestroyImpl() {
-        mState = ExternalTextureState::Destroyed;
-    }
+    return {};
+}
 
-    // static
-    ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
-        return new ExternalTextureBase(device, ObjectBase::kError);
-    }
+const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& ExternalTextureBase::GetTextureViews()
+    const {
+    return mTextureViews;
+}
 
-    BufferBase* ExternalTextureBase::GetParamsBuffer() const {
-        return mParamsBuffer.Get();
-    }
+MaybeError ExternalTextureBase::ValidateCanUseInSubmitNow() const {
+    ASSERT(!IsError());
+    DAWN_INVALID_IF(mState == ExternalTextureState::Destroyed,
+                    "Destroyed external texture %s is used in a submit.", this);
+    return {};
+}
 
-    ObjectType ExternalTextureBase::GetType() const {
-        return ObjectType::ExternalTexture;
+void ExternalTextureBase::APIDestroy() {
+    if (GetDevice()->ConsumedError(GetDevice()->ValidateObject(this))) {
+        return;
     }
+    Destroy();
+}
+
+void ExternalTextureBase::DestroyImpl() {
+    mState = ExternalTextureState::Destroyed;
+}
+
+// static
+ExternalTextureBase* ExternalTextureBase::MakeError(DeviceBase* device) {
+    return new ExternalTextureBase(device, ObjectBase::kError);
+}
+
+BufferBase* ExternalTextureBase::GetParamsBuffer() const {
+    return mParamsBuffer.Get();
+}
+
+ObjectType ExternalTextureBase::GetType() const {
+    return ObjectType::ExternalTexture;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/ExternalTexture.h b/src/dawn/native/ExternalTexture.h
index 86b6886..fcb8390 100644
--- a/src/dawn/native/ExternalTexture.h
+++ b/src/dawn/native/ExternalTexture.h
@@ -24,66 +24,66 @@
 
 namespace dawn::native {
 
-    class TextureViewBase;
+class TextureViewBase;
 
-    struct GammaTransferParams {
-        float G = 0.0;
-        float A = 0.0;
-        float B = 0.0;
-        float C = 0.0;
-        float D = 0.0;
-        float E = 0.0;
-        float F = 0.0;
-        uint32_t padding = 0;
-    };
+struct GammaTransferParams {
+    float G = 0.0;
+    float A = 0.0;
+    float B = 0.0;
+    float C = 0.0;
+    float D = 0.0;
+    float E = 0.0;
+    float F = 0.0;
+    uint32_t padding = 0;
+};
 
-    struct ExternalTextureParams {
-        uint32_t numPlanes;
-        std::array<uint32_t, 3> padding;
-        std::array<float, 12> yuvToRgbConversionMatrix;
-        GammaTransferParams gammaDecodingParams = {};
-        GammaTransferParams gammaEncodingParams = {};
-        std::array<float, 12> gamutConversionMatrix = {};
-    };
+struct ExternalTextureParams {
+    uint32_t numPlanes;
+    std::array<uint32_t, 3> padding;
+    std::array<float, 12> yuvToRgbConversionMatrix;
+    GammaTransferParams gammaDecodingParams = {};
+    GammaTransferParams gammaEncodingParams = {};
+    std::array<float, 12> gamutConversionMatrix = {};
+};
 
-    MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
-                                                 const ExternalTextureDescriptor* descriptor);
+MaybeError ValidateExternalTextureDescriptor(const DeviceBase* device,
+                                             const ExternalTextureDescriptor* descriptor);
 
-    class ExternalTextureBase : public ApiObjectBase {
-      public:
-        static ResultOrError<Ref<ExternalTextureBase>> Create(
-            DeviceBase* device,
-            const ExternalTextureDescriptor* descriptor);
+class ExternalTextureBase : public ApiObjectBase {
+  public:
+    static ResultOrError<Ref<ExternalTextureBase>> Create(
+        DeviceBase* device,
+        const ExternalTextureDescriptor* descriptor);
 
-        BufferBase* GetParamsBuffer() const;
-        const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& GetTextureViews() const;
-        ObjectType GetType() const override;
+    BufferBase* GetParamsBuffer() const;
+    const std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat>& GetTextureViews() const;
+    ObjectType GetType() const override;
 
-        MaybeError ValidateCanUseInSubmitNow() const;
-        static ExternalTextureBase* MakeError(DeviceBase* device);
+    MaybeError ValidateCanUseInSubmitNow() const;
+    static ExternalTextureBase* MakeError(DeviceBase* device);
 
-        void APIDestroy();
+    void APIDestroy();
 
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit ExternalTextureBase(DeviceBase* device);
-        void DestroyImpl() override;
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit ExternalTextureBase(DeviceBase* device);
+    void DestroyImpl() override;
 
-        ~ExternalTextureBase() override;
+    ~ExternalTextureBase() override;
 
-      private:
-        ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+  private:
+    ExternalTextureBase(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
 
-        enum class ExternalTextureState { Alive, Destroyed };
-        ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-        MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
+    enum class ExternalTextureState { Alive, Destroyed };
+    ExternalTextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    MaybeError Initialize(DeviceBase* device, const ExternalTextureDescriptor* descriptor);
 
-        Ref<TextureBase> mPlaceholderTexture;
-        Ref<BufferBase> mParamsBuffer;
-        std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;
+    Ref<TextureBase> mPlaceholderTexture;
+    Ref<BufferBase> mParamsBuffer;
+    std::array<Ref<TextureViewBase>, kMaxPlanesPerFormat> mTextureViews;
 
-        ExternalTextureState mState;
-    };
+    ExternalTextureState mState;
+};
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_EXTERNALTEXTURE_H_
diff --git a/src/dawn/native/Features.cpp b/src/dawn/native/Features.cpp
index 8a41f20..5621b63 100644
--- a/src/dawn/native/Features.cpp
+++ b/src/dawn/native/Features.cpp
@@ -21,260 +21,259 @@
 #include "dawn/common/BitSetIterator.h"
 
 namespace dawn::native {
-    namespace {
+namespace {
 
-        struct FeatureEnumAndInfo {
-            Feature feature;
-            FeatureInfo info;
-            bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
-        };
+struct FeatureEnumAndInfo {
+    Feature feature;
+    FeatureInfo info;
+    bool WGPUDeviceProperties::*memberInWGPUDeviceProperties;
+};
 
-        using FeatureEnumAndInfoList =
-            std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
+using FeatureEnumAndInfoList =
+    std::array<FeatureEnumAndInfo, static_cast<size_t>(Feature::EnumCount)>;
 
-        static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {
-            {{Feature::TextureCompressionBC,
-              {"texture-compression-bc", "Support Block Compressed (BC) texture formats",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
-              &WGPUDeviceProperties::textureCompressionBC},
-             {Feature::TextureCompressionETC2,
-              {"texture-compression-etc2",
-               "Support Ericsson Texture Compressed (ETC2/EAC) texture "
-               "formats",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
-              &WGPUDeviceProperties::textureCompressionETC2},
-             {Feature::TextureCompressionASTC,
-              {"texture-compression-astc",
-               "Support Adaptable Scalable Texture Compressed (ASTC) "
-               "texture formats",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
-              &WGPUDeviceProperties::textureCompressionASTC},
-             {Feature::ShaderFloat16,
-              {"shader-float16",
-               "Support 16bit float arithmetic and declarations in uniform and storage buffers",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
-              &WGPUDeviceProperties::shaderFloat16},
-             {Feature::PipelineStatisticsQuery,
-              {"pipeline-statistics-query", "Support Pipeline Statistics Query",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
-              &WGPUDeviceProperties::pipelineStatisticsQuery},
-             {Feature::TimestampQuery,
-              {"timestamp-query", "Support Timestamp Query",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
-              &WGPUDeviceProperties::timestampQuery},
-             {Feature::DepthClamping,
-              {"depth-clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
-              &WGPUDeviceProperties::depthClamping},
-             {Feature::Depth24UnormStencil8,
-              {"depth24unorm-stencil8", "Support depth24unorm-stencil8 texture format",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
-              &WGPUDeviceProperties::depth24UnormStencil8},
-             {Feature::Depth32FloatStencil8,
-              {"depth32float-stencil8", "Support depth32float-stencil8 texture format",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
-              &WGPUDeviceProperties::depth32FloatStencil8},
-             {Feature::DawnInternalUsages,
-              {"dawn-internal-usages",
-               "Add internal usages to resources to affect how the texture is allocated, but not "
-               "frontend validation. Other internal commands may access this usage.",
-               "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
-               "dawn_internal_usages.md"},
-              &WGPUDeviceProperties::dawnInternalUsages},
-             {Feature::MultiPlanarFormats,
-              {"multiplanar-formats",
-               "Import and use multi-planar texture formats with per plane views",
-               "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
-              &WGPUDeviceProperties::multiPlanarFormats},
-             {Feature::DawnNative,
-              {"dawn-native", "WebGPU is running on top of dawn_native.",
-               "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
-               "dawn_native.md"},
-              &WGPUDeviceProperties::dawnNative}}};
+static constexpr FeatureEnumAndInfoList kFeatureNameAndInfoList = {
+    {{Feature::TextureCompressionBC,
+      {"texture-compression-bc", "Support Block Compressed (BC) texture formats",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=42"},
+      &WGPUDeviceProperties::textureCompressionBC},
+     {Feature::TextureCompressionETC2,
+      {"texture-compression-etc2",
+       "Support Ericsson Texture Compressed (ETC2/EAC) texture "
+       "formats",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+      &WGPUDeviceProperties::textureCompressionETC2},
+     {Feature::TextureCompressionASTC,
+      {"texture-compression-astc",
+       "Support Adaptable Scalable Texture Compressed (ASTC) "
+       "texture formats",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=955"},
+      &WGPUDeviceProperties::textureCompressionASTC},
+     {Feature::ShaderFloat16,
+      {"shader-float16",
+       "Support 16bit float arithmetic and declarations in uniform and storage buffers",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=426"},
+      &WGPUDeviceProperties::shaderFloat16},
+     {Feature::PipelineStatisticsQuery,
+      {"pipeline-statistics-query", "Support Pipeline Statistics Query",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+      &WGPUDeviceProperties::pipelineStatisticsQuery},
+     {Feature::TimestampQuery,
+      {"timestamp-query", "Support Timestamp Query",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=434"},
+      &WGPUDeviceProperties::timestampQuery},
+     {Feature::DepthClamping,
+      {"depth-clamping", "Clamp depth to [0, 1] in NDC space instead of clipping",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=716"},
+      &WGPUDeviceProperties::depthClamping},
+     {Feature::Depth24UnormStencil8,
+      {"depth24unorm-stencil8", "Support depth24unorm-stencil8 texture format",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+      &WGPUDeviceProperties::depth24UnormStencil8},
+     {Feature::Depth32FloatStencil8,
+      {"depth32float-stencil8", "Support depth32float-stencil8 texture format",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=690"},
+      &WGPUDeviceProperties::depth32FloatStencil8},
+     {Feature::DawnInternalUsages,
+      {"dawn-internal-usages",
+       "Add internal usages to resources to affect how the texture is allocated, but not "
+       "frontend validation. Other internal commands may access this usage.",
+       "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+       "dawn_internal_usages.md"},
+      &WGPUDeviceProperties::dawnInternalUsages},
+     {Feature::MultiPlanarFormats,
+      {"multiplanar-formats", "Import and use multi-planar texture formats with per plane views",
+       "https://bugs.chromium.org/p/dawn/issues/detail?id=551"},
+      &WGPUDeviceProperties::multiPlanarFormats},
+     {Feature::DawnNative,
+      {"dawn-native", "WebGPU is running on top of dawn_native.",
+       "https://dawn.googlesource.com/dawn/+/refs/heads/main/docs/dawn/features/"
+       "dawn_native.md"},
+      &WGPUDeviceProperties::dawnNative}}};
 
-        Feature FromAPIFeature(wgpu::FeatureName feature) {
-            switch (feature) {
-                case wgpu::FeatureName::Undefined:
-                    return Feature::InvalidEnum;
-
-                case wgpu::FeatureName::TimestampQuery:
-                    return Feature::TimestampQuery;
-                case wgpu::FeatureName::PipelineStatisticsQuery:
-                    return Feature::PipelineStatisticsQuery;
-                case wgpu::FeatureName::TextureCompressionBC:
-                    return Feature::TextureCompressionBC;
-                case wgpu::FeatureName::TextureCompressionETC2:
-                    return Feature::TextureCompressionETC2;
-                case wgpu::FeatureName::TextureCompressionASTC:
-                    return Feature::TextureCompressionASTC;
-                case wgpu::FeatureName::DepthClamping:
-                    return Feature::DepthClamping;
-                case wgpu::FeatureName::Depth24UnormStencil8:
-                    return Feature::Depth24UnormStencil8;
-                case wgpu::FeatureName::Depth32FloatStencil8:
-                    return Feature::Depth32FloatStencil8;
-                case wgpu::FeatureName::DawnShaderFloat16:
-                    return Feature::ShaderFloat16;
-                case wgpu::FeatureName::DawnInternalUsages:
-                    return Feature::DawnInternalUsages;
-                case wgpu::FeatureName::DawnMultiPlanarFormats:
-                    return Feature::MultiPlanarFormats;
-                case wgpu::FeatureName::DawnNative:
-                    return Feature::DawnNative;
-
-                case wgpu::FeatureName::IndirectFirstInstance:
-                    return Feature::InvalidEnum;
-            }
+Feature FromAPIFeature(wgpu::FeatureName feature) {
+    switch (feature) {
+        case wgpu::FeatureName::Undefined:
             return Feature::InvalidEnum;
+
+        case wgpu::FeatureName::TimestampQuery:
+            return Feature::TimestampQuery;
+        case wgpu::FeatureName::PipelineStatisticsQuery:
+            return Feature::PipelineStatisticsQuery;
+        case wgpu::FeatureName::TextureCompressionBC:
+            return Feature::TextureCompressionBC;
+        case wgpu::FeatureName::TextureCompressionETC2:
+            return Feature::TextureCompressionETC2;
+        case wgpu::FeatureName::TextureCompressionASTC:
+            return Feature::TextureCompressionASTC;
+        case wgpu::FeatureName::DepthClamping:
+            return Feature::DepthClamping;
+        case wgpu::FeatureName::Depth24UnormStencil8:
+            return Feature::Depth24UnormStencil8;
+        case wgpu::FeatureName::Depth32FloatStencil8:
+            return Feature::Depth32FloatStencil8;
+        case wgpu::FeatureName::DawnShaderFloat16:
+            return Feature::ShaderFloat16;
+        case wgpu::FeatureName::DawnInternalUsages:
+            return Feature::DawnInternalUsages;
+        case wgpu::FeatureName::DawnMultiPlanarFormats:
+            return Feature::MultiPlanarFormats;
+        case wgpu::FeatureName::DawnNative:
+            return Feature::DawnNative;
+
+        case wgpu::FeatureName::IndirectFirstInstance:
+            return Feature::InvalidEnum;
+    }
+    return Feature::InvalidEnum;
+}
+
+wgpu::FeatureName ToAPIFeature(Feature feature) {
+    switch (feature) {
+        case Feature::TextureCompressionBC:
+            return wgpu::FeatureName::TextureCompressionBC;
+        case Feature::TextureCompressionETC2:
+            return wgpu::FeatureName::TextureCompressionETC2;
+        case Feature::TextureCompressionASTC:
+            return wgpu::FeatureName::TextureCompressionASTC;
+        case Feature::PipelineStatisticsQuery:
+            return wgpu::FeatureName::PipelineStatisticsQuery;
+        case Feature::TimestampQuery:
+            return wgpu::FeatureName::TimestampQuery;
+        case Feature::DepthClamping:
+            return wgpu::FeatureName::DepthClamping;
+        case Feature::Depth24UnormStencil8:
+            return wgpu::FeatureName::Depth24UnormStencil8;
+        case Feature::Depth32FloatStencil8:
+            return wgpu::FeatureName::Depth32FloatStencil8;
+        case Feature::ShaderFloat16:
+            return wgpu::FeatureName::DawnShaderFloat16;
+        case Feature::DawnInternalUsages:
+            return wgpu::FeatureName::DawnInternalUsages;
+        case Feature::MultiPlanarFormats:
+            return wgpu::FeatureName::DawnMultiPlanarFormats;
+        case Feature::DawnNative:
+            return wgpu::FeatureName::DawnNative;
+
+        case Feature::EnumCount:
+            break;
+    }
+    UNREACHABLE();
+}
+
+}  // anonymous namespace
+
+void FeaturesSet::EnableFeature(Feature feature) {
+    ASSERT(feature != Feature::InvalidEnum);
+    const size_t featureIndex = static_cast<size_t>(feature);
+    featuresBitSet.set(featureIndex);
+}
+
+void FeaturesSet::EnableFeature(wgpu::FeatureName feature) {
+    EnableFeature(FromAPIFeature(feature));
+}
+
+bool FeaturesSet::IsEnabled(Feature feature) const {
+    ASSERT(feature != Feature::InvalidEnum);
+    const size_t featureIndex = static_cast<size_t>(feature);
+    return featuresBitSet[featureIndex];
+}
+
+bool FeaturesSet::IsEnabled(wgpu::FeatureName feature) const {
+    Feature f = FromAPIFeature(feature);
+    return f != Feature::InvalidEnum && IsEnabled(f);
+}
+
+size_t FeaturesSet::EnumerateFeatures(wgpu::FeatureName* features) const {
+    for (uint32_t i : IterateBitSet(featuresBitSet)) {
+        wgpu::FeatureName feature = ToAPIFeature(static_cast<Feature>(i));
+        if (features != nullptr) {
+            *features = feature;
+            features += 1;
         }
+    }
+    return featuresBitSet.count();
+}
 
-        wgpu::FeatureName ToAPIFeature(Feature feature) {
-            switch (feature) {
-                case Feature::TextureCompressionBC:
-                    return wgpu::FeatureName::TextureCompressionBC;
-                case Feature::TextureCompressionETC2:
-                    return wgpu::FeatureName::TextureCompressionETC2;
-                case Feature::TextureCompressionASTC:
-                    return wgpu::FeatureName::TextureCompressionASTC;
-                case Feature::PipelineStatisticsQuery:
-                    return wgpu::FeatureName::PipelineStatisticsQuery;
-                case Feature::TimestampQuery:
-                    return wgpu::FeatureName::TimestampQuery;
-                case Feature::DepthClamping:
-                    return wgpu::FeatureName::DepthClamping;
-                case Feature::Depth24UnormStencil8:
-                    return wgpu::FeatureName::Depth24UnormStencil8;
-                case Feature::Depth32FloatStencil8:
-                    return wgpu::FeatureName::Depth32FloatStencil8;
-                case Feature::ShaderFloat16:
-                    return wgpu::FeatureName::DawnShaderFloat16;
-                case Feature::DawnInternalUsages:
-                    return wgpu::FeatureName::DawnInternalUsages;
-                case Feature::MultiPlanarFormats:
-                    return wgpu::FeatureName::DawnMultiPlanarFormats;
-                case Feature::DawnNative:
-                    return wgpu::FeatureName::DawnNative;
+std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
+    std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
 
-                case Feature::EnumCount:
-                    break;
-            }
-            UNREACHABLE();
-        }
-
-    }  // anonymous namespace
-
-    void FeaturesSet::EnableFeature(Feature feature) {
+    uint32_t index = 0;
+    for (uint32_t i : IterateBitSet(featuresBitSet)) {
+        Feature feature = static_cast<Feature>(i);
         ASSERT(feature != Feature::InvalidEnum);
-        const size_t featureIndex = static_cast<size_t>(feature);
-        featuresBitSet.set(featureIndex);
+
+        const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[i];
+        ASSERT(featureNameAndInfo.feature == feature);
+
+        enabledFeatureNames[index] = featureNameAndInfo.info.name;
+        ++index;
+    }
+    return enabledFeatureNames;
+}
+
+void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
+    ASSERT(properties != nullptr);
+
+    for (uint32_t i : IterateBitSet(featuresBitSet)) {
+        properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
+    }
+}
+
+wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature) {
+    ASSERT(feature != Feature::InvalidEnum);
+    return ToAPIFeature(feature);
+}
+
+FeaturesInfo::FeaturesInfo() {
+    for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
+        const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
+        ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
+        mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
+    }
+}
+
+const FeatureInfo* FeaturesInfo::GetFeatureInfo(wgpu::FeatureName feature) const {
+    Feature f = FromAPIFeature(feature);
+    if (f == Feature::InvalidEnum) {
+        return nullptr;
+    }
+    return &kFeatureNameAndInfoList[static_cast<size_t>(f)].info;
+}
+
+Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
+    ASSERT(featureName);
+
+    const auto& iter = mFeatureNameToEnumMap.find(featureName);
+    if (iter != mFeatureNameToEnumMap.cend()) {
+        return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
     }
 
-    void FeaturesSet::EnableFeature(wgpu::FeatureName feature) {
-        EnableFeature(FromAPIFeature(feature));
-    }
-
-    bool FeaturesSet::IsEnabled(Feature feature) const {
-        ASSERT(feature != Feature::InvalidEnum);
-        const size_t featureIndex = static_cast<size_t>(feature);
-        return featuresBitSet[featureIndex];
-    }
-
-    bool FeaturesSet::IsEnabled(wgpu::FeatureName feature) const {
-        Feature f = FromAPIFeature(feature);
-        return f != Feature::InvalidEnum && IsEnabled(f);
-    }
-
-    size_t FeaturesSet::EnumerateFeatures(wgpu::FeatureName* features) const {
-        for (uint32_t i : IterateBitSet(featuresBitSet)) {
-            wgpu::FeatureName feature = ToAPIFeature(static_cast<Feature>(i));
-            if (features != nullptr) {
-                *features = feature;
-                features += 1;
-            }
-        }
-        return featuresBitSet.count();
-    }
-
-    std::vector<const char*> FeaturesSet::GetEnabledFeatureNames() const {
-        std::vector<const char*> enabledFeatureNames(featuresBitSet.count());
-
-        uint32_t index = 0;
-        for (uint32_t i : IterateBitSet(featuresBitSet)) {
-            Feature feature = static_cast<Feature>(i);
-            ASSERT(feature != Feature::InvalidEnum);
-
-            const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[i];
-            ASSERT(featureNameAndInfo.feature == feature);
-
-            enabledFeatureNames[index] = featureNameAndInfo.info.name;
-            ++index;
-        }
-        return enabledFeatureNames;
-    }
-
-    void FeaturesSet::InitializeDeviceProperties(WGPUDeviceProperties* properties) const {
-        ASSERT(properties != nullptr);
-
-        for (uint32_t i : IterateBitSet(featuresBitSet)) {
-            properties->*(kFeatureNameAndInfoList[i].memberInWGPUDeviceProperties) = true;
+    // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
+    constexpr std::array<std::pair<const char*, const char*>, 6> kReplacementsForDeprecatedNames = {
+        {
+            {"texture_compression_bc", "texture-compression-bc"},
+            {"depth_clamping", "depth-clamping"},
+            {"pipeline_statistics_query", "pipeline-statistics-query"},
+            {"shader_float16", "shader-float16"},
+            {"timestamp_query", "timestamp-query"},
+            {"multiplanar_formats", "multiplanar-formats"},
+        }};
+    for (const auto& [name, replacement] : kReplacementsForDeprecatedNames) {
+        if (strcmp(featureName, name) == 0) {
+            return FeatureNameToEnum(replacement);
         }
     }
 
-    wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature) {
-        ASSERT(feature != Feature::InvalidEnum);
-        return ToAPIFeature(feature);
+    return Feature::InvalidEnum;
+}
+
+wgpu::FeatureName FeaturesInfo::FeatureNameToAPIEnum(const char* featureName) const {
+    Feature f = FeatureNameToEnum(featureName);
+    if (f != Feature::InvalidEnum) {
+        return ToAPIFeature(f);
     }
-
-    FeaturesInfo::FeaturesInfo() {
-        for (size_t index = 0; index < kFeatureNameAndInfoList.size(); ++index) {
-            const FeatureEnumAndInfo& featureNameAndInfo = kFeatureNameAndInfoList[index];
-            ASSERT(index == static_cast<size_t>(featureNameAndInfo.feature));
-            mFeatureNameToEnumMap[featureNameAndInfo.info.name] = featureNameAndInfo.feature;
-        }
-    }
-
-    const FeatureInfo* FeaturesInfo::GetFeatureInfo(wgpu::FeatureName feature) const {
-        Feature f = FromAPIFeature(feature);
-        if (f == Feature::InvalidEnum) {
-            return nullptr;
-        }
-        return &kFeatureNameAndInfoList[static_cast<size_t>(f)].info;
-    }
-
-    Feature FeaturesInfo::FeatureNameToEnum(const char* featureName) const {
-        ASSERT(featureName);
-
-        const auto& iter = mFeatureNameToEnumMap.find(featureName);
-        if (iter != mFeatureNameToEnumMap.cend()) {
-            return kFeatureNameAndInfoList[static_cast<size_t>(iter->second)].feature;
-        }
-
-        // TODO(dawn:550): Remove this fallback logic when Chromium is updated.
-        constexpr std::array<std::pair<const char*, const char*>, 6>
-            kReplacementsForDeprecatedNames = {{
-                {"texture_compression_bc", "texture-compression-bc"},
-                {"depth_clamping", "depth-clamping"},
-                {"pipeline_statistics_query", "pipeline-statistics-query"},
-                {"shader_float16", "shader-float16"},
-                {"timestamp_query", "timestamp-query"},
-                {"multiplanar_formats", "multiplanar-formats"},
-            }};
-        for (const auto& [name, replacement] : kReplacementsForDeprecatedNames) {
-            if (strcmp(featureName, name) == 0) {
-                return FeatureNameToEnum(replacement);
-            }
-        }
-
-        return Feature::InvalidEnum;
-    }
-
-    wgpu::FeatureName FeaturesInfo::FeatureNameToAPIEnum(const char* featureName) const {
-        Feature f = FeatureNameToEnum(featureName);
-        if (f != Feature::InvalidEnum) {
-            return ToAPIFeature(f);
-        }
-        // Pass something invalid.
-        return static_cast<wgpu::FeatureName>(-1);
-    }
+    // Pass something invalid.
+    return static_cast<wgpu::FeatureName>(-1);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Features.h b/src/dawn/native/Features.h
index abd19c1..bb1f015 100644
--- a/src/dawn/native/Features.h
+++ b/src/dawn/native/Features.h
@@ -26,58 +26,58 @@
 
 namespace dawn::native {
 
-    enum class Feature {
-        TextureCompressionBC,
-        TextureCompressionETC2,
-        TextureCompressionASTC,
-        ShaderFloat16,
-        PipelineStatisticsQuery,
-        TimestampQuery,
-        DepthClamping,
-        Depth24UnormStencil8,
-        Depth32FloatStencil8,
+enum class Feature {
+    TextureCompressionBC,
+    TextureCompressionETC2,
+    TextureCompressionASTC,
+    ShaderFloat16,
+    PipelineStatisticsQuery,
+    TimestampQuery,
+    DepthClamping,
+    Depth24UnormStencil8,
+    Depth32FloatStencil8,
 
-        // Dawn-specific
-        DawnInternalUsages,
-        MultiPlanarFormats,
-        DawnNative,
+    // Dawn-specific
+    DawnInternalUsages,
+    MultiPlanarFormats,
+    DawnNative,
 
-        EnumCount,
-        InvalidEnum = EnumCount,
-        FeatureMin = TextureCompressionBC,
-    };
+    EnumCount,
+    InvalidEnum = EnumCount,
+    FeatureMin = TextureCompressionBC,
+};
 
-    // A wrapper of the bitset to store if an feature is enabled or not. This wrapper provides the
-    // convenience to convert the enums of enum class Feature to the indices of a bitset.
-    struct FeaturesSet {
-        std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
+// A wrapper of the bitset to store whether a feature is enabled or not. This wrapper provides
+// the convenience of converting the enums of enum class Feature to the indices of a bitset.
+struct FeaturesSet {
+    std::bitset<static_cast<size_t>(Feature::EnumCount)> featuresBitSet;
 
-        void EnableFeature(Feature feature);
-        void EnableFeature(wgpu::FeatureName feature);
-        bool IsEnabled(Feature feature) const;
-        bool IsEnabled(wgpu::FeatureName feature) const;
-        // Returns |count|, the number of features. Writes out all |count| values if |features| is
-        // non-null.
-        size_t EnumerateFeatures(wgpu::FeatureName* features) const;
-        std::vector<const char*> GetEnabledFeatureNames() const;
-        void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
-    };
+    void EnableFeature(Feature feature);
+    void EnableFeature(wgpu::FeatureName feature);
+    bool IsEnabled(Feature feature) const;
+    bool IsEnabled(wgpu::FeatureName feature) const;
+    // Returns |count|, the number of features. Writes out all |count| values if |features| is
+    // non-null.
+    size_t EnumerateFeatures(wgpu::FeatureName* features) const;
+    std::vector<const char*> GetEnabledFeatureNames() const;
+    void InitializeDeviceProperties(WGPUDeviceProperties* properties) const;
+};
 
-    wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature);
+wgpu::FeatureName FeatureEnumToAPIFeature(Feature feature);
 
-    class FeaturesInfo {
-      public:
-        FeaturesInfo();
+class FeaturesInfo {
+  public:
+    FeaturesInfo();
 
-        // Used to query the details of an feature. Return nullptr if featureName is not a valid
-        // name of an feature supported in Dawn
-        const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature) const;
-        Feature FeatureNameToEnum(const char* featureName) const;
-        wgpu::FeatureName FeatureNameToAPIEnum(const char* featureName) const;
+    // Used to query the details of a feature. Returns nullptr if featureName is not a valid
+    // name of a feature supported in Dawn.
+    const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature) const;
+    Feature FeatureNameToEnum(const char* featureName) const;
+    wgpu::FeatureName FeatureNameToAPIEnum(const char* featureName) const;
 
-      private:
-        std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
-    };
+  private:
+    std::unordered_map<std::string, Feature> mFeatureNameToEnumMap;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Format.cpp b/src/dawn/native/Format.cpp
index 1fc6a1a..f1b4cb8 100644
--- a/src/dawn/native/Format.cpp
+++ b/src/dawn/native/Format.cpp
@@ -23,304 +23,302 @@
 
 namespace dawn::native {
 
-    // Format
+// Format
 
-    // TODO(dawn:527): Remove when unused.
-    SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type) {
-        switch (type) {
-            case wgpu::TextureComponentType::Float:
-                return SampleTypeBit::Float;
-            case wgpu::TextureComponentType::Sint:
-                return SampleTypeBit::Sint;
-            case wgpu::TextureComponentType::Uint:
-                return SampleTypeBit::Uint;
-            case wgpu::TextureComponentType::DepthComparison:
-                return SampleTypeBit::Depth;
-        }
-        UNREACHABLE();
+// TODO(dawn:527): Remove when unused.
+SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type) {
+    switch (type) {
+        case wgpu::TextureComponentType::Float:
+            return SampleTypeBit::Float;
+        case wgpu::TextureComponentType::Sint:
+            return SampleTypeBit::Sint;
+        case wgpu::TextureComponentType::Uint:
+            return SampleTypeBit::Uint;
+        case wgpu::TextureComponentType::DepthComparison:
+            return SampleTypeBit::Depth;
+    }
+    UNREACHABLE();
+}
+
+SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
+    switch (sampleType) {
+        case wgpu::TextureSampleType::Float:
+        case wgpu::TextureSampleType::UnfilterableFloat:
+        case wgpu::TextureSampleType::Sint:
+        case wgpu::TextureSampleType::Uint:
+        case wgpu::TextureSampleType::Depth:
+        case wgpu::TextureSampleType::Undefined:
+            // When the compiler complains that you need to add a case statement here, please
+            // also add a corresponding static assert below!
+            break;
     }
 
-    SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType) {
-        switch (sampleType) {
-            case wgpu::TextureSampleType::Float:
-            case wgpu::TextureSampleType::UnfilterableFloat:
-            case wgpu::TextureSampleType::Sint:
-            case wgpu::TextureSampleType::Uint:
-            case wgpu::TextureSampleType::Depth:
-            case wgpu::TextureSampleType::Undefined:
-                // When the compiler complains that you need to add a case statement here, please
-                // also add a corresponding static assert below!
-                break;
-        }
-
-        static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0);
-        if (sampleType == wgpu::TextureSampleType::Undefined) {
-            return SampleTypeBit::None;
-        }
-
-        // Check that SampleTypeBit bits are in the same position / order as the respective
-        // wgpu::TextureSampleType value.
-        static_assert(SampleTypeBit::Float ==
-                      static_cast<SampleTypeBit>(
-                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)));
-        static_assert(
-            SampleTypeBit::UnfilterableFloat ==
-            static_cast<SampleTypeBit>(
-                1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)));
-        static_assert(SampleTypeBit::Uint ==
-                      static_cast<SampleTypeBit>(
-                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)));
-        static_assert(SampleTypeBit::Sint ==
-                      static_cast<SampleTypeBit>(
-                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)));
-        static_assert(SampleTypeBit::Depth ==
-                      static_cast<SampleTypeBit>(
-                          1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)));
-        return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
+    static_assert(static_cast<uint32_t>(wgpu::TextureSampleType::Undefined) == 0);
+    if (sampleType == wgpu::TextureSampleType::Undefined) {
+        return SampleTypeBit::None;
     }
 
-    bool Format::IsColor() const {
-        return aspects == Aspect::Color;
-    }
+    // Check that SampleTypeBit bits are in the same position / order as the respective
+    // wgpu::TextureSampleType value.
+    static_assert(SampleTypeBit::Float ==
+                  static_cast<SampleTypeBit>(
+                      1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Float) - 1)));
+    static_assert(
+        SampleTypeBit::UnfilterableFloat ==
+        static_cast<SampleTypeBit>(
+            1 << (static_cast<uint32_t>(wgpu::TextureSampleType::UnfilterableFloat) - 1)));
+    static_assert(SampleTypeBit::Uint ==
+                  static_cast<SampleTypeBit>(
+                      1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Uint) - 1)));
+    static_assert(SampleTypeBit::Sint ==
+                  static_cast<SampleTypeBit>(
+                      1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Sint) - 1)));
+    static_assert(SampleTypeBit::Depth ==
+                  static_cast<SampleTypeBit>(
+                      1 << (static_cast<uint32_t>(wgpu::TextureSampleType::Depth) - 1)));
+    return static_cast<SampleTypeBit>(1 << (static_cast<uint32_t>(sampleType) - 1));
+}
 
-    bool Format::HasDepth() const {
-        return (aspects & Aspect::Depth) != 0;
-    }
+bool Format::IsColor() const {
+    return aspects == Aspect::Color;
+}
 
-    bool Format::HasStencil() const {
-        return (aspects & Aspect::Stencil) != 0;
-    }
+bool Format::HasDepth() const {
+    return (aspects & Aspect::Depth) != 0;
+}
 
-    bool Format::HasDepthOrStencil() const {
-        return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
-    }
+bool Format::HasStencil() const {
+    return (aspects & Aspect::Stencil) != 0;
+}
 
-    bool Format::IsMultiPlanar() const {
-        return (aspects & (Aspect::Plane0 | Aspect::Plane1)) != 0;
-    }
+bool Format::HasDepthOrStencil() const {
+    return (aspects & (Aspect::Depth | Aspect::Stencil)) != 0;
+}
 
-    bool Format::CopyCompatibleWith(const Format& format) const {
-        // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
-        return baseFormat == format.baseFormat;
-    }
+bool Format::IsMultiPlanar() const {
+    return (aspects & (Aspect::Plane0 | Aspect::Plane1)) != 0;
+}
 
-    bool Format::ViewCompatibleWith(const Format& format) const {
-        // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
-        return baseFormat == format.baseFormat;
-    }
+bool Format::CopyCompatibleWith(const Format& format) const {
+    // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
+    return baseFormat == format.baseFormat;
+}
 
-    const AspectInfo& Format::GetAspectInfo(wgpu::TextureAspect aspect) const {
-        return GetAspectInfo(SelectFormatAspects(*this, aspect));
-    }
+bool Format::ViewCompatibleWith(const Format& format) const {
+    // TODO(crbug.com/dawn/1332): Add a Format compatibility matrix.
+    return baseFormat == format.baseFormat;
+}
 
-    const AspectInfo& Format::GetAspectInfo(Aspect aspect) const {
-        ASSERT(HasOneBit(aspect));
-        ASSERT(aspects & aspect);
-        const size_t aspectIndex = GetAspectIndex(aspect);
-        ASSERT(aspectIndex < GetAspectCount(aspects));
-        return aspectInfo[aspectIndex];
-    }
+const AspectInfo& Format::GetAspectInfo(wgpu::TextureAspect aspect) const {
+    return GetAspectInfo(SelectFormatAspects(*this, aspect));
+}
 
-    FormatIndex Format::GetIndex() const {
-        return ComputeFormatIndex(format);
-    }
+const AspectInfo& Format::GetAspectInfo(Aspect aspect) const {
+    ASSERT(HasOneBit(aspect));
+    ASSERT(aspects & aspect);
+    const size_t aspectIndex = GetAspectIndex(aspect);
+    ASSERT(aspectIndex < GetAspectCount(aspects));
+    return aspectInfo[aspectIndex];
+}
 
-    // FormatSet implementation
+FormatIndex Format::GetIndex() const {
+    return ComputeFormatIndex(format);
+}
 
-    bool FormatSet::operator[](const Format& format) const {
-        return Base::operator[](format.GetIndex());
-    }
+// FormatSet implementation
 
-    typename std::bitset<kKnownFormatCount>::reference FormatSet::operator[](const Format& format) {
-        return Base::operator[](format.GetIndex());
-    }
+bool FormatSet::operator[](const Format& format) const {
+    return Base::operator[](format.GetIndex());
+}
 
-    // Implementation details of the format table of the DeviceBase
+typename std::bitset<kKnownFormatCount>::reference FormatSet::operator[](const Format& format) {
+    return Base::operator[](format.GetIndex());
+}
 
-    // For the enum for formats are packed but this might change when we have a broader feature
-    // mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
-    FormatIndex ComputeFormatIndex(wgpu::TextureFormat format) {
-        // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
-        // of the range of the FormatTable.
-        static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 >
-                      kKnownFormatCount);
-        return static_cast<FormatIndex>(static_cast<uint32_t>(format) - 1);
-    }
+// Implementation details of the format table of the DeviceBase
 
-    FormatTable BuildFormatTable(const DeviceBase* device) {
-        FormatTable table;
-        FormatSet formatsSet;
+// For the enum for formats are packed but this might change when we have a broader feature
+// mechanism for webgpu.h. Formats start at 1 because 0 is the undefined format.
+FormatIndex ComputeFormatIndex(wgpu::TextureFormat format) {
+    // This takes advantage of overflows to make the index of TextureFormat::Undefined outside
+    // of the range of the FormatTable.
+    static_assert(static_cast<uint32_t>(wgpu::TextureFormat::Undefined) - 1 > kKnownFormatCount);
+    return static_cast<FormatIndex>(static_cast<uint32_t>(format) - 1);
+}
 
-        static constexpr SampleTypeBit kAnyFloat =
-            SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+FormatTable BuildFormatTable(const DeviceBase* device) {
+    FormatTable table;
+    FormatSet formatsSet;
 
-        auto AddFormat = [&table, &formatsSet](Format format) {
-            FormatIndex index = ComputeFormatIndex(format.format);
-            ASSERT(index < table.size());
+    static constexpr SampleTypeBit kAnyFloat =
+        SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
 
-            // This checks that each format is set at most once, the first part of checking that all
-            // formats are set exactly once.
-            ASSERT(!formatsSet[index]);
+    auto AddFormat = [&table, &formatsSet](Format format) {
+        FormatIndex index = ComputeFormatIndex(format.format);
+        ASSERT(index < table.size());
 
-            // Vulkan describes bytesPerRow in units of texels. If there's any format for which this
-            // ASSERT isn't true, then additional validation on bytesPerRow must be added.
-            const bool hasMultipleAspects = !HasOneBit(format.aspects);
-            ASSERT(hasMultipleAspects ||
-                   (kTextureBytesPerRowAlignment % format.aspectInfo[0].block.byteSize) == 0);
+        // This checks that each format is set at most once, the first part of checking that all
+        // formats are set exactly once.
+        ASSERT(!formatsSet[index]);
 
-            table[index] = format;
-            formatsSet.set(index);
-        };
+        // Vulkan describes bytesPerRow in units of texels. If there's any format for which this
+        // ASSERT isn't true, then additional validation on bytesPerRow must be added.
+        const bool hasMultipleAspects = !HasOneBit(format.aspects);
+        ASSERT(hasMultipleAspects ||
+               (kTextureBytesPerRowAlignment % format.aspectInfo[0].block.byteSize) == 0);
 
-        auto AddColorFormat =
-            [&AddFormat](wgpu::TextureFormat format, bool renderable, bool supportsStorageUsage,
-                         bool supportsMultisample, bool supportsResolveTarget, uint32_t byteSize,
-                         SampleTypeBit sampleTypes, uint8_t componentCount,
-                         wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
-                Format internalFormat;
-                internalFormat.format = format;
-                internalFormat.isRenderable = renderable;
-                internalFormat.isCompressed = false;
-                internalFormat.isSupported = true;
-                internalFormat.supportsStorageUsage = supportsStorageUsage;
+        table[index] = format;
+        formatsSet.set(index);
+    };
 
-                if (supportsMultisample) {
-                    ASSERT(renderable);
-                }
-                internalFormat.supportsMultisample = supportsMultisample;
-                internalFormat.supportsResolveTarget = supportsResolveTarget;
-                internalFormat.aspects = Aspect::Color;
-                internalFormat.componentCount = componentCount;
-
-                // Default baseFormat of each color formats should be themselves.
-                if (baseFormat == wgpu::TextureFormat::Undefined) {
-                    internalFormat.baseFormat = format;
-                } else {
-                    internalFormat.baseFormat = baseFormat;
-                }
-
-                AspectInfo* firstAspect = internalFormat.aspectInfo.data();
-                firstAspect->block.byteSize = byteSize;
-                firstAspect->block.width = 1;
-                firstAspect->block.height = 1;
-                if (HasOneBit(sampleTypes)) {
-                    switch (sampleTypes) {
-                        case SampleTypeBit::Float:
-                        case SampleTypeBit::UnfilterableFloat:
-                            firstAspect->baseType = wgpu::TextureComponentType::Float;
-                            break;
-                        case SampleTypeBit::Sint:
-                            firstAspect->baseType = wgpu::TextureComponentType::Sint;
-                            break;
-                        case SampleTypeBit::Uint:
-                            firstAspect->baseType = wgpu::TextureComponentType::Uint;
-                            break;
-                        default:
-                            UNREACHABLE();
-                    }
-                } else {
-                    ASSERT((sampleTypes & SampleTypeBit::Float) != 0);
-                    firstAspect->baseType = wgpu::TextureComponentType::Float;
-                }
-                firstAspect->supportedSampleTypes = sampleTypes;
-                firstAspect->format = format;
-                AddFormat(internalFormat);
-            };
-
-        auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
-                                           bool isSupported) {
+    auto AddColorFormat =
+        [&AddFormat](wgpu::TextureFormat format, bool renderable, bool supportsStorageUsage,
+                     bool supportsMultisample, bool supportsResolveTarget, uint32_t byteSize,
+                     SampleTypeBit sampleTypes, uint8_t componentCount,
+                     wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
             Format internalFormat;
             internalFormat.format = format;
-            internalFormat.baseFormat = format;
-            internalFormat.isRenderable = true;
+            internalFormat.isRenderable = renderable;
             internalFormat.isCompressed = false;
-            internalFormat.isSupported = isSupported;
-            internalFormat.supportsStorageUsage = false;
-            internalFormat.supportsMultisample = true;
-            internalFormat.supportsResolveTarget = false;
-            internalFormat.aspects = Aspect::Depth;
-            internalFormat.componentCount = 1;
+            internalFormat.isSupported = true;
+            internalFormat.supportsStorageUsage = supportsStorageUsage;
+
+            if (supportsMultisample) {
+                ASSERT(renderable);
+            }
+            internalFormat.supportsMultisample = supportsMultisample;
+            internalFormat.supportsResolveTarget = supportsResolveTarget;
+            internalFormat.aspects = Aspect::Color;
+            internalFormat.componentCount = componentCount;
+
+            // Default baseFormat of each color formats should be themselves.
+            if (baseFormat == wgpu::TextureFormat::Undefined) {
+                internalFormat.baseFormat = format;
+            } else {
+                internalFormat.baseFormat = baseFormat;
+            }
 
             AspectInfo* firstAspect = internalFormat.aspectInfo.data();
             firstAspect->block.byteSize = byteSize;
             firstAspect->block.width = 1;
             firstAspect->block.height = 1;
-            firstAspect->baseType = wgpu::TextureComponentType::Float;
-            firstAspect->supportedSampleTypes = SampleTypeBit::Depth;
+            if (HasOneBit(sampleTypes)) {
+                switch (sampleTypes) {
+                    case SampleTypeBit::Float:
+                    case SampleTypeBit::UnfilterableFloat:
+                        firstAspect->baseType = wgpu::TextureComponentType::Float;
+                        break;
+                    case SampleTypeBit::Sint:
+                        firstAspect->baseType = wgpu::TextureComponentType::Sint;
+                        break;
+                    case SampleTypeBit::Uint:
+                        firstAspect->baseType = wgpu::TextureComponentType::Uint;
+                        break;
+                    default:
+                        UNREACHABLE();
+                }
+            } else {
+                ASSERT((sampleTypes & SampleTypeBit::Float) != 0);
+                firstAspect->baseType = wgpu::TextureComponentType::Float;
+            }
+            firstAspect->supportedSampleTypes = sampleTypes;
             firstAspect->format = format;
             AddFormat(internalFormat);
         };
 
-        auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported) {
+    auto AddDepthFormat = [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize,
+                                       bool isSupported) {
+        Format internalFormat;
+        internalFormat.format = format;
+        internalFormat.baseFormat = format;
+        internalFormat.isRenderable = true;
+        internalFormat.isCompressed = false;
+        internalFormat.isSupported = isSupported;
+        internalFormat.supportsStorageUsage = false;
+        internalFormat.supportsMultisample = true;
+        internalFormat.supportsResolveTarget = false;
+        internalFormat.aspects = Aspect::Depth;
+        internalFormat.componentCount = 1;
+
+        AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+        firstAspect->block.byteSize = byteSize;
+        firstAspect->block.width = 1;
+        firstAspect->block.height = 1;
+        firstAspect->baseType = wgpu::TextureComponentType::Float;
+        firstAspect->supportedSampleTypes = SampleTypeBit::Depth;
+        firstAspect->format = format;
+        AddFormat(internalFormat);
+    };
+
+    auto AddStencilFormat = [&AddFormat](wgpu::TextureFormat format, bool isSupported) {
+        Format internalFormat;
+        internalFormat.format = format;
+        internalFormat.baseFormat = format;
+        internalFormat.isRenderable = true;
+        internalFormat.isCompressed = false;
+        internalFormat.isSupported = isSupported;
+        internalFormat.supportsStorageUsage = false;
+        internalFormat.supportsMultisample = true;
+        internalFormat.supportsResolveTarget = false;
+        internalFormat.aspects = Aspect::Stencil;
+        internalFormat.componentCount = 1;
+
+        // Duplicate the data for the stencil aspect in both the first and second aspect info.
+        //  - aspectInfo[0] is used by AddMultiAspectFormat to copy the info for the whole
+        //    stencil8 aspect of depth-stencil8 formats.
+        //  - aspectInfo[1] is the actual info used in the rest of Dawn since
+        //    GetAspectIndex(Aspect::Stencil) is 1.
+        ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
+
+        internalFormat.aspectInfo[0].block.byteSize = 1;
+        internalFormat.aspectInfo[0].block.width = 1;
+        internalFormat.aspectInfo[0].block.height = 1;
+        internalFormat.aspectInfo[0].baseType = wgpu::TextureComponentType::Uint;
+        internalFormat.aspectInfo[0].supportedSampleTypes = SampleTypeBit::Uint;
+        internalFormat.aspectInfo[0].format = format;
+
+        internalFormat.aspectInfo[1] = internalFormat.aspectInfo[0];
+
+        AddFormat(internalFormat);
+    };
+
+    auto AddCompressedFormat =
+        [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width, uint32_t height,
+                     bool isSupported, uint8_t componentCount,
+                     wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
             Format internalFormat;
             internalFormat.format = format;
-            internalFormat.baseFormat = format;
-            internalFormat.isRenderable = true;
-            internalFormat.isCompressed = false;
+            internalFormat.isRenderable = false;
+            internalFormat.isCompressed = true;
             internalFormat.isSupported = isSupported;
             internalFormat.supportsStorageUsage = false;
-            internalFormat.supportsMultisample = true;
+            internalFormat.supportsMultisample = false;
             internalFormat.supportsResolveTarget = false;
-            internalFormat.aspects = Aspect::Stencil;
-            internalFormat.componentCount = 1;
+            internalFormat.aspects = Aspect::Color;
+            internalFormat.componentCount = componentCount;
 
-            // Duplicate the data for the stencil aspect in both the first and second aspect info.
-            //  - aspectInfo[0] is used by AddMultiAspectFormat to copy the info for the whole
-            //    stencil8 aspect of depth-stencil8 formats.
-            //  - aspectInfo[1] is the actual info used in the rest of Dawn since
-            //    GetAspectIndex(Aspect::Stencil) is 1.
-            ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
+            // Default baseFormat of each compressed formats should be themselves.
+            if (baseFormat == wgpu::TextureFormat::Undefined) {
+                internalFormat.baseFormat = format;
+            } else {
+                internalFormat.baseFormat = baseFormat;
+            }
 
-            internalFormat.aspectInfo[0].block.byteSize = 1;
-            internalFormat.aspectInfo[0].block.width = 1;
-            internalFormat.aspectInfo[0].block.height = 1;
-            internalFormat.aspectInfo[0].baseType = wgpu::TextureComponentType::Uint;
-            internalFormat.aspectInfo[0].supportedSampleTypes = SampleTypeBit::Uint;
-            internalFormat.aspectInfo[0].format = format;
-
-            internalFormat.aspectInfo[1] = internalFormat.aspectInfo[0];
-
+            AspectInfo* firstAspect = internalFormat.aspectInfo.data();
+            firstAspect->block.byteSize = byteSize;
+            firstAspect->block.width = width;
+            firstAspect->block.height = height;
+            firstAspect->baseType = wgpu::TextureComponentType::Float;
+            firstAspect->supportedSampleTypes = kAnyFloat;
+            firstAspect->format = format;
             AddFormat(internalFormat);
         };
 
-        auto AddCompressedFormat =
-            [&AddFormat](wgpu::TextureFormat format, uint32_t byteSize, uint32_t width,
-                         uint32_t height, bool isSupported, uint8_t componentCount,
-                         wgpu::TextureFormat baseFormat = wgpu::TextureFormat::Undefined) {
-                Format internalFormat;
-                internalFormat.format = format;
-                internalFormat.isRenderable = false;
-                internalFormat.isCompressed = true;
-                internalFormat.isSupported = isSupported;
-                internalFormat.supportsStorageUsage = false;
-                internalFormat.supportsMultisample = false;
-                internalFormat.supportsResolveTarget = false;
-                internalFormat.aspects = Aspect::Color;
-                internalFormat.componentCount = componentCount;
-
-                // Default baseFormat of each compressed formats should be themselves.
-                if (baseFormat == wgpu::TextureFormat::Undefined) {
-                    internalFormat.baseFormat = format;
-                } else {
-                    internalFormat.baseFormat = baseFormat;
-                }
-
-                AspectInfo* firstAspect = internalFormat.aspectInfo.data();
-                firstAspect->block.byteSize = byteSize;
-                firstAspect->block.width = width;
-                firstAspect->block.height = height;
-                firstAspect->baseType = wgpu::TextureComponentType::Float;
-                firstAspect->supportedSampleTypes = kAnyFloat;
-                firstAspect->format = format;
-                AddFormat(internalFormat);
-            };
-
-        auto AddMultiAspectFormat = [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
-                                                         wgpu::TextureFormat firstFormat,
-                                                         wgpu::TextureFormat secondFormat,
-                                                         bool isRenderable, bool isSupported,
-                                                         bool supportsMultisample,
-                                                         uint8_t componentCount) {
+    auto AddMultiAspectFormat =
+        [&AddFormat, &table](wgpu::TextureFormat format, Aspect aspects,
+                             wgpu::TextureFormat firstFormat, wgpu::TextureFormat secondFormat,
+                             bool isRenderable, bool isSupported, bool supportsMultisample,
+                             uint8_t componentCount) {
             Format internalFormat;
             internalFormat.format = format;
             internalFormat.baseFormat = format;
@@ -348,7 +346,7 @@
             AddFormat(internalFormat);
         };
 
-        // clang-format off
+    // clang-format off
         // 1 byte color formats
         AddColorFormat(wgpu::TextureFormat::R8Unorm, true, false, true, true, 1, kAnyFloat, 1);
         AddColorFormat(wgpu::TextureFormat::R8Snorm, false, false, false, false, 1, kAnyFloat, 1);
@@ -479,13 +477,13 @@
         AddMultiAspectFormat(wgpu::TextureFormat::R8BG8Biplanar420Unorm, Aspect::Plane0 | Aspect::Plane1,
             wgpu::TextureFormat::R8Unorm, wgpu::TextureFormat::RG8Unorm, false, isMultiPlanarFormatSupported, false, 3);
 
-        // clang-format on
+    // clang-format on
 
-        // This checks that each format is set at least once, the second part of checking that all
-        // formats are checked exactly once.
-        ASSERT(formatsSet.all());
+    // This checks that each format is set at least once, the second part of checking that all
+    // formats are checked exactly once.
+    ASSERT(formatsSet.all());
 
-        return table;
-    }
+    return table;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Format.h b/src/dawn/native/Format.h
index 40a770a..b48e998 100644
--- a/src/dawn/native/Format.h
+++ b/src/dawn/native/Format.h
@@ -44,129 +44,129 @@
 
 namespace dawn::native {
 
-    enum class Aspect : uint8_t;
-    class DeviceBase;
+enum class Aspect : uint8_t;
+class DeviceBase;
 
-    // This mirrors wgpu::TextureSampleType as a bitmask instead.
-    enum class SampleTypeBit : uint8_t {
-        None = 0x0,
-        Float = 0x1,
-        UnfilterableFloat = 0x2,
-        Depth = 0x4,
-        Sint = 0x8,
-        Uint = 0x10,
-    };
+// This mirrors wgpu::TextureSampleType as a bitmask instead.
+enum class SampleTypeBit : uint8_t {
+    None = 0x0,
+    Float = 0x1,
+    UnfilterableFloat = 0x2,
+    Depth = 0x4,
+    Sint = 0x8,
+    Uint = 0x10,
+};
 
-    // Converts an wgpu::TextureComponentType to its bitmask representation.
-    SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type);
-    // Converts an wgpu::TextureSampleType to its bitmask representation.
-    SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType);
+// Converts an wgpu::TextureComponentType to its bitmask representation.
+SampleTypeBit ToSampleTypeBit(wgpu::TextureComponentType type);
+// Converts an wgpu::TextureSampleType to its bitmask representation.
+SampleTypeBit SampleTypeToSampleTypeBit(wgpu::TextureSampleType sampleType);
 
-    struct TexelBlockInfo {
-        uint32_t byteSize;
-        uint32_t width;
-        uint32_t height;
-    };
+struct TexelBlockInfo {
+    uint32_t byteSize;
+    uint32_t width;
+    uint32_t height;
+};
 
-    struct AspectInfo {
-        TexelBlockInfo block;
-        // TODO(crbug.com/dawn/367): Replace TextureComponentType with TextureSampleType, or make it
-        // an internal Dawn enum.
-        wgpu::TextureComponentType baseType;
-        SampleTypeBit supportedSampleTypes;
-        wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
-    };
+struct AspectInfo {
+    TexelBlockInfo block;
+    // TODO(crbug.com/dawn/367): Replace TextureComponentType with TextureSampleType, or make it
+    // an internal Dawn enum.
+    wgpu::TextureComponentType baseType;
+    SampleTypeBit supportedSampleTypes;
+    wgpu::TextureFormat format = wgpu::TextureFormat::Undefined;
+};
 
-    // The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
-    // exact number of known format.
-    static constexpr uint32_t kKnownFormatCount = 96;
+// The number of formats Dawn knows about. Asserts in BuildFormatTable ensure that this is the
+// exact number of known format.
+static constexpr uint32_t kKnownFormatCount = 96;
 
-    using FormatIndex = TypedInteger<struct FormatIndexT, uint32_t>;
+using FormatIndex = TypedInteger<struct FormatIndexT, uint32_t>;
 
-    struct Format;
-    using FormatTable = ityp::array<FormatIndex, Format, kKnownFormatCount>;
+struct Format;
+using FormatTable = ityp::array<FormatIndex, Format, kKnownFormatCount>;
 
-    // A wgpu::TextureFormat along with all the information about it necessary for validation.
-    struct Format {
-        wgpu::TextureFormat format;
+// A wgpu::TextureFormat along with all the information about it necessary for validation.
+struct Format {
+    wgpu::TextureFormat format;
 
-        // TODO(crbug.com/dawn/1332): These members could be stored in a Format capability matrix.
-        bool isRenderable;
-        bool isCompressed;
-        // A format can be known but not supported because it is part of a disabled extension.
-        bool isSupported;
-        bool supportsStorageUsage;
-        bool supportsMultisample;
-        bool supportsResolveTarget;
-        Aspect aspects;
-        // Only used for renderable color formats, number of color channels.
-        uint8_t componentCount;
+    // TODO(crbug.com/dawn/1332): These members could be stored in a Format capability matrix.
+    bool isRenderable;
+    bool isCompressed;
+    // A format can be known but not supported because it is part of a disabled extension.
+    bool isSupported;
+    bool supportsStorageUsage;
+    bool supportsMultisample;
+    bool supportsResolveTarget;
+    Aspect aspects;
+    // Only used for renderable color formats, number of color channels.
+    uint8_t componentCount;
 
-        bool IsColor() const;
-        bool HasDepth() const;
-        bool HasStencil() const;
-        bool HasDepthOrStencil() const;
+    bool IsColor() const;
+    bool HasDepth() const;
+    bool HasStencil() const;
+    bool HasDepthOrStencil() const;
 
-        // IsMultiPlanar() returns true if the format allows selecting a plane index. This is only
-        // allowed by multi-planar formats (ex. NV12).
-        bool IsMultiPlanar() const;
+    // IsMultiPlanar() returns true if the format allows selecting a plane index. This is only
+    // allowed by multi-planar formats (ex. NV12).
+    bool IsMultiPlanar() const;
 
-        const AspectInfo& GetAspectInfo(wgpu::TextureAspect aspect) const;
-        const AspectInfo& GetAspectInfo(Aspect aspect) const;
+    const AspectInfo& GetAspectInfo(wgpu::TextureAspect aspect) const;
+    const AspectInfo& GetAspectInfo(Aspect aspect) const;
 
-        // The index of the format in the list of all known formats: a unique number for each format
-        // in [0, kKnownFormatCount)
-        FormatIndex GetIndex() const;
+    // The index of the format in the list of all known formats: a unique number for each format
+    // in [0, kKnownFormatCount)
+    FormatIndex GetIndex() const;
 
-        // baseFormat represents the memory layout of the format.
-        // If two formats has the same baseFormat, they could copy to and be viewed as the other
-        // format. Currently two formats have the same baseFormat if they differ only in sRGB-ness.
-        wgpu::TextureFormat baseFormat;
+    // baseFormat represents the memory layout of the format.
+// If two formats have the same baseFormat, they could copy to and be viewed as the other
+    // format. Currently two formats have the same baseFormat if they differ only in sRGB-ness.
+    wgpu::TextureFormat baseFormat;
 
-        // Returns true if the formats are copy compatible.
-        // Currently means they differ only in sRGB-ness.
-        bool CopyCompatibleWith(const Format& format) const;
+    // Returns true if the formats are copy compatible.
+    // Currently means they differ only in sRGB-ness.
+    bool CopyCompatibleWith(const Format& format) const;
 
-        // Returns true if the formats are texture view format compatible.
-        // Currently means they differ only in sRGB-ness.
-        bool ViewCompatibleWith(const Format& format) const;
+    // Returns true if the formats are texture view format compatible.
+    // Currently means they differ only in sRGB-ness.
+    bool ViewCompatibleWith(const Format& format) const;
 
-      private:
-        // Used to store the aspectInfo for one or more planes. For single plane "color" formats,
-        // only the first aspect info or aspectInfo[0] is valid. For depth-stencil, the first aspect
-        // info is depth and the second aspect info is stencil. For multi-planar formats,
-        // aspectInfo[i] is the ith plane.
-        std::array<AspectInfo, kMaxPlanesPerFormat> aspectInfo;
+  private:
+    // Used to store the aspectInfo for one or more planes. For single plane "color" formats,
+    // only the first aspect info or aspectInfo[0] is valid. For depth-stencil, the first aspect
+    // info is depth and the second aspect info is stencil. For multi-planar formats,
+    // aspectInfo[i] is the ith plane.
+    std::array<AspectInfo, kMaxPlanesPerFormat> aspectInfo;
 
-        friend FormatTable BuildFormatTable(const DeviceBase* device);
-    };
+    friend FormatTable BuildFormatTable(const DeviceBase* device);
+};
 
-    class FormatSet : public ityp::bitset<FormatIndex, kKnownFormatCount> {
-        using Base = ityp::bitset<FormatIndex, kKnownFormatCount>;
+class FormatSet : public ityp::bitset<FormatIndex, kKnownFormatCount> {
+    using Base = ityp::bitset<FormatIndex, kKnownFormatCount>;
 
-      public:
-        using Base::Base;
-        using Base::operator[];
+  public:
+    using Base::Base;
+    using Base::operator[];
 
-        bool operator[](const Format& format) const;
-        typename Base::reference operator[](const Format& format);
-    };
+    bool operator[](const Format& format) const;
+    typename Base::reference operator[](const Format& format);
+};
 
-    // Implementation details of the format table in the device.
+// Implementation details of the format table in the device.
 
-    // Returns the index of a format in the FormatTable.
-    FormatIndex ComputeFormatIndex(wgpu::TextureFormat format);
-    // Builds the format table with the extensions enabled on the device.
-    FormatTable BuildFormatTable(const DeviceBase* device);
+// Returns the index of a format in the FormatTable.
+FormatIndex ComputeFormatIndex(wgpu::TextureFormat format);
+// Builds the format table with the extensions enabled on the device.
+FormatTable BuildFormatTable(const DeviceBase* device);
 
 }  // namespace dawn::native
 
 namespace dawn {
 
-    template <>
-    struct IsDawnBitmask<dawn::native::SampleTypeBit> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<dawn::native::SampleTypeBit> {
+    static constexpr bool enable = true;
+};
 
 }  // namespace dawn
 
diff --git a/src/dawn/native/Forward.h b/src/dawn/native/Forward.h
index 80125a4..34e6b22 100644
--- a/src/dawn/native/Forward.h
+++ b/src/dawn/native/Forward.h
@@ -22,49 +22,49 @@
 
 namespace dawn::native {
 
-    enum class ObjectType : uint32_t;
+enum class ObjectType : uint32_t;
 
-    class AdapterBase;
-    class BindGroupBase;
-    class BindGroupLayoutBase;
-    class BufferBase;
-    class ComputePipelineBase;
-    class CommandBufferBase;
-    class CommandEncoder;
-    class ComputePassEncoder;
-    class ExternalTextureBase;
-    class InstanceBase;
-    class PipelineBase;
-    class PipelineLayoutBase;
-    class QuerySetBase;
-    class QueueBase;
-    class RenderBundleBase;
-    class RenderBundleEncoder;
-    class RenderPassEncoder;
-    class RenderPipelineBase;
-    class ResourceHeapBase;
-    class SamplerBase;
-    class Surface;
-    class ShaderModuleBase;
-    class StagingBufferBase;
-    class SwapChainBase;
-    class NewSwapChainBase;
-    class TextureBase;
-    class TextureViewBase;
+class AdapterBase;
+class BindGroupBase;
+class BindGroupLayoutBase;
+class BufferBase;
+class ComputePipelineBase;
+class CommandBufferBase;
+class CommandEncoder;
+class ComputePassEncoder;
+class ExternalTextureBase;
+class InstanceBase;
+class PipelineBase;
+class PipelineLayoutBase;
+class QuerySetBase;
+class QueueBase;
+class RenderBundleBase;
+class RenderBundleEncoder;
+class RenderPassEncoder;
+class RenderPipelineBase;
+class ResourceHeapBase;
+class SamplerBase;
+class Surface;
+class ShaderModuleBase;
+class StagingBufferBase;
+class SwapChainBase;
+class NewSwapChainBase;
+class TextureBase;
+class TextureViewBase;
 
-    class DeviceBase;
+class DeviceBase;
 
-    template <typename T>
-    class PerStage;
+template <typename T>
+class PerStage;
 
-    struct Format;
+struct Format;
 
-    // Aliases for frontend-only types.
-    using CommandEncoderBase = CommandEncoder;
-    using ComputePassEncoderBase = ComputePassEncoder;
-    using RenderBundleEncoderBase = RenderBundleEncoder;
-    using RenderPassEncoderBase = RenderPassEncoder;
-    using SurfaceBase = Surface;
+// Aliases for frontend-only types.
+using CommandEncoderBase = CommandEncoder;
+using ComputePassEncoderBase = ComputePassEncoder;
+using RenderBundleEncoderBase = RenderBundleEncoder;
+using RenderPassEncoderBase = RenderPassEncoder;
+using SurfaceBase = Surface;
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/IndirectDrawMetadata.cpp b/src/dawn/native/IndirectDrawMetadata.cpp
index b022e65..466b677 100644
--- a/src/dawn/native/IndirectDrawMetadata.cpp
+++ b/src/dawn/native/IndirectDrawMetadata.cpp
@@ -25,205 +25,201 @@
 
 namespace dawn::native {
 
-    uint64_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
-        return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
-               kDrawIndexedIndirectSize;
-    }
+uint64_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits) {
+    return limits.v1.maxStorageBufferBindingSize - limits.v1.minStorageBufferOffsetAlignment -
+           kDrawIndexedIndirectSize;
+}
 
-    IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
-        BufferBase* indirectBuffer)
-        : mIndirectBuffer(indirectBuffer) {
-    }
+IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::IndexedIndirectBufferValidationInfo(
+    BufferBase* indirectBuffer)
+    : mIndirectBuffer(indirectBuffer) {}
 
-    void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndirectDraw(
-        uint32_t maxDrawCallsPerIndirectValidationBatch,
-        uint64_t maxBatchOffsetRange,
-        IndirectDraw draw) {
-        const uint64_t newOffset = draw.inputBufferOffset;
-        auto it = mBatches.begin();
-        while (it != mBatches.end()) {
-            IndirectValidationBatch& batch = *it;
-            if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
-                // This batch is full. If its minOffset is to the right of the new offset, we can
-                // just insert a new batch here.
-                if (newOffset < batch.minOffset) {
-                    break;
-                }
-
-                // Otherwise keep looking.
-                ++it;
-                continue;
-            }
-
-            if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
-                batch.draws.push_back(std::move(draw));
-                return;
-            }
-
-            if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
-                // We can extend this batch to the left in order to fit the new offset.
-                batch.minOffset = newOffset;
-                batch.draws.push_back(std::move(draw));
-                return;
-            }
-
-            if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
-                // We can extend this batch to the right in order to fit the new offset.
-                batch.maxOffset = newOffset;
-                batch.draws.push_back(std::move(draw));
-                return;
-            }
-
+void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddIndirectDraw(
+    uint32_t maxDrawCallsPerIndirectValidationBatch,
+    uint64_t maxBatchOffsetRange,
+    IndirectDraw draw) {
+    const uint64_t newOffset = draw.inputBufferOffset;
+    auto it = mBatches.begin();
+    while (it != mBatches.end()) {
+        IndirectValidationBatch& batch = *it;
+        if (batch.draws.size() >= maxDrawCallsPerIndirectValidationBatch) {
+            // This batch is full. If its minOffset is to the right of the new offset, we can
+            // just insert a new batch here.
             if (newOffset < batch.minOffset) {
-                // We want to insert a new batch just before this one.
                 break;
             }
 
+            // Otherwise keep looking.
             ++it;
+            continue;
         }
 
-        IndirectValidationBatch newBatch;
-        newBatch.minOffset = newOffset;
-        newBatch.maxOffset = newOffset;
-        newBatch.draws.push_back(std::move(draw));
-
-        mBatches.insert(it, std::move(newBatch));
-    }
-
-    void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
-        uint32_t maxDrawCallsPerIndirectValidationBatch,
-        uint64_t maxBatchOffsetRange,
-        const IndirectValidationBatch& newBatch) {
-        auto it = mBatches.begin();
-        while (it != mBatches.end()) {
-            IndirectValidationBatch& batch = *it;
-            uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
-            uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
-            if (max - min <= maxBatchOffsetRange && batch.draws.size() + newBatch.draws.size() <=
-                                                        maxDrawCallsPerIndirectValidationBatch) {
-                // This batch fits within the limits of an existing batch. Merge it.
-                batch.minOffset = min;
-                batch.maxOffset = max;
-                batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
-                return;
-            }
-
-            if (newBatch.minOffset < batch.minOffset) {
-                break;
-            }
-
-            ++it;
-        }
-        mBatches.push_back(newBatch);
-    }
-
-    const std::vector<IndirectDrawMetadata::IndirectValidationBatch>&
-    IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
-        return mBatches;
-    }
-
-    IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
-        : mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)),
-          mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)) {
-    }
-
-    IndirectDrawMetadata::~IndirectDrawMetadata() = default;
-
-    IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
-
-    IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
-
-    IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
-    IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
-        return &mIndexedIndirectBufferValidationInfo;
-    }
-
-    void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
-        auto [_, inserted] = mAddedBundles.insert(bundle);
-        if (!inserted) {
+        if (newOffset >= batch.minOffset && newOffset <= batch.maxOffset) {
+            batch.draws.push_back(std::move(draw));
             return;
         }
 
-        for (const auto& [config, validationInfo] :
-             bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
-            auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
-            if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
-                // We already have batches for the same config. Merge the new ones in.
-                for (const IndirectValidationBatch& batch : validationInfo.GetBatches()) {
-                    it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
-                }
-            } else {
-                mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, validationInfo);
+        if (newOffset < batch.minOffset && batch.maxOffset - newOffset <= maxBatchOffsetRange) {
+            // We can extend this batch to the left in order to fit the new offset.
+            batch.minOffset = newOffset;
+            batch.draws.push_back(std::move(draw));
+            return;
+        }
+
+        if (newOffset > batch.maxOffset && newOffset - batch.minOffset <= maxBatchOffsetRange) {
+            // We can extend this batch to the right in order to fit the new offset.
+            batch.maxOffset = newOffset;
+            batch.draws.push_back(std::move(draw));
+            return;
+        }
+
+        if (newOffset < batch.minOffset) {
+            // We want to insert a new batch just before this one.
+            break;
+        }
+
+        ++it;
+    }
+
+    IndirectValidationBatch newBatch;
+    newBatch.minOffset = newOffset;
+    newBatch.maxOffset = newOffset;
+    newBatch.draws.push_back(std::move(draw));
+
+    mBatches.insert(it, std::move(newBatch));
+}
+
+void IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::AddBatch(
+    uint32_t maxDrawCallsPerIndirectValidationBatch,
+    uint64_t maxBatchOffsetRange,
+    const IndirectValidationBatch& newBatch) {
+    auto it = mBatches.begin();
+    while (it != mBatches.end()) {
+        IndirectValidationBatch& batch = *it;
+        uint64_t min = std::min(newBatch.minOffset, batch.minOffset);
+        uint64_t max = std::max(newBatch.maxOffset, batch.maxOffset);
+        if (max - min <= maxBatchOffsetRange &&
+            batch.draws.size() + newBatch.draws.size() <= maxDrawCallsPerIndirectValidationBatch) {
+            // This batch fits within the limits of an existing batch. Merge it.
+            batch.minOffset = min;
+            batch.maxOffset = max;
+            batch.draws.insert(batch.draws.end(), newBatch.draws.begin(), newBatch.draws.end());
+            return;
+        }
+
+        if (newBatch.minOffset < batch.minOffset) {
+            break;
+        }
+
+        ++it;
+    }
+    mBatches.push_back(newBatch);
+}
+
+const std::vector<IndirectDrawMetadata::IndirectValidationBatch>&
+IndirectDrawMetadata::IndexedIndirectBufferValidationInfo::GetBatches() const {
+    return mBatches;
+}
+
+IndirectDrawMetadata::IndirectDrawMetadata(const CombinedLimits& limits)
+    : mMaxBatchOffsetRange(ComputeMaxIndirectValidationBatchOffsetRange(limits)),
+      mMaxDrawCallsPerBatch(ComputeMaxDrawCallsPerIndirectValidationBatch(limits)) {}
+
+IndirectDrawMetadata::~IndirectDrawMetadata() = default;
+
+IndirectDrawMetadata::IndirectDrawMetadata(IndirectDrawMetadata&&) = default;
+
+IndirectDrawMetadata& IndirectDrawMetadata::operator=(IndirectDrawMetadata&&) = default;
+
+IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap*
+IndirectDrawMetadata::GetIndexedIndirectBufferValidationInfo() {
+    return &mIndexedIndirectBufferValidationInfo;
+}
+
+void IndirectDrawMetadata::AddBundle(RenderBundleBase* bundle) {
+    auto [_, inserted] = mAddedBundles.insert(bundle);
+    if (!inserted) {
+        return;
+    }
+
+    for (const auto& [config, validationInfo] :
+         bundle->GetIndirectDrawMetadata().mIndexedIndirectBufferValidationInfo) {
+        auto it = mIndexedIndirectBufferValidationInfo.lower_bound(config);
+        if (it != mIndexedIndirectBufferValidationInfo.end() && it->first == config) {
+            // We already have batches for the same config. Merge the new ones in.
+            for (const IndirectValidationBatch& batch : validationInfo.GetBatches()) {
+                it->second.AddBatch(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, batch);
             }
+        } else {
+            mIndexedIndirectBufferValidationInfo.emplace_hint(it, config, validationInfo);
         }
     }
+}
 
-    void IndirectDrawMetadata::AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
-                                                      uint64_t indexBufferSize,
-                                                      BufferBase* indirectBuffer,
-                                                      uint64_t indirectOffset,
-                                                      bool duplicateBaseVertexInstance,
-                                                      DrawIndexedIndirectCmd* cmd) {
-        uint64_t numIndexBufferElements;
-        switch (indexFormat) {
-            case wgpu::IndexFormat::Uint16:
-                numIndexBufferElements = indexBufferSize / 2;
-                break;
-            case wgpu::IndexFormat::Uint32:
-                numIndexBufferElements = indexBufferSize / 4;
-                break;
-            case wgpu::IndexFormat::Undefined:
-                UNREACHABLE();
-        }
-
-        const IndexedIndirectConfig config = {indirectBuffer, numIndexBufferElements,
-                                              duplicateBaseVertexInstance, DrawType::Indexed};
-        auto it = mIndexedIndirectBufferValidationInfo.find(config);
-        if (it == mIndexedIndirectBufferValidationInfo.end()) {
-            auto result = mIndexedIndirectBufferValidationInfo.emplace(
-                config, IndexedIndirectBufferValidationInfo(indirectBuffer));
-            it = result.first;
-        }
-
-        IndirectDraw draw{};
-        draw.inputBufferOffset = indirectOffset;
-        draw.cmd = cmd;
-        it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
+void IndirectDrawMetadata::AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+                                                  uint64_t indexBufferSize,
+                                                  BufferBase* indirectBuffer,
+                                                  uint64_t indirectOffset,
+                                                  bool duplicateBaseVertexInstance,
+                                                  DrawIndexedIndirectCmd* cmd) {
+    uint64_t numIndexBufferElements;
+    switch (indexFormat) {
+        case wgpu::IndexFormat::Uint16:
+            numIndexBufferElements = indexBufferSize / 2;
+            break;
+        case wgpu::IndexFormat::Uint32:
+            numIndexBufferElements = indexBufferSize / 4;
+            break;
+        case wgpu::IndexFormat::Undefined:
+            UNREACHABLE();
     }
 
-    void IndirectDrawMetadata::AddIndirectDraw(BufferBase* indirectBuffer,
-                                               uint64_t indirectOffset,
-                                               bool duplicateBaseVertexInstance,
-                                               DrawIndirectCmd* cmd) {
-        const IndexedIndirectConfig config = {indirectBuffer, 0, duplicateBaseVertexInstance,
-                                              DrawType::NonIndexed};
-        auto it = mIndexedIndirectBufferValidationInfo.find(config);
-        if (it == mIndexedIndirectBufferValidationInfo.end()) {
-            auto result = mIndexedIndirectBufferValidationInfo.emplace(
-                config, IndexedIndirectBufferValidationInfo(indirectBuffer));
-            it = result.first;
-        }
-
-        IndirectDraw draw{};
-        draw.inputBufferOffset = indirectOffset;
-        draw.cmd = cmd;
-        it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
+    const IndexedIndirectConfig config = {indirectBuffer, numIndexBufferElements,
+                                          duplicateBaseVertexInstance, DrawType::Indexed};
+    auto it = mIndexedIndirectBufferValidationInfo.find(config);
+    if (it == mIndexedIndirectBufferValidationInfo.end()) {
+        auto result = mIndexedIndirectBufferValidationInfo.emplace(
+            config, IndexedIndirectBufferValidationInfo(indirectBuffer));
+        it = result.first;
     }
 
-    bool IndirectDrawMetadata::IndexedIndirectConfig::operator<(
-        const IndexedIndirectConfig& other) const {
-        return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
-                        drawType) < std::tie(other.inputIndirectBuffer,
-                                             other.numIndexBufferElements,
-                                             other.duplicateBaseVertexInstance, other.drawType);
+    IndirectDraw draw{};
+    draw.inputBufferOffset = indirectOffset;
+    draw.cmd = cmd;
+    it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
+}
+
+void IndirectDrawMetadata::AddIndirectDraw(BufferBase* indirectBuffer,
+                                           uint64_t indirectOffset,
+                                           bool duplicateBaseVertexInstance,
+                                           DrawIndirectCmd* cmd) {
+    const IndexedIndirectConfig config = {indirectBuffer, 0, duplicateBaseVertexInstance,
+                                          DrawType::NonIndexed};
+    auto it = mIndexedIndirectBufferValidationInfo.find(config);
+    if (it == mIndexedIndirectBufferValidationInfo.end()) {
+        auto result = mIndexedIndirectBufferValidationInfo.emplace(
+            config, IndexedIndirectBufferValidationInfo(indirectBuffer));
+        it = result.first;
     }
 
-    bool IndirectDrawMetadata::IndexedIndirectConfig::operator==(
-        const IndexedIndirectConfig& other) const {
-        return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
-                        drawType) == std::tie(other.inputIndirectBuffer,
-                                              other.numIndexBufferElements,
-                                              other.duplicateBaseVertexInstance, other.drawType);
-    }
+    IndirectDraw draw{};
+    draw.inputBufferOffset = indirectOffset;
+    draw.cmd = cmd;
+    it->second.AddIndirectDraw(mMaxDrawCallsPerBatch, mMaxBatchOffsetRange, draw);
+}
+
+bool IndirectDrawMetadata::IndexedIndirectConfig::operator<(
+    const IndexedIndirectConfig& other) const {
+    return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
+                    drawType) < std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+                                         other.duplicateBaseVertexInstance, other.drawType);
+}
+
+bool IndirectDrawMetadata::IndexedIndirectConfig::operator==(
+    const IndexedIndirectConfig& other) const {
+    return std::tie(inputIndirectBuffer, numIndexBufferElements, duplicateBaseVertexInstance,
+                    drawType) == std::tie(other.inputIndirectBuffer, other.numIndexBufferElements,
+                                          other.duplicateBaseVertexInstance, other.drawType);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/IndirectDrawMetadata.h b/src/dawn/native/IndirectDrawMetadata.h
index 47768a0..87c03ba 100644
--- a/src/dawn/native/IndirectDrawMetadata.h
+++ b/src/dawn/native/IndirectDrawMetadata.h
@@ -29,115 +29,115 @@
 
 namespace dawn::native {
 
-    class RenderBundleBase;
-    struct CombinedLimits;
+class RenderBundleBase;
+struct CombinedLimits;
 
-    // In the unlikely scenario that indirect offsets used over a single buffer span more than
-    // this length of the buffer, we split the validation work into multiple batches.
-    uint64_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
+// In the unlikely scenario that indirect offsets used over a single buffer span more than
+// this length of the buffer, we split the validation work into multiple batches.
+uint64_t ComputeMaxIndirectValidationBatchOffsetRange(const CombinedLimits& limits);
 
-    // Metadata corresponding to the validation requirements of a single render pass. This metadata
-    // is accumulated while its corresponding render pass is encoded, and is later used to encode
-    // validation commands to be inserted into the command buffer just before the render pass's own
-    // commands.
-    class IndirectDrawMetadata : public NonCopyable {
+// Metadata corresponding to the validation requirements of a single render pass. This metadata
+// is accumulated while its corresponding render pass is encoded, and is later used to encode
+// validation commands to be inserted into the command buffer just before the render pass's own
+// commands.
+class IndirectDrawMetadata : public NonCopyable {
+  public:
+    struct IndirectDraw {
+        uint64_t inputBufferOffset;
+        // This is a pointer to the command that should be populated with the validated
+        // indirect scratch buffer. It is only valid up until the encoded command buffer
+        // is submitted.
+        DrawIndirectCmd* cmd;
+    };
+
+    struct IndirectValidationBatch {
+        uint64_t minOffset;
+        uint64_t maxOffset;
+        std::vector<IndirectDraw> draws;
+    };
+
+    // Tracks information about every draw call in this render pass which uses the same indirect
+    // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
+    // that validation work can be chunked efficiently if necessary.
+    class IndexedIndirectBufferValidationInfo {
       public:
-        struct IndirectDraw {
-            uint64_t inputBufferOffset;
-            // This is a pointer to the command that should be populated with the validated
-            // indirect scratch buffer. It is only valid up until the encoded command buffer
-            // is submitted.
-            DrawIndirectCmd* cmd;
-        };
+        explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
 
-        struct IndirectValidationBatch {
-            uint64_t minOffset;
-            uint64_t maxOffset;
-            std::vector<IndirectDraw> draws;
-        };
+        // Logs a new drawIndexedIndirect call for the render pass. `cmd` is updated with an
+        // assigned (and deferred) buffer ref and relative offset before returning.
+        void AddIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
+                             uint64_t maxBatchOffsetRange,
+                             IndirectDraw draw);
 
-        // Tracks information about every draw call in this render pass which uses the same indirect
-        // buffer and the same-sized index buffer. Calls are grouped by indirect offset ranges so
-        // that validation work can be chunked efficiently if necessary.
-        class IndexedIndirectBufferValidationInfo {
-          public:
-            explicit IndexedIndirectBufferValidationInfo(BufferBase* indirectBuffer);
+        // Adds draw calls from an already-computed batch, e.g. from a previously encoded
+        // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
+        // it's added to mBatches.
+        void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
+                      uint64_t maxBatchOffsetRange,
+                      const IndirectValidationBatch& batch);
 
-            // Logs a new drawIndexedIndirect call for the render pass. `cmd` is updated with an
-            // assigned (and deferred) buffer ref and relative offset before returning.
-            void AddIndirectDraw(uint32_t maxDrawCallsPerIndirectValidationBatch,
-                                 uint64_t maxBatchOffsetRange,
-                                 IndirectDraw draw);
-
-            // Adds draw calls from an already-computed batch, e.g. from a previously encoded
-            // RenderBundle. The added batch is merged into an existing batch if possible, otherwise
-            // it's added to mBatch.
-            void AddBatch(uint32_t maxDrawCallsPerIndirectValidationBatch,
-                          uint64_t maxBatchOffsetRange,
-                          const IndirectValidationBatch& batch);
-
-            const std::vector<IndirectValidationBatch>& GetBatches() const;
-
-          private:
-            Ref<BufferBase> mIndirectBuffer;
-
-            // A list of information about validation batches that will need to be executed for the
-            // corresponding indirect buffer prior to a single render pass. These are kept sorted by
-            // minOffset and may overlap iff the number of offsets in one batch would otherwise
-            // exceed some large upper bound (roughly ~33M draw calls).
-            //
-            // Since the most common expected cases will overwhelmingly require only a single
-            // validation pass per render pass, this is optimized for efficient updates to a single
-            // batch rather than for efficient manipulation of a large number of batches.
-            std::vector<IndirectValidationBatch> mBatches;
-        };
-
-        enum class DrawType {
-            NonIndexed,
-            Indexed,
-        };
-        struct IndexedIndirectConfig {
-            BufferBase* inputIndirectBuffer;
-            uint64_t numIndexBufferElements;
-            bool duplicateBaseVertexInstance;
-            DrawType drawType;
-
-            bool operator<(const IndexedIndirectConfig& other) const;
-            bool operator==(const IndexedIndirectConfig& other) const;
-        };
-
-        using IndexedIndirectBufferValidationInfoMap =
-            std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
-
-        explicit IndirectDrawMetadata(const CombinedLimits& limits);
-        ~IndirectDrawMetadata();
-
-        IndirectDrawMetadata(IndirectDrawMetadata&&);
-        IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
-
-        IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
-
-        void AddBundle(RenderBundleBase* bundle);
-        void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
-                                    uint64_t indexBufferSize,
-                                    BufferBase* indirectBuffer,
-                                    uint64_t indirectOffset,
-                                    bool duplicateBaseVertexInstance,
-                                    DrawIndexedIndirectCmd* cmd);
-
-        void AddIndirectDraw(BufferBase* indirectBuffer,
-                             uint64_t indirectOffset,
-                             bool duplicateBaseVertexInstance,
-                             DrawIndirectCmd* cmd);
+        const std::vector<IndirectValidationBatch>& GetBatches() const;
 
       private:
-        IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
-        std::set<RenderBundleBase*> mAddedBundles;
+        Ref<BufferBase> mIndirectBuffer;
 
-        uint64_t mMaxBatchOffsetRange;
-        uint32_t mMaxDrawCallsPerBatch;
+        // A list of information about validation batches that will need to be executed for the
+        // corresponding indirect buffer prior to a single render pass. These are kept sorted by
+        // minOffset and may overlap iff the number of offsets in one batch would otherwise
+        // exceed some large upper bound (roughly ~33M draw calls).
+        //
+        // Since the most common expected cases will overwhelmingly require only a single
+        // validation pass per render pass, this is optimized for efficient updates to a single
+        // batch rather than for efficient manipulation of a large number of batches.
+        std::vector<IndirectValidationBatch> mBatches;
     };
 
+    enum class DrawType {
+        NonIndexed,
+        Indexed,
+    };
+    struct IndexedIndirectConfig {
+        BufferBase* inputIndirectBuffer;
+        uint64_t numIndexBufferElements;
+        bool duplicateBaseVertexInstance;
+        DrawType drawType;
+
+        bool operator<(const IndexedIndirectConfig& other) const;
+        bool operator==(const IndexedIndirectConfig& other) const;
+    };
+
+    using IndexedIndirectBufferValidationInfoMap =
+        std::map<IndexedIndirectConfig, IndexedIndirectBufferValidationInfo>;
+
+    explicit IndirectDrawMetadata(const CombinedLimits& limits);
+    ~IndirectDrawMetadata();
+
+    IndirectDrawMetadata(IndirectDrawMetadata&&);
+    IndirectDrawMetadata& operator=(IndirectDrawMetadata&&);
+
+    IndexedIndirectBufferValidationInfoMap* GetIndexedIndirectBufferValidationInfo();
+
+    void AddBundle(RenderBundleBase* bundle);
+    void AddIndexedIndirectDraw(wgpu::IndexFormat indexFormat,
+                                uint64_t indexBufferSize,
+                                BufferBase* indirectBuffer,
+                                uint64_t indirectOffset,
+                                bool duplicateBaseVertexInstance,
+                                DrawIndexedIndirectCmd* cmd);
+
+    void AddIndirectDraw(BufferBase* indirectBuffer,
+                         uint64_t indirectOffset,
+                         bool duplicateBaseVertexInstance,
+                         DrawIndirectCmd* cmd);
+
+  private:
+    IndexedIndirectBufferValidationInfoMap mIndexedIndirectBufferValidationInfo;
+    std::set<RenderBundleBase*> mAddedBundles;
+
+    uint64_t mMaxBatchOffsetRange;
+    uint32_t mMaxDrawCallsPerBatch;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_INDIRECTDRAWMETADATA_H_
diff --git a/src/dawn/native/IndirectDrawValidationEncoder.cpp b/src/dawn/native/IndirectDrawValidationEncoder.cpp
index 1da7a91..2c20820 100644
--- a/src/dawn/native/IndirectDrawValidationEncoder.cpp
+++ b/src/dawn/native/IndirectDrawValidationEncoder.cpp
@@ -35,25 +35,25 @@
 
 namespace dawn::native {
 
-    namespace {
-        // NOTE: This must match the workgroup_size attribute on the compute entry point below.
-        constexpr uint64_t kWorkgroupSize = 64;
+namespace {
+// NOTE: This must match the workgroup_size attribute on the compute entry point below.
+constexpr uint64_t kWorkgroupSize = 64;
 
-        // Bitmasks for BatchInfo::flags
-        constexpr uint32_t kDuplicateBaseVertexInstance = 1;
-        constexpr uint32_t kIndexedDraw = 2;
-        constexpr uint32_t kValidationEnabled = 4;
+// Bitmasks for BatchInfo::flags
+constexpr uint32_t kDuplicateBaseVertexInstance = 1;
+constexpr uint32_t kIndexedDraw = 2;
+constexpr uint32_t kValidationEnabled = 4;
 
-        // Equivalent to the BatchInfo struct defined in the shader below.
-        struct BatchInfo {
-            uint64_t numIndexBufferElements;
-            uint32_t numDraws;
-            uint32_t flags;
-        };
+// Equivalent to the BatchInfo struct defined in the shader below.
+struct BatchInfo {
+    uint64_t numIndexBufferElements;
+    uint32_t numDraws;
+    uint32_t flags;
+};
 
-        // TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
-        // various failure modes.
-        static const char sRenderValidationShaderSource[] = R"(
+// TODO(https://crbug.com/dawn/1108): Propagate validation feedback from this shader in
+// various failure modes.
+static const char sRenderValidationShaderSource[] = R"(
 
             let kNumDrawIndirectParams = 4u;
 
@@ -178,276 +178,268 @@
             }
         )";
 
-        ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(
-            DeviceBase* device) {
-            InternalPipelineStore* store = device->GetInternalPipelineStore();
+ResultOrError<ComputePipelineBase*> GetOrCreateRenderValidationPipeline(DeviceBase* device) {
+    InternalPipelineStore* store = device->GetInternalPipelineStore();
 
-            if (store->renderValidationPipeline == nullptr) {
-                // Create compute shader module if not cached before.
-                if (store->renderValidationShader == nullptr) {
-                    DAWN_TRY_ASSIGN(
-                        store->renderValidationShader,
-                        utils::CreateShaderModule(device, sRenderValidationShaderSource));
-                }
-
-                Ref<BindGroupLayoutBase> bindGroupLayout;
-                DAWN_TRY_ASSIGN(
-                    bindGroupLayout,
-                    utils::MakeBindGroupLayout(
-                        device,
-                        {
-                            {0, wgpu::ShaderStage::Compute,
-                             wgpu::BufferBindingType::ReadOnlyStorage},
-                            {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
-                            {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
-                        },
-                        /* allowInternalBinding */ true));
-
-                Ref<PipelineLayoutBase> pipelineLayout;
-                DAWN_TRY_ASSIGN(pipelineLayout,
-                                utils::MakeBasicPipelineLayout(device, bindGroupLayout));
-
-                ComputePipelineDescriptor computePipelineDescriptor = {};
-                computePipelineDescriptor.layout = pipelineLayout.Get();
-                computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
-                computePipelineDescriptor.compute.entryPoint = "main";
-
-                DAWN_TRY_ASSIGN(store->renderValidationPipeline,
-                                device->CreateComputePipeline(&computePipelineDescriptor));
-            }
-
-            return store->renderValidationPipeline.Get();
+    if (store->renderValidationPipeline == nullptr) {
+        // Create compute shader module if not cached before.
+        if (store->renderValidationShader == nullptr) {
+            DAWN_TRY_ASSIGN(store->renderValidationShader,
+                            utils::CreateShaderModule(device, sRenderValidationShaderSource));
         }
 
-        size_t GetBatchDataSize(uint32_t numDraws) {
-            return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
-        }
+        Ref<BindGroupLayoutBase> bindGroupLayout;
+        DAWN_TRY_ASSIGN(
+            bindGroupLayout,
+            utils::MakeBindGroupLayout(
+                device,
+                {
+                    {0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
+                    {1, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+                    {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                },
+                /* allowInternalBinding */ true));
 
-    }  // namespace
+        Ref<PipelineLayoutBase> pipelineLayout;
+        DAWN_TRY_ASSIGN(pipelineLayout, utils::MakeBasicPipelineLayout(device, bindGroupLayout));
 
-    uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
-        const uint64_t batchDrawCallLimitByDispatchSize =
-            static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
-        const uint64_t batchDrawCallLimitByStorageBindingSize =
-            (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
-        return static_cast<uint32_t>(
-            std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
-                      uint64_t(std::numeric_limits<uint32_t>::max())}));
+        ComputePipelineDescriptor computePipelineDescriptor = {};
+        computePipelineDescriptor.layout = pipelineLayout.Get();
+        computePipelineDescriptor.compute.module = store->renderValidationShader.Get();
+        computePipelineDescriptor.compute.entryPoint = "main";
+
+        DAWN_TRY_ASSIGN(store->renderValidationPipeline,
+                        device->CreateComputePipeline(&computePipelineDescriptor));
     }
 
-    MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
-                                                    CommandEncoder* commandEncoder,
-                                                    RenderPassResourceUsageTracker* usageTracker,
-                                                    IndirectDrawMetadata* indirectDrawMetadata) {
-        struct Batch {
-            const IndirectDrawMetadata::IndirectValidationBatch* metadata;
-            uint64_t numIndexBufferElements;
-            uint64_t dataBufferOffset;
-            uint64_t dataSize;
-            uint64_t inputIndirectOffset;
-            uint64_t inputIndirectSize;
-            uint64_t outputParamsOffset;
-            uint64_t outputParamsSize;
-            BatchInfo* batchInfo;
-        };
+    return store->renderValidationPipeline.Get();
+}
 
-        struct Pass {
-            uint32_t flags;
-            BufferBase* inputIndirectBuffer;
-            uint64_t outputParamsSize = 0;
-            uint64_t batchDataSize = 0;
-            std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
-            std::vector<Batch> batches;
-        };
+size_t GetBatchDataSize(uint32_t numDraws) {
+    return sizeof(BatchInfo) + numDraws * sizeof(uint32_t);
+}
 
-        // First stage is grouping all batches into passes. We try to pack as many batches into a
-        // single pass as possible. Batches can be grouped together as long as they're validating
-        // data from the same indirect buffer, but they may still be split into multiple passes if
-        // the number of draw calls in a pass would exceed some (very high) upper bound.
+}  // namespace
+
+uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits) {
+    const uint64_t batchDrawCallLimitByDispatchSize =
+        static_cast<uint64_t>(limits.v1.maxComputeWorkgroupsPerDimension) * kWorkgroupSize;
+    const uint64_t batchDrawCallLimitByStorageBindingSize =
+        (limits.v1.maxStorageBufferBindingSize - sizeof(BatchInfo)) / sizeof(uint32_t);
+    return static_cast<uint32_t>(
+        std::min({batchDrawCallLimitByDispatchSize, batchDrawCallLimitByStorageBindingSize,
+                  uint64_t(std::numeric_limits<uint32_t>::max())}));
+}
+
+MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+                                                CommandEncoder* commandEncoder,
+                                                RenderPassResourceUsageTracker* usageTracker,
+                                                IndirectDrawMetadata* indirectDrawMetadata) {
+    struct Batch {
+        const IndirectDrawMetadata::IndirectValidationBatch* metadata;
+        uint64_t numIndexBufferElements;
+        uint64_t dataBufferOffset;
+        uint64_t dataSize;
+        uint64_t inputIndirectOffset;
+        uint64_t inputIndirectSize;
+        uint64_t outputParamsOffset;
+        uint64_t outputParamsSize;
+        BatchInfo* batchInfo;
+    };
+
+    struct Pass {
+        uint32_t flags;
+        BufferBase* inputIndirectBuffer;
         uint64_t outputParamsSize = 0;
-        std::vector<Pass> passes;
-        IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
-            *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
-        if (bufferInfoMap.empty()) {
-            return {};
-        }
+        uint64_t batchDataSize = 0;
+        std::unique_ptr<void, void (*)(void*)> batchData{nullptr, std::free};
+        std::vector<Batch> batches;
+    };
 
-        const uint64_t maxStorageBufferBindingSize =
-            device->GetLimits().v1.maxStorageBufferBindingSize;
-        const uint32_t minStorageBufferOffsetAlignment =
-            device->GetLimits().v1.minStorageBufferOffsetAlignment;
-
-        for (auto& [config, validationInfo] : bufferInfoMap) {
-            const uint64_t indirectDrawCommandSize =
-                config.drawType == IndirectDrawMetadata::DrawType::Indexed
-                    ? kDrawIndexedIndirectSize
-                    : kDrawIndirectSize;
-
-            uint64_t outputIndirectSize = indirectDrawCommandSize;
-            if (config.duplicateBaseVertexInstance) {
-                outputIndirectSize += 2 * sizeof(uint32_t);
-            }
-
-            for (const IndirectDrawMetadata::IndirectValidationBatch& batch :
-                 validationInfo.GetBatches()) {
-                const uint64_t minOffsetFromAlignedBoundary =
-                    batch.minOffset % minStorageBufferOffsetAlignment;
-                const uint64_t minOffsetAlignedDown =
-                    batch.minOffset - minOffsetFromAlignedBoundary;
-
-                Batch newBatch;
-                newBatch.metadata = &batch;
-                newBatch.numIndexBufferElements = config.numIndexBufferElements;
-                newBatch.dataSize = GetBatchDataSize(batch.draws.size());
-                newBatch.inputIndirectOffset = minOffsetAlignedDown;
-                newBatch.inputIndirectSize =
-                    batch.maxOffset + indirectDrawCommandSize - minOffsetAlignedDown;
-
-                newBatch.outputParamsSize = batch.draws.size() * outputIndirectSize;
-                newBatch.outputParamsOffset =
-                    Align(outputParamsSize, minStorageBufferOffsetAlignment);
-                outputParamsSize = newBatch.outputParamsOffset + newBatch.outputParamsSize;
-                if (outputParamsSize > maxStorageBufferBindingSize) {
-                    return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
-                }
-
-                Pass* currentPass = passes.empty() ? nullptr : &passes.back();
-                if (currentPass && currentPass->inputIndirectBuffer == config.inputIndirectBuffer) {
-                    uint64_t nextBatchDataOffset =
-                        Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
-                    uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
-                    if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
-                        // We can fit this batch in the current pass.
-                        newBatch.dataBufferOffset = nextBatchDataOffset;
-                        currentPass->batchDataSize = newPassBatchDataSize;
-                        currentPass->batches.push_back(newBatch);
-                        continue;
-                    }
-                }
-
-                // We need to start a new pass for this batch.
-                newBatch.dataBufferOffset = 0;
-
-                Pass newPass{};
-                newPass.inputIndirectBuffer = config.inputIndirectBuffer;
-                newPass.batchDataSize = newBatch.dataSize;
-                newPass.batches.push_back(newBatch);
-                newPass.flags = 0;
-                if (config.duplicateBaseVertexInstance) {
-                    newPass.flags |= kDuplicateBaseVertexInstance;
-                }
-                if (config.drawType == IndirectDrawMetadata::DrawType::Indexed) {
-                    newPass.flags |= kIndexedDraw;
-                }
-                if (device->IsValidationEnabled()) {
-                    newPass.flags |= kValidationEnabled;
-                }
-                passes.push_back(std::move(newPass));
-            }
-        }
-
-        auto* const store = device->GetInternalPipelineStore();
-        ScratchBuffer& outputParamsBuffer = store->scratchIndirectStorage;
-        ScratchBuffer& batchDataBuffer = store->scratchStorage;
-
-        uint64_t requiredBatchDataBufferSize = 0;
-        for (const Pass& pass : passes) {
-            requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
-        }
-        DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
-        usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
-
-        DAWN_TRY(outputParamsBuffer.EnsureCapacity(outputParamsSize));
-        usageTracker->BufferUsedAs(outputParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
-
-        // Now we allocate and populate host-side batch data to be copied to the GPU.
-        for (Pass& pass : passes) {
-            // We use std::malloc here because it guarantees maximal scalar alignment.
-            pass.batchData = {std::malloc(pass.batchDataSize), std::free};
-            memset(pass.batchData.get(), 0, pass.batchDataSize);
-            uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
-            for (Batch& batch : pass.batches) {
-                batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
-                batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
-                batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
-                batch.batchInfo->flags = pass.flags;
-
-                uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
-                uint64_t outputParamsOffset = batch.outputParamsOffset;
-                for (auto& draw : batch.metadata->draws) {
-                    // The shader uses this to index an array of u32, hence the division by 4 bytes.
-                    *indirectOffsets++ = static_cast<uint32_t>(
-                        (draw.inputBufferOffset - batch.inputIndirectOffset) / 4);
-
-                    draw.cmd->indirectBuffer = outputParamsBuffer.GetBuffer();
-                    draw.cmd->indirectOffset = outputParamsOffset;
-                    if (pass.flags & kIndexedDraw) {
-                        outputParamsOffset += kDrawIndexedIndirectSize;
-                    } else {
-                        outputParamsOffset += kDrawIndirectSize;
-                    }
-                }
-            }
-        }
-
-        ComputePipelineBase* pipeline;
-        DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
-
-        Ref<BindGroupLayoutBase> layout;
-        DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
-
-        BindGroupEntry bindings[3];
-        BindGroupEntry& bufferDataBinding = bindings[0];
-        bufferDataBinding.binding = 0;
-        bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
-
-        BindGroupEntry& inputIndirectBinding = bindings[1];
-        inputIndirectBinding.binding = 1;
-
-        BindGroupEntry& outputParamsBinding = bindings[2];
-        outputParamsBinding.binding = 2;
-        outputParamsBinding.buffer = outputParamsBuffer.GetBuffer();
-
-        BindGroupDescriptor bindGroupDescriptor = {};
-        bindGroupDescriptor.layout = layout.Get();
-        bindGroupDescriptor.entryCount = 3;
-        bindGroupDescriptor.entries = bindings;
-
-        // Finally, we can now encode our validation and duplication passes. Each pass first does a
-        // two WriteBuffer to get batch and pass data over to the GPU, followed by a single compute
-        // pass. The compute pass encodes a separate SetBindGroup and Dispatch command for each
-        // batch.
-        for (const Pass& pass : passes) {
-            commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
-                                           static_cast<const uint8_t*>(pass.batchData.get()),
-                                           pass.batchDataSize);
-
-            Ref<ComputePassEncoder> passEncoder = commandEncoder->BeginComputePass();
-            passEncoder->APISetPipeline(pipeline);
-
-            inputIndirectBinding.buffer = pass.inputIndirectBuffer;
-
-            for (const Batch& batch : pass.batches) {
-                bufferDataBinding.offset = batch.dataBufferOffset;
-                bufferDataBinding.size = batch.dataSize;
-                inputIndirectBinding.offset = batch.inputIndirectOffset;
-                inputIndirectBinding.size = batch.inputIndirectSize;
-                outputParamsBinding.offset = batch.outputParamsOffset;
-                outputParamsBinding.size = batch.outputParamsSize;
-
-                Ref<BindGroupBase> bindGroup;
-                DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
-
-                const uint32_t numDrawsRoundedUp =
-                    (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
-                passEncoder->APISetBindGroup(0, bindGroup.Get());
-                passEncoder->APIDispatchWorkgroups(numDrawsRoundedUp);
-            }
-
-            passEncoder->APIEnd();
-        }
-
+    // First stage is grouping all batches into passes. We try to pack as many batches into a
+    // single pass as possible. Batches can be grouped together as long as they're validating
+    // data from the same indirect buffer, but they may still be split into multiple passes if
+    // the number of draw calls in a pass would exceed some (very high) upper bound.
+    uint64_t outputParamsSize = 0;
+    std::vector<Pass> passes;
+    IndirectDrawMetadata::IndexedIndirectBufferValidationInfoMap& bufferInfoMap =
+        *indirectDrawMetadata->GetIndexedIndirectBufferValidationInfo();
+    if (bufferInfoMap.empty()) {
         return {};
     }
 
+    const uint64_t maxStorageBufferBindingSize = device->GetLimits().v1.maxStorageBufferBindingSize;
+    const uint32_t minStorageBufferOffsetAlignment =
+        device->GetLimits().v1.minStorageBufferOffsetAlignment;
+
+    for (auto& [config, validationInfo] : bufferInfoMap) {
+        const uint64_t indirectDrawCommandSize =
+            config.drawType == IndirectDrawMetadata::DrawType::Indexed ? kDrawIndexedIndirectSize
+                                                                       : kDrawIndirectSize;
+
+        uint64_t outputIndirectSize = indirectDrawCommandSize;
+        if (config.duplicateBaseVertexInstance) {
+            outputIndirectSize += 2 * sizeof(uint32_t);
+        }
+
+        for (const IndirectDrawMetadata::IndirectValidationBatch& batch :
+             validationInfo.GetBatches()) {
+            const uint64_t minOffsetFromAlignedBoundary =
+                batch.minOffset % minStorageBufferOffsetAlignment;
+            const uint64_t minOffsetAlignedDown = batch.minOffset - minOffsetFromAlignedBoundary;
+
+            Batch newBatch;
+            newBatch.metadata = &batch;
+            newBatch.numIndexBufferElements = config.numIndexBufferElements;
+            newBatch.dataSize = GetBatchDataSize(batch.draws.size());
+            newBatch.inputIndirectOffset = minOffsetAlignedDown;
+            newBatch.inputIndirectSize =
+                batch.maxOffset + indirectDrawCommandSize - minOffsetAlignedDown;
+
+            newBatch.outputParamsSize = batch.draws.size() * outputIndirectSize;
+            newBatch.outputParamsOffset = Align(outputParamsSize, minStorageBufferOffsetAlignment);
+            outputParamsSize = newBatch.outputParamsOffset + newBatch.outputParamsSize;
+            if (outputParamsSize > maxStorageBufferBindingSize) {
+                return DAWN_INTERNAL_ERROR("Too many drawIndexedIndirect calls to validate");
+            }
+
+            Pass* currentPass = passes.empty() ? nullptr : &passes.back();
+            if (currentPass && currentPass->inputIndirectBuffer == config.inputIndirectBuffer) {
+                uint64_t nextBatchDataOffset =
+                    Align(currentPass->batchDataSize, minStorageBufferOffsetAlignment);
+                uint64_t newPassBatchDataSize = nextBatchDataOffset + newBatch.dataSize;
+                if (newPassBatchDataSize <= maxStorageBufferBindingSize) {
+                    // We can fit this batch in the current pass.
+                    newBatch.dataBufferOffset = nextBatchDataOffset;
+                    currentPass->batchDataSize = newPassBatchDataSize;
+                    currentPass->batches.push_back(newBatch);
+                    continue;
+                }
+            }
+
+            // We need to start a new pass for this batch.
+            newBatch.dataBufferOffset = 0;
+
+            Pass newPass{};
+            newPass.inputIndirectBuffer = config.inputIndirectBuffer;
+            newPass.batchDataSize = newBatch.dataSize;
+            newPass.batches.push_back(newBatch);
+            newPass.flags = 0;
+            if (config.duplicateBaseVertexInstance) {
+                newPass.flags |= kDuplicateBaseVertexInstance;
+            }
+            if (config.drawType == IndirectDrawMetadata::DrawType::Indexed) {
+                newPass.flags |= kIndexedDraw;
+            }
+            if (device->IsValidationEnabled()) {
+                newPass.flags |= kValidationEnabled;
+            }
+            passes.push_back(std::move(newPass));
+        }
+    }
+
+    auto* const store = device->GetInternalPipelineStore();
+    ScratchBuffer& outputParamsBuffer = store->scratchIndirectStorage;
+    ScratchBuffer& batchDataBuffer = store->scratchStorage;
+
+    uint64_t requiredBatchDataBufferSize = 0;
+    for (const Pass& pass : passes) {
+        requiredBatchDataBufferSize = std::max(requiredBatchDataBufferSize, pass.batchDataSize);
+    }
+    DAWN_TRY(batchDataBuffer.EnsureCapacity(requiredBatchDataBufferSize));
+    usageTracker->BufferUsedAs(batchDataBuffer.GetBuffer(), wgpu::BufferUsage::Storage);
+
+    DAWN_TRY(outputParamsBuffer.EnsureCapacity(outputParamsSize));
+    usageTracker->BufferUsedAs(outputParamsBuffer.GetBuffer(), wgpu::BufferUsage::Indirect);
+
+    // Now we allocate and populate host-side batch data to be copied to the GPU.
+    for (Pass& pass : passes) {
+        // We use std::malloc here because it guarantees maximal scalar alignment.
+        pass.batchData = {std::malloc(pass.batchDataSize), std::free};
+        memset(pass.batchData.get(), 0, pass.batchDataSize);
+        uint8_t* batchData = static_cast<uint8_t*>(pass.batchData.get());
+        for (Batch& batch : pass.batches) {
+            batch.batchInfo = new (&batchData[batch.dataBufferOffset]) BatchInfo();
+            batch.batchInfo->numIndexBufferElements = batch.numIndexBufferElements;
+            batch.batchInfo->numDraws = static_cast<uint32_t>(batch.metadata->draws.size());
+            batch.batchInfo->flags = pass.flags;
+
+            uint32_t* indirectOffsets = reinterpret_cast<uint32_t*>(batch.batchInfo + 1);
+            uint64_t outputParamsOffset = batch.outputParamsOffset;
+            for (auto& draw : batch.metadata->draws) {
+                // The shader uses this to index an array of u32, hence the division by 4 bytes.
+                *indirectOffsets++ =
+                    static_cast<uint32_t>((draw.inputBufferOffset - batch.inputIndirectOffset) / 4);
+
+                draw.cmd->indirectBuffer = outputParamsBuffer.GetBuffer();
+                draw.cmd->indirectOffset = outputParamsOffset;
+                if (pass.flags & kIndexedDraw) {
+                    outputParamsOffset += kDrawIndexedIndirectSize;
+                } else {
+                    outputParamsOffset += kDrawIndirectSize;
+                }
+            }
+        }
+    }
+
+    ComputePipelineBase* pipeline;
+    DAWN_TRY_ASSIGN(pipeline, GetOrCreateRenderValidationPipeline(device));
+
+    Ref<BindGroupLayoutBase> layout;
+    DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+    BindGroupEntry bindings[3];
+    BindGroupEntry& bufferDataBinding = bindings[0];
+    bufferDataBinding.binding = 0;
+    bufferDataBinding.buffer = batchDataBuffer.GetBuffer();
+
+    BindGroupEntry& inputIndirectBinding = bindings[1];
+    inputIndirectBinding.binding = 1;
+
+    BindGroupEntry& outputParamsBinding = bindings[2];
+    outputParamsBinding.binding = 2;
+    outputParamsBinding.buffer = outputParamsBuffer.GetBuffer();
+
+    BindGroupDescriptor bindGroupDescriptor = {};
+    bindGroupDescriptor.layout = layout.Get();
+    bindGroupDescriptor.entryCount = 3;
+    bindGroupDescriptor.entries = bindings;
+
+    // Finally, we can now encode our validation and duplication passes. Each pass first does
+    // two WriteBuffers to get batch and pass data over to the GPU, followed by a single compute
+    // pass. The compute pass encodes a separate SetBindGroup and Dispatch command for each
+    // batch.
+    for (const Pass& pass : passes) {
+        commandEncoder->APIWriteBuffer(batchDataBuffer.GetBuffer(), 0,
+                                       static_cast<const uint8_t*>(pass.batchData.get()),
+                                       pass.batchDataSize);
+
+        Ref<ComputePassEncoder> passEncoder = commandEncoder->BeginComputePass();
+        passEncoder->APISetPipeline(pipeline);
+
+        inputIndirectBinding.buffer = pass.inputIndirectBuffer;
+
+        for (const Batch& batch : pass.batches) {
+            bufferDataBinding.offset = batch.dataBufferOffset;
+            bufferDataBinding.size = batch.dataSize;
+            inputIndirectBinding.offset = batch.inputIndirectOffset;
+            inputIndirectBinding.size = batch.inputIndirectSize;
+            outputParamsBinding.offset = batch.outputParamsOffset;
+            outputParamsBinding.size = batch.outputParamsSize;
+
+            Ref<BindGroupBase> bindGroup;
+            DAWN_TRY_ASSIGN(bindGroup, device->CreateBindGroup(&bindGroupDescriptor));
+
+            const uint32_t numDrawsRoundedUp =
+                (batch.batchInfo->numDraws + kWorkgroupSize - 1) / kWorkgroupSize;
+            passEncoder->APISetBindGroup(0, bindGroup.Get());
+            passEncoder->APIDispatchWorkgroups(numDrawsRoundedUp);
+        }
+
+        passEncoder->APIEnd();
+    }
+
+    return {};
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/IndirectDrawValidationEncoder.h b/src/dawn/native/IndirectDrawValidationEncoder.h
index de246c2..21946de 100644
--- a/src/dawn/native/IndirectDrawValidationEncoder.h
+++ b/src/dawn/native/IndirectDrawValidationEncoder.h
@@ -20,20 +20,20 @@
 
 namespace dawn::native {
 
-    class CommandEncoder;
-    struct CombinedLimits;
-    class DeviceBase;
-    class RenderPassResourceUsageTracker;
+class CommandEncoder;
+struct CombinedLimits;
+class DeviceBase;
+class RenderPassResourceUsageTracker;
 
-    // The maximum number of draws call we can fit into a single validation batch. This is
-    // essentially limited by the number of indirect parameter blocks that can fit into the maximum
-    // allowed storage binding size (with the base limits, it is about 6.7M).
-    uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
+// The maximum number of draw calls we can fit into a single validation batch. This is
+// essentially limited by the number of indirect parameter blocks that can fit into the maximum
+// allowed storage binding size (with the base limits, it is about 6.7M).
+uint32_t ComputeMaxDrawCallsPerIndirectValidationBatch(const CombinedLimits& limits);
 
-    MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
-                                                    CommandEncoder* commandEncoder,
-                                                    RenderPassResourceUsageTracker* usageTracker,
-                                                    IndirectDrawMetadata* indirectDrawMetadata);
+MaybeError EncodeIndirectDrawValidationCommands(DeviceBase* device,
+                                                CommandEncoder* commandEncoder,
+                                                RenderPassResourceUsageTracker* usageTracker,
+                                                IndirectDrawMetadata* indirectDrawMetadata);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Instance.cpp b/src/dawn/native/Instance.cpp
index 6c66621..34436b2 100644
--- a/src/dawn/native/Instance.cpp
+++ b/src/dawn/native/Instance.cpp
@@ -28,433 +28,432 @@
 
 // For SwiftShader fallback
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-#    include "dawn/native/VulkanBackend.h"
+#include "dawn/native/VulkanBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
 
 #if defined(DAWN_USE_X11)
-#    include "dawn/native/XlibXcbFunctions.h"
+#include "dawn/native/XlibXcbFunctions.h"
 #endif  // defined(DAWN_USE_X11)
 
 #include <optional>
 
 namespace dawn::native {
 
-    // Forward definitions of each backend's "Connect" function that creates new BackendConnection.
-    // Conditionally compiled declarations are used to avoid using static constructors instead.
+// Forward declarations of each backend's "Connect" function that creates new BackendConnection.
+// Conditionally compiled declarations are used instead to avoid static constructors.
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-    namespace d3d12 {
-        BackendConnection* Connect(InstanceBase* instance);
-    }
+namespace d3d12 {
+BackendConnection* Connect(InstanceBase* instance);
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-    namespace metal {
-        BackendConnection* Connect(InstanceBase* instance);
-    }
+namespace metal {
+BackendConnection* Connect(InstanceBase* instance);
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 #if defined(DAWN_ENABLE_BACKEND_NULL)
-    namespace null {
-        BackendConnection* Connect(InstanceBase* instance);
-    }
+namespace null {
+BackendConnection* Connect(InstanceBase* instance);
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_NULL)
 #if defined(DAWN_ENABLE_BACKEND_OPENGL)
-    namespace opengl {
-        BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType);
-    }
+namespace opengl {
+BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType);
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_OPENGL)
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-    namespace vulkan {
-        BackendConnection* Connect(InstanceBase* instance);
-    }
+namespace vulkan {
+BackendConnection* Connect(InstanceBase* instance);
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
 
-    namespace {
+namespace {
 
-        BackendsBitset GetEnabledBackends() {
-            BackendsBitset enabledBackends;
+BackendsBitset GetEnabledBackends() {
+    BackendsBitset enabledBackends;
 #if defined(DAWN_ENABLE_BACKEND_NULL)
-            enabledBackends.set(wgpu::BackendType::Null);
+    enabledBackends.set(wgpu::BackendType::Null);
 #endif  // defined(DAWN_ENABLE_BACKEND_NULL)
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-            enabledBackends.set(wgpu::BackendType::D3D12);
+    enabledBackends.set(wgpu::BackendType::D3D12);
 #endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-            enabledBackends.set(wgpu::BackendType::Metal);
+    enabledBackends.set(wgpu::BackendType::Metal);
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-            enabledBackends.set(wgpu::BackendType::Vulkan);
+    enabledBackends.set(wgpu::BackendType::Vulkan);
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
 #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-            enabledBackends.set(wgpu::BackendType::OpenGL);
+    enabledBackends.set(wgpu::BackendType::OpenGL);
 #endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
 #if defined(DAWN_ENABLE_BACKEND_OPENGLES)
-            enabledBackends.set(wgpu::BackendType::OpenGLES);
+    enabledBackends.set(wgpu::BackendType::OpenGLES);
 #endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
 
-            return enabledBackends;
-        }
+    return enabledBackends;
+}
 
-        dawn::platform::CachingInterface* GetCachingInterface(dawn::platform::Platform* platform) {
-            if (platform != nullptr) {
-                return platform->GetCachingInterface(/*fingerprint*/ nullptr,
-                                                     /*fingerprintSize*/ 0);
-            }
-            return nullptr;
-        }
-
-    }  // anonymous namespace
-
-    InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor) {
-        return InstanceBase::Create().Detach();
+dawn::platform::CachingInterface* GetCachingInterface(dawn::platform::Platform* platform) {
+    if (platform != nullptr) {
+        return platform->GetCachingInterface(/*fingerprint*/ nullptr,
+                                             /*fingerprintSize*/ 0);
     }
+    return nullptr;
+}
 
-    // InstanceBase
+}  // anonymous namespace
 
-    // static
-    Ref<InstanceBase> InstanceBase::Create(const InstanceDescriptor* descriptor) {
-        Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
-        static constexpr InstanceDescriptor kDefaultDesc = {};
-        if (descriptor == nullptr) {
-            descriptor = &kDefaultDesc;
-        }
-        if (instance->ConsumedError(instance->Initialize(descriptor))) {
-            return nullptr;
-        }
-        return instance;
+InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor) {
+    return InstanceBase::Create().Detach();
+}
+
+// InstanceBase
+
+// static
+Ref<InstanceBase> InstanceBase::Create(const InstanceDescriptor* descriptor) {
+    Ref<InstanceBase> instance = AcquireRef(new InstanceBase);
+    static constexpr InstanceDescriptor kDefaultDesc = {};
+    if (descriptor == nullptr) {
+        descriptor = &kDefaultDesc;
     }
-
-    // TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
-    MaybeError InstanceBase::Initialize(const InstanceDescriptor* descriptor) {
-        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain, wgpu::SType::DawnInstanceDescriptor));
-        const DawnInstanceDescriptor* dawnDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &dawnDesc);
-        if (dawnDesc != nullptr) {
-            for (uint32_t i = 0; i < dawnDesc->additionalRuntimeSearchPathsCount; ++i) {
-                mRuntimeSearchPaths.push_back(dawnDesc->additionalRuntimeSearchPaths[i]);
-            }
-        }
-        // Default paths to search are next to the shared library, next to the executable, and
-        // no path (just libvulkan.so).
-        if (auto p = GetModuleDirectory()) {
-            mRuntimeSearchPaths.push_back(std::move(*p));
-        }
-        if (auto p = GetExecutableDirectory()) {
-            mRuntimeSearchPaths.push_back(std::move(*p));
-        }
-        mRuntimeSearchPaths.push_back("");
-
-        // Initialize the platform to the default for now.
-        mDefaultPlatform = std::make_unique<dawn::platform::Platform>();
-        SetPlatform(mDefaultPlatform.get());
-
-        return {};
+    if (instance->ConsumedError(instance->Initialize(descriptor))) {
+        return nullptr;
     }
+    return instance;
+}
 
-    void InstanceBase::APIRequestAdapter(const RequestAdapterOptions* options,
-                                         WGPURequestAdapterCallback callback,
-                                         void* userdata) {
-        static constexpr RequestAdapterOptions kDefaultOptions = {};
-        if (options == nullptr) {
-            options = &kDefaultOptions;
-        }
-        auto result = RequestAdapterInternal(options);
-        if (result.IsError()) {
-            auto err = result.AcquireError();
-            std::string msg = err->GetFormattedMessage();
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPURequestAdapterStatus_Error, nullptr, msg.c_str(), userdata);
-        } else {
-            Ref<AdapterBase> adapter = result.AcquireSuccess();
-            // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
-            callback(WGPURequestAdapterStatus_Success, ToAPI(adapter.Detach()), nullptr, userdata);
+// TODO(crbug.com/dawn/832): make the platform an initialization parameter of the instance.
+MaybeError InstanceBase::Initialize(const InstanceDescriptor* descriptor) {
+    DAWN_TRY(ValidateSingleSType(descriptor->nextInChain, wgpu::SType::DawnInstanceDescriptor));
+    const DawnInstanceDescriptor* dawnDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &dawnDesc);
+    if (dawnDesc != nullptr) {
+        for (uint32_t i = 0; i < dawnDesc->additionalRuntimeSearchPathsCount; ++i) {
+            mRuntimeSearchPaths.push_back(dawnDesc->additionalRuntimeSearchPaths[i]);
         }
     }
+    // Default paths to search are next to the shared library, next to the executable, and
+    // no path (just libvulkan.so).
+    if (auto p = GetModuleDirectory()) {
+        mRuntimeSearchPaths.push_back(std::move(*p));
+    }
+    if (auto p = GetExecutableDirectory()) {
+        mRuntimeSearchPaths.push_back(std::move(*p));
+    }
+    mRuntimeSearchPaths.push_back("");
 
-    ResultOrError<Ref<AdapterBase>> InstanceBase::RequestAdapterInternal(
-        const RequestAdapterOptions* options) {
-        ASSERT(options != nullptr);
-        if (options->forceFallbackAdapter) {
+    // Initialize the platform to the default for now.
+    mDefaultPlatform = std::make_unique<dawn::platform::Platform>();
+    SetPlatform(mDefaultPlatform.get());
+
+    return {};
+}
+
+void InstanceBase::APIRequestAdapter(const RequestAdapterOptions* options,
+                                     WGPURequestAdapterCallback callback,
+                                     void* userdata) {
+    static constexpr RequestAdapterOptions kDefaultOptions = {};
+    if (options == nullptr) {
+        options = &kDefaultOptions;
+    }
+    auto result = RequestAdapterInternal(options);
+    if (result.IsError()) {
+        auto err = result.AcquireError();
+        std::string msg = err->GetFormattedMessage();
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        callback(WGPURequestAdapterStatus_Error, nullptr, msg.c_str(), userdata);
+    } else {
+        Ref<AdapterBase> adapter = result.AcquireSuccess();
+        // TODO(crbug.com/dawn/1122): Call callbacks only on wgpuInstanceProcessEvents
+        callback(WGPURequestAdapterStatus_Success, ToAPI(adapter.Detach()), nullptr, userdata);
+    }
+}
+
+ResultOrError<Ref<AdapterBase>> InstanceBase::RequestAdapterInternal(
+    const RequestAdapterOptions* options) {
+    ASSERT(options != nullptr);
+    if (options->forceFallbackAdapter) {
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-            if (GetEnabledBackends()[wgpu::BackendType::Vulkan]) {
-                dawn_native::vulkan::AdapterDiscoveryOptions vulkanOptions;
-                vulkanOptions.forceSwiftShader = true;
-                DAWN_TRY(DiscoverAdaptersInternal(&vulkanOptions));
-            }
+        if (GetEnabledBackends()[wgpu::BackendType::Vulkan]) {
+            dawn_native::vulkan::AdapterDiscoveryOptions vulkanOptions;
+            vulkanOptions.forceSwiftShader = true;
+            DAWN_TRY(DiscoverAdaptersInternal(&vulkanOptions));
+        }
 #else
-            return Ref<AdapterBase>(nullptr);
-#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
-        } else {
-            DiscoverDefaultAdapters();
-        }
-
-        wgpu::AdapterType preferredType;
-        switch (options->powerPreference) {
-            case wgpu::PowerPreference::LowPower:
-                preferredType = wgpu::AdapterType::IntegratedGPU;
-                break;
-            case wgpu::PowerPreference::Undefined:
-            case wgpu::PowerPreference::HighPerformance:
-                preferredType = wgpu::AdapterType::DiscreteGPU;
-                break;
-        }
-
-        std::optional<size_t> discreteGPUAdapterIndex;
-        std::optional<size_t> integratedGPUAdapterIndex;
-        std::optional<size_t> cpuAdapterIndex;
-        std::optional<size_t> unknownAdapterIndex;
-
-        for (size_t i = 0; i < mAdapters.size(); ++i) {
-            AdapterProperties properties;
-            mAdapters[i]->APIGetProperties(&properties);
-
-            if (options->forceFallbackAdapter) {
-                if (!gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID)) {
-                    continue;
-                }
-                return mAdapters[i];
-            }
-            if (properties.adapterType == preferredType) {
-                return mAdapters[i];
-            }
-            switch (properties.adapterType) {
-                case wgpu::AdapterType::DiscreteGPU:
-                    discreteGPUAdapterIndex = i;
-                    break;
-                case wgpu::AdapterType::IntegratedGPU:
-                    integratedGPUAdapterIndex = i;
-                    break;
-                case wgpu::AdapterType::CPU:
-                    cpuAdapterIndex = i;
-                    break;
-                case wgpu::AdapterType::Unknown:
-                    unknownAdapterIndex = i;
-                    break;
-            }
-        }
-
-        // For now, we always prefer the discrete GPU
-        if (discreteGPUAdapterIndex) {
-            return mAdapters[*discreteGPUAdapterIndex];
-        }
-        if (integratedGPUAdapterIndex) {
-            return mAdapters[*integratedGPUAdapterIndex];
-        }
-        if (cpuAdapterIndex) {
-            return mAdapters[*cpuAdapterIndex];
-        }
-        if (unknownAdapterIndex) {
-            return mAdapters[*unknownAdapterIndex];
-        }
-
         return Ref<AdapterBase>(nullptr);
-    }
-
-    void InstanceBase::DiscoverDefaultAdapters() {
-        for (wgpu::BackendType b : IterateBitSet(GetEnabledBackends())) {
-            EnsureBackendConnection(b);
-        }
-
-        if (mDiscoveredDefaultAdapters) {
-            return;
-        }
-
-        // Query and merge all default adapters for all backends
-        for (std::unique_ptr<BackendConnection>& backend : mBackends) {
-            std::vector<Ref<AdapterBase>> backendAdapters = backend->DiscoverDefaultAdapters();
-
-            for (Ref<AdapterBase>& adapter : backendAdapters) {
-                ASSERT(adapter->GetBackendType() == backend->GetType());
-                ASSERT(adapter->GetInstance() == this);
-                mAdapters.push_back(std::move(adapter));
-            }
-        }
-
-        mDiscoveredDefaultAdapters = true;
-    }
-
-    // This is just a wrapper around the real logic that uses Error.h error handling.
-    bool InstanceBase::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
-        return !ConsumedError(DiscoverAdaptersInternal(options));
-    }
-
-    const ToggleInfo* InstanceBase::GetToggleInfo(const char* toggleName) {
-        return mTogglesInfo.GetToggleInfo(toggleName);
-    }
-
-    Toggle InstanceBase::ToggleNameToEnum(const char* toggleName) {
-        return mTogglesInfo.ToggleNameToEnum(toggleName);
-    }
-
-    const FeatureInfo* InstanceBase::GetFeatureInfo(wgpu::FeatureName feature) {
-        return mFeaturesInfo.GetFeatureInfo(feature);
-    }
-
-    const std::vector<Ref<AdapterBase>>& InstanceBase::GetAdapters() const {
-        return mAdapters;
-    }
-
-    void InstanceBase::EnsureBackendConnection(wgpu::BackendType backendType) {
-        if (mBackendsConnected[backendType]) {
-            return;
-        }
-
-        auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
-            if (connection != nullptr) {
-                ASSERT(connection->GetType() == expectedType);
-                ASSERT(connection->GetInstance() == this);
-                mBackends.push_back(std::unique_ptr<BackendConnection>(connection));
-            }
-        };
-
-        switch (backendType) {
-#if defined(DAWN_ENABLE_BACKEND_NULL)
-            case wgpu::BackendType::Null:
-                Register(null::Connect(this), wgpu::BackendType::Null);
-                break;
-#endif  // defined(DAWN_ENABLE_BACKEND_NULL)
-
-#if defined(DAWN_ENABLE_BACKEND_D3D12)
-            case wgpu::BackendType::D3D12:
-                Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
-                break;
-#endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
-
-#if defined(DAWN_ENABLE_BACKEND_METAL)
-            case wgpu::BackendType::Metal:
-                Register(metal::Connect(this), wgpu::BackendType::Metal);
-                break;
-#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_ENABLE_BACKEND_VULKAN)
-            case wgpu::BackendType::Vulkan:
-                Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
-                break;
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
-
-#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-            case wgpu::BackendType::OpenGL:
-                Register(opengl::Connect(this, wgpu::BackendType::OpenGL),
-                         wgpu::BackendType::OpenGL);
-                break;
-#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-
-#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
-            case wgpu::BackendType::OpenGLES:
-                Register(opengl::Connect(this, wgpu::BackendType::OpenGLES),
-                         wgpu::BackendType::OpenGLES);
-                break;
-#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
-
-            default:
-                UNREACHABLE();
-        }
-
-        mBackendsConnected.set(backendType);
+    } else {
+        DiscoverDefaultAdapters();
     }
 
-    MaybeError InstanceBase::DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options) {
-        wgpu::BackendType backendType = static_cast<wgpu::BackendType>(options->backendType);
-        DAWN_TRY(ValidateBackendType(backendType));
+    wgpu::AdapterType preferredType;
+    switch (options->powerPreference) {
+        case wgpu::PowerPreference::LowPower:
+            preferredType = wgpu::AdapterType::IntegratedGPU;
+            break;
+        case wgpu::PowerPreference::Undefined:
+        case wgpu::PowerPreference::HighPerformance:
+            preferredType = wgpu::AdapterType::DiscreteGPU;
+            break;
+    }
 
-        if (!GetEnabledBackends()[backendType]) {
-            return DAWN_FORMAT_VALIDATION_ERROR("%s not supported.", backendType);
-        }
+    std::optional<size_t> discreteGPUAdapterIndex;
+    std::optional<size_t> integratedGPUAdapterIndex;
+    std::optional<size_t> cpuAdapterIndex;
+    std::optional<size_t> unknownAdapterIndex;
 
-        EnsureBackendConnection(backendType);
+    for (size_t i = 0; i < mAdapters.size(); ++i) {
+        AdapterProperties properties;
+        mAdapters[i]->APIGetProperties(&properties);
 
-        bool foundBackend = false;
-        for (std::unique_ptr<BackendConnection>& backend : mBackends) {
-            if (backend->GetType() != backendType) {
+        if (options->forceFallbackAdapter) {
+            if (!gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID)) {
                 continue;
             }
-            foundBackend = true;
-
-            std::vector<Ref<AdapterBase>> newAdapters;
-            DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
-
-            for (Ref<AdapterBase>& adapter : newAdapters) {
-                ASSERT(adapter->GetBackendType() == backend->GetType());
-                ASSERT(adapter->GetInstance() == this);
-                mAdapters.push_back(std::move(adapter));
-            }
+            return mAdapters[i];
         }
-
-        DAWN_INVALID_IF(!foundBackend, "%s not available.", backendType);
-        return {};
-    }
-
-    bool InstanceBase::ConsumedError(MaybeError maybeError) {
-        if (maybeError.IsError()) {
-            std::unique_ptr<ErrorData> error = maybeError.AcquireError();
-
-            ASSERT(error != nullptr);
-            dawn::ErrorLog() << error->GetFormattedMessage();
-            return true;
+        if (properties.adapterType == preferredType) {
+            return mAdapters[i];
         }
-        return false;
-    }
-
-    bool InstanceBase::IsBackendValidationEnabled() const {
-        return mBackendValidationLevel != BackendValidationLevel::Disabled;
-    }
-
-    void InstanceBase::SetBackendValidationLevel(BackendValidationLevel level) {
-        mBackendValidationLevel = level;
-    }
-
-    BackendValidationLevel InstanceBase::GetBackendValidationLevel() const {
-        return mBackendValidationLevel;
-    }
-
-    void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
-        mBeginCaptureOnStartup = beginCaptureOnStartup;
-    }
-
-    bool InstanceBase::IsBeginCaptureOnStartupEnabled() const {
-        return mBeginCaptureOnStartup;
-    }
-
-    void InstanceBase::SetPlatform(dawn::platform::Platform* platform) {
-        if (platform == nullptr) {
-            mPlatform = mDefaultPlatform.get();
-        } else {
-            mPlatform = platform;
+        switch (properties.adapterType) {
+            case wgpu::AdapterType::DiscreteGPU:
+                discreteGPUAdapterIndex = i;
+                break;
+            case wgpu::AdapterType::IntegratedGPU:
+                integratedGPUAdapterIndex = i;
+                break;
+            case wgpu::AdapterType::CPU:
+                cpuAdapterIndex = i;
+                break;
+            case wgpu::AdapterType::Unknown:
+                unknownAdapterIndex = i;
+                break;
         }
-        mBlobCache = std::make_unique<BlobCache>(GetCachingInterface(platform));
     }
 
-    void InstanceBase::SetPlatformForTesting(dawn::platform::Platform* platform) {
-        SetPlatform(platform);
+    // For now, we always prefer the discrete GPU
+    if (discreteGPUAdapterIndex) {
+        return mAdapters[*discreteGPUAdapterIndex];
+    }
+    if (integratedGPUAdapterIndex) {
+        return mAdapters[*integratedGPUAdapterIndex];
+    }
+    if (cpuAdapterIndex) {
+        return mAdapters[*cpuAdapterIndex];
+    }
+    if (unknownAdapterIndex) {
+        return mAdapters[*unknownAdapterIndex];
     }
 
-    dawn::platform::Platform* InstanceBase::GetPlatform() {
-        return mPlatform;
+    return Ref<AdapterBase>(nullptr);
+}
+
+void InstanceBase::DiscoverDefaultAdapters() {
+    for (wgpu::BackendType b : IterateBitSet(GetEnabledBackends())) {
+        EnsureBackendConnection(b);
     }
 
-    BlobCache* InstanceBase::GetBlobCache() {
-        return mBlobCache.get();
+    if (mDiscoveredDefaultAdapters) {
+        return;
     }
 
-    const std::vector<std::string>& InstanceBase::GetRuntimeSearchPaths() const {
-        return mRuntimeSearchPaths;
+    // Query and merge all default adapters for all backends
+    for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+        std::vector<Ref<AdapterBase>> backendAdapters = backend->DiscoverDefaultAdapters();
+
+        for (Ref<AdapterBase>& adapter : backendAdapters) {
+            ASSERT(adapter->GetBackendType() == backend->GetType());
+            ASSERT(adapter->GetInstance() == this);
+            mAdapters.push_back(std::move(adapter));
+        }
     }
 
-    const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {
+    mDiscoveredDefaultAdapters = true;
+}
+
+// This is just a wrapper around the real logic that uses Error.h error handling.
+bool InstanceBase::DiscoverAdapters(const AdapterDiscoveryOptionsBase* options) {
+    return !ConsumedError(DiscoverAdaptersInternal(options));
+}
+
+const ToggleInfo* InstanceBase::GetToggleInfo(const char* toggleName) {
+    return mTogglesInfo.GetToggleInfo(toggleName);
+}
+
+Toggle InstanceBase::ToggleNameToEnum(const char* toggleName) {
+    return mTogglesInfo.ToggleNameToEnum(toggleName);
+}
+
+const FeatureInfo* InstanceBase::GetFeatureInfo(wgpu::FeatureName feature) {
+    return mFeaturesInfo.GetFeatureInfo(feature);
+}
+
+const std::vector<Ref<AdapterBase>>& InstanceBase::GetAdapters() const {
+    return mAdapters;
+}
+
+void InstanceBase::EnsureBackendConnection(wgpu::BackendType backendType) {
+    if (mBackendsConnected[backendType]) {
+        return;
+    }
+
+    auto Register = [this](BackendConnection* connection, wgpu::BackendType expectedType) {
+        if (connection != nullptr) {
+            ASSERT(connection->GetType() == expectedType);
+            ASSERT(connection->GetInstance() == this);
+            mBackends.push_back(std::unique_ptr<BackendConnection>(connection));
+        }
+    };
+
+    switch (backendType) {
+#if defined(DAWN_ENABLE_BACKEND_NULL)
+        case wgpu::BackendType::Null:
+            Register(null::Connect(this), wgpu::BackendType::Null);
+            break;
+#endif  // defined(DAWN_ENABLE_BACKEND_NULL)
+
+#if defined(DAWN_ENABLE_BACKEND_D3D12)
+        case wgpu::BackendType::D3D12:
+            Register(d3d12::Connect(this), wgpu::BackendType::D3D12);
+            break;
+#endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL)
+        case wgpu::BackendType::Metal:
+            Register(metal::Connect(this), wgpu::BackendType::Metal);
+            break;
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_VULKAN)
+        case wgpu::BackendType::Vulkan:
+            Register(vulkan::Connect(this), wgpu::BackendType::Vulkan);
+            break;
+#endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+        case wgpu::BackendType::OpenGL:
+            Register(opengl::Connect(this, wgpu::BackendType::OpenGL), wgpu::BackendType::OpenGL);
+            break;
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+        case wgpu::BackendType::OpenGLES:
+            Register(opengl::Connect(this, wgpu::BackendType::OpenGLES),
+                     wgpu::BackendType::OpenGLES);
+            break;
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+        default:
+            UNREACHABLE();
+    }
+
+    mBackendsConnected.set(backendType);
+}
+
+MaybeError InstanceBase::DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options) {
+    wgpu::BackendType backendType = static_cast<wgpu::BackendType>(options->backendType);
+    DAWN_TRY(ValidateBackendType(backendType));
+
+    if (!GetEnabledBackends()[backendType]) {
+        return DAWN_FORMAT_VALIDATION_ERROR("%s not supported.", backendType);
+    }
+
+    EnsureBackendConnection(backendType);
+
+    bool foundBackend = false;
+    for (std::unique_ptr<BackendConnection>& backend : mBackends) {
+        if (backend->GetType() != backendType) {
+            continue;
+        }
+        foundBackend = true;
+
+        std::vector<Ref<AdapterBase>> newAdapters;
+        DAWN_TRY_ASSIGN(newAdapters, backend->DiscoverAdapters(options));
+
+        for (Ref<AdapterBase>& adapter : newAdapters) {
+            ASSERT(adapter->GetBackendType() == backend->GetType());
+            ASSERT(adapter->GetInstance() == this);
+            mAdapters.push_back(std::move(adapter));
+        }
+    }
+
+    DAWN_INVALID_IF(!foundBackend, "%s not available.", backendType);
+    return {};
+}
+
+bool InstanceBase::ConsumedError(MaybeError maybeError) {
+    if (maybeError.IsError()) {
+        std::unique_ptr<ErrorData> error = maybeError.AcquireError();
+
+        ASSERT(error != nullptr);
+        dawn::ErrorLog() << error->GetFormattedMessage();
+        return true;
+    }
+    return false;
+}
+
+bool InstanceBase::IsBackendValidationEnabled() const {
+    return mBackendValidationLevel != BackendValidationLevel::Disabled;
+}
+
+void InstanceBase::SetBackendValidationLevel(BackendValidationLevel level) {
+    mBackendValidationLevel = level;
+}
+
+BackendValidationLevel InstanceBase::GetBackendValidationLevel() const {
+    return mBackendValidationLevel;
+}
+
+void InstanceBase::EnableBeginCaptureOnStartup(bool beginCaptureOnStartup) {
+    mBeginCaptureOnStartup = beginCaptureOnStartup;
+}
+
+bool InstanceBase::IsBeginCaptureOnStartupEnabled() const {
+    return mBeginCaptureOnStartup;
+}
+
+void InstanceBase::SetPlatform(dawn::platform::Platform* platform) {
+    if (platform == nullptr) {
+        mPlatform = mDefaultPlatform.get();
+    } else {
+        mPlatform = platform;
+    }
+    mBlobCache = std::make_unique<BlobCache>(GetCachingInterface(platform));
+}
+
+void InstanceBase::SetPlatformForTesting(dawn::platform::Platform* platform) {
+    SetPlatform(platform);
+}
+
+dawn::platform::Platform* InstanceBase::GetPlatform() {
+    return mPlatform;
+}
+
+BlobCache* InstanceBase::GetBlobCache() {
+    return mBlobCache.get();
+}
+
+const std::vector<std::string>& InstanceBase::GetRuntimeSearchPaths() const {
+    return mRuntimeSearchPaths;
+}
+
+const XlibXcbFunctions* InstanceBase::GetOrCreateXlibXcbFunctions() {
 #if defined(DAWN_USE_X11)
-        if (mXlibXcbFunctions == nullptr) {
-            mXlibXcbFunctions = std::make_unique<XlibXcbFunctions>();
-        }
-        return mXlibXcbFunctions.get();
+    if (mXlibXcbFunctions == nullptr) {
+        mXlibXcbFunctions = std::make_unique<XlibXcbFunctions>();
+    }
+    return mXlibXcbFunctions.get();
 #else
-        UNREACHABLE();
+    UNREACHABLE();
 #endif  // defined(DAWN_USE_X11)
+}
+
+Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
+    if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
+        return nullptr;
     }
 
-    Surface* InstanceBase::APICreateSurface(const SurfaceDescriptor* descriptor) {
-        if (ConsumedError(ValidateSurfaceDescriptor(this, descriptor))) {
-            return nullptr;
-        }
-
-        return new Surface(this, descriptor);
-    }
+    return new Surface(this, descriptor);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Instance.h b/src/dawn/native/Instance.h
index 73f1084..578ee69 100644
--- a/src/dawn/native/Instance.h
+++ b/src/dawn/native/Instance.h
@@ -31,107 +31,106 @@
 #include "dawn/native/dawn_platform.h"
 
 namespace dawn::platform {
-    class Platform;
+class Platform;
 }  // namespace dawn::platform
 
 namespace dawn::native {
 
-    class Surface;
-    class XlibXcbFunctions;
+class Surface;
+class XlibXcbFunctions;
 
-    using BackendsBitset = ityp::bitset<wgpu::BackendType, kEnumCount<wgpu::BackendType>>;
+using BackendsBitset = ityp::bitset<wgpu::BackendType, kEnumCount<wgpu::BackendType>>;
 
-    InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor);
+InstanceBase* APICreateInstance(const InstanceDescriptor* descriptor);
 
-    // This is called InstanceBase for consistency across the frontend, even if the backends don't
-    // specialize this class.
-    class InstanceBase final : public RefCounted {
-      public:
-        static Ref<InstanceBase> Create(const InstanceDescriptor* descriptor = nullptr);
+// This is called InstanceBase for consistency across the frontend, even if the backends don't
+// specialize this class.
+class InstanceBase final : public RefCounted {
+  public:
+    static Ref<InstanceBase> Create(const InstanceDescriptor* descriptor = nullptr);
 
-        void APIRequestAdapter(const RequestAdapterOptions* options,
-                               WGPURequestAdapterCallback callback,
-                               void* userdata);
+    void APIRequestAdapter(const RequestAdapterOptions* options,
+                           WGPURequestAdapterCallback callback,
+                           void* userdata);
 
-        void DiscoverDefaultAdapters();
-        bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
+    void DiscoverDefaultAdapters();
+    bool DiscoverAdapters(const AdapterDiscoveryOptionsBase* options);
 
-        const std::vector<Ref<AdapterBase>>& GetAdapters() const;
+    const std::vector<Ref<AdapterBase>>& GetAdapters() const;
 
-        // Used to handle error that happen up to device creation.
-        bool ConsumedError(MaybeError maybeError);
+    // Used to handle error that happen up to device creation.
+    bool ConsumedError(MaybeError maybeError);
 
-        // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
-        // of a toggle supported in Dawn.
-        const ToggleInfo* GetToggleInfo(const char* toggleName);
-        Toggle ToggleNameToEnum(const char* toggleName);
+    // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+    // of a toggle supported in Dawn.
+    const ToggleInfo* GetToggleInfo(const char* toggleName);
+    Toggle ToggleNameToEnum(const char* toggleName);
 
-        // Used to query the details of an feature. Return nullptr if featureName is not a valid
-        // name of an feature supported in Dawn.
-        const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature);
+    // Used to query the details of an feature. Return nullptr if featureName is not a valid
+    // name of an feature supported in Dawn.
+    const FeatureInfo* GetFeatureInfo(wgpu::FeatureName feature);
 
-        bool IsBackendValidationEnabled() const;
-        void SetBackendValidationLevel(BackendValidationLevel level);
-        BackendValidationLevel GetBackendValidationLevel() const;
+    bool IsBackendValidationEnabled() const;
+    void SetBackendValidationLevel(BackendValidationLevel level);
+    BackendValidationLevel GetBackendValidationLevel() const;
 
-        void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
-        bool IsBeginCaptureOnStartupEnabled() const;
+    void EnableBeginCaptureOnStartup(bool beginCaptureOnStartup);
+    bool IsBeginCaptureOnStartupEnabled() const;
 
-        // TODO(dawn:1374): SetPlatform should become a private helper, and SetPlatformForTesting
-        // will become the NOT thread-safe testing version exposed for special testing cases.
-        void SetPlatform(dawn::platform::Platform* platform);
-        void SetPlatformForTesting(dawn::platform::Platform* platform);
-        dawn::platform::Platform* GetPlatform();
-        BlobCache* GetBlobCache();
+    // TODO(dawn:1374): SetPlatform should become a private helper, and SetPlatformForTesting
+    // will become the NOT thread-safe testing version exposed for special testing cases.
+    void SetPlatform(dawn::platform::Platform* platform);
+    void SetPlatformForTesting(dawn::platform::Platform* platform);
+    dawn::platform::Platform* GetPlatform();
+    BlobCache* GetBlobCache();
 
-        const std::vector<std::string>& GetRuntimeSearchPaths() const;
+    const std::vector<std::string>& GetRuntimeSearchPaths() const;
 
-        // Get backend-independent libraries that need to be loaded dynamically.
-        const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
+    // Get backend-independent libraries that need to be loaded dynamically.
+    const XlibXcbFunctions* GetOrCreateXlibXcbFunctions();
 
-        // Dawn API
-        Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
+    // Dawn API
+    Surface* APICreateSurface(const SurfaceDescriptor* descriptor);
 
-      private:
-        InstanceBase() = default;
-        ~InstanceBase() = default;
+  private:
+    InstanceBase() = default;
+    ~InstanceBase() = default;
 
-        InstanceBase(const InstanceBase& other) = delete;
-        InstanceBase& operator=(const InstanceBase& other) = delete;
+    InstanceBase(const InstanceBase& other) = delete;
+    InstanceBase& operator=(const InstanceBase& other) = delete;
 
-        MaybeError Initialize(const InstanceDescriptor* descriptor);
+    MaybeError Initialize(const InstanceDescriptor* descriptor);
 
-        // Lazily creates connections to all backends that have been compiled.
-        void EnsureBackendConnection(wgpu::BackendType backendType);
+    // Lazily creates connections to all backends that have been compiled.
+    void EnsureBackendConnection(wgpu::BackendType backendType);
 
-        MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
+    MaybeError DiscoverAdaptersInternal(const AdapterDiscoveryOptionsBase* options);
 
-        ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(
-            const RequestAdapterOptions* options);
+    ResultOrError<Ref<AdapterBase>> RequestAdapterInternal(const RequestAdapterOptions* options);
 
-        std::vector<std::string> mRuntimeSearchPaths;
+    std::vector<std::string> mRuntimeSearchPaths;
 
-        BackendsBitset mBackendsConnected;
+    BackendsBitset mBackendsConnected;
 
-        bool mDiscoveredDefaultAdapters = false;
+    bool mDiscoveredDefaultAdapters = false;
 
-        bool mBeginCaptureOnStartup = false;
-        BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
+    bool mBeginCaptureOnStartup = false;
+    BackendValidationLevel mBackendValidationLevel = BackendValidationLevel::Disabled;
 
-        dawn::platform::Platform* mPlatform = nullptr;
-        std::unique_ptr<dawn::platform::Platform> mDefaultPlatform;
-        std::unique_ptr<BlobCache> mBlobCache;
+    dawn::platform::Platform* mPlatform = nullptr;
+    std::unique_ptr<dawn::platform::Platform> mDefaultPlatform;
+    std::unique_ptr<BlobCache> mBlobCache;
 
-        std::vector<std::unique_ptr<BackendConnection>> mBackends;
-        std::vector<Ref<AdapterBase>> mAdapters;
+    std::vector<std::unique_ptr<BackendConnection>> mBackends;
+    std::vector<Ref<AdapterBase>> mAdapters;
 
-        FeaturesInfo mFeaturesInfo;
-        TogglesInfo mTogglesInfo;
+    FeaturesInfo mFeaturesInfo;
+    TogglesInfo mTogglesInfo;
 
 #if defined(DAWN_USE_X11)
-        std::unique_ptr<XlibXcbFunctions> mXlibXcbFunctions;
+    std::unique_ptr<XlibXcbFunctions> mXlibXcbFunctions;
 #endif  // defined(DAWN_USE_X11)
-    };
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/IntegerTypes.h b/src/dawn/native/IntegerTypes.h
index 221c2ea..0d5c7f8 100644
--- a/src/dawn/native/IntegerTypes.h
+++ b/src/dawn/native/IntegerTypes.h
@@ -21,55 +21,55 @@
 #include "dawn/common/TypedInteger.h"
 
 namespace dawn::native {
-    // Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
-    using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
-    constexpr BindingNumber kMaxBindingNumberTyped = BindingNumber(kMaxBindingNumber);
+// Binding numbers in the shader and BindGroup/BindGroupLayoutDescriptors
+using BindingNumber = TypedInteger<struct BindingNumberT, uint32_t>;
+constexpr BindingNumber kMaxBindingNumberTyped = BindingNumber(kMaxBindingNumber);
 
-    // Binding numbers get mapped to a packed range of indices
-    using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
+// Binding numbers get mapped to a packed range of indices
+using BindingIndex = TypedInteger<struct BindingIndexT, uint32_t>;
 
-    using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
+using BindGroupIndex = TypedInteger<struct BindGroupIndexT, uint32_t>;
 
-    constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
+constexpr BindGroupIndex kMaxBindGroupsTyped = BindGroupIndex(kMaxBindGroups);
 
-    using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexT, uint8_t>;
+using ColorAttachmentIndex = TypedInteger<struct ColorAttachmentIndexT, uint8_t>;
 
-    constexpr ColorAttachmentIndex kMaxColorAttachmentsTyped =
-        ColorAttachmentIndex(kMaxColorAttachments);
+constexpr ColorAttachmentIndex kMaxColorAttachmentsTyped =
+    ColorAttachmentIndex(kMaxColorAttachments);
 
-    using VertexBufferSlot = TypedInteger<struct VertexBufferSlotT, uint8_t>;
-    using VertexAttributeLocation = TypedInteger<struct VertexAttributeLocationT, uint8_t>;
+using VertexBufferSlot = TypedInteger<struct VertexBufferSlotT, uint8_t>;
+using VertexAttributeLocation = TypedInteger<struct VertexAttributeLocationT, uint8_t>;
 
-    constexpr VertexBufferSlot kMaxVertexBuffersTyped = VertexBufferSlot(kMaxVertexBuffers);
-    constexpr VertexAttributeLocation kMaxVertexAttributesTyped =
-        VertexAttributeLocation(kMaxVertexAttributes);
+constexpr VertexBufferSlot kMaxVertexBuffersTyped = VertexBufferSlot(kMaxVertexBuffers);
+constexpr VertexAttributeLocation kMaxVertexAttributesTyped =
+    VertexAttributeLocation(kMaxVertexAttributes);
 
-    // Serials are 64bit integers that are incremented by one each time to produce unique values.
-    // Some serials (like queue serials) are compared numerically to know which one is before
-    // another, while some serials are only checked for equality. We call serials only checked
-    // for equality IDs.
+// Serials are 64bit integers that are incremented by one each time to produce unique values.
+// Some serials (like queue serials) are compared numerically to know which one is before
+// another, while some serials are only checked for equality. We call serials only checked
+// for equality IDs.
 
-    // Buffer mapping requests are stored outside of the buffer while they are being processed and
-    // cannot be invalidated. Instead they are associated with an ID, and when a map request is
-    // finished, the mapping callback is fired only if its ID matches the ID if the last request
-    // that was sent.
-    using MapRequestID = TypedInteger<struct MapRequestIDT, uint64_t>;
+// Buffer mapping requests are stored outside of the buffer while they are being processed and
+// cannot be invalidated. Instead they are associated with an ID, and when a map request is
+// finished, the mapping callback is fired only if its ID matches the ID if the last request
+// that was sent.
+using MapRequestID = TypedInteger<struct MapRequestIDT, uint64_t>;
 
-    // The type for the WebGPU API fence serial values.
-    using FenceAPISerial = TypedInteger<struct FenceAPISerialT, uint64_t>;
+// The type for the WebGPU API fence serial values.
+using FenceAPISerial = TypedInteger<struct FenceAPISerialT, uint64_t>;
 
-    // A serial used to watch the progression of GPU execution on a queue, each time operations
-    // that need to be followed individually are scheduled for execution on a queue, the serial
-    // is incremented by one. This way to know if something is done executing, we just need to
-    // compare its serial with the currently completed serial.
-    using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
-    constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
+// A serial used to watch the progression of GPU execution on a queue, each time operations
+// that need to be followed individually are scheduled for execution on a queue, the serial
+// is incremented by one. This way to know if something is done executing, we just need to
+// compare its serial with the currently completed serial.
+using ExecutionSerial = TypedInteger<struct QueueSerialT, uint64_t>;
+constexpr ExecutionSerial kMaxExecutionSerial = ExecutionSerial(~uint64_t(0));
 
-    // An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
-    // created with a default layout will produce BindGroupLayouts with a non-zero compatibility
-    // token, which prevents them (and any BindGroups created with them) from being used with any
-    // other pipelines.
-    using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
+// An identifier that indicates which Pipeline a BindGroupLayout is compatible with. Pipelines
+// created with a default layout will produce BindGroupLayouts with a non-zero compatibility
+// token, which prevents them (and any BindGroups created with them) from being used with any
+// other pipelines.
+using PipelineCompatibilityToken = TypedInteger<struct PipelineCompatibilityTokenT, uint64_t>;
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/InternalPipelineStore.cpp b/src/dawn/native/InternalPipelineStore.cpp
index 2f6f5be..bc28cb7 100644
--- a/src/dawn/native/InternalPipelineStore.cpp
+++ b/src/dawn/native/InternalPipelineStore.cpp
@@ -23,16 +23,15 @@
 
 namespace dawn::native {
 
-    class RenderPipelineBase;
-    class ShaderModuleBase;
+class RenderPipelineBase;
+class ShaderModuleBase;
 
-    InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
-        : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
-          scratchIndirectStorage(device,
-                                 wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect |
-                                     wgpu::BufferUsage::Storage) {
-    }
+InternalPipelineStore::InternalPipelineStore(DeviceBase* device)
+    : scratchStorage(device, wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Storage),
+      scratchIndirectStorage(
+          device,
+          wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage) {}
 
-    InternalPipelineStore::~InternalPipelineStore() = default;
+InternalPipelineStore::~InternalPipelineStore() = default;
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/InternalPipelineStore.h b/src/dawn/native/InternalPipelineStore.h
index d23363e..3defe67 100644
--- a/src/dawn/native/InternalPipelineStore.h
+++ b/src/dawn/native/InternalPipelineStore.h
@@ -23,37 +23,36 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
-    class RenderPipelineBase;
-    class ShaderModuleBase;
+class DeviceBase;
+class RenderPipelineBase;
+class ShaderModuleBase;
 
-    // Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
-    // long-lived objects scoped to a device and used to support arbitrary pipeline operations.
-    struct InternalPipelineStore {
-        explicit InternalPipelineStore(DeviceBase* device);
-        ~InternalPipelineStore();
+// Every DeviceBase owns an InternalPipelineStore. This is a general-purpose cache for
+// long-lived objects scoped to a device and used to support arbitrary pipeline operations.
+struct InternalPipelineStore {
+    explicit InternalPipelineStore(DeviceBase* device);
+    ~InternalPipelineStore();
 
-        std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>>
-            copyTextureForBrowserPipelines;
+    std::unordered_map<wgpu::TextureFormat, Ref<RenderPipelineBase>> copyTextureForBrowserPipelines;
 
-        Ref<ShaderModuleBase> copyTextureForBrowser;
+    Ref<ShaderModuleBase> copyTextureForBrowser;
 
-        Ref<ComputePipelineBase> timestampComputePipeline;
-        Ref<ShaderModuleBase> timestampCS;
+    Ref<ComputePipelineBase> timestampComputePipeline;
+    Ref<ShaderModuleBase> timestampCS;
 
-        Ref<ShaderModuleBase> placeholderFragmentShader;
+    Ref<ShaderModuleBase> placeholderFragmentShader;
 
-        // A scratch buffer suitable for use as a copy destination and storage binding.
-        ScratchBuffer scratchStorage;
+    // A scratch buffer suitable for use as a copy destination and storage binding.
+    ScratchBuffer scratchStorage;
 
-        // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
-        // buffer for indirect dispatch or draw calls.
-        ScratchBuffer scratchIndirectStorage;
+    // A scratch buffer suitable for use as a copy destination, storage binding, and indirect
+    // buffer for indirect dispatch or draw calls.
+    ScratchBuffer scratchIndirectStorage;
 
-        Ref<ComputePipelineBase> renderValidationPipeline;
-        Ref<ShaderModuleBase> renderValidationShader;
-        Ref<ComputePipelineBase> dispatchIndirectValidationPipeline;
-    };
+    Ref<ComputePipelineBase> renderValidationPipeline;
+    Ref<ShaderModuleBase> renderValidationShader;
+    Ref<ComputePipelineBase> dispatchIndirectValidationPipeline;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Limits.cpp b/src/dawn/native/Limits.cpp
index 7812fb1..2ee5d69 100644
--- a/src/dawn/native/Limits.cpp
+++ b/src/dawn/native/Limits.cpp
@@ -69,88 +69,88 @@
     LIMITS_OTHER(X)
 
 namespace dawn::native {
-    namespace {
-        template <uint32_t A, uint32_t B>
-        constexpr void StaticAssertSame() {
-            static_assert(A == B, "Mismatching tier count in limit group.");
-        }
+namespace {
+template <uint32_t A, uint32_t B>
+constexpr void StaticAssertSame() {
+    static_assert(A == B, "Mismatching tier count in limit group.");
+}
 
-        template <uint32_t I, uint32_t... Is>
-        constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
-            int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
-            DAWN_UNUSED(unused);
-            return I;
-        }
+template <uint32_t I, uint32_t... Is>
+constexpr uint32_t ReduceSameValue(std::integer_sequence<uint32_t, I, Is...>) {
+    int unused[] = {0, (StaticAssertSame<I, Is>(), 0)...};
+    DAWN_UNUSED(unused);
+    return I;
+}
 
-        enum class LimitClass {
-            Alignment,
-            Maximum,
-        };
+enum class LimitClass {
+    Alignment,
+    Maximum,
+};
 
-        template <LimitClass C>
-        struct CheckLimit;
+template <LimitClass C>
+struct CheckLimit;
 
-        template <>
-        struct CheckLimit<LimitClass::Alignment> {
-            template <typename T>
-            static bool IsBetter(T lhs, T rhs) {
-                return lhs < rhs;
-            }
-
-            template <typename T>
-            static MaybeError Validate(T supported, T required) {
-                DAWN_INVALID_IF(IsBetter(required, supported),
-                                "Required limit (%u) is lower than the supported limit (%u).",
-                                required, supported);
-                DAWN_INVALID_IF(!IsPowerOfTwo(required),
-                                "Required limit (%u) is not a power of two.", required);
-                return {};
-            }
-        };
-
-        template <>
-        struct CheckLimit<LimitClass::Maximum> {
-            template <typename T>
-            static bool IsBetter(T lhs, T rhs) {
-                return lhs > rhs;
-            }
-
-            template <typename T>
-            static MaybeError Validate(T supported, T required) {
-                DAWN_INVALID_IF(IsBetter(required, supported),
-                                "Required limit (%u) is greater than the supported limit (%u).",
-                                required, supported);
-                return {};
-            }
-        };
-
-        template <typename T>
-        bool IsLimitUndefined(T value) {
-            static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
-            return false;
-        }
-
-        template <>
-        bool IsLimitUndefined<uint32_t>(uint32_t value) {
-            return value == wgpu::kLimitU32Undefined;
-        }
-
-        template <>
-        bool IsLimitUndefined<uint64_t>(uint64_t value) {
-            return value == wgpu::kLimitU64Undefined;
-        }
-
-    }  // namespace
-
-    void GetDefaultLimits(Limits* limits) {
-        ASSERT(limits != nullptr);
-#define X(Better, limitName, base, ...) limits->limitName = base;
-        LIMITS(X)
-#undef X
+template <>
+struct CheckLimit<LimitClass::Alignment> {
+    template <typename T>
+    static bool IsBetter(T lhs, T rhs) {
+        return lhs < rhs;
     }
 
-    Limits ReifyDefaultLimits(const Limits& limits) {
-        Limits out;
+    template <typename T>
+    static MaybeError Validate(T supported, T required) {
+        DAWN_INVALID_IF(IsBetter(required, supported),
+                        "Required limit (%u) is lower than the supported limit (%u).", required,
+                        supported);
+        DAWN_INVALID_IF(!IsPowerOfTwo(required), "Required limit (%u) is not a power of two.",
+                        required);
+        return {};
+    }
+};
+
+template <>
+struct CheckLimit<LimitClass::Maximum> {
+    template <typename T>
+    static bool IsBetter(T lhs, T rhs) {
+        return lhs > rhs;
+    }
+
+    template <typename T>
+    static MaybeError Validate(T supported, T required) {
+        DAWN_INVALID_IF(IsBetter(required, supported),
+                        "Required limit (%u) is greater than the supported limit (%u).", required,
+                        supported);
+        return {};
+    }
+};
+
+template <typename T>
+bool IsLimitUndefined(T value) {
+    static_assert(sizeof(T) != sizeof(T), "IsLimitUndefined not implemented for this type");
+    return false;
+}
+
+template <>
+bool IsLimitUndefined<uint32_t>(uint32_t value) {
+    return value == wgpu::kLimitU32Undefined;
+}
+
+template <>
+bool IsLimitUndefined<uint64_t>(uint64_t value) {
+    return value == wgpu::kLimitU64Undefined;
+}
+
+}  // namespace
+
+void GetDefaultLimits(Limits* limits) {
+    ASSERT(limits != nullptr);
+#define X(Better, limitName, base, ...) limits->limitName = base;
+    LIMITS(X)
+#undef X
+}
+
+Limits ReifyDefaultLimits(const Limits& limits) {
+    Limits out;
 #define X(Class, limitName, base, ...)                                                         \
     if (IsLimitUndefined(limits.limitName) ||                                                  \
         CheckLimit<LimitClass::Class>::IsBetter(static_cast<decltype(limits.limitName)>(base), \
@@ -160,24 +160,24 @@
     } else {                                                                                   \
         out.limitName = limits.limitName;                                                      \
     }
-        LIMITS(X)
+    LIMITS(X)
 #undef X
-        return out;
-    }
+    return out;
+}
 
-    MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
+MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits) {
 #define X(Class, limitName, ...)                                                            \
     if (!IsLimitUndefined(requiredLimits.limitName)) {                                      \
         DAWN_TRY_CONTEXT(CheckLimit<LimitClass::Class>::Validate(supportedLimits.limitName, \
                                                                  requiredLimits.limitName), \
                          "validating " #limitName);                                         \
     }
-        LIMITS(X)
+    LIMITS(X)
 #undef X
-        return {};
-    }
+    return {};
+}
 
-    Limits ApplyLimitTiers(Limits limits) {
+Limits ApplyLimitTiers(Limits limits) {
 #define X_TIER_COUNT(Better, limitName, ...) , std::integer_sequence<uint64_t, __VA_ARGS__>{}.size()
 #define GET_TIER_COUNT(LIMIT_GROUP) \
     ReduceSameValue(std::integer_sequence<uint32_t LIMIT_GROUP(X_TIER_COUNT)>{})
@@ -205,12 +205,12 @@
         }                                                                                 \
     }
 
-        LIMITS_EACH_GROUP(X_EACH_GROUP)
+    LIMITS_EACH_GROUP(X_EACH_GROUP)
 #undef X_CHECK_BETTER
 #undef X_EACH_GROUP
 #undef GET_TIER_COUNT
 #undef X_TIER_COUNT
-        return limits;
-    }
+    return limits;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Limits.h b/src/dawn/native/Limits.h
index c8724e4..cc81742 100644
--- a/src/dawn/native/Limits.h
+++ b/src/dawn/native/Limits.h
@@ -20,23 +20,23 @@
 
 namespace dawn::native {
 
-    struct CombinedLimits {
-        Limits v1;
-    };
+struct CombinedLimits {
+    Limits v1;
+};
 
-    // Populate |limits| with the default limits.
-    void GetDefaultLimits(Limits* limits);
+// Populate |limits| with the default limits.
+void GetDefaultLimits(Limits* limits);
 
-    // Returns a copy of |limits| where all undefined values are replaced
-    // with their defaults. Also clamps to the defaults if the provided limits
-    // are worse.
-    Limits ReifyDefaultLimits(const Limits& limits);
+// Returns a copy of |limits| where all undefined values are replaced
+// with their defaults. Also clamps to the defaults if the provided limits
+// are worse.
+Limits ReifyDefaultLimits(const Limits& limits);
 
-    // Validate that |requiredLimits| are no better than |supportedLimits|.
-    MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
+// Validate that |requiredLimits| are no better than |supportedLimits|.
+MaybeError ValidateLimits(const Limits& supportedLimits, const Limits& requiredLimits);
 
-    // Returns a copy of |limits| where limit tiers are applied.
-    Limits ApplyLimitTiers(Limits limits);
+// Returns a copy of |limits| where limit tiers are applied.
+Limits ApplyLimitTiers(Limits limits);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ObjectBase.cpp b/src/dawn/native/ObjectBase.cpp
index 57dd59d..d549195 100644
--- a/src/dawn/native/ObjectBase.cpp
+++ b/src/dawn/native/ObjectBase.cpp
@@ -14,77 +14,70 @@
 
 #include <mutex>
 
-#include "dawn/native/ObjectBase.h"
 #include "dawn/native/Device.h"
+#include "dawn/native/ObjectBase.h"
 
 namespace dawn::native {
 
-    static constexpr uint64_t kErrorPayload = 0;
-    static constexpr uint64_t kNotErrorPayload = 1;
+static constexpr uint64_t kErrorPayload = 0;
+static constexpr uint64_t kNotErrorPayload = 1;
 
-    ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {
-    }
+ObjectBase::ObjectBase(DeviceBase* device) : RefCounted(kNotErrorPayload), mDevice(device) {}
 
-    ObjectBase::ObjectBase(DeviceBase* device, ErrorTag)
-        : RefCounted(kErrorPayload), mDevice(device) {
-    }
+ObjectBase::ObjectBase(DeviceBase* device, ErrorTag) : RefCounted(kErrorPayload), mDevice(device) {}
 
-    DeviceBase* ObjectBase::GetDevice() const {
-        return mDevice;
-    }
+DeviceBase* ObjectBase::GetDevice() const {
+    return mDevice;
+}
 
-    bool ObjectBase::IsError() const {
-        return GetRefCountPayload() == kErrorPayload;
-    }
+bool ObjectBase::IsError() const {
+    return GetRefCountPayload() == kErrorPayload;
+}
 
-    ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
-        if (label) {
-            mLabel = label;
-        }
-    }
-
-    ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {
-    }
-
-    ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag)
-        : ObjectBase(device) {
-    }
-
-    ApiObjectBase::~ApiObjectBase() {
-        ASSERT(!IsAlive());
-    }
-
-    void ApiObjectBase::APISetLabel(const char* label) {
+ApiObjectBase::ApiObjectBase(DeviceBase* device, const char* label) : ObjectBase(device) {
+    if (label) {
         mLabel = label;
-        SetLabelImpl();
     }
+}
 
-    const std::string& ApiObjectBase::GetLabel() const {
-        return mLabel;
-    }
+ApiObjectBase::ApiObjectBase(DeviceBase* device, ErrorTag tag) : ObjectBase(device, tag) {}
 
-    void ApiObjectBase::SetLabelImpl() {
-    }
+ApiObjectBase::ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag) : ObjectBase(device) {}
 
-    bool ApiObjectBase::IsAlive() const {
-        return IsInList();
-    }
+ApiObjectBase::~ApiObjectBase() {
+    ASSERT(!IsAlive());
+}
 
-    void ApiObjectBase::DeleteThis() {
-        Destroy();
-        RefCounted::DeleteThis();
-    }
+void ApiObjectBase::APISetLabel(const char* label) {
+    mLabel = label;
+    SetLabelImpl();
+}
 
-    void ApiObjectBase::TrackInDevice() {
-        ASSERT(GetDevice() != nullptr);
-        GetDevice()->TrackObject(this);
-    }
+const std::string& ApiObjectBase::GetLabel() const {
+    return mLabel;
+}
 
-    void ApiObjectBase::Destroy() {
-        const std::lock_guard<std::mutex> lock(*GetDevice()->GetObjectListMutex(GetType()));
-        if (RemoveFromList()) {
-            DestroyImpl();
-        }
+void ApiObjectBase::SetLabelImpl() {}
+
+bool ApiObjectBase::IsAlive() const {
+    return IsInList();
+}
+
+void ApiObjectBase::DeleteThis() {
+    Destroy();
+    RefCounted::DeleteThis();
+}
+
+void ApiObjectBase::TrackInDevice() {
+    ASSERT(GetDevice() != nullptr);
+    GetDevice()->TrackObject(this);
+}
+
+void ApiObjectBase::Destroy() {
+    const std::lock_guard<std::mutex> lock(*GetDevice()->GetObjectListMutex(GetType()));
+    if (RemoveFromList()) {
+        DestroyImpl();
     }
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/ObjectBase.h b/src/dawn/native/ObjectBase.h
index bf3ec14..1060f01 100644
--- a/src/dawn/native/ObjectBase.h
+++ b/src/dawn/native/ObjectBase.h
@@ -23,74 +23,74 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    class ObjectBase : public RefCounted {
-      public:
-        struct ErrorTag {};
-        static constexpr ErrorTag kError = {};
+class ObjectBase : public RefCounted {
+  public:
+    struct ErrorTag {};
+    static constexpr ErrorTag kError = {};
 
-        explicit ObjectBase(DeviceBase* device);
-        ObjectBase(DeviceBase* device, ErrorTag tag);
+    explicit ObjectBase(DeviceBase* device);
+    ObjectBase(DeviceBase* device, ErrorTag tag);
 
-        DeviceBase* GetDevice() const;
-        bool IsError() const;
+    DeviceBase* GetDevice() const;
+    bool IsError() const;
 
-      private:
-        // Pointer to owning device.
-        DeviceBase* mDevice;
-    };
+  private:
+    // Pointer to owning device.
+    DeviceBase* mDevice;
+};
 
-    class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
-      public:
-        struct LabelNotImplementedTag {};
-        static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
-        struct UntrackedByDeviceTag {};
-        static constexpr UntrackedByDeviceTag kUntrackedByDevice = {};
+class ApiObjectBase : public ObjectBase, public LinkNode<ApiObjectBase> {
+  public:
+    struct LabelNotImplementedTag {};
+    static constexpr LabelNotImplementedTag kLabelNotImplemented = {};
+    struct UntrackedByDeviceTag {};
+    static constexpr UntrackedByDeviceTag kUntrackedByDevice = {};
 
-        ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
-        ApiObjectBase(DeviceBase* device, const char* label);
-        ApiObjectBase(DeviceBase* device, ErrorTag tag);
-        ~ApiObjectBase() override;
+    ApiObjectBase(DeviceBase* device, LabelNotImplementedTag tag);
+    ApiObjectBase(DeviceBase* device, const char* label);
+    ApiObjectBase(DeviceBase* device, ErrorTag tag);
+    ~ApiObjectBase() override;
 
-        virtual ObjectType GetType() const = 0;
-        const std::string& GetLabel() const;
+    virtual ObjectType GetType() const = 0;
+    const std::string& GetLabel() const;
 
-        // The ApiObjectBase is considered alive if it is tracked in a respective linked list owned
-        // by the owning device.
-        bool IsAlive() const;
+    // The ApiObjectBase is considered alive if it is tracked in a respective linked list owned
+    // by the owning device.
+    bool IsAlive() const;
 
-        // This needs to be public because it can be called from the device owning the object.
-        void Destroy();
+    // This needs to be public because it can be called from the device owning the object.
+    void Destroy();
 
-        // Dawn API
-        void APISetLabel(const char* label);
+    // Dawn API
+    void APISetLabel(const char* label);
 
-      protected:
-        // Overriding of the RefCounted's DeleteThis function ensures that instances of objects
-        // always call their derived class implementation of Destroy prior to the derived
-        // class being destroyed. This guarantees that when ApiObjects' reference counts drop to 0,
-        // then the underlying backend's Destroy calls are executed. We cannot naively put the call
-        // to Destroy in the destructor of this class because it calls DestroyImpl
-        // which is a virtual function often implemented in the Derived class which would already
-        // have been destroyed by the time ApiObject's destructor is called by C++'s destruction
-        // order. Note that some classes like BindGroup may override the DeleteThis function again,
-        // and they should ensure that their overriding versions call this underlying version
-        // somewhere.
-        void DeleteThis() override;
-        void TrackInDevice();
+  protected:
+    // Overriding of the RefCounted's DeleteThis function ensures that instances of objects
+    // always call their derived class implementation of Destroy prior to the derived
+    // class being destroyed. This guarantees that when ApiObjects' reference counts drop to 0,
+    // then the underlying backend's Destroy calls are executed. We cannot naively put the call
+    // to Destroy in the destructor of this class because it calls DestroyImpl
+    // which is a virtual function often implemented in the Derived class which would already
+    // have been destroyed by the time ApiObject's destructor is called by C++'s destruction
+    // order. Note that some classes like BindGroup may override the DeleteThis function again,
+    // and they should ensure that their overriding versions call this underlying version
+    // somewhere.
+    void DeleteThis() override;
+    void TrackInDevice();
 
-        // Sub-classes may override this function multiple times. Whenever overriding this function,
-        // however, users should be sure to call their parent's version in the new override to make
-        // sure that all destroy functionality is kept. This function is guaranteed to only be
-        // called once through the exposed Destroy function.
-        virtual void DestroyImpl() = 0;
+    // Sub-classes may override this function multiple times. Whenever overriding this function,
+    // however, users should be sure to call their parent's version in the new override to make
+    // sure that all destroy functionality is kept. This function is guaranteed to only be
+    // called once through the exposed Destroy function.
+    virtual void DestroyImpl() = 0;
 
-      private:
-        virtual void SetLabelImpl();
+  private:
+    virtual void SetLabelImpl();
 
-        std::string mLabel;
-    };
+    std::string mLabel;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ObjectContentHasher.cpp b/src/dawn/native/ObjectContentHasher.cpp
index 58c892e..caea392 100644
--- a/src/dawn/native/ObjectContentHasher.cpp
+++ b/src/dawn/native/ObjectContentHasher.cpp
@@ -16,7 +16,7 @@
 
 namespace dawn::native {
 
-    size_t ObjectContentHasher::GetContentHash() const {
-        return mContentHash;
-    }
+size_t ObjectContentHasher::GetContentHash() const {
+    return mContentHash;
+}
 }  // namespace dawn::native
diff --git a/src/dawn/native/ObjectContentHasher.h b/src/dawn/native/ObjectContentHasher.h
index 152a603..4211fb3 100644
--- a/src/dawn/native/ObjectContentHasher.h
+++ b/src/dawn/native/ObjectContentHasher.h
@@ -22,61 +22,61 @@
 
 namespace dawn::native {
 
-    // ObjectContentHasher records a hash that can be used as a key to lookup a cached object in a
-    // cache.
-    class ObjectContentHasher {
-      public:
-        // Record calls the appropriate record function based on the type.
-        template <typename T, typename... Args>
-        void Record(const T& value, const Args&... args) {
-            RecordImpl<T, Args...>::Call(this, value, args...);
-        }
+// ObjectContentHasher records a hash that can be used as a key to lookup a cached object in a
+// cache.
+class ObjectContentHasher {
+  public:
+    // Record calls the appropriate record function based on the type.
+    template <typename T, typename... Args>
+    void Record(const T& value, const Args&... args) {
+        RecordImpl<T, Args...>::Call(this, value, args...);
+    }
 
-        size_t GetContentHash() const;
+    size_t GetContentHash() const;
 
-      private:
-        template <typename T, typename... Args>
-        struct RecordImpl {
-            static constexpr void Call(ObjectContentHasher* recorder,
-                                       const T& value,
-                                       const Args&... args) {
-                HashCombine(&recorder->mContentHash, value, args...);
-            }
-        };
-
-        template <typename T>
-        struct RecordImpl<T*> {
-            static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
-                // Calling Record(objPtr) is not allowed. This check exists to only prevent such
-                // mistakes.
-                static_assert(obj == nullptr);
-            }
-        };
-
-        template <typename T>
-        struct RecordImpl<std::vector<T>> {
-            static constexpr void Call(ObjectContentHasher* recorder, const std::vector<T>& vec) {
-                recorder->RecordIterable<std::vector<T>>(vec);
-            }
-        };
-
-        template <typename IteratorT>
-        constexpr void RecordIterable(const IteratorT& iterable) {
-            for (auto it = iterable.begin(); it != iterable.end(); ++it) {
-                Record(*it);
-            }
-        }
-
-        size_t mContentHash = 0;
-    };
-
-    template <>
-    struct ObjectContentHasher::RecordImpl<std::string> {
-        static constexpr void Call(ObjectContentHasher* recorder, const std::string& str) {
-            recorder->RecordIterable<std::string>(str);
+  private:
+    template <typename T, typename... Args>
+    struct RecordImpl {
+        static constexpr void Call(ObjectContentHasher* recorder,
+                                   const T& value,
+                                   const Args&... args) {
+            HashCombine(&recorder->mContentHash, value, args...);
         }
     };
 
+    template <typename T>
+    struct RecordImpl<T*> {
+        static constexpr void Call(ObjectContentHasher* recorder, T* obj) {
+            // Calling Record(objPtr) is not allowed. This check exists to only prevent such
+            // mistakes.
+            static_assert(obj == nullptr);
+        }
+    };
+
+    template <typename T>
+    struct RecordImpl<std::vector<T>> {
+        static constexpr void Call(ObjectContentHasher* recorder, const std::vector<T>& vec) {
+            recorder->RecordIterable<std::vector<T>>(vec);
+        }
+    };
+
+    template <typename IteratorT>
+    constexpr void RecordIterable(const IteratorT& iterable) {
+        for (auto it = iterable.begin(); it != iterable.end(); ++it) {
+            Record(*it);
+        }
+    }
+
+    size_t mContentHash = 0;
+};
+
+template <>
+struct ObjectContentHasher::RecordImpl<std::string> {
+    static constexpr void Call(ObjectContentHasher* recorder, const std::string& str) {
+        recorder->RecordIterable<std::string>(str);
+    }
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_OBJECTCONTENTHASHER_H_
diff --git a/src/dawn/native/PassResourceUsage.h b/src/dawn/native/PassResourceUsage.h
index 1e3ab56..1fa4130 100644
--- a/src/dawn/native/PassResourceUsage.h
+++ b/src/dawn/native/PassResourceUsage.h
@@ -23,77 +23,77 @@
 
 namespace dawn::native {
 
-    // This file declares various "ResourceUsage" structures. They are produced by the frontend
-    // while recording commands to be used for later validation and also some operations in the
-    // backends. The are produced by the "Encoder" objects that finalize them on "EndPass" or
-    // "Finish". Internally the "Encoder" may use the "StateTracker" to create them.
+// This file declares various "ResourceUsage" structures. They are produced by the frontend
+// while recording commands to be used for later validation and also some operations in the
+// backends. They are produced by the "Encoder" objects that finalize them on "EndPass" or
+// "Finish". Internally the "Encoder" may use the "StateTracker" to create them.
 
-    class BufferBase;
-    class QuerySetBase;
-    class TextureBase;
+class BufferBase;
+class QuerySetBase;
+class TextureBase;
 
-    // The texture usage inside passes must be tracked per-subresource.
-    using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
+// The texture usage inside passes must be tracked per-subresource.
+using TextureSubresourceUsage = SubresourceStorage<wgpu::TextureUsage>;
 
-    // Which resources are used by a synchronization scope and how they are used. The command
-    // buffer validation pre-computes this information so that backends with explicit barriers
-    // don't have to re-compute it.
-    struct SyncScopeResourceUsage {
-        std::vector<BufferBase*> buffers;
-        std::vector<wgpu::BufferUsage> bufferUsages;
+// Which resources are used by a synchronization scope and how they are used. The command
+// buffer validation pre-computes this information so that backends with explicit barriers
+// don't have to re-compute it.
+struct SyncScopeResourceUsage {
+    std::vector<BufferBase*> buffers;
+    std::vector<wgpu::BufferUsage> bufferUsages;
 
-        std::vector<TextureBase*> textures;
-        std::vector<TextureSubresourceUsage> textureUsages;
+    std::vector<TextureBase*> textures;
+    std::vector<TextureSubresourceUsage> textureUsages;
 
-        std::vector<ExternalTextureBase*> externalTextures;
-    };
+    std::vector<ExternalTextureBase*> externalTextures;
+};
 
-    // Contains all the resource usage data for a compute pass.
-    //
-    // Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
-    // specification. ComputePassResourceUsage also stores nline the set of all buffers and
-    // textures used, because some unused BindGroups may not be used at all in synchronization
-    // scope but their resources still need to be validated on Queue::Submit.
-    struct ComputePassResourceUsage {
-        // Somehow without this defaulted constructor, MSVC or its STDlib have an issue where they
-        // use the copy constructor (that's deleted) when doing operations on a
-        // vector<ComputePassResourceUsage>
-        ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
-        ComputePassResourceUsage() = default;
+// Contains all the resource usage data for a compute pass.
+//
+// Essentially a list of SyncScopeResourceUsage, one per Dispatch as required by the WebGPU
+// specification. ComputePassResourceUsage also stores inline the set of all buffers and
+// textures used, because some unused BindGroups may not be used at all in synchronization
+// scope but their resources still need to be validated on Queue::Submit.
+struct ComputePassResourceUsage {
+    // Somehow without this defaulted constructor, MSVC or its STDlib have an issue where they
+    // use the copy constructor (that's deleted) when doing operations on a
+    // vector<ComputePassResourceUsage>
+    ComputePassResourceUsage(ComputePassResourceUsage&&) = default;
+    ComputePassResourceUsage() = default;
 
-        std::vector<SyncScopeResourceUsage> dispatchUsages;
+    std::vector<SyncScopeResourceUsage> dispatchUsages;
 
-        // All the resources referenced by this compute pass for validation in Queue::Submit.
-        std::set<BufferBase*> referencedBuffers;
-        std::set<TextureBase*> referencedTextures;
-        std::set<ExternalTextureBase*> referencedExternalTextures;
-    };
+    // All the resources referenced by this compute pass for validation in Queue::Submit.
+    std::set<BufferBase*> referencedBuffers;
+    std::set<TextureBase*> referencedTextures;
+    std::set<ExternalTextureBase*> referencedExternalTextures;
+};
 
-    // Contains all the resource usage data for a render pass.
-    //
-    // In the WebGPU specification render passes are synchronization scopes but we also need to
-    // track additional data. It is stored for render passes used by a CommandBuffer, but also in
-    // RenderBundle so they can be merged into the render passes' usage on ExecuteBundles().
-    struct RenderPassResourceUsage : public SyncScopeResourceUsage {
-        // Storage to track the occlusion queries used during the pass.
-        std::vector<QuerySetBase*> querySets;
-        std::vector<std::vector<bool>> queryAvailabilities;
-    };
+// Contains all the resource usage data for a render pass.
+//
+// In the WebGPU specification render passes are synchronization scopes but we also need to
+// track additional data. It is stored for render passes used by a CommandBuffer, but also in
+// RenderBundle so they can be merged into the render passes' usage on ExecuteBundles().
+struct RenderPassResourceUsage : public SyncScopeResourceUsage {
+    // Storage to track the occlusion queries used during the pass.
+    std::vector<QuerySetBase*> querySets;
+    std::vector<std::vector<bool>> queryAvailabilities;
+};
 
-    using RenderPassUsages = std::vector<RenderPassResourceUsage>;
-    using ComputePassUsages = std::vector<ComputePassResourceUsage>;
+using RenderPassUsages = std::vector<RenderPassResourceUsage>;
+using ComputePassUsages = std::vector<ComputePassResourceUsage>;
 
-    // Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
-    // is used for validation and to produce barriers and lazy clears in the backends.
-    struct CommandBufferResourceUsage {
-        RenderPassUsages renderPasses;
-        ComputePassUsages computePasses;
+// Contains a hierarchy of "ResourceUsage" that mirrors the hierarchy of the CommandBuffer and
+// is used for validation and to produce barriers and lazy clears in the backends.
+struct CommandBufferResourceUsage {
+    RenderPassUsages renderPasses;
+    ComputePassUsages computePasses;
 
-        // Resources used in commands that aren't in a pass.
-        std::set<BufferBase*> topLevelBuffers;
-        std::set<TextureBase*> topLevelTextures;
-        std::set<QuerySetBase*> usedQuerySets;
-    };
+    // Resources used in commands that aren't in a pass.
+    std::set<BufferBase*> topLevelBuffers;
+    std::set<TextureBase*> topLevelTextures;
+    std::set<QuerySetBase*> usedQuerySets;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/PassResourceUsageTracker.cpp b/src/dawn/native/PassResourceUsageTracker.cpp
index 7f274e1..f6c56a2 100644
--- a/src/dawn/native/PassResourceUsageTracker.cpp
+++ b/src/dawn/native/PassResourceUsageTracker.cpp
@@ -26,218 +26,217 @@
 
 namespace dawn::native {
 
-    void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
-        // std::map's operator[] will create the key and return 0 if the key didn't exist
-        // before.
-        mBufferUsages[buffer] |= usage;
-    }
+void SyncScopeUsageTracker::BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage) {
+    // std::map's operator[] will create the key and return 0 if the key didn't exist
+    // before.
+    mBufferUsages[buffer] |= usage;
+}
 
-    void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
-        TextureBase* texture = view->GetTexture();
-        const SubresourceRange& range = view->GetSubresourceRange();
+void SyncScopeUsageTracker::TextureViewUsedAs(TextureViewBase* view, wgpu::TextureUsage usage) {
+    TextureBase* texture = view->GetTexture();
+    const SubresourceRange& range = view->GetSubresourceRange();
 
-        // Get or create a new TextureSubresourceUsage for that texture (initially filled with
-        // wgpu::TextureUsage::None)
-        auto it = mTextureUsages.emplace(
-            std::piecewise_construct, std::forward_as_tuple(texture),
-            std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
-                                  texture->GetNumMipLevels(), wgpu::TextureUsage::None));
-        TextureSubresourceUsage& textureUsage = it.first->second;
+    // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+    // wgpu::TextureUsage::None)
+    auto it = mTextureUsages.emplace(
+        std::piecewise_construct, std::forward_as_tuple(texture),
+        std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+                              texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+    TextureSubresourceUsage& textureUsage = it.first->second;
 
-        textureUsage.Update(range,
-                            [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
-                                // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
-                                // branches.
-                                if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
-                                    (usage & wgpu::TextureUsage::RenderAttachment) != 0) {
-                                    // Using the same subresource as an attachment for two different
-                                    // render attachments is a write-write hazard. Add this internal
-                                    // usage so we will fail the check that a subresource with
-                                    // writable usage is the single usage.
-                                    *storedUsage |= kAgainAsRenderAttachment;
-                                }
-                                *storedUsage |= usage;
+    textureUsage.Update(range, [usage](const SubresourceRange&, wgpu::TextureUsage* storedUsage) {
+        // TODO(crbug.com/dawn/1001): Consider optimizing to have fewer
+        // branches.
+        if ((*storedUsage & wgpu::TextureUsage::RenderAttachment) != 0 &&
+            (usage & wgpu::TextureUsage::RenderAttachment) != 0) {
+            // Using the same subresource as an attachment for two different
+            // render attachments is a write-write hazard. Add this internal
+            // usage so we will fail the check that a subresource with
+            // writable usage is the single usage.
+            *storedUsage |= kAgainAsRenderAttachment;
+        }
+        *storedUsage |= usage;
+    });
+}
+
+void SyncScopeUsageTracker::AddRenderBundleTextureUsage(
+    TextureBase* texture,
+    const TextureSubresourceUsage& textureUsage) {
+    // Get or create a new TextureSubresourceUsage for that texture (initially filled with
+    // wgpu::TextureUsage::None)
+    auto it = mTextureUsages.emplace(
+        std::piecewise_construct, std::forward_as_tuple(texture),
+        std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
+                              texture->GetNumMipLevels(), wgpu::TextureUsage::None));
+    TextureSubresourceUsage* passTextureUsage = &it.first->second;
+
+    passTextureUsage->Merge(textureUsage,
+                            [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
+                               const wgpu::TextureUsage& addedUsage) {
+                                ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
+                                *storedUsage |= addedUsage;
                             });
-    }
+}
 
-    void SyncScopeUsageTracker::AddRenderBundleTextureUsage(
-        TextureBase* texture,
-        const TextureSubresourceUsage& textureUsage) {
-        // Get or create a new TextureSubresourceUsage for that texture (initially filled with
-        // wgpu::TextureUsage::None)
-        auto it = mTextureUsages.emplace(
-            std::piecewise_construct, std::forward_as_tuple(texture),
-            std::forward_as_tuple(texture->GetFormat().aspects, texture->GetArrayLayers(),
-                                  texture->GetNumMipLevels(), wgpu::TextureUsage::None));
-        TextureSubresourceUsage* passTextureUsage = &it.first->second;
+void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
+    for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+         ++bindingIndex) {
+        const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
 
-        passTextureUsage->Merge(
-            textureUsage, [](const SubresourceRange&, wgpu::TextureUsage* storedUsage,
-                             const wgpu::TextureUsage& addedUsage) {
-                ASSERT((addedUsage & wgpu::TextureUsage::RenderAttachment) == 0);
-                *storedUsage |= addedUsage;
-            });
-    }
-
-    void SyncScopeUsageTracker::AddBindGroup(BindGroupBase* group) {
-        for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
-             ++bindingIndex) {
-            const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
-
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Buffer: {
-                    BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
-                    switch (bindingInfo.buffer.type) {
-                        case wgpu::BufferBindingType::Uniform:
-                            BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
-                            break;
-                        case wgpu::BufferBindingType::Storage:
-                            BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
-                            break;
-                        case kInternalStorageBufferBinding:
-                            BufferUsedAs(buffer, kInternalStorageBuffer);
-                            break;
-                        case wgpu::BufferBindingType::ReadOnlyStorage:
-                            BufferUsedAs(buffer, kReadOnlyStorageBuffer);
-                            break;
-                        case wgpu::BufferBindingType::Undefined:
-                            UNREACHABLE();
-                    }
-                    break;
+        switch (bindingInfo.bindingType) {
+            case BindingInfoType::Buffer: {
+                BufferBase* buffer = group->GetBindingAsBufferBinding(bindingIndex).buffer;
+                switch (bindingInfo.buffer.type) {
+                    case wgpu::BufferBindingType::Uniform:
+                        BufferUsedAs(buffer, wgpu::BufferUsage::Uniform);
+                        break;
+                    case wgpu::BufferBindingType::Storage:
+                        BufferUsedAs(buffer, wgpu::BufferUsage::Storage);
+                        break;
+                    case kInternalStorageBufferBinding:
+                        BufferUsedAs(buffer, kInternalStorageBuffer);
+                        break;
+                    case wgpu::BufferBindingType::ReadOnlyStorage:
+                        BufferUsedAs(buffer, kReadOnlyStorageBuffer);
+                        break;
+                    case wgpu::BufferBindingType::Undefined:
+                        UNREACHABLE();
                 }
-
-                case BindingInfoType::Texture: {
-                    TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
-                    TextureViewUsedAs(view, wgpu::TextureUsage::TextureBinding);
-                    break;
-                }
-
-                case BindingInfoType::StorageTexture: {
-                    TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
-                    switch (bindingInfo.storageTexture.access) {
-                        case wgpu::StorageTextureAccess::WriteOnly:
-                            TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
-                            break;
-                        case wgpu::StorageTextureAccess::Undefined:
-                            UNREACHABLE();
-                    }
-                    break;
-                }
-
-                case BindingInfoType::ExternalTexture:
-                    UNREACHABLE();
-                    break;
-
-                case BindingInfoType::Sampler:
-                    break;
+                break;
             }
-        }
 
-        for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
-            mExternalTextureUsages.insert(externalTexture.Get());
-        }
-    }
-
-    SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
-        SyncScopeResourceUsage result;
-        result.buffers.reserve(mBufferUsages.size());
-        result.bufferUsages.reserve(mBufferUsages.size());
-        result.textures.reserve(mTextureUsages.size());
-        result.textureUsages.reserve(mTextureUsages.size());
-
-        for (auto& [buffer, usage] : mBufferUsages) {
-            result.buffers.push_back(buffer);
-            result.bufferUsages.push_back(usage);
-        }
-
-        for (auto& [texture, usage] : mTextureUsages) {
-            result.textures.push_back(texture);
-            result.textureUsages.push_back(std::move(usage));
-        }
-
-        for (auto& it : mExternalTextureUsages) {
-            result.externalTextures.push_back(it);
-        }
-
-        mBufferUsages.clear();
-        mTextureUsages.clear();
-        mExternalTextureUsages.clear();
-
-        return result;
-    }
-
-    void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
-        mUsage.dispatchUsages.push_back(std::move(scope));
-    }
-
-    void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
-        mUsage.referencedBuffers.insert(buffer);
-    }
-
-    void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
-        for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
-            const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
-
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Buffer: {
-                    mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
-                    break;
-                }
-
-                case BindingInfoType::Texture: {
-                    mUsage.referencedTextures.insert(
-                        group->GetBindingAsTextureView(index)->GetTexture());
-                    break;
-                }
-
-                case BindingInfoType::ExternalTexture:
-                    UNREACHABLE();
-                case BindingInfoType::StorageTexture:
-                case BindingInfoType::Sampler:
-                    break;
+            case BindingInfoType::Texture: {
+                TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+                TextureViewUsedAs(view, wgpu::TextureUsage::TextureBinding);
+                break;
             }
-        }
 
-        for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
-            mUsage.referencedExternalTextures.insert(externalTexture.Get());
+            case BindingInfoType::StorageTexture: {
+                TextureViewBase* view = group->GetBindingAsTextureView(bindingIndex);
+                switch (bindingInfo.storageTexture.access) {
+                    case wgpu::StorageTextureAccess::WriteOnly:
+                        TextureViewUsedAs(view, wgpu::TextureUsage::StorageBinding);
+                        break;
+                    case wgpu::StorageTextureAccess::Undefined:
+                        UNREACHABLE();
+                }
+                break;
+            }
+
+            case BindingInfoType::ExternalTexture:
+                UNREACHABLE();
+                break;
+
+            case BindingInfoType::Sampler:
+                break;
         }
     }
 
-    ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
-        return std::move(mUsage);
+    for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+        mExternalTextureUsages.insert(externalTexture.Get());
+    }
+}
+
+SyncScopeResourceUsage SyncScopeUsageTracker::AcquireSyncScopeUsage() {
+    SyncScopeResourceUsage result;
+    result.buffers.reserve(mBufferUsages.size());
+    result.bufferUsages.reserve(mBufferUsages.size());
+    result.textures.reserve(mTextureUsages.size());
+    result.textureUsages.reserve(mTextureUsages.size());
+
+    for (auto& [buffer, usage] : mBufferUsages) {
+        result.buffers.push_back(buffer);
+        result.bufferUsages.push_back(usage);
     }
 
-    RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
-        RenderPassResourceUsage result;
-        *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
+    for (auto& [texture, usage] : mTextureUsages) {
+        result.textures.push_back(texture);
+        result.textureUsages.push_back(std::move(usage));
+    }
 
-        result.querySets.reserve(mQueryAvailabilities.size());
-        result.queryAvailabilities.reserve(mQueryAvailabilities.size());
+    for (auto& it : mExternalTextureUsages) {
+        result.externalTextures.push_back(it);
+    }
 
-        for (auto& it : mQueryAvailabilities) {
-            result.querySets.push_back(it.first);
-            result.queryAvailabilities.push_back(std::move(it.second));
+    mBufferUsages.clear();
+    mTextureUsages.clear();
+    mExternalTextureUsages.clear();
+
+    return result;
+}
+
+void ComputePassResourceUsageTracker::AddDispatch(SyncScopeResourceUsage scope) {
+    mUsage.dispatchUsages.push_back(std::move(scope));
+}
+
+void ComputePassResourceUsageTracker::AddReferencedBuffer(BufferBase* buffer) {
+    mUsage.referencedBuffers.insert(buffer);
+}
+
+void ComputePassResourceUsageTracker::AddResourcesReferencedByBindGroup(BindGroupBase* group) {
+    for (BindingIndex index{0}; index < group->GetLayout()->GetBindingCount(); ++index) {
+        const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(index);
+
+        switch (bindingInfo.bindingType) {
+            case BindingInfoType::Buffer: {
+                mUsage.referencedBuffers.insert(group->GetBindingAsBufferBinding(index).buffer);
+                break;
+            }
+
+            case BindingInfoType::Texture: {
+                mUsage.referencedTextures.insert(
+                    group->GetBindingAsTextureView(index)->GetTexture());
+                break;
+            }
+
+            case BindingInfoType::ExternalTexture:
+                UNREACHABLE();
+            case BindingInfoType::StorageTexture:
+            case BindingInfoType::Sampler:
+                break;
         }
-
-        mQueryAvailabilities.clear();
-
-        return result;
     }
 
-    void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
-                                                                uint32_t queryIndex) {
-        // The query availability only needs to be tracked again on render passes for checking
-        // query overwrite on render pass and resetting query sets on the Vulkan backend.
-        DAWN_ASSERT(querySet != nullptr);
+    for (const Ref<ExternalTextureBase>& externalTexture : group->GetBoundExternalTextures()) {
+        mUsage.referencedExternalTextures.insert(externalTexture.Get());
+    }
+}
 
-        // Gets the iterator for that querySet or create a new vector of bool set to false
-        // if the querySet wasn't registered.
-        auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
-        it->second[queryIndex] = true;
+ComputePassResourceUsage ComputePassResourceUsageTracker::AcquireResourceUsage() {
+    return std::move(mUsage);
+}
+
+RenderPassResourceUsage RenderPassResourceUsageTracker::AcquireResourceUsage() {
+    RenderPassResourceUsage result;
+    *static_cast<SyncScopeResourceUsage*>(&result) = AcquireSyncScopeUsage();
+
+    result.querySets.reserve(mQueryAvailabilities.size());
+    result.queryAvailabilities.reserve(mQueryAvailabilities.size());
+
+    for (auto& it : mQueryAvailabilities) {
+        result.querySets.push_back(it.first);
+        result.queryAvailabilities.push_back(std::move(it.second));
     }
 
-    const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
-        return mQueryAvailabilities;
-    }
+    mQueryAvailabilities.clear();
+
+    return result;
+}
+
+void RenderPassResourceUsageTracker::TrackQueryAvailability(QuerySetBase* querySet,
+                                                            uint32_t queryIndex) {
+    // The query availability only needs to be tracked again on render passes for checking
+    // query overwrite on render pass and resetting query sets on the Vulkan backend.
+    DAWN_ASSERT(querySet != nullptr);
+
+    // Gets the iterator for that querySet or create a new vector of bool set to false
+    // if the querySet wasn't registered.
+    auto it = mQueryAvailabilities.emplace(querySet, querySet->GetQueryCount()).first;
+    it->second[queryIndex] = true;
+}
+
+const QueryAvailabilityMap& RenderPassResourceUsageTracker::GetQueryAvailabilityMap() const {
+    return mQueryAvailabilities;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/PassResourceUsageTracker.h b/src/dawn/native/PassResourceUsageTracker.h
index 1864b2c..89ae2a9 100644
--- a/src/dawn/native/PassResourceUsageTracker.h
+++ b/src/dawn/native/PassResourceUsageTracker.h
@@ -25,63 +25,63 @@
 
 namespace dawn::native {
 
-    class BindGroupBase;
-    class BufferBase;
-    class ExternalTextureBase;
-    class QuerySetBase;
-    class TextureBase;
+class BindGroupBase;
+class BufferBase;
+class ExternalTextureBase;
+class QuerySetBase;
+class TextureBase;
 
-    using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
+using QueryAvailabilityMap = std::map<QuerySetBase*, std::vector<bool>>;
 
-    // Helper class to build SyncScopeResourceUsages
-    class SyncScopeUsageTracker {
-      public:
-        void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
-        void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
-        void AddRenderBundleTextureUsage(TextureBase* texture,
-                                         const TextureSubresourceUsage& textureUsage);
+// Helper class to build SyncScopeResourceUsages
+class SyncScopeUsageTracker {
+  public:
+    void BufferUsedAs(BufferBase* buffer, wgpu::BufferUsage usage);
+    void TextureViewUsedAs(TextureViewBase* texture, wgpu::TextureUsage usage);
+    void AddRenderBundleTextureUsage(TextureBase* texture,
+                                     const TextureSubresourceUsage& textureUsage);
 
-        // Walks the bind groups and tracks all its resources.
-        void AddBindGroup(BindGroupBase* group);
+    // Walks the bind groups and tracks all its resources.
+    void AddBindGroup(BindGroupBase* group);
 
-        // Returns the per-pass usage for use by backends for APIs with explicit barriers.
-        SyncScopeResourceUsage AcquireSyncScopeUsage();
+    // Returns the per-pass usage for use by backends for APIs with explicit barriers.
+    SyncScopeResourceUsage AcquireSyncScopeUsage();
 
-      private:
-        std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
-        std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
-        std::set<ExternalTextureBase*> mExternalTextureUsages;
-    };
+  private:
+    std::map<BufferBase*, wgpu::BufferUsage> mBufferUsages;
+    std::map<TextureBase*, TextureSubresourceUsage> mTextureUsages;
+    std::set<ExternalTextureBase*> mExternalTextureUsages;
+};
 
-    // Helper class to build ComputePassResourceUsages
-    class ComputePassResourceUsageTracker {
-      public:
-        void AddDispatch(SyncScopeResourceUsage scope);
-        void AddReferencedBuffer(BufferBase* buffer);
-        void AddResourcesReferencedByBindGroup(BindGroupBase* group);
+// Helper class to build ComputePassResourceUsages
+class ComputePassResourceUsageTracker {
+  public:
+    void AddDispatch(SyncScopeResourceUsage scope);
+    void AddReferencedBuffer(BufferBase* buffer);
+    void AddResourcesReferencedByBindGroup(BindGroupBase* group);
 
-        ComputePassResourceUsage AcquireResourceUsage();
+    ComputePassResourceUsage AcquireResourceUsage();
 
-      private:
-        ComputePassResourceUsage mUsage;
-    };
+  private:
+    ComputePassResourceUsage mUsage;
+};
 
-    // Helper class to build RenderPassResourceUsages
-    class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
-      public:
-        void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
-        const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
+// Helper class to build RenderPassResourceUsages
+class RenderPassResourceUsageTracker : public SyncScopeUsageTracker {
+  public:
+    void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+    const QueryAvailabilityMap& GetQueryAvailabilityMap() const;
 
-        RenderPassResourceUsage AcquireResourceUsage();
+    RenderPassResourceUsage AcquireResourceUsage();
 
-      private:
-        // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
-        // instead.
-        using SyncScopeUsageTracker::AcquireSyncScopeUsage;
+  private:
+    // Hide AcquireSyncScopeUsage since users of this class should use AcquireResourceUsage
+    // instead.
+    using SyncScopeUsageTracker::AcquireSyncScopeUsage;
 
-        // Tracks queries used in the render pass to validate that they aren't written twice.
-        QueryAvailabilityMap mQueryAvailabilities;
-    };
+    // Tracks queries used in the render pass to validate that they aren't written twice.
+    QueryAvailabilityMap mQueryAvailabilities;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/PerStage.cpp b/src/dawn/native/PerStage.cpp
index f3d5dc5..a824689 100644
--- a/src/dawn/native/PerStage.cpp
+++ b/src/dawn/native/PerStage.cpp
@@ -16,14 +16,14 @@
 
 namespace dawn::native {
 
-    BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
-        std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
-        return BitSetIterator<kNumStages, SingleShaderStage>(bits);
-    }
+BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages) {
+    std::bitset<kNumStages> bits(static_cast<uint32_t>(stages));
+    return BitSetIterator<kNumStages, SingleShaderStage>(bits);
+}
 
-    wgpu::ShaderStage StageBit(SingleShaderStage stage) {
-        ASSERT(static_cast<uint32_t>(stage) < kNumStages);
-        return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
-    }
+wgpu::ShaderStage StageBit(SingleShaderStage stage) {
+    ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+    return static_cast<wgpu::ShaderStage>(1 << static_cast<uint32_t>(stage));
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/PerStage.h b/src/dawn/native/PerStage.h
index 3441f1e..3983b11 100644
--- a/src/dawn/native/PerStage.h
+++ b/src/dawn/native/PerStage.h
@@ -26,56 +26,54 @@
 
 namespace dawn::native {
 
-    enum class SingleShaderStage { Vertex, Fragment, Compute };
+enum class SingleShaderStage { Vertex, Fragment, Compute };
 
-    static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages);
-    static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages);
-    static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages);
+static_assert(static_cast<uint32_t>(SingleShaderStage::Vertex) < kNumStages);
+static_assert(static_cast<uint32_t>(SingleShaderStage::Fragment) < kNumStages);
+static_assert(static_cast<uint32_t>(SingleShaderStage::Compute) < kNumStages);
 
-    static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
-                  (1 << static_cast<uint32_t>(SingleShaderStage::Vertex)));
-    static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
-                  (1 << static_cast<uint32_t>(SingleShaderStage::Fragment)));
-    static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
-                  (1 << static_cast<uint32_t>(SingleShaderStage::Compute)));
+static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Vertex) ==
+              (1 << static_cast<uint32_t>(SingleShaderStage::Vertex)));
+static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Fragment) ==
+              (1 << static_cast<uint32_t>(SingleShaderStage::Fragment)));
+static_assert(static_cast<uint32_t>(wgpu::ShaderStage::Compute) ==
+              (1 << static_cast<uint32_t>(SingleShaderStage::Compute)));
 
-    BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
-    wgpu::ShaderStage StageBit(SingleShaderStage stage);
+BitSetIterator<kNumStages, SingleShaderStage> IterateStages(wgpu::ShaderStage stages);
+wgpu::ShaderStage StageBit(SingleShaderStage stage);
 
-    static constexpr wgpu::ShaderStage kAllStages =
-        static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
+static constexpr wgpu::ShaderStage kAllStages =
+    static_cast<wgpu::ShaderStage>((1 << kNumStages) - 1);
 
-    template <typename T>
-    class PerStage {
-      public:
-        PerStage() = default;
-        explicit PerStage(const T& initialValue) {
-            mData.fill(initialValue);
-        }
+template <typename T>
+class PerStage {
+  public:
+    PerStage() = default;
+    explicit PerStage(const T& initialValue) { mData.fill(initialValue); }
 
-        T& operator[](SingleShaderStage stage) {
-            DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
-            return mData[static_cast<uint32_t>(stage)];
-        }
-        const T& operator[](SingleShaderStage stage) const {
-            DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
-            return mData[static_cast<uint32_t>(stage)];
-        }
+    T& operator[](SingleShaderStage stage) {
+        DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+        return mData[static_cast<uint32_t>(stage)];
+    }
+    const T& operator[](SingleShaderStage stage) const {
+        DAWN_ASSERT(static_cast<uint32_t>(stage) < kNumStages);
+        return mData[static_cast<uint32_t>(stage)];
+    }
 
-        T& operator[](wgpu::ShaderStage stageBit) {
-            uint32_t bit = static_cast<uint32_t>(stageBit);
-            DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
-            return mData[Log2(bit)];
-        }
-        const T& operator[](wgpu::ShaderStage stageBit) const {
-            uint32_t bit = static_cast<uint32_t>(stageBit);
-            DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
-            return mData[Log2(bit)];
-        }
+    T& operator[](wgpu::ShaderStage stageBit) {
+        uint32_t bit = static_cast<uint32_t>(stageBit);
+        DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+        return mData[Log2(bit)];
+    }
+    const T& operator[](wgpu::ShaderStage stageBit) const {
+        uint32_t bit = static_cast<uint32_t>(stageBit);
+        DAWN_ASSERT(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << kNumStages));
+        return mData[Log2(bit)];
+    }
 
-      private:
-        std::array<T, kNumStages> mData;
-    };
+  private:
+    std::array<T, kNumStages> mData;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Pipeline.cpp b/src/dawn/native/Pipeline.cpp
index ce181ee..6bee1eb 100644
--- a/src/dawn/native/Pipeline.cpp
+++ b/src/dawn/native/Pipeline.cpp
@@ -26,238 +26,234 @@
 #include "dawn/native/ShaderModule.h"
 
 namespace dawn::native {
-    MaybeError ValidateProgrammableStage(DeviceBase* device,
-                                         const ShaderModuleBase* module,
-                                         const std::string& entryPoint,
-                                         uint32_t constantCount,
-                                         const ConstantEntry* constants,
-                                         const PipelineLayoutBase* layout,
-                                         SingleShaderStage stage) {
-        DAWN_TRY(device->ValidateObject(module));
+MaybeError ValidateProgrammableStage(DeviceBase* device,
+                                     const ShaderModuleBase* module,
+                                     const std::string& entryPoint,
+                                     uint32_t constantCount,
+                                     const ConstantEntry* constants,
+                                     const PipelineLayoutBase* layout,
+                                     SingleShaderStage stage) {
+    DAWN_TRY(device->ValidateObject(module));
 
-        DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
-                        "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
+    DAWN_INVALID_IF(!module->HasEntryPoint(entryPoint),
+                    "Entry point \"%s\" doesn't exist in the shader module %s.", entryPoint,
+                    module);
+
+    const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
+
+    if (!metadata.infringedLimitErrors.empty()) {
+        std::ostringstream out;
+        out << "Entry point \"" << entryPoint << "\" infringes limits:\n";
+        for (const std::string& limit : metadata.infringedLimitErrors) {
+            out << " - " << limit << "\n";
+        }
+        return DAWN_VALIDATION_ERROR(out.str());
+    }
+
+    DAWN_INVALID_IF(metadata.stage != stage,
+                    "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
+                    metadata.stage, entryPoint, stage);
+
+    if (layout != nullptr) {
+        DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
+    }
+
+    if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
+        return DAWN_VALIDATION_ERROR(
+            "Pipeline overridable constants are disallowed because they are partially "
+            "implemented.");
+    }
+
+    // Validate if overridable constants exist in shader module
+    // pipelineBase is not yet constructed at this moment so iterate constants from descriptor
+    size_t numUninitializedConstants = metadata.uninitializedOverridableConstants.size();
+    // Keep an initialized constants sets to handle duplicate initialization cases
+    std::unordered_set<std::string> stageInitializedConstantIdentifiers;
+    for (uint32_t i = 0; i < constantCount; i++) {
+        DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
+                        "Pipeline overridable constant \"%s\" not found in %s.", constants[i].key,
                         module);
 
-        const EntryPointMetadata& metadata = module->GetEntryPoint(entryPoint);
-
-        if (!metadata.infringedLimitErrors.empty()) {
-            std::ostringstream out;
-            out << "Entry point \"" << entryPoint << "\" infringes limits:\n";
-            for (const std::string& limit : metadata.infringedLimitErrors) {
-                out << " - " << limit << "\n";
+        if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
+            if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
+                numUninitializedConstants--;
             }
-            return DAWN_VALIDATION_ERROR(out.str());
-        }
-
-        DAWN_INVALID_IF(metadata.stage != stage,
-                        "The stage (%s) of the entry point \"%s\" isn't the expected one (%s).",
-                        metadata.stage, entryPoint, stage);
-
-        if (layout != nullptr) {
-            DAWN_TRY(ValidateCompatibilityWithPipelineLayout(device, metadata, layout));
-        }
-
-        if (constantCount > 0u && device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs)) {
-            return DAWN_VALIDATION_ERROR(
-                "Pipeline overridable constants are disallowed because they are partially "
-                "implemented.");
-        }
-
-        // Validate if overridable constants exist in shader module
-        // pipelineBase is not yet constructed at this moment so iterate constants from descriptor
-        size_t numUninitializedConstants = metadata.uninitializedOverridableConstants.size();
-        // Keep an initialized constants sets to handle duplicate initialization cases
-        std::unordered_set<std::string> stageInitializedConstantIdentifiers;
-        for (uint32_t i = 0; i < constantCount; i++) {
-            DAWN_INVALID_IF(metadata.overridableConstants.count(constants[i].key) == 0,
-                            "Pipeline overridable constant \"%s\" not found in %s.",
-                            constants[i].key, module);
-
-            if (stageInitializedConstantIdentifiers.count(constants[i].key) == 0) {
-                if (metadata.uninitializedOverridableConstants.count(constants[i].key) > 0) {
-                    numUninitializedConstants--;
-                }
-                stageInitializedConstantIdentifiers.insert(constants[i].key);
-            } else {
-                // There are duplicate initializations
-                return DAWN_FORMAT_VALIDATION_ERROR(
-                    "Pipeline overridable constants \"%s\" is set more than once in %s",
-                    constants[i].key, module);
-            }
-        }
-
-        // Validate if any overridable constant is left uninitialized
-        if (DAWN_UNLIKELY(numUninitializedConstants > 0)) {
-            std::string uninitializedConstantsArray;
-            bool isFirst = true;
-            for (std::string identifier : metadata.uninitializedOverridableConstants) {
-                if (stageInitializedConstantIdentifiers.count(identifier) > 0) {
-                    continue;
-                }
-
-                if (isFirst) {
-                    isFirst = false;
-                } else {
-                    uninitializedConstantsArray.append(", ");
-                }
-                uninitializedConstantsArray.append(identifier);
-            }
-
-            return DAWN_FORMAT_VALIDATION_ERROR(
-                "There are uninitialized pipeline overridable constants in shader module %s, their "
-                "identifiers:[%s]",
-                module, uninitializedConstantsArray);
-        }
-
-        return {};
-    }
-
-    // PipelineBase
-
-    PipelineBase::PipelineBase(DeviceBase* device,
-                               PipelineLayoutBase* layout,
-                               const char* label,
-                               std::vector<StageAndDescriptor> stages)
-        : ApiObjectBase(device, label), mLayout(layout) {
-        ASSERT(!stages.empty());
-
-        for (const StageAndDescriptor& stage : stages) {
-            // Extract argument for this stage.
-            SingleShaderStage shaderStage = stage.shaderStage;
-            ShaderModuleBase* module = stage.module;
-            const char* entryPointName = stage.entryPoint.c_str();
-
-            const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
-            ASSERT(metadata.stage == shaderStage);
-
-            // Record them internally.
-            bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
-            mStageMask |= StageBit(shaderStage);
-            mStages[shaderStage] = {module, entryPointName, &metadata, {}};
-            auto& constants = mStages[shaderStage].constants;
-            for (uint32_t i = 0; i < stage.constantCount; i++) {
-                constants.emplace(stage.constants[i].key, stage.constants[i].value);
-            }
-
-            // Compute the max() of all minBufferSizes across all stages.
-            RequiredBufferSizes stageMinBufferSizes =
-                ComputeRequiredBufferSizesForLayout(metadata, layout);
-
-            if (isFirstStage) {
-                mMinBufferSizes = std::move(stageMinBufferSizes);
-            } else {
-                for (BindGroupIndex group(0); group < mMinBufferSizes.size(); ++group) {
-                    ASSERT(stageMinBufferSizes[group].size() == mMinBufferSizes[group].size());
-
-                    for (size_t i = 0; i < stageMinBufferSizes[group].size(); ++i) {
-                        mMinBufferSizes[group][i] =
-                            std::max(mMinBufferSizes[group][i], stageMinBufferSizes[group][i]);
-                    }
-                }
-            }
-        }
-    }
-
-    PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-    }
-
-    PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
-
-    PipelineBase::~PipelineBase() = default;
-
-    PipelineLayoutBase* PipelineBase::GetLayout() {
-        ASSERT(!IsError());
-        return mLayout.Get();
-    }
-
-    const PipelineLayoutBase* PipelineBase::GetLayout() const {
-        ASSERT(!IsError());
-        return mLayout.Get();
-    }
-
-    const RequiredBufferSizes& PipelineBase::GetMinBufferSizes() const {
-        ASSERT(!IsError());
-        return mMinBufferSizes;
-    }
-
-    const ProgrammableStage& PipelineBase::GetStage(SingleShaderStage stage) const {
-        ASSERT(!IsError());
-        return mStages[stage];
-    }
-
-    const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
-        return mStages;
-    }
-
-    wgpu::ShaderStage PipelineBase::GetStageMask() const {
-        return mStageMask;
-    }
-
-    MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-        DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
-        DAWN_INVALID_IF(
-            groupIndex >= kMaxBindGroups,
-            "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
-            groupIndex, kMaxBindGroups);
-        return {};
-    }
-
-    ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(
-        uint32_t groupIndexIn) {
-        DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
-
-        BindGroupIndex groupIndex(groupIndexIn);
-        if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
-            return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
+            stageInitializedConstantIdentifiers.insert(constants[i].key);
         } else {
-            return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
+            // There are duplicate initializations
+            return DAWN_FORMAT_VALIDATION_ERROR(
+                "Pipeline overridable constants \"%s\" is set more than once in %s",
+                constants[i].key, module);
         }
     }
 
-    BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
-        Ref<BindGroupLayoutBase> result;
-        if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
-                                       "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
-                                       this)) {
-            return BindGroupLayoutBase::MakeError(GetDevice());
-        }
-        return result.Detach();
-    }
+    // Validate if any overridable constant is left uninitialized
+    if (DAWN_UNLIKELY(numUninitializedConstants > 0)) {
+        std::string uninitializedConstantsArray;
+        bool isFirst = true;
+        for (std::string identifier : metadata.uninitializedOverridableConstants) {
+            if (stageInitializedConstantIdentifiers.count(identifier) > 0) {
+                continue;
+            }
 
-    size_t PipelineBase::ComputeContentHash() {
-        ObjectContentHasher recorder;
-        recorder.Record(mLayout->GetContentHash());
-
-        recorder.Record(mStageMask);
-        for (SingleShaderStage stage : IterateStages(mStageMask)) {
-            recorder.Record(mStages[stage].module->GetContentHash());
-            recorder.Record(mStages[stage].entryPoint);
+            if (isFirst) {
+                isFirst = false;
+            } else {
+                uninitializedConstantsArray.append(", ");
+            }
+            uninitializedConstantsArray.append(identifier);
         }
 
-        return recorder.GetContentHash();
+        return DAWN_FORMAT_VALIDATION_ERROR(
+            "There are uninitialized pipeline overridable constants in shader module %s, their "
+            "identifiers:[%s]",
+            module, uninitializedConstantsArray);
     }
 
-    // static
-    bool PipelineBase::EqualForCache(const PipelineBase* a, const PipelineBase* b) {
-        // The layout is deduplicated so it can be compared by pointer.
-        if (a->mLayout.Get() != b->mLayout.Get() || a->mStageMask != b->mStageMask) {
+    return {};
+}
+
+// PipelineBase
+
+PipelineBase::PipelineBase(DeviceBase* device,
+                           PipelineLayoutBase* layout,
+                           const char* label,
+                           std::vector<StageAndDescriptor> stages)
+    : ApiObjectBase(device, label), mLayout(layout) {
+    ASSERT(!stages.empty());
+
+    for (const StageAndDescriptor& stage : stages) {
+        // Extract argument for this stage.
+        SingleShaderStage shaderStage = stage.shaderStage;
+        ShaderModuleBase* module = stage.module;
+        const char* entryPointName = stage.entryPoint.c_str();
+
+        const EntryPointMetadata& metadata = module->GetEntryPoint(entryPointName);
+        ASSERT(metadata.stage == shaderStage);
+
+        // Record them internally.
+        bool isFirstStage = mStageMask == wgpu::ShaderStage::None;
+        mStageMask |= StageBit(shaderStage);
+        mStages[shaderStage] = {module, entryPointName, &metadata, {}};
+        auto& constants = mStages[shaderStage].constants;
+        for (uint32_t i = 0; i < stage.constantCount; i++) {
+            constants.emplace(stage.constants[i].key, stage.constants[i].value);
+        }
+
+        // Compute the max() of all minBufferSizes across all stages.
+        RequiredBufferSizes stageMinBufferSizes =
+            ComputeRequiredBufferSizesForLayout(metadata, layout);
+
+        if (isFirstStage) {
+            mMinBufferSizes = std::move(stageMinBufferSizes);
+        } else {
+            for (BindGroupIndex group(0); group < mMinBufferSizes.size(); ++group) {
+                ASSERT(stageMinBufferSizes[group].size() == mMinBufferSizes[group].size());
+
+                for (size_t i = 0; i < stageMinBufferSizes[group].size(); ++i) {
+                    mMinBufferSizes[group][i] =
+                        std::max(mMinBufferSizes[group][i], stageMinBufferSizes[group][i]);
+                }
+            }
+        }
+    }
+}
+
+PipelineBase::PipelineBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {}
+
+PipelineBase::PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+PipelineBase::~PipelineBase() = default;
+
+PipelineLayoutBase* PipelineBase::GetLayout() {
+    ASSERT(!IsError());
+    return mLayout.Get();
+}
+
+const PipelineLayoutBase* PipelineBase::GetLayout() const {
+    ASSERT(!IsError());
+    return mLayout.Get();
+}
+
+const RequiredBufferSizes& PipelineBase::GetMinBufferSizes() const {
+    ASSERT(!IsError());
+    return mMinBufferSizes;
+}
+
+const ProgrammableStage& PipelineBase::GetStage(SingleShaderStage stage) const {
+    ASSERT(!IsError());
+    return mStages[stage];
+}
+
+const PerStage<ProgrammableStage>& PipelineBase::GetAllStages() const {
+    return mStages;
+}
+
+wgpu::ShaderStage PipelineBase::GetStageMask() const {
+    return mStageMask;
+}
+
+MaybeError PipelineBase::ValidateGetBindGroupLayout(uint32_t groupIndex) {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+    DAWN_TRY(GetDevice()->ValidateObject(mLayout.Get()));
+    DAWN_INVALID_IF(groupIndex >= kMaxBindGroups,
+                    "Bind group layout index (%u) exceeds the maximum number of bind groups (%u).",
+                    groupIndex, kMaxBindGroups);
+    return {};
+}
+
+ResultOrError<Ref<BindGroupLayoutBase>> PipelineBase::GetBindGroupLayout(uint32_t groupIndexIn) {
+    DAWN_TRY(ValidateGetBindGroupLayout(groupIndexIn));
+
+    BindGroupIndex groupIndex(groupIndexIn);
+    if (!mLayout->GetBindGroupLayoutsMask()[groupIndex]) {
+        return Ref<BindGroupLayoutBase>(GetDevice()->GetEmptyBindGroupLayout());
+    } else {
+        return Ref<BindGroupLayoutBase>(mLayout->GetBindGroupLayout(groupIndex));
+    }
+}
+
+BindGroupLayoutBase* PipelineBase::APIGetBindGroupLayout(uint32_t groupIndexIn) {
+    Ref<BindGroupLayoutBase> result;
+    if (GetDevice()->ConsumedError(GetBindGroupLayout(groupIndexIn), &result,
+                                   "Validating GetBindGroupLayout (%u) on %s", groupIndexIn,
+                                   this)) {
+        return BindGroupLayoutBase::MakeError(GetDevice());
+    }
+    return result.Detach();
+}
+
+size_t PipelineBase::ComputeContentHash() {
+    ObjectContentHasher recorder;
+    recorder.Record(mLayout->GetContentHash());
+
+    recorder.Record(mStageMask);
+    for (SingleShaderStage stage : IterateStages(mStageMask)) {
+        recorder.Record(mStages[stage].module->GetContentHash());
+        recorder.Record(mStages[stage].entryPoint);
+    }
+
+    return recorder.GetContentHash();
+}
+
+// static
+bool PipelineBase::EqualForCache(const PipelineBase* a, const PipelineBase* b) {
+    // The layout is deduplicated so it can be compared by pointer.
+    if (a->mLayout.Get() != b->mLayout.Get() || a->mStageMask != b->mStageMask) {
+        return false;
+    }
+
+    for (SingleShaderStage stage : IterateStages(a->mStageMask)) {
+        // The module is deduplicated so it can be compared by pointer.
+        if (a->mStages[stage].module.Get() != b->mStages[stage].module.Get() ||
+            a->mStages[stage].entryPoint != b->mStages[stage].entryPoint) {
             return false;
         }
-
-        for (SingleShaderStage stage : IterateStages(a->mStageMask)) {
-            // The module is deduplicated so it can be compared by pointer.
-            if (a->mStages[stage].module.Get() != b->mStages[stage].module.Get() ||
-                a->mStages[stage].entryPoint != b->mStages[stage].entryPoint) {
-                return false;
-            }
-        }
-
-        return true;
     }
 
+    return true;
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/Pipeline.h b/src/dawn/native/Pipeline.h
index da7c95b..2d5b6df 100644
--- a/src/dawn/native/Pipeline.h
+++ b/src/dawn/native/Pipeline.h
@@ -32,69 +32,69 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateProgrammableStage(DeviceBase* device,
-                                         const ShaderModuleBase* module,
-                                         const std::string& entryPoint,
-                                         uint32_t constantCount,
-                                         const ConstantEntry* constants,
-                                         const PipelineLayoutBase* layout,
-                                         SingleShaderStage stage);
+MaybeError ValidateProgrammableStage(DeviceBase* device,
+                                     const ShaderModuleBase* module,
+                                     const std::string& entryPoint,
+                                     uint32_t constantCount,
+                                     const ConstantEntry* constants,
+                                     const PipelineLayoutBase* layout,
+                                     SingleShaderStage stage);
 
-    // Use map to make sure constant keys are sorted for creating shader cache keys
-    using PipelineConstantEntries = std::map<std::string, double>;
+// Use map to make sure constant keys are sorted for creating shader cache keys
+using PipelineConstantEntries = std::map<std::string, double>;
 
-    struct ProgrammableStage {
-        Ref<ShaderModuleBase> module;
-        std::string entryPoint;
+struct ProgrammableStage {
+    Ref<ShaderModuleBase> module;
+    std::string entryPoint;
 
-        // The metadata lives as long as module, that's ref-ed in the same structure.
-        const EntryPointMetadata* metadata = nullptr;
+    // The metadata lives as long as module, that's ref-ed in the same structure.
+    const EntryPointMetadata* metadata = nullptr;
 
-        PipelineConstantEntries constants;
-    };
+    PipelineConstantEntries constants;
+};
 
-    class PipelineBase : public ApiObjectBase, public CachedObject {
-      public:
-        ~PipelineBase() override;
+class PipelineBase : public ApiObjectBase, public CachedObject {
+  public:
+    ~PipelineBase() override;
 
-        PipelineLayoutBase* GetLayout();
-        const PipelineLayoutBase* GetLayout() const;
-        const RequiredBufferSizes& GetMinBufferSizes() const;
-        const ProgrammableStage& GetStage(SingleShaderStage stage) const;
-        const PerStage<ProgrammableStage>& GetAllStages() const;
-        wgpu::ShaderStage GetStageMask() const;
+    PipelineLayoutBase* GetLayout();
+    const PipelineLayoutBase* GetLayout() const;
+    const RequiredBufferSizes& GetMinBufferSizes() const;
+    const ProgrammableStage& GetStage(SingleShaderStage stage) const;
+    const PerStage<ProgrammableStage>& GetAllStages() const;
+    wgpu::ShaderStage GetStageMask() const;
 
-        ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
+    ResultOrError<Ref<BindGroupLayoutBase>> GetBindGroupLayout(uint32_t groupIndex);
 
-        // Helper functions for std::unordered_map-based pipeline caches.
-        size_t ComputeContentHash() override;
-        static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
+    // Helper functions for std::unordered_map-based pipeline caches.
+    size_t ComputeContentHash() override;
+    static bool EqualForCache(const PipelineBase* a, const PipelineBase* b);
 
-        // Implementation of the API entrypoint. Do not use in a reentrant manner.
-        BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
+    // Implementation of the API entrypoint. Do not use in a reentrant manner.
+    BindGroupLayoutBase* APIGetBindGroupLayout(uint32_t groupIndex);
 
-        // Initialize() should only be called once by the frontend.
-        virtual MaybeError Initialize() = 0;
+    // Initialize() should only be called once by the frontend.
+    virtual MaybeError Initialize() = 0;
 
-      protected:
-        PipelineBase(DeviceBase* device,
-                     PipelineLayoutBase* layout,
-                     const char* label,
-                     std::vector<StageAndDescriptor> stages);
-        PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+  protected:
+    PipelineBase(DeviceBase* device,
+                 PipelineLayoutBase* layout,
+                 const char* label,
+                 std::vector<StageAndDescriptor> stages);
+    PipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        // Constructor used only for mocking and testing.
-        explicit PipelineBase(DeviceBase* device);
+    // Constructor used only for mocking and testing.
+    explicit PipelineBase(DeviceBase* device);
 
-      private:
-        MaybeError ValidateGetBindGroupLayout(uint32_t group);
+  private:
+    MaybeError ValidateGetBindGroupLayout(uint32_t group);
 
-        wgpu::ShaderStage mStageMask = wgpu::ShaderStage::None;
-        PerStage<ProgrammableStage> mStages;
+    wgpu::ShaderStage mStageMask = wgpu::ShaderStage::None;
+    PerStage<ProgrammableStage> mStages;
 
-        Ref<PipelineLayoutBase> mLayout;
-        RequiredBufferSizes mMinBufferSizes;
-    };
+    Ref<PipelineLayoutBase> mLayout;
+    RequiredBufferSizes mMinBufferSizes;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/PipelineLayout.cpp b/src/dawn/native/PipelineLayout.cpp
index afaf36a..8365f6d 100644
--- a/src/dawn/native/PipelineLayout.cpp
+++ b/src/dawn/native/PipelineLayout.cpp
@@ -29,384 +29,378 @@
 
 namespace dawn::native {
 
-    MaybeError ValidatePipelineLayoutDescriptor(
-        DeviceBase* device,
-        const PipelineLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        if (descriptor->nextInChain != nullptr) {
-            return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+MaybeError ValidatePipelineLayoutDescriptor(DeviceBase* device,
+                                            const PipelineLayoutDescriptor* descriptor,
+                                            PipelineCompatibilityToken pipelineCompatibilityToken) {
+    if (descriptor->nextInChain != nullptr) {
+        return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+    }
+
+    if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
+        return DAWN_VALIDATION_ERROR("too many bind group layouts");
+    }
+
+    BindingCounts bindingCounts = {};
+    for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
+        DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
+        if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
+            pipelineCompatibilityToken) {
+            return DAWN_VALIDATION_ERROR(
+                "cannot create a pipeline layout using a bind group layout that was created as "
+                "part of a pipeline's default layout");
+        }
+        AccumulateBindingCounts(&bindingCounts,
+                                descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
+    }
+
+    DAWN_TRY(ValidateBindingCounts(bindingCounts));
+    return {};
+}
+
+// PipelineLayoutBase
+
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+                                       const PipelineLayoutDescriptor* descriptor,
+                                       ApiObjectBase::UntrackedByDeviceTag tag)
+    : ApiObjectBase(device, descriptor->label) {
+    ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
+    for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
+         ++group) {
+        mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
+        mMask.set(group);
+    }
+}
+
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
+                                       const PipelineLayoutDescriptor* descriptor)
+    : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
+    TrackInDevice();
+}
+
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
+    : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+PipelineLayoutBase::~PipelineLayoutBase() = default;
+
+void PipelineLayoutBase::DestroyImpl() {
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncachePipelineLayout(this);
+    }
+}
+
+// static
+PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
+    return new PipelineLayoutBase(device, ObjectBase::kError);
+}
+
+// static
+ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
+    DeviceBase* device,
+    std::vector<StageAndDescriptor> stages) {
+    using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
+
+    // Merges two entries at the same location, if they are allowed to be merged.
+    auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
+                           const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
+        // Visibility is excluded because we take the OR across stages.
+        bool compatible =
+            modifiedEntry->binding == mergedEntry.binding &&
+            modifiedEntry->buffer.type == mergedEntry.buffer.type &&
+            modifiedEntry->sampler.type == mergedEntry.sampler.type &&
+            // Compatibility between these sample types is checked below.
+            (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
+                (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
+            modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;
+
+        // Minimum buffer binding size excluded because we take the maximum seen across stages.
+        if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
+            compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
+                                           mergedEntry.buffer.hasDynamicOffset;
         }
 
-        if (descriptor->bindGroupLayoutCount > kMaxBindGroups) {
-            return DAWN_VALIDATION_ERROR("too many bind group layouts");
+        if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
+            // Sample types are compatible if they are exactly equal,
+            // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
+            // Note that the |mergedEntry| never has type Float. Texture bindings all start
+            // as UnfilterableFloat and are promoted to Float if they are statically used with
+            // a sampler.
+            ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
+            bool compatibleSampleTypes =
+                modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
+                (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
+                 mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
+            compatible =
+                compatible && compatibleSampleTypes &&
+                modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
+                modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
         }
 
-        BindingCounts bindingCounts = {};
-        for (uint32_t i = 0; i < descriptor->bindGroupLayoutCount; ++i) {
-            DAWN_TRY(device->ValidateObject(descriptor->bindGroupLayouts[i]));
-            if (descriptor->bindGroupLayouts[i]->GetPipelineCompatibilityToken() !=
-                pipelineCompatibilityToken) {
-                return DAWN_VALIDATION_ERROR(
-                    "cannot create a pipeline layout using a bind group layout that was created as "
-                    "part of a pipeline's default layout");
-            }
-            AccumulateBindingCounts(&bindingCounts,
-                                    descriptor->bindGroupLayouts[i]->GetBindingCountInfo());
+        if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
+            compatible =
+                compatible &&
+                modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
+                modifiedEntry->storageTexture.viewDimension ==
+                    mergedEntry.storageTexture.viewDimension;
         }
 
-        DAWN_TRY(ValidateBindingCounts(bindingCounts));
+        // Check if any properties are incompatible with existing entry
+        // If compatible, we will merge some properties
+        if (!compatible) {
+            return DAWN_VALIDATION_ERROR(
+                "Duplicate binding in default pipeline layout initialization "
+                "not compatible with previous declaration");
+        }
+
+        // Use the max |minBufferBindingSize| we find.
+        modifiedEntry->buffer.minBindingSize =
+            std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);
+
+        // Use the OR of all the stages at which we find this binding.
+        modifiedEntry->visibility |= mergedEntry.visibility;
+
         return {};
-    }
+    };
 
-    // PipelineLayoutBase
-
-    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
-                                           const PipelineLayoutDescriptor* descriptor,
-                                           ApiObjectBase::UntrackedByDeviceTag tag)
-        : ApiObjectBase(device, descriptor->label) {
-        ASSERT(descriptor->bindGroupLayoutCount <= kMaxBindGroups);
-        for (BindGroupIndex group(0); group < BindGroupIndex(descriptor->bindGroupLayoutCount);
-             ++group) {
-            mBindGroupLayouts[group] = descriptor->bindGroupLayouts[static_cast<uint32_t>(group)];
-            mMask.set(group);
-        }
-    }
-
-    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device,
-                                           const PipelineLayoutDescriptor* descriptor)
-        : PipelineLayoutBase(device, descriptor, kUntrackedByDevice) {
-        TrackInDevice();
-    }
-
-    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device)
-        : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
-
-    PipelineLayoutBase::PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
-
-    PipelineLayoutBase::~PipelineLayoutBase() = default;
-
-    void PipelineLayoutBase::DestroyImpl() {
-        if (IsCachedReference()) {
-            // Do not uncache the actual cached object if we are a blueprint.
-            GetDevice()->UncachePipelineLayout(this);
-        }
-    }
-
-    // static
-    PipelineLayoutBase* PipelineLayoutBase::MakeError(DeviceBase* device) {
-        return new PipelineLayoutBase(device, ObjectBase::kError);
-    }
-
-    // static
-    ResultOrError<Ref<PipelineLayoutBase>> PipelineLayoutBase::CreateDefault(
-        DeviceBase* device,
-        std::vector<StageAndDescriptor> stages) {
-        using EntryMap = std::map<BindingNumber, BindGroupLayoutEntry>;
-
-        // Merges two entries at the same location, if they are allowed to be merged.
-        auto MergeEntries = [](BindGroupLayoutEntry* modifiedEntry,
-                               const BindGroupLayoutEntry& mergedEntry) -> MaybeError {
-            // Visibility is excluded because we take the OR across stages.
-            bool compatible =
-                modifiedEntry->binding == mergedEntry.binding &&
-                modifiedEntry->buffer.type == mergedEntry.buffer.type &&
-                modifiedEntry->sampler.type == mergedEntry.sampler.type &&
-                // Compatibility between these sample types is checked below.
-                (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) ==
-                    (mergedEntry.texture.sampleType != wgpu::TextureSampleType::Undefined) &&
-                modifiedEntry->storageTexture.access == mergedEntry.storageTexture.access;
-
-            // Minimum buffer binding size excluded because we take the maximum seen across stages.
-            if (modifiedEntry->buffer.type != wgpu::BufferBindingType::Undefined) {
-                compatible = compatible && modifiedEntry->buffer.hasDynamicOffset ==
-                                               mergedEntry.buffer.hasDynamicOffset;
-            }
-
-            if (modifiedEntry->texture.sampleType != wgpu::TextureSampleType::Undefined) {
-                // Sample types are compatible if they are exactly equal,
-                // or if the |modifiedEntry| is Float and the |mergedEntry| is UnfilterableFloat.
-                // Note that the |mergedEntry| never has type Float. Texture bindings all start
-                // as UnfilterableFloat and are promoted to Float if they are statically used with
-                // a sampler.
-                ASSERT(mergedEntry.texture.sampleType != wgpu::TextureSampleType::Float);
-                bool compatibleSampleTypes =
-                    modifiedEntry->texture.sampleType == mergedEntry.texture.sampleType ||
-                    (modifiedEntry->texture.sampleType == wgpu::TextureSampleType::Float &&
-                     mergedEntry.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat);
-                compatible =
-                    compatible && compatibleSampleTypes &&
-                    modifiedEntry->texture.viewDimension == mergedEntry.texture.viewDimension &&
-                    modifiedEntry->texture.multisampled == mergedEntry.texture.multisampled;
-            }
-
-            if (modifiedEntry->storageTexture.access != wgpu::StorageTextureAccess::Undefined) {
-                compatible =
-                    compatible &&
-                    modifiedEntry->storageTexture.format == mergedEntry.storageTexture.format &&
-                    modifiedEntry->storageTexture.viewDimension ==
-                        mergedEntry.storageTexture.viewDimension;
-            }
-
-            // Check if any properties are incompatible with existing entry
-            // If compatible, we will merge some properties
-            if (!compatible) {
-                return DAWN_VALIDATION_ERROR(
-                    "Duplicate binding in default pipeline layout initialization "
-                    "not compatible with previous declaration");
-            }
-
-            // Use the max |minBufferBindingSize| we find.
-            modifiedEntry->buffer.minBindingSize =
-                std::max(modifiedEntry->buffer.minBindingSize, mergedEntry.buffer.minBindingSize);
-
-            // Use the OR of all the stages at which we find this binding.
-            modifiedEntry->visibility |= mergedEntry.visibility;
-
-            return {};
-        };
-
-        // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
-        auto ConvertMetadataToEntry =
-            [](const ShaderBindingInfo& shaderBinding,
-               const ExternalTextureBindingLayout* externalTextureBindingEntry)
-            -> BindGroupLayoutEntry {
-            BindGroupLayoutEntry entry = {};
-            switch (shaderBinding.bindingType) {
-                case BindingInfoType::Buffer:
-                    entry.buffer.type = shaderBinding.buffer.type;
-                    entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
-                    entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
-                    break;
-                case BindingInfoType::Sampler:
-                    if (shaderBinding.sampler.isComparison) {
-                        entry.sampler.type = wgpu::SamplerBindingType::Comparison;
-                    } else {
-                        entry.sampler.type = wgpu::SamplerBindingType::Filtering;
-                    }
-                    break;
-                case BindingInfoType::Texture:
-                    switch (shaderBinding.texture.compatibleSampleTypes) {
-                        case SampleTypeBit::Depth:
-                            entry.texture.sampleType = wgpu::TextureSampleType::Depth;
-                            break;
-                        case SampleTypeBit::Sint:
-                            entry.texture.sampleType = wgpu::TextureSampleType::Sint;
-                            break;
-                        case SampleTypeBit::Uint:
-                            entry.texture.sampleType = wgpu::TextureSampleType::Uint;
-                            break;
-                        case SampleTypeBit::Float:
-                        case SampleTypeBit::UnfilterableFloat:
-                        case SampleTypeBit::None:
+    // Does the trivial conversions from a ShaderBindingInfo to a BindGroupLayoutEntry
+    auto ConvertMetadataToEntry =
+        [](const ShaderBindingInfo& shaderBinding,
+           const ExternalTextureBindingLayout* externalTextureBindingEntry)
+        -> BindGroupLayoutEntry {
+        BindGroupLayoutEntry entry = {};
+        switch (shaderBinding.bindingType) {
+            case BindingInfoType::Buffer:
+                entry.buffer.type = shaderBinding.buffer.type;
+                entry.buffer.hasDynamicOffset = shaderBinding.buffer.hasDynamicOffset;
+                entry.buffer.minBindingSize = shaderBinding.buffer.minBindingSize;
+                break;
+            case BindingInfoType::Sampler:
+                if (shaderBinding.sampler.isComparison) {
+                    entry.sampler.type = wgpu::SamplerBindingType::Comparison;
+                } else {
+                    entry.sampler.type = wgpu::SamplerBindingType::Filtering;
+                }
+                break;
+            case BindingInfoType::Texture:
+                switch (shaderBinding.texture.compatibleSampleTypes) {
+                    case SampleTypeBit::Depth:
+                        entry.texture.sampleType = wgpu::TextureSampleType::Depth;
+                        break;
+                    case SampleTypeBit::Sint:
+                        entry.texture.sampleType = wgpu::TextureSampleType::Sint;
+                        break;
+                    case SampleTypeBit::Uint:
+                        entry.texture.sampleType = wgpu::TextureSampleType::Uint;
+                        break;
+                    case SampleTypeBit::Float:
+                    case SampleTypeBit::UnfilterableFloat:
+                    case SampleTypeBit::None:
+                        UNREACHABLE();
+                        break;
+                    default:
+                        if (shaderBinding.texture.compatibleSampleTypes ==
+                            (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
+                            // Default to UnfilterableFloat. It will be promoted to Float if it
+                            // is used with a sampler.
+                            entry.texture.sampleType = wgpu::TextureSampleType::UnfilterableFloat;
+                        } else {
                             UNREACHABLE();
-                            break;
-                        default:
-                            if (shaderBinding.texture.compatibleSampleTypes ==
-                                (SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat)) {
-                                // Default to UnfilterableFloat. It will be promoted to Float if it
-                                // is used with a sampler.
-                                entry.texture.sampleType =
-                                    wgpu::TextureSampleType::UnfilterableFloat;
-                            } else {
-                                UNREACHABLE();
-                            }
-                    }
-                    entry.texture.viewDimension = shaderBinding.texture.viewDimension;
-                    entry.texture.multisampled = shaderBinding.texture.multisampled;
-                    break;
-                case BindingInfoType::StorageTexture:
-                    entry.storageTexture.access = shaderBinding.storageTexture.access;
-                    entry.storageTexture.format = shaderBinding.storageTexture.format;
-                    entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
-                    break;
-                case BindingInfoType::ExternalTexture:
-                    entry.nextInChain = externalTextureBindingEntry;
-                    break;
-            }
-            return entry;
-        };
-
-        PipelineCompatibilityToken pipelineCompatibilityToken =
-            device->GetNextPipelineCompatibilityToken();
-
-        // Creates the BGL from the entries for a stage, checking it is valid.
-        auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
-                            PipelineCompatibilityToken pipelineCompatibilityToken)
-            -> ResultOrError<Ref<BindGroupLayoutBase>> {
-            std::vector<BindGroupLayoutEntry> entryVec;
-            entryVec.reserve(entries.size());
-            for (auto& [_, entry] : entries) {
-                entryVec.push_back(entry);
-            }
-
-            BindGroupLayoutDescriptor desc = {};
-            desc.entries = entryVec.data();
-            desc.entryCount = entryVec.size();
-
-            if (device->IsValidationEnabled()) {
-                DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
-                                 &desc);
-            }
-            return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
-        };
-
-        ASSERT(!stages.empty());
-
-        // Data which BindGroupLayoutDescriptor will point to for creation
-        ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
-            entryData = {};
-
-        // External texture binding layouts are chained structs that are set as a pointer within
-        // the bind group layout entry. We declare an entry here so that it can be used when needed
-        // in each BindGroupLayoutEntry and so it can stay alive until the call to
-        // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
-        // there's no issue with using the same struct multiple times.
-        ExternalTextureBindingLayout externalTextureBindingLayout;
-
-        // Loops over all the reflected BindGroupLayoutEntries from shaders.
-        for (const StageAndDescriptor& stage : stages) {
-            const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
-
-            for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
-                for (const auto& [bindingNumber, shaderBinding] : metadata.bindings[group]) {
-                    // Create the BindGroupLayoutEntry
-                    BindGroupLayoutEntry entry =
-                        ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
-                    entry.binding = static_cast<uint32_t>(bindingNumber);
-                    entry.visibility = StageBit(stage.shaderStage);
-
-                    // Add it to our map of all entries, if there is an existing entry, then we
-                    // need to merge, if we can.
-                    const auto& [existingEntry, inserted] =
-                        entryData[group].insert({bindingNumber, entry});
-                    if (!inserted) {
-                        DAWN_TRY(MergeEntries(&existingEntry->second, entry));
-                    }
+                        }
                 }
-            }
+                entry.texture.viewDimension = shaderBinding.texture.viewDimension;
+                entry.texture.multisampled = shaderBinding.texture.multisampled;
+                break;
+            case BindingInfoType::StorageTexture:
+                entry.storageTexture.access = shaderBinding.storageTexture.access;
+                entry.storageTexture.format = shaderBinding.storageTexture.format;
+                entry.storageTexture.viewDimension = shaderBinding.storageTexture.viewDimension;
+                break;
+            case BindingInfoType::ExternalTexture:
+                entry.nextInChain = externalTextureBindingEntry;
+                break;
+        }
+        return entry;
+    };
 
-            // Promote any Unfilterable textures used with a sampler to Filtering.
-            for (const EntryPointMetadata::SamplerTexturePair& pair :
-                 metadata.samplerTexturePairs) {
-                BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
-                if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
-                    entry->texture.sampleType = wgpu::TextureSampleType::Float;
+    PipelineCompatibilityToken pipelineCompatibilityToken =
+        device->GetNextPipelineCompatibilityToken();
+
+    // Creates the BGL from the entries for a stage, checking it is valid.
+    auto CreateBGL = [](DeviceBase* device, const EntryMap& entries,
+                        PipelineCompatibilityToken pipelineCompatibilityToken)
+        -> ResultOrError<Ref<BindGroupLayoutBase>> {
+        std::vector<BindGroupLayoutEntry> entryVec;
+        entryVec.reserve(entries.size());
+        for (auto& [_, entry] : entries) {
+            entryVec.push_back(entry);
+        }
+
+        BindGroupLayoutDescriptor desc = {};
+        desc.entries = entryVec.data();
+        desc.entryCount = entryVec.size();
+
+        if (device->IsValidationEnabled()) {
+            DAWN_TRY_CONTEXT(ValidateBindGroupLayoutDescriptor(device, &desc), "validating %s",
+                             &desc);
+        }
+        return device->GetOrCreateBindGroupLayout(&desc, pipelineCompatibilityToken);
+    };
+
+    ASSERT(!stages.empty());
+
+    // Data which BindGroupLayoutDescriptor will point to for creation
+    ityp::array<BindGroupIndex, std::map<BindingNumber, BindGroupLayoutEntry>, kMaxBindGroups>
+        entryData = {};
+
+    // External texture binding layouts are chained structs that are set as a pointer within
+    // the bind group layout entry. We declare an entry here so that it can be used when needed
+    // in each BindGroupLayoutEntry and so it can stay alive until the call to
+    // GetOrCreateBindGroupLayout. Because ExternalTextureBindingLayout is an empty struct,
+    // there's no issue with using the same struct multiple times.
+    ExternalTextureBindingLayout externalTextureBindingLayout;
+
+    // Loops over all the reflected BindGroupLayoutEntries from shaders.
+    for (const StageAndDescriptor& stage : stages) {
+        const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+
+        for (BindGroupIndex group(0); group < metadata.bindings.size(); ++group) {
+            for (const auto& [bindingNumber, shaderBinding] : metadata.bindings[group]) {
+                // Create the BindGroupLayoutEntry
+                BindGroupLayoutEntry entry =
+                    ConvertMetadataToEntry(shaderBinding, &externalTextureBindingLayout);
+                entry.binding = static_cast<uint32_t>(bindingNumber);
+                entry.visibility = StageBit(stage.shaderStage);
+
+                // Add it to our map of all entries, if there is an existing entry, then we
+                // need to merge, if we can.
+                const auto& [existingEntry, inserted] =
+                    entryData[group].insert({bindingNumber, entry});
+                if (!inserted) {
+                    DAWN_TRY(MergeEntries(&existingEntry->second, entry));
                 }
             }
         }
 
-        // Create the bind group layouts. We need to keep track of the last non-empty BGL because
-        // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
-        // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
-        // same.
-        BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
-        ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
-        for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
-            DAWN_TRY_ASSIGN(bindGroupLayouts[group],
-                            CreateBGL(device, entryData[group], pipelineCompatibilityToken));
-            if (entryData[group].size() != 0) {
-                pipelineBGLCount = group + BindGroupIndex(1);
+        // Promote any Unfilterable textures used with a sampler to Filtering.
+        for (const EntryPointMetadata::SamplerTexturePair& pair : metadata.samplerTexturePairs) {
+            BindGroupLayoutEntry* entry = &entryData[pair.texture.group][pair.texture.binding];
+            if (entry->texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat) {
+                entry->texture.sampleType = wgpu::TextureSampleType::Float;
             }
         }
+    }
 
-        // Create the deduced pipeline layout, validating if it is valid.
-        ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
-        for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
-            bgls[group] = bindGroupLayouts[group].Get();
+    // Create the bind group layouts. We need to keep track of the last non-empty BGL because
+    // Dawn doesn't yet know that an empty BGL and a null BGL are the same thing.
+    // TODO(cwallez@chromium.org): remove this when Dawn knows that empty and null BGL are the
+    // same.
+    BindGroupIndex pipelineBGLCount = BindGroupIndex(0);
+    ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups> bindGroupLayouts = {};
+    for (BindGroupIndex group(0); group < kMaxBindGroupsTyped; ++group) {
+        DAWN_TRY_ASSIGN(bindGroupLayouts[group],
+                        CreateBGL(device, entryData[group], pipelineCompatibilityToken));
+        if (entryData[group].size() != 0) {
+            pipelineBGLCount = group + BindGroupIndex(1);
         }
+    }
 
-        PipelineLayoutDescriptor desc = {};
-        desc.bindGroupLayouts = bgls.data();
-        desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
+    // Create the deduced pipeline layout, validating if it is valid.
+    ityp::array<BindGroupIndex, BindGroupLayoutBase*, kMaxBindGroups> bgls = {};
+    for (BindGroupIndex group(0); group < pipelineBGLCount; ++group) {
+        bgls[group] = bindGroupLayouts[group].Get();
+    }
 
-        DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
+    PipelineLayoutDescriptor desc = {};
+    desc.bindGroupLayouts = bgls.data();
+    desc.bindGroupLayoutCount = static_cast<uint32_t>(pipelineBGLCount);
 
-        Ref<PipelineLayoutBase> result;
-        DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
-        ASSERT(!result->IsError());
+    DAWN_TRY(ValidatePipelineLayoutDescriptor(device, &desc, pipelineCompatibilityToken));
 
-        // Check in debug that the pipeline layout is compatible with the current pipeline.
-        for (const StageAndDescriptor& stage : stages) {
-            const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
-            ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get())
-                       .IsSuccess());
+    Ref<PipelineLayoutBase> result;
+    DAWN_TRY_ASSIGN(result, device->GetOrCreatePipelineLayout(&desc));
+    ASSERT(!result->IsError());
+
+    // Check in debug that the pipeline layout is compatible with the current pipeline.
+    for (const StageAndDescriptor& stage : stages) {
+        const EntryPointMetadata& metadata = stage.module->GetEntryPoint(stage.entryPoint);
+        ASSERT(ValidateCompatibilityWithPipelineLayout(device, metadata, result.Get()).IsSuccess());
+    }
+
+    return std::move(result);
+}
+
+ObjectType PipelineLayoutBase::GetType() const {
+    return ObjectType::PipelineLayout;
+}
+
+const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
+    ASSERT(!IsError());
+    ASSERT(group < kMaxBindGroupsTyped);
+    ASSERT(mMask[group]);
+    const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+    ASSERT(bgl != nullptr);
+    return bgl;
+}
+
+BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
+    ASSERT(!IsError());
+    ASSERT(group < kMaxBindGroupsTyped);
+    ASSERT(mMask[group]);
+    BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
+    ASSERT(bgl != nullptr);
+    return bgl;
+}
+
+const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
+    ASSERT(!IsError());
+    return mMask;
+}
+
+BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(const PipelineLayoutBase* other) const {
+    ASSERT(!IsError());
+    return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
+}
+
+BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
+    ASSERT(!IsError());
+
+    for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
+        if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
+            return i;
         }
+    }
+    return kMaxBindGroupsTyped;
+}
 
-        return std::move(result);
+size_t PipelineLayoutBase::ComputeContentHash() {
+    ObjectContentHasher recorder;
+    recorder.Record(mMask);
+
+    for (BindGroupIndex group : IterateBitSet(mMask)) {
+        recorder.Record(GetBindGroupLayout(group)->GetContentHash());
     }
 
-    ObjectType PipelineLayoutBase::GetType() const {
-        return ObjectType::PipelineLayout;
+    return recorder.GetContentHash();
+}
+
+bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
+                                                  const PipelineLayoutBase* b) const {
+    if (a->mMask != b->mMask) {
+        return false;
     }
 
-    const BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) const {
-        ASSERT(!IsError());
-        ASSERT(group < kMaxBindGroupsTyped);
-        ASSERT(mMask[group]);
-        const BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
-        ASSERT(bgl != nullptr);
-        return bgl;
-    }
-
-    BindGroupLayoutBase* PipelineLayoutBase::GetBindGroupLayout(BindGroupIndex group) {
-        ASSERT(!IsError());
-        ASSERT(group < kMaxBindGroupsTyped);
-        ASSERT(mMask[group]);
-        BindGroupLayoutBase* bgl = mBindGroupLayouts[group].Get();
-        ASSERT(bgl != nullptr);
-        return bgl;
-    }
-
-    const BindGroupLayoutMask& PipelineLayoutBase::GetBindGroupLayoutsMask() const {
-        ASSERT(!IsError());
-        return mMask;
-    }
-
-    BindGroupLayoutMask PipelineLayoutBase::InheritedGroupsMask(
-        const PipelineLayoutBase* other) const {
-        ASSERT(!IsError());
-        return {(1 << static_cast<uint32_t>(GroupsInheritUpTo(other))) - 1u};
-    }
-
-    BindGroupIndex PipelineLayoutBase::GroupsInheritUpTo(const PipelineLayoutBase* other) const {
-        ASSERT(!IsError());
-
-        for (BindGroupIndex i(0); i < kMaxBindGroupsTyped; ++i) {
-            if (!mMask[i] || mBindGroupLayouts[i].Get() != other->mBindGroupLayouts[i].Get()) {
-                return i;
-            }
-        }
-        return kMaxBindGroupsTyped;
-    }
-
-    size_t PipelineLayoutBase::ComputeContentHash() {
-        ObjectContentHasher recorder;
-        recorder.Record(mMask);
-
-        for (BindGroupIndex group : IterateBitSet(mMask)) {
-            recorder.Record(GetBindGroupLayout(group)->GetContentHash());
-        }
-
-        return recorder.GetContentHash();
-    }
-
-    bool PipelineLayoutBase::EqualityFunc::operator()(const PipelineLayoutBase* a,
-                                                      const PipelineLayoutBase* b) const {
-        if (a->mMask != b->mMask) {
+    for (BindGroupIndex group : IterateBitSet(a->mMask)) {
+        if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
             return false;
         }
-
-        for (BindGroupIndex group : IterateBitSet(a->mMask)) {
-            if (a->GetBindGroupLayout(group) != b->GetBindGroupLayout(group)) {
-                return false;
-            }
-        }
-
-        return true;
     }
 
+    return true;
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/PipelineLayout.h b/src/dawn/native/PipelineLayout.h
index c3a7821..c2536c2 100644
--- a/src/dawn/native/PipelineLayout.h
+++ b/src/dawn/native/PipelineLayout.h
@@ -33,66 +33,65 @@
 
 namespace dawn::native {
 
-    MaybeError ValidatePipelineLayoutDescriptor(
-        DeviceBase*,
-        const PipelineLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
+MaybeError ValidatePipelineLayoutDescriptor(
+    DeviceBase*,
+    const PipelineLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken = PipelineCompatibilityToken(0));
 
-    using BindGroupLayoutArray =
-        ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
-    using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
+using BindGroupLayoutArray = ityp::array<BindGroupIndex, Ref<BindGroupLayoutBase>, kMaxBindGroups>;
+using BindGroupLayoutMask = ityp::bitset<BindGroupIndex, kMaxBindGroups>;
 
-    struct StageAndDescriptor {
-        SingleShaderStage shaderStage;
-        ShaderModuleBase* module;
-        std::string entryPoint;
-        uint32_t constantCount = 0u;
-        ConstantEntry const* constants = nullptr;
+struct StageAndDescriptor {
+    SingleShaderStage shaderStage;
+    ShaderModuleBase* module;
+    std::string entryPoint;
+    uint32_t constantCount = 0u;
+    ConstantEntry const* constants = nullptr;
+};
+
+class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
+  public:
+    PipelineLayoutBase(DeviceBase* device,
+                       const PipelineLayoutDescriptor* descriptor,
+                       ApiObjectBase::UntrackedByDeviceTag tag);
+    PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
+    ~PipelineLayoutBase() override;
+
+    static PipelineLayoutBase* MakeError(DeviceBase* device);
+    static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
+        DeviceBase* device,
+        std::vector<StageAndDescriptor> stages);
+
+    ObjectType GetType() const override;
+
+    const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
+    BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
+    const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
+
+    // Utility functions to compute inherited bind groups.
+    // Returns the inherited bind groups as a mask.
+    BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
+
+    // Returns the index of the first incompatible bind group in the range
+    // [0, kMaxBindGroups]
+    BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
+
+    // Functions necessary for the unordered_set<PipelineLayoutBase*>-based cache.
+    size_t ComputeContentHash() override;
+
+    struct EqualityFunc {
+        bool operator()(const PipelineLayoutBase* a, const PipelineLayoutBase* b) const;
     };
 
-    class PipelineLayoutBase : public ApiObjectBase, public CachedObject {
-      public:
-        PipelineLayoutBase(DeviceBase* device,
-                           const PipelineLayoutDescriptor* descriptor,
-                           ApiObjectBase::UntrackedByDeviceTag tag);
-        PipelineLayoutBase(DeviceBase* device, const PipelineLayoutDescriptor* descriptor);
-        ~PipelineLayoutBase() override;
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit PipelineLayoutBase(DeviceBase* device);
+    PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    void DestroyImpl() override;
 
-        static PipelineLayoutBase* MakeError(DeviceBase* device);
-        static ResultOrError<Ref<PipelineLayoutBase>> CreateDefault(
-            DeviceBase* device,
-            std::vector<StageAndDescriptor> stages);
-
-        ObjectType GetType() const override;
-
-        const BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group) const;
-        BindGroupLayoutBase* GetBindGroupLayout(BindGroupIndex group);
-        const BindGroupLayoutMask& GetBindGroupLayoutsMask() const;
-
-        // Utility functions to compute inherited bind groups.
-        // Returns the inherited bind groups as a mask.
-        BindGroupLayoutMask InheritedGroupsMask(const PipelineLayoutBase* other) const;
-
-        // Returns the index of the first incompatible bind group in the range
-        // [0, kMaxBindGroups]
-        BindGroupIndex GroupsInheritUpTo(const PipelineLayoutBase* other) const;
-
-        // Functions necessary for the unordered_set<PipelineLayoutBase*>-based cache.
-        size_t ComputeContentHash() override;
-
-        struct EqualityFunc {
-            bool operator()(const PipelineLayoutBase* a, const PipelineLayoutBase* b) const;
-        };
-
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit PipelineLayoutBase(DeviceBase* device);
-        PipelineLayoutBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-        void DestroyImpl() override;
-
-        BindGroupLayoutArray mBindGroupLayouts;
-        BindGroupLayoutMask mMask;
-    };
+    BindGroupLayoutArray mBindGroupLayouts;
+    BindGroupLayoutMask mMask;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/PooledResourceMemoryAllocator.cpp b/src/dawn/native/PooledResourceMemoryAllocator.cpp
index 0fb5772..95bca8c 100644
--- a/src/dawn/native/PooledResourceMemoryAllocator.cpp
+++ b/src/dawn/native/PooledResourceMemoryAllocator.cpp
@@ -20,44 +20,42 @@
 
 namespace dawn::native {
 
-    PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(
-        ResourceHeapAllocator* heapAllocator)
-        : mHeapAllocator(heapAllocator) {
+PooledResourceMemoryAllocator::PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator)
+    : mHeapAllocator(heapAllocator) {}
+
+void PooledResourceMemoryAllocator::DestroyPool() {
+    for (auto& resourceHeap : mPool) {
+        ASSERT(resourceHeap != nullptr);
+        mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
     }
 
-    void PooledResourceMemoryAllocator::DestroyPool() {
-        for (auto& resourceHeap : mPool) {
-            ASSERT(resourceHeap != nullptr);
-            mHeapAllocator->DeallocateResourceHeap(std::move(resourceHeap));
-        }
+    mPool.clear();
+}
 
-        mPool.clear();
+ResultOrError<std::unique_ptr<ResourceHeapBase>>
+PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
+    // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
+    // pooling is disabled in-frame when the memory is still pending. For high in-frame
+    // memory users, FIFO might be preferable when memory consumption is a higher priority.
+    std::unique_ptr<ResourceHeapBase> memory;
+    if (!mPool.empty()) {
+        memory = std::move(mPool.front());
+        mPool.pop_front();
     }
 
-    ResultOrError<std::unique_ptr<ResourceHeapBase>>
-    PooledResourceMemoryAllocator::AllocateResourceHeap(uint64_t size) {
-        // Pooled memory is LIFO because memory can be evicted by LRU. However, this means
-        // pooling is disabled in-frame when the memory is still pending. For high in-frame
-        // memory users, FIFO might be preferable when memory consumption is a higher priority.
-        std::unique_ptr<ResourceHeapBase> memory;
-        if (!mPool.empty()) {
-            memory = std::move(mPool.front());
-            mPool.pop_front();
-        }
-
-        if (memory == nullptr) {
-            DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
-        }
-
-        return std::move(memory);
+    if (memory == nullptr) {
+        DAWN_TRY_ASSIGN(memory, mHeapAllocator->AllocateResourceHeap(size));
     }
 
-    void PooledResourceMemoryAllocator::DeallocateResourceHeap(
-        std::unique_ptr<ResourceHeapBase> allocation) {
-        mPool.push_front(std::move(allocation));
-    }
+    return std::move(memory);
+}
 
-    uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
-        return mPool.size();
-    }
+void PooledResourceMemoryAllocator::DeallocateResourceHeap(
+    std::unique_ptr<ResourceHeapBase> allocation) {
+    mPool.push_front(std::move(allocation));
+}
+
+uint64_t PooledResourceMemoryAllocator::GetPoolSizeForTesting() const {
+    return mPool.size();
+}
 }  // namespace dawn::native
diff --git a/src/dawn/native/PooledResourceMemoryAllocator.h b/src/dawn/native/PooledResourceMemoryAllocator.h
index b27ed63..e75ad55 100644
--- a/src/dawn/native/PooledResourceMemoryAllocator.h
+++ b/src/dawn/native/PooledResourceMemoryAllocator.h
@@ -23,31 +23,30 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    // |PooledResourceMemoryAllocator| allocates a fixed-size resource memory from a resource memory
-    // pool. Internally, it manages a list of heaps using LIFO (newest heaps are recycled first).
-    // The heap is in one of two states: AVAILABLE or not. Upon de-allocate, the heap is returned
-    // the pool and made AVAILABLE.
-    class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
-      public:
-        explicit PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
-        ~PooledResourceMemoryAllocator() override = default;
+// |PooledResourceMemoryAllocator| allocates a fixed-size resource memory from a resource memory
+// pool. Internally, it manages a list of heaps using LIFO (newest heaps are recycled first).
+// The heap is in one of two states: AVAILABLE or not. Upon de-allocate, the heap is returned
+// the pool and made AVAILABLE.
+class PooledResourceMemoryAllocator : public ResourceHeapAllocator {
+  public:
+    explicit PooledResourceMemoryAllocator(ResourceHeapAllocator* heapAllocator);
+    ~PooledResourceMemoryAllocator() override = default;
 
-        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-            uint64_t size) override;
-        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override;
+    void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
 
-        void DestroyPool();
+    void DestroyPool();
 
-        // For testing purposes.
-        uint64_t GetPoolSizeForTesting() const;
+    // For testing purposes.
+    uint64_t GetPoolSizeForTesting() const;
 
-      private:
-        ResourceHeapAllocator* mHeapAllocator = nullptr;
+  private:
+    ResourceHeapAllocator* mHeapAllocator = nullptr;
 
-        std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
-    };
+    std::deque<std::unique_ptr<ResourceHeapBase>> mPool;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ProgrammableEncoder.cpp b/src/dawn/native/ProgrammableEncoder.cpp
index 302e73a..29a0f03 100644
--- a/src/dawn/native/ProgrammableEncoder.cpp
+++ b/src/dawn/native/ProgrammableEncoder.cpp
@@ -28,176 +28,172 @@
 
 namespace dawn::native {
 
-    ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
-                                             const char* label,
-                                             EncodingContext* encodingContext)
-        : ApiObjectBase(device, label),
-          mEncodingContext(encodingContext),
-          mValidationEnabled(device->IsValidationEnabled()) {
-    }
+ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+                                         const char* label,
+                                         EncodingContext* encodingContext)
+    : ApiObjectBase(device, label),
+      mEncodingContext(encodingContext),
+      mValidationEnabled(device->IsValidationEnabled()) {}
 
-    ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
-                                             EncodingContext* encodingContext,
-                                             ErrorTag errorTag)
-        : ApiObjectBase(device, errorTag),
-          mEncodingContext(encodingContext),
-          mValidationEnabled(device->IsValidationEnabled()) {
-    }
+ProgrammableEncoder::ProgrammableEncoder(DeviceBase* device,
+                                         EncodingContext* encodingContext,
+                                         ErrorTag errorTag)
+    : ApiObjectBase(device, errorTag),
+      mEncodingContext(encodingContext),
+      mValidationEnabled(device->IsValidationEnabled()) {}
 
-    bool ProgrammableEncoder::IsValidationEnabled() const {
-        return mValidationEnabled;
-    }
+bool ProgrammableEncoder::IsValidationEnabled() const {
+    return mValidationEnabled;
+}
 
-    MaybeError ProgrammableEncoder::ValidateProgrammableEncoderEnd() const {
-        DAWN_INVALID_IF(mDebugGroupStackSize != 0,
-                        "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup.",
-                        mDebugGroupStackSize);
-        return {};
-    }
+MaybeError ProgrammableEncoder::ValidateProgrammableEncoderEnd() const {
+    DAWN_INVALID_IF(mDebugGroupStackSize != 0,
+                    "PushDebugGroup called %u time(s) without a corresponding PopDebugGroup.",
+                    mDebugGroupStackSize);
+    return {};
+}
 
-    void ProgrammableEncoder::APIInsertDebugMarker(const char* groupLabel) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                InsertDebugMarkerCmd* cmd =
-                    allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
-                cmd->length = strlen(groupLabel);
+void ProgrammableEncoder::APIInsertDebugMarker(const char* groupLabel) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            InsertDebugMarkerCmd* cmd =
+                allocator->Allocate<InsertDebugMarkerCmd>(Command::InsertDebugMarker);
+            cmd->length = strlen(groupLabel);
 
-                char* label = allocator->AllocateData<char>(cmd->length + 1);
-                memcpy(label, groupLabel, cmd->length + 1);
+            char* label = allocator->AllocateData<char>(cmd->length + 1);
+            memcpy(label, groupLabel, cmd->length + 1);
 
-                return {};
-            },
-            "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
-    }
+            return {};
+        },
+        "encoding %s.InsertDebugMarker(\"%s\").", this, groupLabel);
+}
 
-    void ProgrammableEncoder::APIPopDebugGroup() {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_INVALID_IF(
-                        mDebugGroupStackSize == 0,
-                        "PopDebugGroup called when no debug groups are currently pushed.");
-                }
-                allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
-                mDebugGroupStackSize--;
-                mEncodingContext->PopDebugGroupLabel();
-
-                return {};
-            },
-            "encoding %s.PopDebugGroup().", this);
-    }
-
-    void ProgrammableEncoder::APIPushDebugGroup(const char* groupLabel) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                PushDebugGroupCmd* cmd =
-                    allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
-                cmd->length = strlen(groupLabel);
-
-                char* label = allocator->AllocateData<char>(cmd->length + 1);
-                memcpy(label, groupLabel, cmd->length + 1);
-
-                mDebugGroupStackSize++;
-                mEncodingContext->PushDebugGroupLabel(groupLabel);
-
-                return {};
-            },
-            "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
-    }
-
-    MaybeError ProgrammableEncoder::ValidateSetBindGroup(BindGroupIndex index,
-                                                         BindGroupBase* group,
-                                                         uint32_t dynamicOffsetCountIn,
-                                                         const uint32_t* dynamicOffsetsIn) const {
-        DAWN_TRY(GetDevice()->ValidateObject(group));
-
-        DAWN_INVALID_IF(index >= kMaxBindGroupsTyped,
-                        "Bind group index (%u) exceeds the maximum (%u).",
-                        static_cast<uint32_t>(index), kMaxBindGroups);
-
-        ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
-                                                                BindingIndex(dynamicOffsetCountIn));
-
-        // Dynamic offsets count must match the number required by the layout perfectly.
-        const BindGroupLayoutBase* layout = group->GetLayout();
-        DAWN_INVALID_IF(
-            layout->GetDynamicBufferCount() != dynamicOffsets.size(),
-            "The number of dynamic offsets (%u) does not match the number of dynamic buffers (%u) "
-            "in %s.",
-            static_cast<uint32_t>(dynamicOffsets.size()),
-            static_cast<uint32_t>(layout->GetDynamicBufferCount()), layout);
-
-        for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
-            const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
-
-            // BGL creation sorts bindings such that the dynamic buffer bindings are first.
-            // ASSERT that this true.
-            ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
-            ASSERT(bindingInfo.buffer.hasDynamicOffset);
-
-            uint64_t requiredAlignment;
-            switch (bindingInfo.buffer.type) {
-                case wgpu::BufferBindingType::Uniform:
-                    requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
-                    break;
-                case wgpu::BufferBindingType::Storage:
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                case kInternalStorageBufferBinding:
-                    requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
-                    break;
-                case wgpu::BufferBindingType::Undefined:
-                    UNREACHABLE();
+void ProgrammableEncoder::APIPopDebugGroup() {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_INVALID_IF(mDebugGroupStackSize == 0,
+                                "PopDebugGroup called when no debug groups are currently pushed.");
             }
+            allocator->Allocate<PopDebugGroupCmd>(Command::PopDebugGroup);
+            mDebugGroupStackSize--;
+            mEncodingContext->PopDebugGroupLabel();
 
-            DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
-                            "Dynamic Offset[%u] (%u) is not %u byte aligned.",
-                            static_cast<uint32_t>(i), dynamicOffsets[i], requiredAlignment);
+            return {};
+        },
+        "encoding %s.PopDebugGroup().", this);
+}
 
-            BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
+void ProgrammableEncoder::APIPushDebugGroup(const char* groupLabel) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            PushDebugGroupCmd* cmd =
+                allocator->Allocate<PushDebugGroupCmd>(Command::PushDebugGroup);
+            cmd->length = strlen(groupLabel);
 
-            // During BindGroup creation, validation ensures binding offset + binding size
-            // <= buffer size.
-            ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
-            ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
+            char* label = allocator->AllocateData<char>(cmd->length + 1);
+            memcpy(label, groupLabel, cmd->length + 1);
 
-            if ((dynamicOffsets[i] >
-                 bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
-                DAWN_INVALID_IF(
-                    (bufferBinding.buffer->GetSize() - bufferBinding.offset) == bufferBinding.size,
-                    "Dynamic Offset[%u] (%u) is out of bounds of %s with a size of %u and a bound "
-                    "range of (offset: %u, size: %u). The binding goes to the end of the buffer "
-                    "even with a dynamic offset of 0. Did you forget to specify "
-                    "the binding's size?",
-                    static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
-                    bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+            mDebugGroupStackSize++;
+            mEncodingContext->PushDebugGroupLabel(groupLabel);
 
-                return DAWN_FORMAT_VALIDATION_ERROR(
-                    "Dynamic Offset[%u] (%u) is out of bounds of "
-                    "%s with a size of %u and a bound range of (offset: %u, size: %u).",
-                    static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
-                    bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
-            }
+            return {};
+        },
+        "encoding %s.PushDebugGroup(\"%s\").", this, groupLabel);
+}
+
+MaybeError ProgrammableEncoder::ValidateSetBindGroup(BindGroupIndex index,
+                                                     BindGroupBase* group,
+                                                     uint32_t dynamicOffsetCountIn,
+                                                     const uint32_t* dynamicOffsetsIn) const {
+    DAWN_TRY(GetDevice()->ValidateObject(group));
+
+    DAWN_INVALID_IF(index >= kMaxBindGroupsTyped, "Bind group index (%u) exceeds the maximum (%u).",
+                    static_cast<uint32_t>(index), kMaxBindGroups);
+
+    ityp::span<BindingIndex, const uint32_t> dynamicOffsets(dynamicOffsetsIn,
+                                                            BindingIndex(dynamicOffsetCountIn));
+
+    // Dynamic offsets count must match the number required by the layout perfectly.
+    const BindGroupLayoutBase* layout = group->GetLayout();
+    DAWN_INVALID_IF(
+        layout->GetDynamicBufferCount() != dynamicOffsets.size(),
+        "The number of dynamic offsets (%u) does not match the number of dynamic buffers (%u) "
+        "in %s.",
+        static_cast<uint32_t>(dynamicOffsets.size()),
+        static_cast<uint32_t>(layout->GetDynamicBufferCount()), layout);
+
+    for (BindingIndex i{0}; i < dynamicOffsets.size(); ++i) {
+        const BindingInfo& bindingInfo = layout->GetBindingInfo(i);
+
+        // BGL creation sorts bindings such that the dynamic buffer bindings are first.
+        // ASSERT that this true.
+        ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+        ASSERT(bindingInfo.buffer.hasDynamicOffset);
+
+        uint64_t requiredAlignment;
+        switch (bindingInfo.buffer.type) {
+            case wgpu::BufferBindingType::Uniform:
+                requiredAlignment = GetDevice()->GetLimits().v1.minUniformBufferOffsetAlignment;
+                break;
+            case wgpu::BufferBindingType::Storage:
+            case wgpu::BufferBindingType::ReadOnlyStorage:
+            case kInternalStorageBufferBinding:
+                requiredAlignment = GetDevice()->GetLimits().v1.minStorageBufferOffsetAlignment;
+                break;
+            case wgpu::BufferBindingType::Undefined:
+                UNREACHABLE();
         }
 
-        return {};
-    }
+        DAWN_INVALID_IF(!IsAligned(dynamicOffsets[i], requiredAlignment),
+                        "Dynamic Offset[%u] (%u) is not %u byte aligned.", static_cast<uint32_t>(i),
+                        dynamicOffsets[i], requiredAlignment);
 
-    void ProgrammableEncoder::RecordSetBindGroup(CommandAllocator* allocator,
-                                                 BindGroupIndex index,
-                                                 BindGroupBase* group,
-                                                 uint32_t dynamicOffsetCount,
-                                                 const uint32_t* dynamicOffsets) const {
-        SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
-        cmd->index = index;
-        cmd->group = group;
-        cmd->dynamicOffsetCount = dynamicOffsetCount;
-        if (dynamicOffsetCount > 0) {
-            uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
-            memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
+        BufferBinding bufferBinding = group->GetBindingAsBufferBinding(i);
+
+        // During BindGroup creation, validation ensures binding offset + binding size
+        // <= buffer size.
+        ASSERT(bufferBinding.buffer->GetSize() >= bufferBinding.size);
+        ASSERT(bufferBinding.buffer->GetSize() - bufferBinding.size >= bufferBinding.offset);
+
+        if ((dynamicOffsets[i] >
+             bufferBinding.buffer->GetSize() - bufferBinding.offset - bufferBinding.size)) {
+            DAWN_INVALID_IF(
+                (bufferBinding.buffer->GetSize() - bufferBinding.offset) == bufferBinding.size,
+                "Dynamic Offset[%u] (%u) is out of bounds of %s with a size of %u and a bound "
+                "range of (offset: %u, size: %u). The binding goes to the end of the buffer "
+                "even with a dynamic offset of 0. Did you forget to specify "
+                "the binding's size?",
+                static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+                bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
+
+            return DAWN_FORMAT_VALIDATION_ERROR(
+                "Dynamic Offset[%u] (%u) is out of bounds of "
+                "%s with a size of %u and a bound range of (offset: %u, size: %u).",
+                static_cast<uint32_t>(i), dynamicOffsets[i], bufferBinding.buffer,
+                bufferBinding.buffer->GetSize(), bufferBinding.offset, bufferBinding.size);
         }
     }
 
+    return {};
+}
+
+void ProgrammableEncoder::RecordSetBindGroup(CommandAllocator* allocator,
+                                             BindGroupIndex index,
+                                             BindGroupBase* group,
+                                             uint32_t dynamicOffsetCount,
+                                             const uint32_t* dynamicOffsets) const {
+    SetBindGroupCmd* cmd = allocator->Allocate<SetBindGroupCmd>(Command::SetBindGroup);
+    cmd->index = index;
+    cmd->group = group;
+    cmd->dynamicOffsetCount = dynamicOffsetCount;
+    if (dynamicOffsetCount > 0) {
+        uint32_t* offsets = allocator->AllocateData<uint32_t>(cmd->dynamicOffsetCount);
+        memcpy(offsets, dynamicOffsets, dynamicOffsetCount * sizeof(uint32_t));
+    }
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/ProgrammableEncoder.h b/src/dawn/native/ProgrammableEncoder.h
index 6a7918f..0aa53d1 100644
--- a/src/dawn/native/ProgrammableEncoder.h
+++ b/src/dawn/native/ProgrammableEncoder.h
@@ -25,47 +25,43 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    // Base class for shared functionality between programmable encoders.
-    class ProgrammableEncoder : public ApiObjectBase {
-      public:
-        ProgrammableEncoder(DeviceBase* device,
-                            const char* label,
-                            EncodingContext* encodingContext);
+// Base class for shared functionality between programmable encoders.
+class ProgrammableEncoder : public ApiObjectBase {
+  public:
+    ProgrammableEncoder(DeviceBase* device, const char* label, EncodingContext* encodingContext);
 
-        void APIInsertDebugMarker(const char* groupLabel);
-        void APIPopDebugGroup();
-        void APIPushDebugGroup(const char* groupLabel);
+    void APIInsertDebugMarker(const char* groupLabel);
+    void APIPopDebugGroup();
+    void APIPushDebugGroup(const char* groupLabel);
 
-      protected:
-        bool IsValidationEnabled() const;
-        MaybeError ValidateProgrammableEncoderEnd() const;
+  protected:
+    bool IsValidationEnabled() const;
+    MaybeError ValidateProgrammableEncoderEnd() const;
 
-        // Compute and render passes do different things on SetBindGroup. These are helper functions
-        // for the logic they have in common.
-        MaybeError ValidateSetBindGroup(BindGroupIndex index,
-                                        BindGroupBase* group,
-                                        uint32_t dynamicOffsetCountIn,
-                                        const uint32_t* dynamicOffsetsIn) const;
-        void RecordSetBindGroup(CommandAllocator* allocator,
-                                BindGroupIndex index,
-                                BindGroupBase* group,
-                                uint32_t dynamicOffsetCount,
-                                const uint32_t* dynamicOffsets) const;
+    // Compute and render passes do different things on SetBindGroup. These are helper functions
+    // for the logic they have in common.
+    MaybeError ValidateSetBindGroup(BindGroupIndex index,
+                                    BindGroupBase* group,
+                                    uint32_t dynamicOffsetCountIn,
+                                    const uint32_t* dynamicOffsetsIn) const;
+    void RecordSetBindGroup(CommandAllocator* allocator,
+                            BindGroupIndex index,
+                            BindGroupBase* group,
+                            uint32_t dynamicOffsetCount,
+                            const uint32_t* dynamicOffsets) const;
 
-        // Construct an "error" programmable pass encoder.
-        ProgrammableEncoder(DeviceBase* device,
-                            EncodingContext* encodingContext,
-                            ErrorTag errorTag);
+    // Construct an "error" programmable pass encoder.
+    ProgrammableEncoder(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
 
-        EncodingContext* mEncodingContext = nullptr;
+    EncodingContext* mEncodingContext = nullptr;
 
-        uint64_t mDebugGroupStackSize = 0;
+    uint64_t mDebugGroupStackSize = 0;
 
-      private:
-        const bool mValidationEnabled;
-    };
+  private:
+    const bool mValidationEnabled;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/QueryHelper.cpp b/src/dawn/native/QueryHelper.cpp
index 62bf6fc..e151d2f 100644
--- a/src/dawn/native/QueryHelper.cpp
+++ b/src/dawn/native/QueryHelper.cpp
@@ -29,16 +29,16 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        // Assert the offsets in dawn::native::TimestampParams are same with the ones in the shader
-        static_assert(offsetof(dawn::native::TimestampParams, first) == 0);
-        static_assert(offsetof(dawn::native::TimestampParams, count) == 4);
-        static_assert(offsetof(dawn::native::TimestampParams, offset) == 8);
-        static_assert(offsetof(dawn::native::TimestampParams, multiplier) == 12);
-        static_assert(offsetof(dawn::native::TimestampParams, rightShift) == 16);
+// Assert the offsets in dawn::native::TimestampParams are same with the ones in the shader
+static_assert(offsetof(dawn::native::TimestampParams, first) == 0);
+static_assert(offsetof(dawn::native::TimestampParams, count) == 4);
+static_assert(offsetof(dawn::native::TimestampParams, offset) == 8);
+static_assert(offsetof(dawn::native::TimestampParams, multiplier) == 12);
+static_assert(offsetof(dawn::native::TimestampParams, rightShift) == 16);
 
-        static const char sConvertTimestampsToNanoseconds[] = R"(
+static const char sConvertTimestampsToNanoseconds[] = R"(
             struct Timestamp {
                 low  : u32;
                 high : u32;
@@ -116,103 +116,100 @@
             }
         )";
 
-        ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(
-            DeviceBase* device) {
-            InternalPipelineStore* store = device->GetInternalPipelineStore();
+ResultOrError<ComputePipelineBase*> GetOrCreateTimestampComputePipeline(DeviceBase* device) {
+    InternalPipelineStore* store = device->GetInternalPipelineStore();
 
-            if (store->timestampComputePipeline == nullptr) {
-                // Create compute shader module if not cached before.
-                if (store->timestampCS == nullptr) {
-                    DAWN_TRY_ASSIGN(
-                        store->timestampCS,
-                        utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
-                }
-
-                // Create binding group layout
-                Ref<BindGroupLayoutBase> bgl;
-                DAWN_TRY_ASSIGN(
-                    bgl, utils::MakeBindGroupLayout(
-                             device,
-                             {
-                                 {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
-                                 {1, wgpu::ShaderStage::Compute,
-                                  wgpu::BufferBindingType::ReadOnlyStorage},
-                                 {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
-                             },
-                             /* allowInternalBinding */ true));
-
-                // Create pipeline layout
-                Ref<PipelineLayoutBase> layout;
-                DAWN_TRY_ASSIGN(layout, utils::MakeBasicPipelineLayout(device, bgl));
-
-                // Create ComputePipeline.
-                ComputePipelineDescriptor computePipelineDesc = {};
-                // Generate the layout based on shader module.
-                computePipelineDesc.layout = layout.Get();
-                computePipelineDesc.compute.module = store->timestampCS.Get();
-                computePipelineDesc.compute.entryPoint = "main";
-
-                DAWN_TRY_ASSIGN(store->timestampComputePipeline,
-                                device->CreateComputePipeline(&computePipelineDesc));
-            }
-
-            return store->timestampComputePipeline.Get();
+    if (store->timestampComputePipeline == nullptr) {
+        // Create compute shader module if not cached before.
+        if (store->timestampCS == nullptr) {
+            DAWN_TRY_ASSIGN(store->timestampCS,
+                            utils::CreateShaderModule(device, sConvertTimestampsToNanoseconds));
         }
 
-    }  // anonymous namespace
+        // Create binding group layout
+        Ref<BindGroupLayoutBase> bgl;
+        DAWN_TRY_ASSIGN(
+            bgl, utils::MakeBindGroupLayout(
+                     device,
+                     {
+                         {0, wgpu::ShaderStage::Compute, kInternalStorageBufferBinding},
+                         {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage},
+                         {2, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                     },
+                     /* allowInternalBinding */ true));
 
-    TimestampParams::TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period)
-        : first(first), count(count), offset(offset) {
-        // The overall conversion happening, if p is the period, m the multiplier, s the shift, is::
-        //
-        //   m = round(p * 2^s)
-        //
-        // Then in the shader we compute:
-        //
-        //   m / 2^s = round(p * 2^s) / 2*s ~= p
-        //
-        // The goal is to find the best shift to keep the precision of computations. The
-        // conversion shader uses chunks of 16 bits to compute the multiplication with the perios,
-        // so we need to keep the multiplier under 2^16. At the same time, the larger the
-        // multiplier, the better the precision, so we maximize the value of the right shift while
-        // keeping the multiplier under 2 ^ 16
-        uint32_t upperLog2 = ceil(log2(period));
+        // Create pipeline layout
+        Ref<PipelineLayoutBase> layout;
+        DAWN_TRY_ASSIGN(layout, utils::MakeBasicPipelineLayout(device, bgl));
 
-        // Clamp the shift to 16 because we're doing computations in 16bit chunks. The
-        // multiplication by the period will overflow the chunks, but timestamps are mostly
-        // informational so that's ok.
-        rightShift = 16u - std::min(upperLog2, 16u);
-        multiplier = uint32_t(period * (1 << rightShift));
+        // Create ComputePipeline.
+        ComputePipelineDescriptor computePipelineDesc = {};
+        // Generate the layout based on shader module.
+        computePipelineDesc.layout = layout.Get();
+        computePipelineDesc.compute.module = store->timestampCS.Get();
+        computePipelineDesc.compute.entryPoint = "main";
+
+        DAWN_TRY_ASSIGN(store->timestampComputePipeline,
+                        device->CreateComputePipeline(&computePipelineDesc));
     }
 
-    MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
-                                                    BufferBase* timestamps,
-                                                    BufferBase* availability,
-                                                    BufferBase* params) {
-        DeviceBase* device = encoder->GetDevice();
+    return store->timestampComputePipeline.Get();
+}
 
-        ComputePipelineBase* pipeline;
-        DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
+}  // anonymous namespace
 
-        // Prepare bind group layout.
-        Ref<BindGroupLayoutBase> layout;
-        DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+TimestampParams::TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period)
+    : first(first), count(count), offset(offset) {
+    // The overall conversion happening, if p is the period, m the multiplier, s the shift, is:
+    //
+    //   m = round(p * 2^s)
+    //
+    // Then in the shader we compute:
+    //
+    //   m / 2^s = round(p * 2^s) / 2^s ~= p
+    //
+    // The goal is to find the best shift to keep the precision of computations. The
+    // conversion shader uses chunks of 16 bits to compute the multiplication with the period,
+    // so we need to keep the multiplier under 2^16. At the same time, the larger the
+    // multiplier, the better the precision, so we maximize the value of the right shift while
+    // keeping the multiplier under 2 ^ 16
+    uint32_t upperLog2 = ceil(log2(period));
 
-        // Create bind group after all binding entries are set.
-        Ref<BindGroupBase> bindGroup;
-        DAWN_TRY_ASSIGN(bindGroup,
-                        utils::MakeBindGroup(device, layout,
-                                             {{0, timestamps}, {1, availability}, {2, params}}));
+    // Clamp the shift to 16 because we're doing computations in 16bit chunks. The
+    // multiplication by the period will overflow the chunks, but timestamps are mostly
+    // informational so that's ok.
+    rightShift = 16u - std::min(upperLog2, 16u);
+    multiplier = uint32_t(period * (1 << rightShift));
+}
 
-        // Create compute encoder and issue dispatch.
-        Ref<ComputePassEncoder> pass = encoder->BeginComputePass();
-        pass->APISetPipeline(pipeline);
-        pass->APISetBindGroup(0, bindGroup.Get());
-        pass->APIDispatchWorkgroups(
-            static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
-        pass->APIEnd();
+MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+                                                BufferBase* timestamps,
+                                                BufferBase* availability,
+                                                BufferBase* params) {
+    DeviceBase* device = encoder->GetDevice();
 
-        return {};
-    }
+    ComputePipelineBase* pipeline;
+    DAWN_TRY_ASSIGN(pipeline, GetOrCreateTimestampComputePipeline(device));
+
+    // Prepare bind group layout.
+    Ref<BindGroupLayoutBase> layout;
+    DAWN_TRY_ASSIGN(layout, pipeline->GetBindGroupLayout(0));
+
+    // Create bind group after all binding entries are set.
+    Ref<BindGroupBase> bindGroup;
+    DAWN_TRY_ASSIGN(
+        bindGroup,
+        utils::MakeBindGroup(device, layout, {{0, timestamps}, {1, availability}, {2, params}}));
+
+    // Create compute encoder and issue dispatch.
+    Ref<ComputePassEncoder> pass = encoder->BeginComputePass();
+    pass->APISetPipeline(pipeline);
+    pass->APISetBindGroup(0, bindGroup.Get());
+    pass->APIDispatchWorkgroups(
+        static_cast<uint32_t>((timestamps->GetSize() / sizeof(uint64_t) + 7) / 8));
+    pass->APIEnd();
+
+    return {};
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/QueryHelper.h b/src/dawn/native/QueryHelper.h
index 744e612..11aadd0 100644
--- a/src/dawn/native/QueryHelper.h
+++ b/src/dawn/native/QueryHelper.h
@@ -20,23 +20,23 @@
 
 namespace dawn::native {
 
-    class BufferBase;
-    class CommandEncoder;
+class BufferBase;
+class CommandEncoder;
 
-    struct TimestampParams {
-        TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period);
+struct TimestampParams {
+    TimestampParams(uint32_t first, uint32_t count, uint32_t offset, float period);
 
-        uint32_t first;
-        uint32_t count;
-        uint32_t offset;
-        uint32_t multiplier;
-        uint32_t rightShift;
-    };
+    uint32_t first;
+    uint32_t count;
+    uint32_t offset;
+    uint32_t multiplier;
+    uint32_t rightShift;
+};
 
-    MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
-                                                    BufferBase* timestamps,
-                                                    BufferBase* availability,
-                                                    BufferBase* params);
+MaybeError EncodeConvertTimestampsToNanoseconds(CommandEncoder* encoder,
+                                                BufferBase* timestamps,
+                                                BufferBase* availability,
+                                                BufferBase* params);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/QuerySet.cpp b/src/dawn/native/QuerySet.cpp
index 568d606..b798266 100644
--- a/src/dawn/native/QuerySet.cpp
+++ b/src/dawn/native/QuerySet.cpp
@@ -23,158 +23,153 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        class ErrorQuerySet final : public QuerySetBase {
-          public:
-            explicit ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {
+class ErrorQuerySet final : public QuerySetBase {
+  public:
+    explicit ErrorQuerySet(DeviceBase* device) : QuerySetBase(device, ObjectBase::kError) {}
+
+  private:
+    void DestroyImpl() override { UNREACHABLE(); }
+};
+
+}  // anonymous namespace
+
+MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+
+    DAWN_TRY(ValidateQueryType(descriptor->type));
+
+    DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
+                    "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
+                    kMaxQueryCount);
+
+    switch (descriptor->type) {
+        case wgpu::QueryType::Occlusion:
+            DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+                            "Pipeline statistics specified for a query of type %s.",
+                            descriptor->type);
+            break;
+
+        case wgpu::QueryType::PipelineStatistics: {
+            // TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
+            // Disallow it as unsafe until the implementation is completed.
+            DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                            "Pipeline statistics queries are disallowed because they are not "
+                            "fully implemented");
+
+            DAWN_INVALID_IF(
+                !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
+                "Pipeline statistics query set created without the feature being enabled.");
+
+            DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
+                            "Pipeline statistics query set created with 0 statistics.");
+
+            std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
+            for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+                DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
+
+                auto [_, inserted] =
+                    pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
+                DAWN_INVALID_IF(!inserted, "Statistic %s is specified more than once.",
+                                descriptor->pipelineStatistics[i]);
             }
+        } break;
 
-          private:
-            void DestroyImpl() override {
-                UNREACHABLE();
-            }
-        };
+        case wgpu::QueryType::Timestamp:
+            DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                            "Timestamp queries are disallowed because they may expose precise "
+                            "timing information.");
 
-    }  // anonymous namespace
+            DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
+                            "Timestamp query set created without the feature being enabled.");
 
-    MaybeError ValidateQuerySetDescriptor(DeviceBase* device,
-                                          const QuerySetDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+            DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
+                            "Pipeline statistics specified for a query of type %s.",
+                            descriptor->type);
+            break;
 
-        DAWN_TRY(ValidateQueryType(descriptor->type));
-
-        DAWN_INVALID_IF(descriptor->count > kMaxQueryCount,
-                        "Query count (%u) exceeds the maximum query count (%u).", descriptor->count,
-                        kMaxQueryCount);
-
-        switch (descriptor->type) {
-            case wgpu::QueryType::Occlusion:
-                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
-                                "Pipeline statistics specified for a query of type %s.",
-                                descriptor->type);
-                break;
-
-            case wgpu::QueryType::PipelineStatistics: {
-                // TODO(crbug.com/1177506): Pipeline statistics query is not fully implemented.
-                // Disallow it as unsafe until the implementaion is completed.
-                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
-                                "Pipeline statistics queries are disallowed because they are not "
-                                "fully implemented");
-
-                DAWN_INVALID_IF(
-                    !device->IsFeatureEnabled(Feature::PipelineStatisticsQuery),
-                    "Pipeline statistics query set created without the feature being enabled.");
-
-                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount == 0,
-                                "Pipeline statistics query set created with 0 statistics.");
-
-                std::set<wgpu::PipelineStatisticName> pipelineStatisticsSet;
-                for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
-                    DAWN_TRY(ValidatePipelineStatisticName(descriptor->pipelineStatistics[i]));
-
-                    auto [_, inserted] =
-                        pipelineStatisticsSet.insert((descriptor->pipelineStatistics[i]));
-                    DAWN_INVALID_IF(!inserted, "Statistic %s is specified more than once.",
-                                    descriptor->pipelineStatistics[i]);
-                }
-            } break;
-
-            case wgpu::QueryType::Timestamp:
-                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
-                                "Timestamp queries are disallowed because they may expose precise "
-                                "timing information.");
-
-                DAWN_INVALID_IF(!device->IsFeatureEnabled(Feature::TimestampQuery),
-                                "Timestamp query set created without the feature being enabled.");
-
-                DAWN_INVALID_IF(descriptor->pipelineStatisticsCount != 0,
-                                "Pipeline statistics specified for a query of type %s.",
-                                descriptor->type);
-                break;
-
-            default:
-                break;
-        }
-
-        return {};
+        default:
+            break;
     }
 
-    QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
-        : ApiObjectBase(device, descriptor->label),
-          mQueryType(descriptor->type),
-          mQueryCount(descriptor->count),
-          mState(QuerySetState::Available) {
-        for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
-            mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
-        }
+    return {};
+}
 
-        mQueryAvailability.resize(descriptor->count);
-        TrackInDevice();
+QuerySetBase::QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor)
+    : ApiObjectBase(device, descriptor->label),
+      mQueryType(descriptor->type),
+      mQueryCount(descriptor->count),
+      mState(QuerySetState::Available) {
+    for (uint32_t i = 0; i < descriptor->pipelineStatisticsCount; i++) {
+        mPipelineStatistics.push_back(descriptor->pipelineStatistics[i]);
     }
 
-    QuerySetBase::QuerySetBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
+    mQueryAvailability.resize(descriptor->count);
+    TrackInDevice();
+}
 
-    QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
+QuerySetBase::QuerySetBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
 
-    QuerySetBase::~QuerySetBase() {
-        // Uninitialized or already destroyed
-        ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
-    }
+QuerySetBase::QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
 
-    void QuerySetBase::DestroyImpl() {
-        mState = QuerySetState::Destroyed;
-    }
+QuerySetBase::~QuerySetBase() {
+    // Uninitialized or already destroyed
+    ASSERT(mState == QuerySetState::Unavailable || mState == QuerySetState::Destroyed);
+}
 
-    // static
-    QuerySetBase* QuerySetBase::MakeError(DeviceBase* device) {
-        return new ErrorQuerySet(device);
-    }
+void QuerySetBase::DestroyImpl() {
+    mState = QuerySetState::Destroyed;
+}
 
-    ObjectType QuerySetBase::GetType() const {
-        return ObjectType::QuerySet;
-    }
+// static
+QuerySetBase* QuerySetBase::MakeError(DeviceBase* device) {
+    return new ErrorQuerySet(device);
+}
 
-    wgpu::QueryType QuerySetBase::GetQueryType() const {
-        return mQueryType;
-    }
+ObjectType QuerySetBase::GetType() const {
+    return ObjectType::QuerySet;
+}
 
-    uint32_t QuerySetBase::GetQueryCount() const {
-        return mQueryCount;
-    }
+wgpu::QueryType QuerySetBase::GetQueryType() const {
+    return mQueryType;
+}
 
-    const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
-        return mPipelineStatistics;
-    }
+uint32_t QuerySetBase::GetQueryCount() const {
+    return mQueryCount;
+}
 
-    const std::vector<bool>& QuerySetBase::GetQueryAvailability() const {
-        return mQueryAvailability;
-    }
+const std::vector<wgpu::PipelineStatisticName>& QuerySetBase::GetPipelineStatistics() const {
+    return mPipelineStatistics;
+}
 
-    void QuerySetBase::SetQueryAvailability(uint32_t index, bool available) {
-        mQueryAvailability[index] = available;
-    }
+const std::vector<bool>& QuerySetBase::GetQueryAvailability() const {
+    return mQueryAvailability;
+}
 
-    MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
-        ASSERT(!IsError());
-        DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
-        return {};
-    }
+void QuerySetBase::SetQueryAvailability(uint32_t index, bool available) {
+    mQueryAvailability[index] = available;
+}
 
-    void QuerySetBase::APIDestroy() {
-        if (GetDevice()->ConsumedError(ValidateDestroy())) {
-            return;
-        }
-        Destroy();
-    }
+MaybeError QuerySetBase::ValidateCanUseInSubmitNow() const {
+    ASSERT(!IsError());
+    DAWN_INVALID_IF(mState == QuerySetState::Destroyed, "%s used while destroyed.", this);
+    return {};
+}
 
-    MaybeError QuerySetBase::ValidateDestroy() const {
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-        return {};
+void QuerySetBase::APIDestroy() {
+    if (GetDevice()->ConsumedError(ValidateDestroy())) {
+        return;
     }
+    Destroy();
+}
+
+MaybeError QuerySetBase::ValidateDestroy() const {
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+    return {};
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/QuerySet.h b/src/dawn/native/QuerySet.h
index 4e5c363..db85120 100644
--- a/src/dawn/native/QuerySet.h
+++ b/src/dawn/native/QuerySet.h
@@ -25,49 +25,49 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
+MaybeError ValidateQuerySetDescriptor(DeviceBase* device, const QuerySetDescriptor* descriptor);
 
-    class QuerySetBase : public ApiObjectBase {
-      public:
-        QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
+class QuerySetBase : public ApiObjectBase {
+  public:
+    QuerySetBase(DeviceBase* device, const QuerySetDescriptor* descriptor);
 
-        static QuerySetBase* MakeError(DeviceBase* device);
+    static QuerySetBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        wgpu::QueryType GetQueryType() const;
-        uint32_t GetQueryCount() const;
-        const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
+    wgpu::QueryType GetQueryType() const;
+    uint32_t GetQueryCount() const;
+    const std::vector<wgpu::PipelineStatisticName>& GetPipelineStatistics() const;
 
-        const std::vector<bool>& GetQueryAvailability() const;
-        void SetQueryAvailability(uint32_t index, bool available);
+    const std::vector<bool>& GetQueryAvailability() const;
+    void SetQueryAvailability(uint32_t index, bool available);
 
-        MaybeError ValidateCanUseInSubmitNow() const;
+    MaybeError ValidateCanUseInSubmitNow() const;
 
-        void APIDestroy();
+    void APIDestroy();
 
-      protected:
-        QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+  protected:
+    QuerySetBase(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        // Constructor used only for mocking and testing.
-        explicit QuerySetBase(DeviceBase* device);
-        void DestroyImpl() override;
+    // Constructor used only for mocking and testing.
+    explicit QuerySetBase(DeviceBase* device);
+    void DestroyImpl() override;
 
-        ~QuerySetBase() override;
+    ~QuerySetBase() override;
 
-      private:
-        MaybeError ValidateDestroy() const;
+  private:
+    MaybeError ValidateDestroy() const;
 
-        wgpu::QueryType mQueryType;
-        uint32_t mQueryCount;
-        std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
+    wgpu::QueryType mQueryType;
+    uint32_t mQueryCount;
+    std::vector<wgpu::PipelineStatisticName> mPipelineStatistics;
 
-        enum class QuerySetState { Unavailable, Available, Destroyed };
-        QuerySetState mState = QuerySetState::Unavailable;
+    enum class QuerySetState { Unavailable, Available, Destroyed };
+    QuerySetState mState = QuerySetState::Unavailable;
 
-        // Indicates the available queries on the query set for resolving
-        std::vector<bool> mQueryAvailability;
-    };
+    // Indicates the available queries on the query set for resolving
+    std::vector<bool> mQueryAvailability;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Queue.cpp b/src/dawn/native/Queue.cpp
index 9f26d2b..c17ed8f 100644
--- a/src/dawn/native/Queue.cpp
+++ b/src/dawn/native/Queue.cpp
@@ -39,486 +39,470 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        void CopyTextureData(uint8_t* dstPointer,
-                             const uint8_t* srcPointer,
-                             uint32_t depth,
-                             uint32_t rowsPerImage,
-                             uint64_t imageAdditionalStride,
-                             uint32_t actualBytesPerRow,
-                             uint32_t dstBytesPerRow,
-                             uint32_t srcBytesPerRow) {
-            bool copyWholeLayer =
-                actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
-            bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
+void CopyTextureData(uint8_t* dstPointer,
+                     const uint8_t* srcPointer,
+                     uint32_t depth,
+                     uint32_t rowsPerImage,
+                     uint64_t imageAdditionalStride,
+                     uint32_t actualBytesPerRow,
+                     uint32_t dstBytesPerRow,
+                     uint32_t srcBytesPerRow) {
+    bool copyWholeLayer = actualBytesPerRow == dstBytesPerRow && dstBytesPerRow == srcBytesPerRow;
+    bool copyWholeData = copyWholeLayer && imageAdditionalStride == 0;
 
-            if (!copyWholeLayer) {  // copy row by row
-                for (uint32_t d = 0; d < depth; ++d) {
-                    for (uint32_t h = 0; h < rowsPerImage; ++h) {
-                        memcpy(dstPointer, srcPointer, actualBytesPerRow);
-                        dstPointer += dstBytesPerRow;
-                        srcPointer += srcBytesPerRow;
-                    }
-                    srcPointer += imageAdditionalStride;
-                }
-            } else {
-                uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
-                if (!copyWholeData) {  // copy layer by layer
-                    for (uint32_t d = 0; d < depth; ++d) {
-                        memcpy(dstPointer, srcPointer, layerSize);
-                        dstPointer += layerSize;
-                        srcPointer += layerSize + imageAdditionalStride;
-                    }
-                } else {  // do a single copy
-                    memcpy(dstPointer, srcPointer, layerSize * depth);
-                }
+    if (!copyWholeLayer) {  // copy row by row
+        for (uint32_t d = 0; d < depth; ++d) {
+            for (uint32_t h = 0; h < rowsPerImage; ++h) {
+                memcpy(dstPointer, srcPointer, actualBytesPerRow);
+                dstPointer += dstBytesPerRow;
+                srcPointer += srcBytesPerRow;
             }
+            srcPointer += imageAdditionalStride;
         }
-
-        ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
-            DeviceBase* device,
-            const void* data,
-            uint32_t alignedBytesPerRow,
-            uint32_t optimallyAlignedBytesPerRow,
-            uint32_t alignedRowsPerImage,
-            const TextureDataLayout& dataLayout,
-            bool hasDepthOrStencil,
-            const TexelBlockInfo& blockInfo,
-            const Extent3D& writeSizePixel) {
-            uint64_t newDataSizeBytes;
-            DAWN_TRY_ASSIGN(
-                newDataSizeBytes,
-                ComputeRequiredBytesInCopy(blockInfo, writeSizePixel, optimallyAlignedBytesPerRow,
-                                           alignedRowsPerImage));
-
-            uint64_t optimalOffsetAlignment =
-                device->GetOptimalBufferToTextureCopyOffsetAlignment();
-            ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
-            ASSERT(IsPowerOfTwo(blockInfo.byteSize));
-            // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
-            // since both of them are powers of two, we only need to align to the max value.
-            uint64_t offsetAlignment =
-                std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
-
-            // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
-            // by WebGPU and Vulkan SPEC.
-            if (hasDepthOrStencil) {
-                constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
-                offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
+    } else {
+        uint64_t layerSize = uint64_t(rowsPerImage) * actualBytesPerRow;
+        if (!copyWholeData) {  // copy layer by layer
+            for (uint32_t d = 0; d < depth; ++d) {
+                memcpy(dstPointer, srcPointer, layerSize);
+                dstPointer += layerSize;
+                srcPointer += layerSize + imageAdditionalStride;
             }
-
-            UploadHandle uploadHandle;
-            DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                              newDataSizeBytes, device->GetPendingCommandSerial(),
-                                              offsetAlignment));
-            ASSERT(uploadHandle.mappedBuffer != nullptr);
-
-            uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
-            const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
-            srcPointer += dataLayout.offset;
-
-            uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
-            if (dataRowsPerImage == 0) {
-                dataRowsPerImage = writeSizePixel.height / blockInfo.height;
-            }
-
-            ASSERT(dataRowsPerImage >= alignedRowsPerImage);
-            uint64_t imageAdditionalStride =
-                dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
-
-            CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers,
-                            alignedRowsPerImage, imageAdditionalStride, alignedBytesPerRow,
-                            optimallyAlignedBytesPerRow, dataLayout.bytesPerRow);
-
-            return uploadHandle;
-        }
-
-        struct SubmittedWorkDone : QueueBase::TaskInFlight {
-            SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
-                : mCallback(callback), mUserdata(userdata) {
-            }
-            void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
-                ASSERT(mCallback != nullptr);
-                TRACE_EVENT1(platform, General, "Queue::SubmittedWorkDone::Finished", "serial",
-                             uint64_t(serial));
-                mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
-                mCallback = nullptr;
-            }
-            void HandleDeviceLoss() override {
-                ASSERT(mCallback != nullptr);
-                mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
-                mCallback = nullptr;
-            }
-            ~SubmittedWorkDone() override = default;
-
-          private:
-            WGPUQueueWorkDoneCallback mCallback = nullptr;
-            void* mUserdata;
-        };
-
-        class ErrorQueue : public QueueBase {
-          public:
-            explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {
-            }
-
-          private:
-            MaybeError SubmitImpl(uint32_t commandCount,
-                                  CommandBufferBase* const* commands) override {
-                UNREACHABLE();
-            }
-        };
-    }  // namespace
-
-    // QueueBase
-
-    QueueBase::TaskInFlight::~TaskInFlight() {
-    }
-
-    QueueBase::QueueBase(DeviceBase* device, const QueueDescriptor* descriptor)
-        : ApiObjectBase(device, descriptor->label) {
-    }
-
-    QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
-
-    QueueBase::~QueueBase() {
-        ASSERT(mTasksInFlight.Empty());
-    }
-
-    void QueueBase::DestroyImpl() {
-    }
-
-    // static
-    QueueBase* QueueBase::MakeError(DeviceBase* device) {
-        return new ErrorQueue(device);
-    }
-
-    ObjectType QueueBase::GetType() const {
-        return ObjectType::Queue;
-    }
-
-    void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
-        SubmitInternal(commandCount, commands);
-
-        for (uint32_t i = 0; i < commandCount; ++i) {
-            commands[i]->Destroy();
+        } else {  // do a single copy
+            memcpy(dstPointer, srcPointer, layerSize * depth);
         }
     }
+}
 
-    void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
-                                           WGPUQueueWorkDoneCallback callback,
-                                           void* userdata) {
-        // The error status depends on the type of error so we let the validation function choose it
-        WGPUQueueWorkDoneStatus status;
-        if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
-            callback(status, userdata);
-            return;
-        }
+ResultOrError<UploadHandle> UploadTextureDataAligningBytesPerRowAndOffset(
+    DeviceBase* device,
+    const void* data,
+    uint32_t alignedBytesPerRow,
+    uint32_t optimallyAlignedBytesPerRow,
+    uint32_t alignedRowsPerImage,
+    const TextureDataLayout& dataLayout,
+    bool hasDepthOrStencil,
+    const TexelBlockInfo& blockInfo,
+    const Extent3D& writeSizePixel) {
+    uint64_t newDataSizeBytes;
+    DAWN_TRY_ASSIGN(newDataSizeBytes,
+                    ComputeRequiredBytesInCopy(blockInfo, writeSizePixel,
+                                               optimallyAlignedBytesPerRow, alignedRowsPerImage));
 
-        std::unique_ptr<SubmittedWorkDone> task =
-            std::make_unique<SubmittedWorkDone>(callback, userdata);
+    uint64_t optimalOffsetAlignment = device->GetOptimalBufferToTextureCopyOffsetAlignment();
+    ASSERT(IsPowerOfTwo(optimalOffsetAlignment));
+    ASSERT(IsPowerOfTwo(blockInfo.byteSize));
+    // We need the offset to be aligned to both optimalOffsetAlignment and blockByteSize,
+    // since both of them are powers of two, we only need to align to the max value.
+    uint64_t offsetAlignment = std::max(optimalOffsetAlignment, uint64_t(blockInfo.byteSize));
 
-        // Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
-        // also used to make sure ALL queue work is finished in tests, so we also wait for pending
-        // commands (this is non-observable outside of tests so it's ok to do deviate a bit from the
-        // spec).
-        TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
-
-        TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::APIOnSubmittedWorkDone", "serial",
-                     uint64_t(GetDevice()->GetPendingCommandSerial()));
+    // For depth-stencil texture, buffer offset must be a multiple of 4, which is required
+    // by WebGPU and Vulkan SPEC.
+    if (hasDepthOrStencil) {
+        constexpr uint64_t kOffsetAlignmentForDepthStencil = 4;
+        offsetAlignment = std::max(offsetAlignment, kOffsetAlignmentForDepthStencil);
     }
 
-    void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
-        mTasksInFlight.Enqueue(std::move(task), serial);
-        GetDevice()->AddFutureSerial(serial);
+    UploadHandle uploadHandle;
+    DAWN_TRY_ASSIGN(uploadHandle,
+                    device->GetDynamicUploader()->Allocate(
+                        newDataSizeBytes, device->GetPendingCommandSerial(), offsetAlignment));
+    ASSERT(uploadHandle.mappedBuffer != nullptr);
+
+    uint8_t* dstPointer = static_cast<uint8_t*>(uploadHandle.mappedBuffer);
+    const uint8_t* srcPointer = static_cast<const uint8_t*>(data);
+    srcPointer += dataLayout.offset;
+
+    uint32_t dataRowsPerImage = dataLayout.rowsPerImage;
+    if (dataRowsPerImage == 0) {
+        dataRowsPerImage = writeSizePixel.height / blockInfo.height;
     }
 
-    void QueueBase::Tick(ExecutionSerial finishedSerial) {
-        // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
-        // then the device will be ticked, which in turns ticks the queue, causing reentrance here.
-        // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
-        // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
-        // callbacks.
-        TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::Tick", "finishedSerial",
-                     uint64_t(finishedSerial));
+    ASSERT(dataRowsPerImage >= alignedRowsPerImage);
+    uint64_t imageAdditionalStride =
+        dataLayout.bytesPerRow * (dataRowsPerImage - alignedRowsPerImage);
 
-        std::vector<std::unique_ptr<TaskInFlight>> tasks;
-        for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
-            tasks.push_back(std::move(task));
-        }
-        mTasksInFlight.ClearUpTo(finishedSerial);
+    CopyTextureData(dstPointer, srcPointer, writeSizePixel.depthOrArrayLayers, alignedRowsPerImage,
+                    imageAdditionalStride, alignedBytesPerRow, optimallyAlignedBytesPerRow,
+                    dataLayout.bytesPerRow);
 
-        for (auto& task : tasks) {
-            task->Finish(GetDevice()->GetPlatform(), finishedSerial);
-        }
+    return uploadHandle;
+}
+
+struct SubmittedWorkDone : QueueBase::TaskInFlight {
+    SubmittedWorkDone(WGPUQueueWorkDoneCallback callback, void* userdata)
+        : mCallback(callback), mUserdata(userdata) {}
+    void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) override {
+        ASSERT(mCallback != nullptr);
+        TRACE_EVENT1(platform, General, "Queue::SubmittedWorkDone::Finished", "serial",
+                     uint64_t(serial));
+        mCallback(WGPUQueueWorkDoneStatus_Success, mUserdata);
+        mCallback = nullptr;
+    }
+    void HandleDeviceLoss() override {
+        ASSERT(mCallback != nullptr);
+        mCallback(WGPUQueueWorkDoneStatus_DeviceLost, mUserdata);
+        mCallback = nullptr;
+    }
+    ~SubmittedWorkDone() override = default;
+
+  private:
+    WGPUQueueWorkDoneCallback mCallback = nullptr;
+    void* mUserdata;
+};
+
+class ErrorQueue : public QueueBase {
+  public:
+    explicit ErrorQueue(DeviceBase* device) : QueueBase(device, ObjectBase::kError) {}
+
+  private:
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override {
+        UNREACHABLE();
+    }
+};
+}  // namespace
+
+// QueueBase
+
+QueueBase::TaskInFlight::~TaskInFlight() {}
+
+QueueBase::QueueBase(DeviceBase* device, const QueueDescriptor* descriptor)
+    : ApiObjectBase(device, descriptor->label) {}
+
+QueueBase::QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag) : ApiObjectBase(device, tag) {}
+
+QueueBase::~QueueBase() {
+    ASSERT(mTasksInFlight.Empty());
+}
+
+void QueueBase::DestroyImpl() {}
+
+// static
+QueueBase* QueueBase::MakeError(DeviceBase* device) {
+    return new ErrorQueue(device);
+}
+
+ObjectType QueueBase::GetType() const {
+    return ObjectType::Queue;
+}
+
+void QueueBase::APISubmit(uint32_t commandCount, CommandBufferBase* const* commands) {
+    SubmitInternal(commandCount, commands);
+
+    for (uint32_t i = 0; i < commandCount; ++i) {
+        commands[i]->Destroy();
+    }
+}
+
+void QueueBase::APIOnSubmittedWorkDone(uint64_t signalValue,
+                                       WGPUQueueWorkDoneCallback callback,
+                                       void* userdata) {
+    // The error status depends on the type of error so we let the validation function choose it
+    WGPUQueueWorkDoneStatus status;
+    if (GetDevice()->ConsumedError(ValidateOnSubmittedWorkDone(signalValue, &status))) {
+        callback(status, userdata);
+        return;
     }
 
-    void QueueBase::HandleDeviceLoss() {
-        for (auto& task : mTasksInFlight.IterateAll()) {
-            task->HandleDeviceLoss();
-        }
-        mTasksInFlight.Clear();
-    }
+    std::unique_ptr<SubmittedWorkDone> task =
+        std::make_unique<SubmittedWorkDone>(callback, userdata);
 
-    void QueueBase::APIWriteBuffer(BufferBase* buffer,
-                                   uint64_t bufferOffset,
-                                   const void* data,
-                                   size_t size) {
-        GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
-    }
+    // Technically we only need to wait for previously submitted work but OnSubmittedWorkDone is
+    // also used to make sure ALL queue work is finished in tests, so we also wait for pending
+    // commands (this is non-observable outside of tests so it's ok to do deviate a bit from the
+    // spec).
+    TrackTask(std::move(task), GetDevice()->GetPendingCommandSerial());
 
-    MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
+    TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::APIOnSubmittedWorkDone", "serial",
+                 uint64_t(GetDevice()->GetPendingCommandSerial()));
+}
+
+void QueueBase::TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial) {
+    mTasksInFlight.Enqueue(std::move(task), serial);
+    GetDevice()->AddFutureSerial(serial);
+}
+
+void QueueBase::Tick(ExecutionSerial finishedSerial) {
+    // If a user calls Queue::Submit inside a task, for example in a Buffer::MapAsync callback,
+    // then the device will be ticked, which in turns ticks the queue, causing reentrance here.
+    // To prevent the reentrant call from invalidating mTasksInFlight while in use by the first
+    // call, we remove the tasks to finish from the queue, update mTasksInFlight, then run the
+    // callbacks.
+    TRACE_EVENT1(GetDevice()->GetPlatform(), General, "Queue::Tick", "finishedSerial",
+                 uint64_t(finishedSerial));
+
+    std::vector<std::unique_ptr<TaskInFlight>> tasks;
+    for (auto& task : mTasksInFlight.IterateUpTo(finishedSerial)) {
+        tasks.push_back(std::move(task));
+    }
+    mTasksInFlight.ClearUpTo(finishedSerial);
+
+    for (auto& task : tasks) {
+        task->Finish(GetDevice()->GetPlatform(), finishedSerial);
+    }
+}
+
+void QueueBase::HandleDeviceLoss() {
+    for (auto& task : mTasksInFlight.IterateAll()) {
+        task->HandleDeviceLoss();
+    }
+    mTasksInFlight.Clear();
+}
+
+void QueueBase::APIWriteBuffer(BufferBase* buffer,
+                               uint64_t bufferOffset,
+                               const void* data,
+                               size_t size) {
+    GetDevice()->ConsumedError(WriteBuffer(buffer, bufferOffset, data, size));
+}
+
+MaybeError QueueBase::WriteBuffer(BufferBase* buffer,
+                                  uint64_t bufferOffset,
+                                  const void* data,
+                                  size_t size) {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+    DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
+    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+    return WriteBufferImpl(buffer, bufferOffset, data, size);
+}
+
+MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
                                       uint64_t bufferOffset,
                                       const void* data,
                                       size_t size) {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-        DAWN_TRY(ValidateWriteBuffer(GetDevice(), buffer, bufferOffset, size));
-        DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
-        return WriteBufferImpl(buffer, bufferOffset, data, size);
+    if (size == 0) {
+        return {};
     }
 
-    MaybeError QueueBase::WriteBufferImpl(BufferBase* buffer,
-                                          uint64_t bufferOffset,
-                                          const void* data,
-                                          size_t size) {
-        if (size == 0) {
-            return {};
-        }
+    DeviceBase* device = GetDevice();
 
-        DeviceBase* device = GetDevice();
+    UploadHandle uploadHandle;
+    DAWN_TRY_ASSIGN(uploadHandle,
+                    device->GetDynamicUploader()->Allocate(size, device->GetPendingCommandSerial(),
+                                                           kCopyBufferToBufferOffsetAlignment));
+    ASSERT(uploadHandle.mappedBuffer != nullptr);
 
-        UploadHandle uploadHandle;
-        DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                          size, device->GetPendingCommandSerial(),
-                                          kCopyBufferToBufferOffsetAlignment));
-        ASSERT(uploadHandle.mappedBuffer != nullptr);
+    memcpy(uploadHandle.mappedBuffer, data, size);
 
-        memcpy(uploadHandle.mappedBuffer, data, size);
+    device->AddFutureSerial(device->GetPendingCommandSerial());
 
-        device->AddFutureSerial(device->GetPendingCommandSerial());
+    return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
+                                           buffer, bufferOffset, size);
+}
 
-        return device->CopyFromStagingToBuffer(uploadHandle.stagingBuffer, uploadHandle.startOffset,
-                                               buffer, bufferOffset, size);
-    }
+void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
+                                const void* data,
+                                size_t dataSize,
+                                const TextureDataLayout* dataLayout,
+                                const Extent3D* writeSize) {
+    GetDevice()->ConsumedError(
+        WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
+}
 
-    void QueueBase::APIWriteTexture(const ImageCopyTexture* destination,
-                                    const void* data,
-                                    size_t dataSize,
-                                    const TextureDataLayout* dataLayout,
-                                    const Extent3D* writeSize) {
-        GetDevice()->ConsumedError(
-            WriteTextureInternal(destination, data, dataSize, *dataLayout, writeSize));
-    }
-
-    MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
-                                               const void* data,
-                                               size_t dataSize,
-                                               const TextureDataLayout& dataLayout,
-                                               const Extent3D* writeSize) {
-        DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
-
-        if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
-            return {};
-        }
-
-        const TexelBlockInfo& blockInfo =
-            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
-        TextureDataLayout layout = dataLayout;
-        ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
-        return WriteTextureImpl(*destination, data, layout, *writeSize);
-    }
-
-    MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
+MaybeError QueueBase::WriteTextureInternal(const ImageCopyTexture* destination,
                                            const void* data,
+                                           size_t dataSize,
                                            const TextureDataLayout& dataLayout,
-                                           const Extent3D& writeSizePixel) {
-        const Format& format = destination.texture->GetFormat();
-        const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;
+                                           const Extent3D* writeSize) {
+    DAWN_TRY(ValidateWriteTexture(destination, dataSize, dataLayout, writeSize));
 
-        // We are only copying the part of the data that will appear in the texture.
-        // Note that validating texture copy range ensures that writeSizePixel->width and
-        // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
-        ASSERT(writeSizePixel.width % blockInfo.width == 0);
-        ASSERT(writeSizePixel.height % blockInfo.height == 0);
-        uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
-        uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;
-
-        uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
-        uint32_t optimallyAlignedBytesPerRow =
-            Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
-
-        UploadHandle uploadHandle;
-        DAWN_TRY_ASSIGN(uploadHandle,
-                        UploadTextureDataAligningBytesPerRowAndOffset(
-                            GetDevice(), data, alignedBytesPerRow, optimallyAlignedBytesPerRow,
-                            alignedRowsPerImage, dataLayout, format.HasDepthOrStencil(), blockInfo,
-                            writeSizePixel));
-
-        TextureDataLayout passDataLayout = dataLayout;
-        passDataLayout.offset = uploadHandle.startOffset;
-        passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
-        passDataLayout.rowsPerImage = alignedRowsPerImage;
-
-        TextureCopy textureCopy;
-        textureCopy.texture = destination.texture;
-        textureCopy.mipLevel = destination.mipLevel;
-        textureCopy.origin = destination.origin;
-        textureCopy.aspect = ConvertAspect(format, destination.aspect);
-
-        DeviceBase* device = GetDevice();
-
-        device->AddFutureSerial(device->GetPendingCommandSerial());
-
-        return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
-                                                &textureCopy, writeSizePixel);
+    if (writeSize->width == 0 || writeSize->height == 0 || writeSize->depthOrArrayLayers == 0) {
+        return {};
     }
 
-    void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
-                                             const ImageCopyTexture* destination,
-                                             const Extent3D* copySize,
-                                             const CopyTextureForBrowserOptions* options) {
-        GetDevice()->ConsumedError(
-            CopyTextureForBrowserInternal(source, destination, copySize, options));
+    const TexelBlockInfo& blockInfo =
+        destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+    TextureDataLayout layout = dataLayout;
+    ApplyDefaultTextureDataLayoutOptions(&layout, blockInfo, *writeSize);
+    return WriteTextureImpl(*destination, data, layout, *writeSize);
+}
+
+MaybeError QueueBase::WriteTextureImpl(const ImageCopyTexture& destination,
+                                       const void* data,
+                                       const TextureDataLayout& dataLayout,
+                                       const Extent3D& writeSizePixel) {
+    const Format& format = destination.texture->GetFormat();
+    const TexelBlockInfo& blockInfo = format.GetAspectInfo(destination.aspect).block;
+
+    // We are only copying the part of the data that will appear in the texture.
+    // Note that validating texture copy range ensures that writeSizePixel->width and
+    // writeSizePixel->height are multiples of blockWidth and blockHeight respectively.
+    ASSERT(writeSizePixel.width % blockInfo.width == 0);
+    ASSERT(writeSizePixel.height % blockInfo.height == 0);
+    uint32_t alignedBytesPerRow = writeSizePixel.width / blockInfo.width * blockInfo.byteSize;
+    uint32_t alignedRowsPerImage = writeSizePixel.height / blockInfo.height;
+
+    uint32_t optimalBytesPerRowAlignment = GetDevice()->GetOptimalBytesPerRowAlignment();
+    uint32_t optimallyAlignedBytesPerRow = Align(alignedBytesPerRow, optimalBytesPerRowAlignment);
+
+    UploadHandle uploadHandle;
+    DAWN_TRY_ASSIGN(uploadHandle, UploadTextureDataAligningBytesPerRowAndOffset(
+                                      GetDevice(), data, alignedBytesPerRow,
+                                      optimallyAlignedBytesPerRow, alignedRowsPerImage, dataLayout,
+                                      format.HasDepthOrStencil(), blockInfo, writeSizePixel));
+
+    TextureDataLayout passDataLayout = dataLayout;
+    passDataLayout.offset = uploadHandle.startOffset;
+    passDataLayout.bytesPerRow = optimallyAlignedBytesPerRow;
+    passDataLayout.rowsPerImage = alignedRowsPerImage;
+
+    TextureCopy textureCopy;
+    textureCopy.texture = destination.texture;
+    textureCopy.mipLevel = destination.mipLevel;
+    textureCopy.origin = destination.origin;
+    textureCopy.aspect = ConvertAspect(format, destination.aspect);
+
+    DeviceBase* device = GetDevice();
+
+    device->AddFutureSerial(device->GetPendingCommandSerial());
+
+    return device->CopyFromStagingToTexture(uploadHandle.stagingBuffer, passDataLayout,
+                                            &textureCopy, writeSizePixel);
+}
+
+void QueueBase::APICopyTextureForBrowser(const ImageCopyTexture* source,
+                                         const ImageCopyTexture* destination,
+                                         const Extent3D* copySize,
+                                         const CopyTextureForBrowserOptions* options) {
+    GetDevice()->ConsumedError(
+        CopyTextureForBrowserInternal(source, destination, copySize, options));
+}
+
+MaybeError QueueBase::CopyTextureForBrowserInternal(const ImageCopyTexture* source,
+                                                    const ImageCopyTexture* destination,
+                                                    const Extent3D* copySize,
+                                                    const CopyTextureForBrowserOptions* options) {
+    if (GetDevice()->IsValidationEnabled()) {
+        DAWN_TRY_CONTEXT(
+            ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
+            "validating CopyTextureForBrowser from %s to %s", source->texture,
+            destination->texture);
     }
 
-    MaybeError QueueBase::CopyTextureForBrowserInternal(
-        const ImageCopyTexture* source,
-        const ImageCopyTexture* destination,
-        const Extent3D* copySize,
-        const CopyTextureForBrowserOptions* options) {
-        if (GetDevice()->IsValidationEnabled()) {
-            DAWN_TRY_CONTEXT(
-                ValidateCopyTextureForBrowser(GetDevice(), source, destination, copySize, options),
-                "validating CopyTextureForBrowser from %s to %s", source->texture,
-                destination->texture);
-        }
+    return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
+}
 
-        return DoCopyTextureForBrowser(GetDevice(), source, destination, copySize, options);
-    }
+MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
+                                     CommandBufferBase* const* commands) const {
+    TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
+    DAWN_TRY(GetDevice()->ValidateObject(this));
 
-    MaybeError QueueBase::ValidateSubmit(uint32_t commandCount,
-                                         CommandBufferBase* const* commands) const {
-        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "Queue::ValidateSubmit");
-        DAWN_TRY(GetDevice()->ValidateObject(this));
+    for (uint32_t i = 0; i < commandCount; ++i) {
+        DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
+        DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());
 
-        for (uint32_t i = 0; i < commandCount; ++i) {
-            DAWN_TRY(GetDevice()->ValidateObject(commands[i]));
-            DAWN_TRY(commands[i]->ValidateCanUseInSubmitNow());
+        const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
 
-            const CommandBufferResourceUsage& usages = commands[i]->GetResourceUsages();
-
-            for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
-                for (const BufferBase* buffer : scope.buffers) {
-                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
-                }
-
-                for (const TextureBase* texture : scope.textures) {
-                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
-                }
-
-                for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
-                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
-                }
-            }
-
-            for (const ComputePassResourceUsage& pass : usages.computePasses) {
-                for (const BufferBase* buffer : pass.referencedBuffers) {
-                    DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
-                }
-                for (const TextureBase* texture : pass.referencedTextures) {
-                    DAWN_TRY(texture->ValidateCanUseInSubmitNow());
-                }
-                for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
-                    DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
-                }
-            }
-
-            for (const BufferBase* buffer : usages.topLevelBuffers) {
+        for (const SyncScopeResourceUsage& scope : usages.renderPasses) {
+            for (const BufferBase* buffer : scope.buffers) {
                 DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
             }
-            for (const TextureBase* texture : usages.topLevelTextures) {
+
+            for (const TextureBase* texture : scope.textures) {
                 DAWN_TRY(texture->ValidateCanUseInSubmitNow());
             }
-            for (const QuerySetBase* querySet : usages.usedQuerySets) {
-                DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
+
+            for (const ExternalTextureBase* externalTexture : scope.externalTextures) {
+                DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
             }
         }
 
-        return {};
-    }
-
-    MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
-                                                      WGPUQueueWorkDoneStatus* status) const {
-        *status = WGPUQueueWorkDoneStatus_DeviceLost;
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-
-        *status = WGPUQueueWorkDoneStatus_Error;
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-
-        DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);
-
-        return {};
-    }
-
-    MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
-                                               size_t dataSize,
-                                               const TextureDataLayout& dataLayout,
-                                               const Extent3D* writeSize) const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-        DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
-
-        DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));
-
-        DAWN_INVALID_IF(dataLayout.offset > dataSize,
-                        "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
-                        dataSize);
-
-        DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
-                        "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
-                        destination->texture, wgpu::TextureUsage::CopyDst);
-
-        DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1,
-                        "Sample count (%u) of %s is not 1", destination->texture->GetSampleCount(),
-                        destination->texture);
-
-        DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
-        // We validate texture copy range before validating linear texture data,
-        // because in the latter we divide copyExtent.width by blockWidth and
-        // copyExtent.height by blockHeight while the divisibility conditions are
-        // checked in validating texture copy range.
-        DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));
-
-        const TexelBlockInfo& blockInfo =
-            destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
-
-        DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));
-
-        DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());
-
-        return {};
-    }
-
-    void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
-        DeviceBase* device = GetDevice();
-        if (device->ConsumedError(device->ValidateIsAlive())) {
-            // If device is lost, don't let any commands be submitted
-            return;
+        for (const ComputePassResourceUsage& pass : usages.computePasses) {
+            for (const BufferBase* buffer : pass.referencedBuffers) {
+                DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
+            }
+            for (const TextureBase* texture : pass.referencedTextures) {
+                DAWN_TRY(texture->ValidateCanUseInSubmitNow());
+            }
+            for (const ExternalTextureBase* externalTexture : pass.referencedExternalTextures) {
+                DAWN_TRY(externalTexture->ValidateCanUseInSubmitNow());
+            }
         }
 
-        TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
-        if (device->IsValidationEnabled() &&
-            device->ConsumedError(ValidateSubmit(commandCount, commands))) {
-            return;
+        for (const BufferBase* buffer : usages.topLevelBuffers) {
+            DAWN_TRY(buffer->ValidateCanUseOnQueueNow());
         }
-        ASSERT(!IsError());
-
-        if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
-            return;
+        for (const TextureBase* texture : usages.topLevelTextures) {
+            DAWN_TRY(texture->ValidateCanUseInSubmitNow());
+        }
+        for (const QuerySetBase* querySet : usages.usedQuerySets) {
+            DAWN_TRY(querySet->ValidateCanUseInSubmitNow());
         }
     }
 
+    return {};
+}
+
+MaybeError QueueBase::ValidateOnSubmittedWorkDone(uint64_t signalValue,
+                                                  WGPUQueueWorkDoneStatus* status) const {
+    *status = WGPUQueueWorkDoneStatus_DeviceLost;
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+
+    *status = WGPUQueueWorkDoneStatus_Error;
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    DAWN_INVALID_IF(signalValue != 0, "SignalValue (%u) is not 0.", signalValue);
+
+    return {};
+}
+
+MaybeError QueueBase::ValidateWriteTexture(const ImageCopyTexture* destination,
+                                           size_t dataSize,
+                                           const TextureDataLayout& dataLayout,
+                                           const Extent3D* writeSize) const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+    DAWN_TRY(GetDevice()->ValidateObject(destination->texture));
+
+    DAWN_TRY(ValidateImageCopyTexture(GetDevice(), *destination, *writeSize));
+
+    DAWN_INVALID_IF(dataLayout.offset > dataSize,
+                    "Data offset (%u) is greater than the data size (%u).", dataLayout.offset,
+                    dataSize);
+
+    DAWN_INVALID_IF(!(destination->texture->GetUsage() & wgpu::TextureUsage::CopyDst),
+                    "Usage (%s) of %s does not include %s.", destination->texture->GetUsage(),
+                    destination->texture, wgpu::TextureUsage::CopyDst);
+
+    DAWN_INVALID_IF(destination->texture->GetSampleCount() > 1, "Sample count (%u) of %s is not 1",
+                    destination->texture->GetSampleCount(), destination->texture);
+
+    DAWN_TRY(ValidateLinearToDepthStencilCopyRestrictions(*destination));
+    // We validate texture copy range before validating linear texture data,
+    // because in the latter we divide copyExtent.width by blockWidth and
+    // copyExtent.height by blockHeight while the divisibility conditions are
+    // checked in validating texture copy range.
+    DAWN_TRY(ValidateTextureCopyRange(GetDevice(), *destination, *writeSize));
+
+    const TexelBlockInfo& blockInfo =
+        destination->texture->GetFormat().GetAspectInfo(destination->aspect).block;
+
+    DAWN_TRY(ValidateLinearTextureData(dataLayout, dataSize, blockInfo, *writeSize));
+
+    DAWN_TRY(destination->texture->ValidateCanUseInSubmitNow());
+
+    return {};
+}
+
+void QueueBase::SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands) {
+    DeviceBase* device = GetDevice();
+    if (device->ConsumedError(device->ValidateIsAlive())) {
+        // If device is lost, don't let any commands be submitted
+        return;
+    }
+
+    TRACE_EVENT0(device->GetPlatform(), General, "Queue::Submit");
+    if (device->IsValidationEnabled() &&
+        device->ConsumedError(ValidateSubmit(commandCount, commands))) {
+        return;
+    }
+    ASSERT(!IsError());
+
+    if (device->ConsumedError(SubmitImpl(commandCount, commands))) {
+        return;
+    }
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/Queue.h b/src/dawn/native/Queue.h
index ea3ffb4..5bfd9a0 100644
--- a/src/dawn/native/Queue.h
+++ b/src/dawn/native/Queue.h
@@ -29,87 +29,83 @@
 
 namespace dawn::native {
 
-    class QueueBase : public ApiObjectBase {
-      public:
-        struct TaskInFlight {
-            virtual ~TaskInFlight();
-            virtual void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) = 0;
-            virtual void HandleDeviceLoss() = 0;
-        };
-
-        ~QueueBase() override;
-
-        static QueueBase* MakeError(DeviceBase* device);
-
-        ObjectType GetType() const override;
-
-        // Dawn API
-        void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
-        void APIOnSubmittedWorkDone(uint64_t signalValue,
-                                    WGPUQueueWorkDoneCallback callback,
-                                    void* userdata);
-        void APIWriteBuffer(BufferBase* buffer,
-                            uint64_t bufferOffset,
-                            const void* data,
-                            size_t size);
-        void APIWriteTexture(const ImageCopyTexture* destination,
-                             const void* data,
-                             size_t dataSize,
-                             const TextureDataLayout* dataLayout,
-                             const Extent3D* writeSize);
-        void APICopyTextureForBrowser(const ImageCopyTexture* source,
-                                      const ImageCopyTexture* destination,
-                                      const Extent3D* copySize,
-                                      const CopyTextureForBrowserOptions* options);
-
-        MaybeError WriteBuffer(BufferBase* buffer,
-                               uint64_t bufferOffset,
-                               const void* data,
-                               size_t size);
-        void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
-        void Tick(ExecutionSerial finishedSerial);
-        void HandleDeviceLoss();
-
-      protected:
-        QueueBase(DeviceBase* device, const QueueDescriptor* descriptor);
-        QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-        void DestroyImpl() override;
-
-      private:
-        MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
-                                        const void* data,
-                                        size_t dataSize,
-                                        const TextureDataLayout& dataLayout,
-                                        const Extent3D* writeSize);
-        MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
-                                                 const ImageCopyTexture* destination,
-                                                 const Extent3D* copySize,
-                                                 const CopyTextureForBrowserOptions* options);
-
-        virtual MaybeError SubmitImpl(uint32_t commandCount,
-                                      CommandBufferBase* const* commands) = 0;
-        virtual MaybeError WriteBufferImpl(BufferBase* buffer,
-                                           uint64_t bufferOffset,
-                                           const void* data,
-                                           size_t size);
-        virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
-                                            const void* data,
-                                            const TextureDataLayout& dataLayout,
-                                            const Extent3D& writeSize);
-
-        MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
-        MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
-                                               WGPUQueueWorkDoneStatus* status) const;
-        MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
-                                        size_t dataSize,
-                                        const TextureDataLayout& dataLayout,
-                                        const Extent3D* writeSize) const;
-
-        void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
-
-        SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
+class QueueBase : public ApiObjectBase {
+  public:
+    struct TaskInFlight {
+        virtual ~TaskInFlight();
+        virtual void Finish(dawn::platform::Platform* platform, ExecutionSerial serial) = 0;
+        virtual void HandleDeviceLoss() = 0;
     };
 
+    ~QueueBase() override;
+
+    static QueueBase* MakeError(DeviceBase* device);
+
+    ObjectType GetType() const override;
+
+    // Dawn API
+    void APISubmit(uint32_t commandCount, CommandBufferBase* const* commands);
+    void APIOnSubmittedWorkDone(uint64_t signalValue,
+                                WGPUQueueWorkDoneCallback callback,
+                                void* userdata);
+    void APIWriteBuffer(BufferBase* buffer, uint64_t bufferOffset, const void* data, size_t size);
+    void APIWriteTexture(const ImageCopyTexture* destination,
+                         const void* data,
+                         size_t dataSize,
+                         const TextureDataLayout* dataLayout,
+                         const Extent3D* writeSize);
+    void APICopyTextureForBrowser(const ImageCopyTexture* source,
+                                  const ImageCopyTexture* destination,
+                                  const Extent3D* copySize,
+                                  const CopyTextureForBrowserOptions* options);
+
+    MaybeError WriteBuffer(BufferBase* buffer,
+                           uint64_t bufferOffset,
+                           const void* data,
+                           size_t size);
+    void TrackTask(std::unique_ptr<TaskInFlight> task, ExecutionSerial serial);
+    void Tick(ExecutionSerial finishedSerial);
+    void HandleDeviceLoss();
+
+  protected:
+    QueueBase(DeviceBase* device, const QueueDescriptor* descriptor);
+    QueueBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    void DestroyImpl() override;
+
+  private:
+    MaybeError WriteTextureInternal(const ImageCopyTexture* destination,
+                                    const void* data,
+                                    size_t dataSize,
+                                    const TextureDataLayout& dataLayout,
+                                    const Extent3D* writeSize);
+    MaybeError CopyTextureForBrowserInternal(const ImageCopyTexture* source,
+                                             const ImageCopyTexture* destination,
+                                             const Extent3D* copySize,
+                                             const CopyTextureForBrowserOptions* options);
+
+    virtual MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) = 0;
+    virtual MaybeError WriteBufferImpl(BufferBase* buffer,
+                                       uint64_t bufferOffset,
+                                       const void* data,
+                                       size_t size);
+    virtual MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
+                                        const void* data,
+                                        const TextureDataLayout& dataLayout,
+                                        const Extent3D& writeSize);
+
+    MaybeError ValidateSubmit(uint32_t commandCount, CommandBufferBase* const* commands) const;
+    MaybeError ValidateOnSubmittedWorkDone(uint64_t signalValue,
+                                           WGPUQueueWorkDoneStatus* status) const;
+    MaybeError ValidateWriteTexture(const ImageCopyTexture* destination,
+                                    size_t dataSize,
+                                    const TextureDataLayout& dataLayout,
+                                    const Extent3D* writeSize) const;
+
+    void SubmitInternal(uint32_t commandCount, CommandBufferBase* const* commands);
+
+    SerialQueue<ExecutionSerial, std::unique_ptr<TaskInFlight>> mTasksInFlight;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_QUEUE_H_
diff --git a/src/dawn/native/RenderBundle.cpp b/src/dawn/native/RenderBundle.cpp
index 730fc66..2781983d 100644
--- a/src/dawn/native/RenderBundle.cpp
+++ b/src/dawn/native/RenderBundle.cpp
@@ -24,70 +24,69 @@
 
 namespace dawn::native {
 
-    RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
-                                       const RenderBundleDescriptor* descriptor,
-                                       Ref<AttachmentState> attachmentState,
-                                       bool depthReadOnly,
-                                       bool stencilReadOnly,
-                                       RenderPassResourceUsage resourceUsage,
-                                       IndirectDrawMetadata indirectDrawMetadata)
-        : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
-          mCommands(encoder->AcquireCommands()),
-          mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
-          mAttachmentState(std::move(attachmentState)),
-          mDepthReadOnly(depthReadOnly),
-          mStencilReadOnly(stencilReadOnly),
-          mResourceUsage(std::move(resourceUsage)) {
-        TrackInDevice();
-    }
+RenderBundleBase::RenderBundleBase(RenderBundleEncoder* encoder,
+                                   const RenderBundleDescriptor* descriptor,
+                                   Ref<AttachmentState> attachmentState,
+                                   bool depthReadOnly,
+                                   bool stencilReadOnly,
+                                   RenderPassResourceUsage resourceUsage,
+                                   IndirectDrawMetadata indirectDrawMetadata)
+    : ApiObjectBase(encoder->GetDevice(), kLabelNotImplemented),
+      mCommands(encoder->AcquireCommands()),
+      mIndirectDrawMetadata(std::move(indirectDrawMetadata)),
+      mAttachmentState(std::move(attachmentState)),
+      mDepthReadOnly(depthReadOnly),
+      mStencilReadOnly(stencilReadOnly),
+      mResourceUsage(std::move(resourceUsage)) {
+    TrackInDevice();
+}
 
-    void RenderBundleBase::DestroyImpl() {
-        FreeCommands(&mCommands);
+void RenderBundleBase::DestroyImpl() {
+    FreeCommands(&mCommands);
 
-        // Remove reference to the attachment state so that we don't have lingering references to
-        // it preventing it from being uncached in the device.
-        mAttachmentState = nullptr;
-    }
+    // Remove reference to the attachment state so that we don't have lingering references to
+    // it preventing it from being uncached in the device.
+    mAttachmentState = nullptr;
+}
 
-    // static
-    RenderBundleBase* RenderBundleBase::MakeError(DeviceBase* device) {
-        return new RenderBundleBase(device, ObjectBase::kError);
-    }
+// static
+RenderBundleBase* RenderBundleBase::MakeError(DeviceBase* device) {
+    return new RenderBundleBase(device, ObjectBase::kError);
+}
 
-    RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
-        : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {
-    }
+RenderBundleBase::RenderBundleBase(DeviceBase* device, ErrorTag errorTag)
+    : ApiObjectBase(device, errorTag), mIndirectDrawMetadata(device->GetLimits()) {}
 
-    ObjectType RenderBundleBase::GetType() const {
-        return ObjectType::RenderBundle;
-    }
+ObjectType RenderBundleBase::GetType() const {
+    return ObjectType::RenderBundle;
+}
 
-    CommandIterator* RenderBundleBase::GetCommands() {
-        return &mCommands;
-    }
+CommandIterator* RenderBundleBase::GetCommands() {
+    return &mCommands;
+}
 
-    const AttachmentState* RenderBundleBase::GetAttachmentState() const {
-        ASSERT(!IsError());
-        return mAttachmentState.Get();
-    }
+const AttachmentState* RenderBundleBase::GetAttachmentState() const {
+    ASSERT(!IsError());
+    return mAttachmentState.Get();
+}
 
-    bool RenderBundleBase::IsDepthReadOnly() const {
-        ASSERT(!IsError());
-        return mDepthReadOnly;
-    }
+bool RenderBundleBase::IsDepthReadOnly() const {
+    ASSERT(!IsError());
+    return mDepthReadOnly;
+}
 
-    bool RenderBundleBase::IsStencilReadOnly() const {
-        ASSERT(!IsError());
-        return mStencilReadOnly;
-    }
+bool RenderBundleBase::IsStencilReadOnly() const {
+    ASSERT(!IsError());
+    return mStencilReadOnly;
+}
 
-    const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
-        ASSERT(!IsError());
-        return mResourceUsage;
-    }
+const RenderPassResourceUsage& RenderBundleBase::GetResourceUsage() const {
+    ASSERT(!IsError());
+    return mResourceUsage;
+}
 
-    const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
-        return mIndirectDrawMetadata;
-    }
+const IndirectDrawMetadata& RenderBundleBase::GetIndirectDrawMetadata() {
+    return mIndirectDrawMetadata;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/RenderBundle.h b/src/dawn/native/RenderBundle.h
index dceb4ee..9297e01 100644
--- a/src/dawn/native/RenderBundle.h
+++ b/src/dawn/native/RenderBundle.h
@@ -30,43 +30,43 @@
 
 namespace dawn::native {
 
-    struct RenderBundleDescriptor;
-    class RenderBundleEncoder;
+struct RenderBundleDescriptor;
+class RenderBundleEncoder;
 
-    class RenderBundleBase final : public ApiObjectBase {
-      public:
-        RenderBundleBase(RenderBundleEncoder* encoder,
-                         const RenderBundleDescriptor* descriptor,
-                         Ref<AttachmentState> attachmentState,
-                         bool depthReadOnly,
-                         bool stencilReadOnly,
-                         RenderPassResourceUsage resourceUsage,
-                         IndirectDrawMetadata indirectDrawMetadata);
+class RenderBundleBase final : public ApiObjectBase {
+  public:
+    RenderBundleBase(RenderBundleEncoder* encoder,
+                     const RenderBundleDescriptor* descriptor,
+                     Ref<AttachmentState> attachmentState,
+                     bool depthReadOnly,
+                     bool stencilReadOnly,
+                     RenderPassResourceUsage resourceUsage,
+                     IndirectDrawMetadata indirectDrawMetadata);
 
-        static RenderBundleBase* MakeError(DeviceBase* device);
+    static RenderBundleBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        CommandIterator* GetCommands();
+    CommandIterator* GetCommands();
 
-        const AttachmentState* GetAttachmentState() const;
-        bool IsDepthReadOnly() const;
-        bool IsStencilReadOnly() const;
-        const RenderPassResourceUsage& GetResourceUsage() const;
-        const IndirectDrawMetadata& GetIndirectDrawMetadata();
+    const AttachmentState* GetAttachmentState() const;
+    bool IsDepthReadOnly() const;
+    bool IsStencilReadOnly() const;
+    const RenderPassResourceUsage& GetResourceUsage() const;
+    const IndirectDrawMetadata& GetIndirectDrawMetadata();
 
-      private:
-        RenderBundleBase(DeviceBase* device, ErrorTag errorTag);
+  private:
+    RenderBundleBase(DeviceBase* device, ErrorTag errorTag);
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        CommandIterator mCommands;
-        IndirectDrawMetadata mIndirectDrawMetadata;
-        Ref<AttachmentState> mAttachmentState;
-        bool mDepthReadOnly;
-        bool mStencilReadOnly;
-        RenderPassResourceUsage mResourceUsage;
-    };
+    CommandIterator mCommands;
+    IndirectDrawMetadata mIndirectDrawMetadata;
+    Ref<AttachmentState> mAttachmentState;
+    bool mDepthReadOnly;
+    bool mStencilReadOnly;
+    RenderPassResourceUsage mResourceUsage;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/RenderBundleEncoder.cpp b/src/dawn/native/RenderBundleEncoder.cpp
index 81fd0f9..56a9d66 100644
--- a/src/dawn/native/RenderBundleEncoder.cpp
+++ b/src/dawn/native/RenderBundleEncoder.cpp
@@ -28,147 +28,144 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
-                                             wgpu::TextureFormat textureFormat) {
-        DAWN_TRY(ValidateTextureFormat(textureFormat));
-        const Format* format = nullptr;
-        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
-        DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
-                        "Texture format %s is not color renderable.", textureFormat);
-        return {};
+MaybeError ValidateColorAttachmentFormat(const DeviceBase* device,
+                                         wgpu::TextureFormat textureFormat) {
+    DAWN_TRY(ValidateTextureFormat(textureFormat));
+    const Format* format = nullptr;
+    DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
+    DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+                    "Texture format %s is not color renderable.", textureFormat);
+    return {};
+}
+
+MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
+                                                wgpu::TextureFormat textureFormat,
+                                                bool depthReadOnly,
+                                                bool stencilReadOnly) {
+    DAWN_TRY(ValidateTextureFormat(textureFormat));
+    const Format* format = nullptr;
+    DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
+    DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+                    "Texture format %s is not depth/stencil renderable.", textureFormat);
+
+    DAWN_INVALID_IF(
+        format->HasDepth() && format->HasStencil() && depthReadOnly != stencilReadOnly,
+        "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when format %s has "
+        "both depth and stencil aspects.",
+        depthReadOnly, stencilReadOnly, textureFormat);
+
+    return {};
+}
+
+MaybeError ValidateRenderBundleEncoderDescriptor(const DeviceBase* device,
+                                                 const RenderBundleEncoderDescriptor* descriptor) {
+    DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+                    "Sample count (%u) is not supported.", descriptor->sampleCount);
+
+    DAWN_INVALID_IF(descriptor->colorFormatsCount > kMaxColorAttachments,
+                    "Color formats count (%u) exceeds maximum number of color attachements (%u).",
+                    descriptor->colorFormatsCount, kMaxColorAttachments);
+
+    bool allColorFormatsUndefined = true;
+    for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
+        wgpu::TextureFormat format = descriptor->colorFormats[i];
+        if (format != wgpu::TextureFormat::Undefined) {
+            DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, format),
+                             "validating colorFormats[%u]", i);
+            allColorFormatsUndefined = false;
+        }
     }
 
-    MaybeError ValidateDepthStencilAttachmentFormat(const DeviceBase* device,
-                                                    wgpu::TextureFormat textureFormat,
-                                                    bool depthReadOnly,
-                                                    bool stencilReadOnly) {
-        DAWN_TRY(ValidateTextureFormat(textureFormat));
-        const Format* format = nullptr;
-        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(textureFormat));
-        DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
-                        "Texture format %s is not depth/stencil renderable.", textureFormat);
-
+    if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
+        DAWN_TRY_CONTEXT(ValidateDepthStencilAttachmentFormat(
+                             device, descriptor->depthStencilFormat, descriptor->depthReadOnly,
+                             descriptor->stencilReadOnly),
+                         "validating depthStencilFormat");
+    } else {
         DAWN_INVALID_IF(
-            format->HasDepth() && format->HasStencil() && depthReadOnly != stencilReadOnly,
-            "depthReadOnly (%u) and stencilReadOnly (%u) must be the same when format %s has "
-            "both depth and stencil aspects.",
-            depthReadOnly, stencilReadOnly, textureFormat);
-
-        return {};
+            allColorFormatsUndefined,
+            "No color or depthStencil attachments specified. At least one is required.");
     }
 
-    MaybeError ValidateRenderBundleEncoderDescriptor(
-        const DeviceBase* device,
-        const RenderBundleEncoderDescriptor* descriptor) {
-        DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
-                        "Sample count (%u) is not supported.", descriptor->sampleCount);
+    return {};
+}
 
-        DAWN_INVALID_IF(
-            descriptor->colorFormatsCount > kMaxColorAttachments,
-            "Color formats count (%u) exceeds maximum number of color attachements (%u).",
-            descriptor->colorFormatsCount, kMaxColorAttachments);
+RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
+                                         const RenderBundleEncoderDescriptor* descriptor)
+    : RenderEncoderBase(device,
+                        descriptor->label,
+                        &mBundleEncodingContext,
+                        device->GetOrCreateAttachmentState(descriptor),
+                        descriptor->depthReadOnly,
+                        descriptor->stencilReadOnly),
+      mBundleEncodingContext(device, this) {
+    TrackInDevice();
+}
 
-        bool allColorFormatsUndefined = true;
-        for (uint32_t i = 0; i < descriptor->colorFormatsCount; ++i) {
-            wgpu::TextureFormat format = descriptor->colorFormats[i];
-            if (format != wgpu::TextureFormat::Undefined) {
-                DAWN_TRY_CONTEXT(ValidateColorAttachmentFormat(device, format),
-                                 "validating colorFormats[%u]", i);
-                allColorFormatsUndefined = false;
-            }
-        }
+RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
+    : RenderEncoderBase(device, &mBundleEncodingContext, errorTag),
+      mBundleEncodingContext(device, this) {}
 
-        if (descriptor->depthStencilFormat != wgpu::TextureFormat::Undefined) {
-            DAWN_TRY_CONTEXT(ValidateDepthStencilAttachmentFormat(
-                                 device, descriptor->depthStencilFormat, descriptor->depthReadOnly,
-                                 descriptor->stencilReadOnly),
-                             "validating depthStencilFormat");
-        } else {
-            DAWN_INVALID_IF(
-                allColorFormatsUndefined,
-                "No color or depthStencil attachments specified. At least one is required.");
-        }
+void RenderBundleEncoder::DestroyImpl() {
+    RenderEncoderBase::DestroyImpl();
+    mBundleEncodingContext.Destroy();
+}
 
-        return {};
+// static
+Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
+    DeviceBase* device,
+    const RenderBundleEncoderDescriptor* descriptor) {
+    return AcquireRef(new RenderBundleEncoder(device, descriptor));
+}
+
+// static
+RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
+    return new RenderBundleEncoder(device, ObjectBase::kError);
+}
+
+ObjectType RenderBundleEncoder::GetType() const {
+    return ObjectType::RenderBundleEncoder;
+}
+
+CommandIterator RenderBundleEncoder::AcquireCommands() {
+    return mBundleEncodingContext.AcquireCommands();
+}
+
+RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
+    RenderBundleBase* result = nullptr;
+
+    if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result, "calling %s.Finish(%s).", this,
+                                   descriptor)) {
+        return RenderBundleBase::MakeError(GetDevice());
     }
 
-    RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device,
-                                             const RenderBundleEncoderDescriptor* descriptor)
-        : RenderEncoderBase(device,
-                            descriptor->label,
-                            &mBundleEncodingContext,
-                            device->GetOrCreateAttachmentState(descriptor),
-                            descriptor->depthReadOnly,
-                            descriptor->stencilReadOnly),
-          mBundleEncodingContext(device, this) {
-        TrackInDevice();
-    }
+    return result;
+}
 
-    RenderBundleEncoder::RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag)
-        : RenderEncoderBase(device, &mBundleEncodingContext, errorTag),
-          mBundleEncodingContext(device, this) {
-    }
+ResultOrError<RenderBundleBase*> RenderBundleEncoder::FinishImpl(
+    const RenderBundleDescriptor* descriptor) {
+    // Even if mBundleEncodingContext.Finish() validation fails, calling it will mutate the
+    // internal state of the encoding context. Subsequent calls to encode commands will generate
+    // errors.
+    DAWN_TRY(mBundleEncodingContext.Finish());
 
-    void RenderBundleEncoder::DestroyImpl() {
-        RenderEncoderBase::DestroyImpl();
-        mBundleEncodingContext.Destroy();
-    }
-
-    // static
-    Ref<RenderBundleEncoder> RenderBundleEncoder::Create(
-        DeviceBase* device,
-        const RenderBundleEncoderDescriptor* descriptor) {
-        return AcquireRef(new RenderBundleEncoder(device, descriptor));
-    }
-
-    // static
-    RenderBundleEncoder* RenderBundleEncoder::MakeError(DeviceBase* device) {
-        return new RenderBundleEncoder(device, ObjectBase::kError);
-    }
-
-    ObjectType RenderBundleEncoder::GetType() const {
-        return ObjectType::RenderBundleEncoder;
-    }
-
-    CommandIterator RenderBundleEncoder::AcquireCommands() {
-        return mBundleEncodingContext.AcquireCommands();
-    }
-
-    RenderBundleBase* RenderBundleEncoder::APIFinish(const RenderBundleDescriptor* descriptor) {
-        RenderBundleBase* result = nullptr;
-
-        if (GetDevice()->ConsumedError(FinishImpl(descriptor), &result, "calling %s.Finish(%s).",
-                                       this, descriptor)) {
-            return RenderBundleBase::MakeError(GetDevice());
-        }
-
-        return result;
-    }
-
-    ResultOrError<RenderBundleBase*> RenderBundleEncoder::FinishImpl(
-        const RenderBundleDescriptor* descriptor) {
-        // Even if mBundleEncodingContext.Finish() validation fails, calling it will mutate the
-        // internal state of the encoding context. Subsequent calls to encode commands will generate
-        // errors.
-        DAWN_TRY(mBundleEncodingContext.Finish());
-
-        RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
-        if (IsValidationEnabled()) {
-            DAWN_TRY(GetDevice()->ValidateObject(this));
-            DAWN_TRY(ValidateProgrammableEncoderEnd());
-            DAWN_TRY(ValidateFinish(usages));
-        }
-
-        return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), IsDepthReadOnly(),
-                                    IsStencilReadOnly(), std::move(usages),
-                                    std::move(mIndirectDrawMetadata));
-    }
-
-    MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
-        TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
+    RenderPassResourceUsage usages = mUsageTracker.AcquireResourceUsage();
+    if (IsValidationEnabled()) {
         DAWN_TRY(GetDevice()->ValidateObject(this));
-        DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
-        return {};
+        DAWN_TRY(ValidateProgrammableEncoderEnd());
+        DAWN_TRY(ValidateFinish(usages));
     }
 
+    return new RenderBundleBase(this, descriptor, AcquireAttachmentState(), IsDepthReadOnly(),
+                                IsStencilReadOnly(), std::move(usages),
+                                std::move(mIndirectDrawMetadata));
+}
+
+MaybeError RenderBundleEncoder::ValidateFinish(const RenderPassResourceUsage& usages) const {
+    TRACE_EVENT0(GetDevice()->GetPlatform(), Validation, "RenderBundleEncoder::ValidateFinish");
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+    DAWN_TRY(ValidateSyncScopeResourceUsage(usages));
+    return {};
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/RenderBundleEncoder.h b/src/dawn/native/RenderBundleEncoder.h
index 53ab119..24ee19e 100644
--- a/src/dawn/native/RenderBundleEncoder.h
+++ b/src/dawn/native/RenderBundleEncoder.h
@@ -23,33 +23,32 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateRenderBundleEncoderDescriptor(
-        const DeviceBase* device,
-        const RenderBundleEncoderDescriptor* descriptor);
+MaybeError ValidateRenderBundleEncoderDescriptor(const DeviceBase* device,
+                                                 const RenderBundleEncoderDescriptor* descriptor);
 
-    class RenderBundleEncoder final : public RenderEncoderBase {
-      public:
-        static Ref<RenderBundleEncoder> Create(DeviceBase* device,
-                                               const RenderBundleEncoderDescriptor* descriptor);
-        static RenderBundleEncoder* MakeError(DeviceBase* device);
+class RenderBundleEncoder final : public RenderEncoderBase {
+  public:
+    static Ref<RenderBundleEncoder> Create(DeviceBase* device,
+                                           const RenderBundleEncoderDescriptor* descriptor);
+    static RenderBundleEncoder* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
+    RenderBundleBase* APIFinish(const RenderBundleDescriptor* descriptor);
 
-        CommandIterator AcquireCommands();
+    CommandIterator AcquireCommands();
 
-      private:
-        RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
-        RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
+  private:
+    RenderBundleEncoder(DeviceBase* device, const RenderBundleEncoderDescriptor* descriptor);
+    RenderBundleEncoder(DeviceBase* device, ErrorTag errorTag);
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
-        MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
+    ResultOrError<RenderBundleBase*> FinishImpl(const RenderBundleDescriptor* descriptor);
+    MaybeError ValidateFinish(const RenderPassResourceUsage& usages) const;
 
-        EncodingContext mBundleEncodingContext;
-    };
+    EncodingContext mBundleEncodingContext;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/RenderEncoderBase.cpp b/src/dawn/native/RenderEncoderBase.cpp
index 87cd32a..06e1458 100644
--- a/src/dawn/native/RenderEncoderBase.cpp
+++ b/src/dawn/native/RenderEncoderBase.cpp
@@ -30,409 +30,403 @@
 
 namespace dawn::native {
 
-    RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
-                                         const char* label,
-                                         EncodingContext* encodingContext,
-                                         Ref<AttachmentState> attachmentState,
-                                         bool depthReadOnly,
-                                         bool stencilReadOnly)
-        : ProgrammableEncoder(device, label, encodingContext),
-          mIndirectDrawMetadata(device->GetLimits()),
-          mAttachmentState(std::move(attachmentState)),
-          mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
-          mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
-        mDepthReadOnly = depthReadOnly;
-        mStencilReadOnly = stencilReadOnly;
-    }
+RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+                                     const char* label,
+                                     EncodingContext* encodingContext,
+                                     Ref<AttachmentState> attachmentState,
+                                     bool depthReadOnly,
+                                     bool stencilReadOnly)
+    : ProgrammableEncoder(device, label, encodingContext),
+      mIndirectDrawMetadata(device->GetLimits()),
+      mAttachmentState(std::move(attachmentState)),
+      mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+      mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
+    mDepthReadOnly = depthReadOnly;
+    mStencilReadOnly = stencilReadOnly;
+}
 
-    RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
-                                         EncodingContext* encodingContext,
-                                         ErrorTag errorTag)
-        : ProgrammableEncoder(device, encodingContext, errorTag),
-          mIndirectDrawMetadata(device->GetLimits()),
-          mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
-          mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {
-    }
+RenderEncoderBase::RenderEncoderBase(DeviceBase* device,
+                                     EncodingContext* encodingContext,
+                                     ErrorTag errorTag)
+    : ProgrammableEncoder(device, encodingContext, errorTag),
+      mIndirectDrawMetadata(device->GetLimits()),
+      mDisableBaseVertex(device->IsToggleEnabled(Toggle::DisableBaseVertex)),
+      mDisableBaseInstance(device->IsToggleEnabled(Toggle::DisableBaseInstance)) {}
 
-    void RenderEncoderBase::DestroyImpl() {
-        // Remove reference to the attachment state so that we don't have lingering references to
-        // it preventing it from being uncached in the device.
-        mAttachmentState = nullptr;
-    }
+void RenderEncoderBase::DestroyImpl() {
+    // Remove reference to the attachment state so that we don't have lingering references to
+    // it preventing it from being uncached in the device.
+    mAttachmentState = nullptr;
+}
 
-    const AttachmentState* RenderEncoderBase::GetAttachmentState() const {
-        ASSERT(!IsError());
-        ASSERT(mAttachmentState != nullptr);
-        return mAttachmentState.Get();
-    }
+const AttachmentState* RenderEncoderBase::GetAttachmentState() const {
+    ASSERT(!IsError());
+    ASSERT(mAttachmentState != nullptr);
+    return mAttachmentState.Get();
+}
 
-    bool RenderEncoderBase::IsDepthReadOnly() const {
-        ASSERT(!IsError());
-        return mDepthReadOnly;
-    }
+bool RenderEncoderBase::IsDepthReadOnly() const {
+    ASSERT(!IsError());
+    return mDepthReadOnly;
+}
 
-    bool RenderEncoderBase::IsStencilReadOnly() const {
-        ASSERT(!IsError());
-        return mStencilReadOnly;
-    }
+bool RenderEncoderBase::IsStencilReadOnly() const {
+    ASSERT(!IsError());
+    return mStencilReadOnly;
+}
 
-    Ref<AttachmentState> RenderEncoderBase::AcquireAttachmentState() {
-        return std::move(mAttachmentState);
-    }
+Ref<AttachmentState> RenderEncoderBase::AcquireAttachmentState() {
+    return std::move(mAttachmentState);
+}
 
-    void RenderEncoderBase::APIDraw(uint32_t vertexCount,
-                                    uint32_t instanceCount,
-                                    uint32_t firstVertex,
-                                    uint32_t firstInstance) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+void RenderEncoderBase::APIDraw(uint32_t vertexCount,
+                                uint32_t instanceCount,
+                                uint32_t firstVertex,
+                                uint32_t firstInstance) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(mCommandBufferState.ValidateCanDraw());
 
-                    DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
-                                    "First instance (%u) must be zero.", firstInstance);
+                DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+                                "First instance (%u) must be zero.", firstInstance);
 
-                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
-                                                                                      firstVertex));
-                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
-                        instanceCount, firstInstance));
-                }
+                DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(vertexCount,
+                                                                                  firstVertex));
+                DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(instanceCount,
+                                                                                    firstInstance));
+            }
 
-                DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
-                draw->vertexCount = vertexCount;
-                draw->instanceCount = instanceCount;
-                draw->firstVertex = firstVertex;
-                draw->firstInstance = firstInstance;
+            DrawCmd* draw = allocator->Allocate<DrawCmd>(Command::Draw);
+            draw->vertexCount = vertexCount;
+            draw->instanceCount = instanceCount;
+            draw->firstVertex = firstVertex;
+            draw->firstInstance = firstInstance;
 
-                return {};
-            },
-            "encoding %s.Draw(%u, %u, %u, %u).", this, vertexCount, instanceCount, firstVertex,
-            firstInstance);
-    }
+            return {};
+        },
+        "encoding %s.Draw(%u, %u, %u, %u).", this, vertexCount, instanceCount, firstVertex,
+        firstInstance);
+}
 
-    void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
-                                           uint32_t instanceCount,
-                                           uint32_t firstIndex,
-                                           int32_t baseVertex,
-                                           uint32_t firstInstance) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+void RenderEncoderBase::APIDrawIndexed(uint32_t indexCount,
+                                       uint32_t instanceCount,
+                                       uint32_t firstIndex,
+                                       int32_t baseVertex,
+                                       uint32_t firstInstance) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
 
-                    DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
-                                    "First instance (%u) must be zero.", firstInstance);
+                DAWN_INVALID_IF(mDisableBaseInstance && firstInstance != 0,
+                                "First instance (%u) must be zero.", firstInstance);
 
-                    DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
-                                    "Base vertex (%u) must be zero.", baseVertex);
+                DAWN_INVALID_IF(mDisableBaseVertex && baseVertex != 0,
+                                "Base vertex (%u) must be zero.", baseVertex);
 
-                    DAWN_TRY(
-                        mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
+                DAWN_TRY(mCommandBufferState.ValidateIndexBufferInRange(indexCount, firstIndex));
 
-                    // Although we don't know actual vertex access range in CPU, we still call the
-                    // ValidateBufferInRangeForVertexBuffer in order to deal with those vertex step
-                    // mode vertex buffer with an array stride of zero.
-                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
-                    DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(
-                        instanceCount, firstInstance));
-                }
+                // Although we don't know actual vertex access range in CPU, we still call the
+                // ValidateBufferInRangeForVertexBuffer in order to deal with those vertex step
+                // mode vertex buffer with an array stride of zero.
+                DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForVertexBuffer(0, 0));
+                DAWN_TRY(mCommandBufferState.ValidateBufferInRangeForInstanceBuffer(instanceCount,
+                                                                                    firstInstance));
+            }
 
-                DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
-                draw->indexCount = indexCount;
-                draw->instanceCount = instanceCount;
-                draw->firstIndex = firstIndex;
-                draw->baseVertex = baseVertex;
-                draw->firstInstance = firstInstance;
+            DrawIndexedCmd* draw = allocator->Allocate<DrawIndexedCmd>(Command::DrawIndexed);
+            draw->indexCount = indexCount;
+            draw->instanceCount = instanceCount;
+            draw->firstIndex = firstIndex;
+            draw->baseVertex = baseVertex;
+            draw->firstInstance = firstInstance;
 
-                return {};
-            },
-            "encoding %s.DrawIndexed(%u, %u, %u, %i, %u).", this, indexCount, instanceCount,
-            firstIndex, baseVertex, firstInstance);
-    }
+            return {};
+        },
+        "encoding %s.DrawIndexed(%u, %u, %u, %i, %u).", this, indexCount, instanceCount, firstIndex,
+        baseVertex, firstInstance);
+}
 
-    void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
-                    DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
-                    DAWN_TRY(mCommandBufferState.ValidateCanDraw());
+void RenderEncoderBase::APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+                DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+                DAWN_TRY(mCommandBufferState.ValidateCanDraw());
 
-                    DAWN_INVALID_IF(indirectOffset % 4 != 0,
-                                    "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+                DAWN_INVALID_IF(indirectOffset % 4 != 0,
+                                "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
 
-                    DAWN_INVALID_IF(
-                        indirectOffset >= indirectBuffer->GetSize() ||
-                            kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
-                        "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
-                        indirectOffset, indirectBuffer, indirectBuffer->GetSize());
-                }
+                DAWN_INVALID_IF(
+                    indirectOffset >= indirectBuffer->GetSize() ||
+                        kDrawIndirectSize > indirectBuffer->GetSize() - indirectOffset,
+                    "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+                    indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+            }
 
-                DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
+            DrawIndirectCmd* cmd = allocator->Allocate<DrawIndirectCmd>(Command::DrawIndirect);
 
-                bool duplicateBaseVertexInstance =
-                    GetDevice()->ShouldDuplicateParametersForDrawIndirect(
-                        mCommandBufferState.GetRenderPipeline());
-                if (IsValidationEnabled() || duplicateBaseVertexInstance) {
-                    // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
-                    // buffer which will store the validated or duplicated indirect data. The buffer
-                    // and offset will be updated to point to it.
-                    // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
-                    // render pass, while the |cmd| pointer is still valid.
-                    cmd->indirectBuffer = nullptr;
+            bool duplicateBaseVertexInstance =
+                GetDevice()->ShouldDuplicateParametersForDrawIndirect(
+                    mCommandBufferState.GetRenderPipeline());
+            if (IsValidationEnabled() || duplicateBaseVertexInstance) {
+                // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
+                // buffer which will store the validated or duplicated indirect data. The buffer
+                // and offset will be updated to point to it.
+                // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
+                // render pass, while the |cmd| pointer is still valid.
+                cmd->indirectBuffer = nullptr;
 
-                    mIndirectDrawMetadata.AddIndirectDraw(indirectBuffer, indirectOffset,
-                                                          duplicateBaseVertexInstance, cmd);
+                mIndirectDrawMetadata.AddIndirectDraw(indirectBuffer, indirectOffset,
+                                                      duplicateBaseVertexInstance, cmd);
+            } else {
+                cmd->indirectBuffer = indirectBuffer;
+                cmd->indirectOffset = indirectOffset;
+            }
+
+            // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
+            // validation, but it will unnecessarily transition to indirectBuffer usage in the
+            // backend.
+            mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+            return {};
+        },
+        "encoding %s.DrawIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+}
+
+void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
+                                               uint64_t indirectOffset) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
+                DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
+                DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
+
+                DAWN_INVALID_IF(indirectOffset % 4 != 0,
+                                "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
+
+                DAWN_INVALID_IF(
+                    (indirectOffset >= indirectBuffer->GetSize() ||
+                     kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
+                    "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
+                    indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+            }
+
+            DrawIndexedIndirectCmd* cmd =
+                allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
+
+            bool duplicateBaseVertexInstance =
+                GetDevice()->ShouldDuplicateParametersForDrawIndirect(
+                    mCommandBufferState.GetRenderPipeline());
+            if (IsValidationEnabled() || duplicateBaseVertexInstance) {
+                // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
+                // buffer which will store the validated or duplicated indirect data. The buffer
+                // and offset will be updated to point to it.
+                // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
+                // render pass, while the |cmd| pointer is still valid.
+                cmd->indirectBuffer = nullptr;
+
+                mIndirectDrawMetadata.AddIndexedIndirectDraw(
+                    mCommandBufferState.GetIndexFormat(), mCommandBufferState.GetIndexBufferSize(),
+                    indirectBuffer, indirectOffset, duplicateBaseVertexInstance, cmd);
+            } else {
+                cmd->indirectBuffer = indirectBuffer;
+                cmd->indirectOffset = indirectOffset;
+            }
+
+            // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
+            // validation, but it will unnecessarily transition to indirectBuffer usage in the
+            // backend.
+            mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
+
+            return {};
+        },
+        "encoding %s.DrawIndexedIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
+}
+
+void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(pipeline));
+
+                DAWN_INVALID_IF(pipeline->GetAttachmentState() != mAttachmentState.Get(),
+                                "Attachment state of %s is not compatible with %s.\n"
+                                "%s expects an attachment state of %s.\n"
+                                "%s has an attachment state of %s.",
+                                pipeline, this, this, mAttachmentState.Get(), pipeline,
+                                pipeline->GetAttachmentState());
+
+                DAWN_INVALID_IF(pipeline->WritesDepth() && mDepthReadOnly,
+                                "%s writes depth while %s's depthReadOnly is true", pipeline, this);
+
+                DAWN_INVALID_IF(pipeline->WritesStencil() && mStencilReadOnly,
+                                "%s writes stencil while %s's stencilReadOnly is true", pipeline,
+                                this);
+            }
+
+            mCommandBufferState.SetRenderPipeline(pipeline);
+
+            SetRenderPipelineCmd* cmd =
+                allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
+            cmd->pipeline = pipeline;
+
+            return {};
+        },
+        "encoding %s.SetPipeline(%s).", this, pipeline);
+}
+
+void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
+                                          wgpu::IndexFormat format,
+                                          uint64_t offset,
+                                          uint64_t size) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(buffer));
+                DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
+
+                DAWN_TRY(ValidateIndexFormat(format));
+
+                DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
+                                "Index format must be specified");
+
+                DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
+                                "Index buffer offset (%u) is not a multiple of the size (%u) "
+                                "of %s.",
+                                offset, IndexFormatSize(format), format);
+
+                uint64_t bufferSize = buffer->GetSize();
+                DAWN_INVALID_IF(offset > bufferSize,
+                                "Index buffer offset (%u) is larger than the size (%u) of %s.",
+                                offset, bufferSize, buffer);
+
+                uint64_t remainingSize = bufferSize - offset;
+
+                if (size == wgpu::kWholeSize) {
+                    size = remainingSize;
                 } else {
-                    cmd->indirectBuffer = indirectBuffer;
-                    cmd->indirectOffset = indirectOffset;
+                    DAWN_INVALID_IF(size > remainingSize,
+                                    "Index buffer range (offset: %u, size: %u) doesn't fit in "
+                                    "the size (%u) of "
+                                    "%s.",
+                                    offset, size, bufferSize, buffer);
                 }
-
-                // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
-                // validation, but it will unnecessarily transition to indirectBuffer usage in the
-                // backend.
-                mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-
-                return {};
-            },
-            "encoding %s.DrawIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
-    }
-
-    void RenderEncoderBase::APIDrawIndexedIndirect(BufferBase* indirectBuffer,
-                                                   uint64_t indirectOffset) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(indirectBuffer));
-                    DAWN_TRY(ValidateCanUseAs(indirectBuffer, wgpu::BufferUsage::Indirect));
-                    DAWN_TRY(mCommandBufferState.ValidateCanDrawIndexed());
-
-                    DAWN_INVALID_IF(indirectOffset % 4 != 0,
-                                    "Indirect offset (%u) is not a multiple of 4.", indirectOffset);
-
-                    DAWN_INVALID_IF(
-                        (indirectOffset >= indirectBuffer->GetSize() ||
-                         kDrawIndexedIndirectSize > indirectBuffer->GetSize() - indirectOffset),
-                        "Indirect offset (%u) is out of bounds of indirect buffer %s size (%u).",
-                        indirectOffset, indirectBuffer, indirectBuffer->GetSize());
+            } else {
+                if (size == wgpu::kWholeSize) {
+                    DAWN_ASSERT(buffer->GetSize() >= offset);
+                    size = buffer->GetSize() - offset;
                 }
+            }
 
-                DrawIndexedIndirectCmd* cmd =
-                    allocator->Allocate<DrawIndexedIndirectCmd>(Command::DrawIndexedIndirect);
+            mCommandBufferState.SetIndexBuffer(format, size);
 
-                bool duplicateBaseVertexInstance =
-                    GetDevice()->ShouldDuplicateParametersForDrawIndirect(
-                        mCommandBufferState.GetRenderPipeline());
-                if (IsValidationEnabled() || duplicateBaseVertexInstance) {
-                    // Later, EncodeIndirectDrawValidationCommands will allocate a scratch storage
-                    // buffer which will store the validated or duplicated indirect data. The buffer
-                    // and offset will be updated to point to it.
-                    // |EncodeIndirectDrawValidationCommands| is called at the end of encoding the
-                    // render pass, while the |cmd| pointer is still valid.
-                    cmd->indirectBuffer = nullptr;
+            SetIndexBufferCmd* cmd =
+                allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
+            cmd->buffer = buffer;
+            cmd->format = format;
+            cmd->offset = offset;
+            cmd->size = size;
 
-                    mIndirectDrawMetadata.AddIndexedIndirectDraw(
-                        mCommandBufferState.GetIndexFormat(),
-                        mCommandBufferState.GetIndexBufferSize(), indirectBuffer, indirectOffset,
-                        duplicateBaseVertexInstance, cmd);
+            mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
+
+            return {};
+        },
+        "encoding %s.SetIndexBuffer(%s, %s, %u, %u).", this, buffer, format, offset, size);
+}
+
+void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
+                                           BufferBase* buffer,
+                                           uint64_t offset,
+                                           uint64_t size) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(GetDevice()->ValidateObject(buffer));
+                DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
+
+                DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
+                                "Vertex buffer slot (%u) is larger the maximum (%u)", slot,
+                                kMaxVertexBuffers - 1);
+
+                DAWN_INVALID_IF(offset % 4 != 0, "Vertex buffer offset (%u) is not a multiple of 4",
+                                offset);
+
+                uint64_t bufferSize = buffer->GetSize();
+                DAWN_INVALID_IF(offset > bufferSize,
+                                "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
+                                offset, bufferSize, buffer);
+
+                uint64_t remainingSize = bufferSize - offset;
+
+                if (size == wgpu::kWholeSize) {
+                    size = remainingSize;
                 } else {
-                    cmd->indirectBuffer = indirectBuffer;
-                    cmd->indirectOffset = indirectOffset;
-                }
-
-                // TODO(crbug.com/dawn/1166): Adding the indirectBuffer is needed for correct usage
-                // validation, but it will unecessarily transition to indirectBuffer usage in the
-                // backend.
-                mUsageTracker.BufferUsedAs(indirectBuffer, wgpu::BufferUsage::Indirect);
-
-                return {};
-            },
-            "encoding %s.DrawIndexedIndirect(%s, %u).", this, indirectBuffer, indirectOffset);
-    }
-
-    void RenderEncoderBase::APISetPipeline(RenderPipelineBase* pipeline) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(pipeline));
-
-                    DAWN_INVALID_IF(pipeline->GetAttachmentState() != mAttachmentState.Get(),
-                                    "Attachment state of %s is not compatible with %s.\n"
-                                    "%s expects an attachment state of %s.\n"
-                                    "%s has an attachment state of %s.",
-                                    pipeline, this, this, mAttachmentState.Get(), pipeline,
-                                    pipeline->GetAttachmentState());
-
-                    DAWN_INVALID_IF(pipeline->WritesDepth() && mDepthReadOnly,
-                                    "%s writes depth while %s's depthReadOnly is true", pipeline,
-                                    this);
-
-                    DAWN_INVALID_IF(pipeline->WritesStencil() && mStencilReadOnly,
-                                    "%s writes stencil while %s's stencilReadOnly is true",
-                                    pipeline, this);
-                }
-
-                mCommandBufferState.SetRenderPipeline(pipeline);
-
-                SetRenderPipelineCmd* cmd =
-                    allocator->Allocate<SetRenderPipelineCmd>(Command::SetRenderPipeline);
-                cmd->pipeline = pipeline;
-
-                return {};
-            },
-            "encoding %s.SetPipeline(%s).", this, pipeline);
-    }
-
-    void RenderEncoderBase::APISetIndexBuffer(BufferBase* buffer,
-                                              wgpu::IndexFormat format,
-                                              uint64_t offset,
-                                              uint64_t size) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(buffer));
-                    DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Index));
-
-                    DAWN_TRY(ValidateIndexFormat(format));
-
-                    DAWN_INVALID_IF(format == wgpu::IndexFormat::Undefined,
-                                    "Index format must be specified");
-
-                    DAWN_INVALID_IF(offset % uint64_t(IndexFormatSize(format)) != 0,
-                                    "Index buffer offset (%u) is not a multiple of the size (%u) "
+                    DAWN_INVALID_IF(size > remainingSize,
+                                    "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
+                                    "the size (%u) "
                                     "of %s.",
-                                    offset, IndexFormatSize(format), format);
-
-                    uint64_t bufferSize = buffer->GetSize();
-                    DAWN_INVALID_IF(offset > bufferSize,
-                                    "Index buffer offset (%u) is larger than the size (%u) of %s.",
-                                    offset, bufferSize, buffer);
-
-                    uint64_t remainingSize = bufferSize - offset;
-
-                    if (size == wgpu::kWholeSize) {
-                        size = remainingSize;
-                    } else {
-                        DAWN_INVALID_IF(size > remainingSize,
-                                        "Index buffer range (offset: %u, size: %u) doesn't fit in "
-                                        "the size (%u) of "
-                                        "%s.",
-                                        offset, size, bufferSize, buffer);
-                    }
-                } else {
-                    if (size == wgpu::kWholeSize) {
-                        DAWN_ASSERT(buffer->GetSize() >= offset);
-                        size = buffer->GetSize() - offset;
-                    }
+                                    offset, size, bufferSize, buffer);
                 }
-
-                mCommandBufferState.SetIndexBuffer(format, size);
-
-                SetIndexBufferCmd* cmd =
-                    allocator->Allocate<SetIndexBufferCmd>(Command::SetIndexBuffer);
-                cmd->buffer = buffer;
-                cmd->format = format;
-                cmd->offset = offset;
-                cmd->size = size;
-
-                mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Index);
-
-                return {};
-            },
-            "encoding %s.SetIndexBuffer(%s, %s, %u, %u).", this, buffer, format, offset, size);
-    }
-
-    void RenderEncoderBase::APISetVertexBuffer(uint32_t slot,
-                                               BufferBase* buffer,
-                                               uint64_t offset,
-                                               uint64_t size) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(GetDevice()->ValidateObject(buffer));
-                    DAWN_TRY(ValidateCanUseAs(buffer, wgpu::BufferUsage::Vertex));
-
-                    DAWN_INVALID_IF(slot >= kMaxVertexBuffers,
-                                    "Vertex buffer slot (%u) is larger the maximum (%u)", slot,
-                                    kMaxVertexBuffers - 1);
-
-                    DAWN_INVALID_IF(offset % 4 != 0,
-                                    "Vertex buffer offset (%u) is not a multiple of 4", offset);
-
-                    uint64_t bufferSize = buffer->GetSize();
-                    DAWN_INVALID_IF(offset > bufferSize,
-                                    "Vertex buffer offset (%u) is larger than the size (%u) of %s.",
-                                    offset, bufferSize, buffer);
-
-                    uint64_t remainingSize = bufferSize - offset;
-
-                    if (size == wgpu::kWholeSize) {
-                        size = remainingSize;
-                    } else {
-                        DAWN_INVALID_IF(size > remainingSize,
-                                        "Vertex buffer range (offset: %u, size: %u) doesn't fit in "
-                                        "the size (%u) "
-                                        "of %s.",
-                                        offset, size, bufferSize, buffer);
-                    }
-                } else {
-                    if (size == wgpu::kWholeSize) {
-                        DAWN_ASSERT(buffer->GetSize() >= offset);
-                        size = buffer->GetSize() - offset;
-                    }
+            } else {
+                if (size == wgpu::kWholeSize) {
+                    DAWN_ASSERT(buffer->GetSize() >= offset);
+                    size = buffer->GetSize() - offset;
                 }
+            }
 
-                mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
+            mCommandBufferState.SetVertexBuffer(VertexBufferSlot(uint8_t(slot)), size);
 
-                SetVertexBufferCmd* cmd =
-                    allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
-                cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
-                cmd->buffer = buffer;
-                cmd->offset = offset;
-                cmd->size = size;
+            SetVertexBufferCmd* cmd =
+                allocator->Allocate<SetVertexBufferCmd>(Command::SetVertexBuffer);
+            cmd->slot = VertexBufferSlot(static_cast<uint8_t>(slot));
+            cmd->buffer = buffer;
+            cmd->offset = offset;
+            cmd->size = size;
 
-                mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
+            mUsageTracker.BufferUsedAs(buffer, wgpu::BufferUsage::Vertex);
 
-                return {};
-            },
-            "encoding %s.SetVertexBuffer(%u, %s, %u, %u).", this, slot, buffer, offset, size);
-    }
+            return {};
+        },
+        "encoding %s.SetVertexBuffer(%u, %s, %u, %u).", this, slot, buffer, offset, size);
+}
 
-    void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
-                                            BindGroupBase* group,
-                                            uint32_t dynamicOffsetCount,
-                                            const uint32_t* dynamicOffsets) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                BindGroupIndex groupIndex(groupIndexIn);
+void RenderEncoderBase::APISetBindGroup(uint32_t groupIndexIn,
+                                        BindGroupBase* group,
+                                        uint32_t dynamicOffsetCount,
+                                        const uint32_t* dynamicOffsets) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            BindGroupIndex groupIndex(groupIndexIn);
 
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                                  dynamicOffsets));
-                }
+            if (IsValidationEnabled()) {
+                DAWN_TRY(
+                    ValidateSetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets));
+            }
 
-                RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount,
-                                   dynamicOffsets);
-                mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount,
-                                                 dynamicOffsets);
-                mUsageTracker.AddBindGroup(group);
+            RecordSetBindGroup(allocator, groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+            mCommandBufferState.SetBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets);
+            mUsageTracker.AddBindGroup(group);
 
-                return {};
-            },
-            // TODO(dawn:1190): For unknown reasons formatting this message fails if `group` is used
-            // as a string value in the message. This despite the exact same code working as
-            // intended in ComputePassEncoder::APISetBindGroup. Replacing with a static [BindGroup]
-            // until the reason for the failure can be determined.
-            "encoding %s.SetBindGroup(%u, [BindGroup], %u, ...).", this, groupIndexIn,
-            dynamicOffsetCount);
-    }
+            return {};
+        },
+        // TODO(dawn:1190): For unknown reasons formatting this message fails if `group` is used
+        // as a string value in the message. This despite the exact same code working as
+        // intended in ComputePassEncoder::APISetBindGroup. Replacing with a static [BindGroup]
+        // until the reason for the failure can be determined.
+        "encoding %s.SetBindGroup(%u, [BindGroup], %u, ...).", this, groupIndexIn,
+        dynamicOffsetCount);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/RenderEncoderBase.h b/src/dawn/native/RenderEncoderBase.h
index 18ac91a..0bdcc4d 100644
--- a/src/dawn/native/RenderEncoderBase.h
+++ b/src/dawn/native/RenderEncoderBase.h
@@ -24,63 +24,63 @@
 
 namespace dawn::native {
 
-    class RenderEncoderBase : public ProgrammableEncoder {
-      public:
-        RenderEncoderBase(DeviceBase* device,
-                          const char* label,
-                          EncodingContext* encodingContext,
-                          Ref<AttachmentState> attachmentState,
-                          bool depthReadOnly,
-                          bool stencilReadOnly);
+class RenderEncoderBase : public ProgrammableEncoder {
+  public:
+    RenderEncoderBase(DeviceBase* device,
+                      const char* label,
+                      EncodingContext* encodingContext,
+                      Ref<AttachmentState> attachmentState,
+                      bool depthReadOnly,
+                      bool stencilReadOnly);
 
-        void APIDraw(uint32_t vertexCount,
-                     uint32_t instanceCount = 1,
-                     uint32_t firstVertex = 0,
-                     uint32_t firstInstance = 0);
-        void APIDrawIndexed(uint32_t vertexCount,
-                            uint32_t instanceCount,
-                            uint32_t firstIndex,
-                            int32_t baseVertex,
-                            uint32_t firstInstance);
+    void APIDraw(uint32_t vertexCount,
+                 uint32_t instanceCount = 1,
+                 uint32_t firstVertex = 0,
+                 uint32_t firstInstance = 0);
+    void APIDrawIndexed(uint32_t vertexCount,
+                        uint32_t instanceCount,
+                        uint32_t firstIndex,
+                        int32_t baseVertex,
+                        uint32_t firstInstance);
 
-        void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
-        void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+    void APIDrawIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
+    void APIDrawIndexedIndirect(BufferBase* indirectBuffer, uint64_t indirectOffset);
 
-        void APISetPipeline(RenderPipelineBase* pipeline);
+    void APISetPipeline(RenderPipelineBase* pipeline);
 
-        void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
-        void APISetIndexBuffer(BufferBase* buffer,
-                               wgpu::IndexFormat format,
-                               uint64_t offset,
-                               uint64_t size);
+    void APISetVertexBuffer(uint32_t slot, BufferBase* buffer, uint64_t offset, uint64_t size);
+    void APISetIndexBuffer(BufferBase* buffer,
+                           wgpu::IndexFormat format,
+                           uint64_t offset,
+                           uint64_t size);
 
-        void APISetBindGroup(uint32_t groupIndex,
-                             BindGroupBase* group,
-                             uint32_t dynamicOffsetCount = 0,
-                             const uint32_t* dynamicOffsets = nullptr);
+    void APISetBindGroup(uint32_t groupIndex,
+                         BindGroupBase* group,
+                         uint32_t dynamicOffsetCount = 0,
+                         const uint32_t* dynamicOffsets = nullptr);
 
-        const AttachmentState* GetAttachmentState() const;
-        bool IsDepthReadOnly() const;
-        bool IsStencilReadOnly() const;
-        Ref<AttachmentState> AcquireAttachmentState();
+    const AttachmentState* GetAttachmentState() const;
+    bool IsDepthReadOnly() const;
+    bool IsStencilReadOnly() const;
+    Ref<AttachmentState> AcquireAttachmentState();
 
-      protected:
-        // Construct an "error" render encoder base.
-        RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
+  protected:
+    // Construct an "error" render encoder base.
+    RenderEncoderBase(DeviceBase* device, EncodingContext* encodingContext, ErrorTag errorTag);
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        CommandBufferStateTracker mCommandBufferState;
-        RenderPassResourceUsageTracker mUsageTracker;
-        IndirectDrawMetadata mIndirectDrawMetadata;
+    CommandBufferStateTracker mCommandBufferState;
+    RenderPassResourceUsageTracker mUsageTracker;
+    IndirectDrawMetadata mIndirectDrawMetadata;
 
-      private:
-        Ref<AttachmentState> mAttachmentState;
-        const bool mDisableBaseVertex;
-        const bool mDisableBaseInstance;
-        bool mDepthReadOnly = false;
-        bool mStencilReadOnly = false;
-    };
+  private:
+    Ref<AttachmentState> mAttachmentState;
+    const bool mDisableBaseVertex;
+    const bool mDisableBaseInstance;
+    bool mDepthReadOnly = false;
+    bool mStencilReadOnly = false;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/RenderPassEncoder.cpp b/src/dawn/native/RenderPassEncoder.cpp
index 1d35b48..716ce97 100644
--- a/src/dawn/native/RenderPassEncoder.cpp
+++ b/src/dawn/native/RenderPassEncoder.cpp
@@ -30,396 +30,387 @@
 #include "dawn/native/RenderPipeline.h"
 
 namespace dawn::native {
-    namespace {
+namespace {
 
-        // Check the query at queryIndex is unavailable, otherwise it cannot be written.
-        MaybeError ValidateQueryIndexOverwrite(QuerySetBase* querySet,
-                                               uint32_t queryIndex,
-                                               const QueryAvailabilityMap& queryAvailabilityMap) {
-            auto it = queryAvailabilityMap.find(querySet);
-            DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
-                            "Query index %u of %s is written to twice in a render pass.",
-                            queryIndex, querySet);
+// Check the query at queryIndex is unavailable, otherwise it cannot be written.
+MaybeError ValidateQueryIndexOverwrite(QuerySetBase* querySet,
+                                       uint32_t queryIndex,
+                                       const QueryAvailabilityMap& queryAvailabilityMap) {
+    auto it = queryAvailabilityMap.find(querySet);
+    DAWN_INVALID_IF(it != queryAvailabilityMap.end() && it->second[queryIndex],
+                    "Query index %u of %s is written to twice in a render pass.", queryIndex,
+                    querySet);
+
+    return {};
+}
+
+}  // namespace
+
+// The usage tracker is passed in here, because it is prepopulated with usages from the
+// BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
+// command, then this wouldn't be necessary.
+RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+                                     const RenderPassDescriptor* descriptor,
+                                     CommandEncoder* commandEncoder,
+                                     EncodingContext* encodingContext,
+                                     RenderPassResourceUsageTracker usageTracker,
+                                     Ref<AttachmentState> attachmentState,
+                                     std::vector<TimestampWrite> timestampWritesAtEnd,
+                                     uint32_t renderTargetWidth,
+                                     uint32_t renderTargetHeight,
+                                     bool depthReadOnly,
+                                     bool stencilReadOnly)
+    : RenderEncoderBase(device,
+                        descriptor->label,
+                        encodingContext,
+                        std::move(attachmentState),
+                        depthReadOnly,
+                        stencilReadOnly),
+      mCommandEncoder(commandEncoder),
+      mRenderTargetWidth(renderTargetWidth),
+      mRenderTargetHeight(renderTargetHeight),
+      mOcclusionQuerySet(descriptor->occlusionQuerySet),
+      mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
+    mUsageTracker = std::move(usageTracker);
+    TrackInDevice();
+}
+
+// static
+Ref<RenderPassEncoder> RenderPassEncoder::Create(DeviceBase* device,
+                                                 const RenderPassDescriptor* descriptor,
+                                                 CommandEncoder* commandEncoder,
+                                                 EncodingContext* encodingContext,
+                                                 RenderPassResourceUsageTracker usageTracker,
+                                                 Ref<AttachmentState> attachmentState,
+                                                 std::vector<TimestampWrite> timestampWritesAtEnd,
+                                                 uint32_t renderTargetWidth,
+                                                 uint32_t renderTargetHeight,
+                                                 bool depthReadOnly,
+                                                 bool stencilReadOnly) {
+    return AcquireRef(new RenderPassEncoder(device, descriptor, commandEncoder, encodingContext,
+                                            std::move(usageTracker), std::move(attachmentState),
+                                            std::move(timestampWritesAtEnd), renderTargetWidth,
+                                            renderTargetHeight, depthReadOnly, stencilReadOnly));
+}
+
+RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
+                                     CommandEncoder* commandEncoder,
+                                     EncodingContext* encodingContext,
+                                     ErrorTag errorTag)
+    : RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {}
+
+// static
+Ref<RenderPassEncoder> RenderPassEncoder::MakeError(DeviceBase* device,
+                                                    CommandEncoder* commandEncoder,
+                                                    EncodingContext* encodingContext) {
+    return AcquireRef(
+        new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
+}
+
+void RenderPassEncoder::DestroyImpl() {
+    RenderEncoderBase::DestroyImpl();
+    // Ensure that the pass has exited. This is done for passes only since validation requires
+    // they exit before destruction while bundles do not.
+    mEncodingContext->EnsurePassExited(this);
+}
+
+ObjectType RenderPassEncoder::GetType() const {
+    return ObjectType::RenderPassEncoder;
+}
+
+void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
+    DAWN_ASSERT(querySet != nullptr);
+
+    // Track the query availability with true on render pass for rewrite validation and query
+    // reset on render pass on Vulkan
+    mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
+
+    // Track it again on command encoder for zero-initializing when resolving unused queries.
+    mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
+}
+
+void RenderPassEncoder::APIEnd() {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(ValidateProgrammableEncoderEnd());
+
+                DAWN_INVALID_IF(
+                    mOcclusionQueryActive,
+                    "Render pass %s ended with incomplete occlusion query index %u of %s.", this,
+                    mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
+            }
+
+            EndRenderPassCmd* cmd = allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
+            // The query availability has already been updated at the beginning of render
+            // pass, and no need to do update here.
+            cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
+
+            DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
+                                                      mCommandEncoder.Get(),
+                                                      std::move(mIndirectDrawMetadata)));
+            return {};
+        },
+        "encoding %s.End().", this);
+}
+
+void RenderPassEncoder::APIEndPass() {
+    GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
+    APIEnd();
+}
+
+void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            SetStencilReferenceCmd* cmd =
+                allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
+            cmd->reference = reference;
 
             return {};
-        }
+        },
+        "encoding %s.SetStencilReference(%u).", this, reference);
+}
 
-    }  // namespace
+void RenderPassEncoder::APISetBlendConstant(const Color* color) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            SetBlendConstantCmd* cmd =
+                allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
+            cmd->color = *color;
 
-    // The usage tracker is passed in here, because it is prepopulated with usages from the
-    // BeginRenderPassCmd. If we had RenderPassEncoder responsible for recording the
-    // command, then this wouldn't be necessary.
-    RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
-                                         const RenderPassDescriptor* descriptor,
-                                         CommandEncoder* commandEncoder,
-                                         EncodingContext* encodingContext,
-                                         RenderPassResourceUsageTracker usageTracker,
-                                         Ref<AttachmentState> attachmentState,
-                                         std::vector<TimestampWrite> timestampWritesAtEnd,
-                                         uint32_t renderTargetWidth,
-                                         uint32_t renderTargetHeight,
-                                         bool depthReadOnly,
-                                         bool stencilReadOnly)
-        : RenderEncoderBase(device,
-                            descriptor->label,
-                            encodingContext,
-                            std::move(attachmentState),
-                            depthReadOnly,
-                            stencilReadOnly),
-          mCommandEncoder(commandEncoder),
-          mRenderTargetWidth(renderTargetWidth),
-          mRenderTargetHeight(renderTargetHeight),
-          mOcclusionQuerySet(descriptor->occlusionQuerySet),
-          mTimestampWritesAtEnd(std::move(timestampWritesAtEnd)) {
-        mUsageTracker = std::move(usageTracker);
-        TrackInDevice();
-    }
+            return {};
+        },
+        "encoding %s.SetBlendConstant(%s).", this, color);
+}
 
-    // static
-    Ref<RenderPassEncoder> RenderPassEncoder::Create(
-        DeviceBase* device,
-        const RenderPassDescriptor* descriptor,
-        CommandEncoder* commandEncoder,
-        EncodingContext* encodingContext,
-        RenderPassResourceUsageTracker usageTracker,
-        Ref<AttachmentState> attachmentState,
-        std::vector<TimestampWrite> timestampWritesAtEnd,
-        uint32_t renderTargetWidth,
-        uint32_t renderTargetHeight,
-        bool depthReadOnly,
-        bool stencilReadOnly) {
-        return AcquireRef(new RenderPassEncoder(
-            device, descriptor, commandEncoder, encodingContext, std::move(usageTracker),
-            std::move(attachmentState), std::move(timestampWritesAtEnd), renderTargetWidth,
-            renderTargetHeight, depthReadOnly, stencilReadOnly));
-    }
+void RenderPassEncoder::APISetViewport(float x,
+                                       float y,
+                                       float width,
+                                       float height,
+                                       float minDepth,
+                                       float maxDepth) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_INVALID_IF((isnan(x) || isnan(y) || isnan(width) || isnan(height) ||
+                                 isnan(minDepth) || isnan(maxDepth)),
+                                "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
+                                "minDepth: %f, maxDepth: %f) is NaN.",
+                                x, y, width, height, minDepth, maxDepth);
 
-    RenderPassEncoder::RenderPassEncoder(DeviceBase* device,
-                                         CommandEncoder* commandEncoder,
-                                         EncodingContext* encodingContext,
-                                         ErrorTag errorTag)
-        : RenderEncoderBase(device, encodingContext, errorTag), mCommandEncoder(commandEncoder) {
-    }
+                DAWN_INVALID_IF(
+                    x < 0 || y < 0 || width < 0 || height < 0,
+                    "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
+                    "value.",
+                    x, y, width, height);
 
-    // static
-    Ref<RenderPassEncoder> RenderPassEncoder::MakeError(DeviceBase* device,
-                                                        CommandEncoder* commandEncoder,
-                                                        EncodingContext* encodingContext) {
-        return AcquireRef(
-            new RenderPassEncoder(device, commandEncoder, encodingContext, ObjectBase::kError));
-    }
+                DAWN_INVALID_IF(
+                    x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
+                    "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
+                    "in "
+                    "the render target dimensions (%u x %u).",
+                    x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
 
-    void RenderPassEncoder::DestroyImpl() {
-        RenderEncoderBase::DestroyImpl();
-        // Ensure that the pass has exited. This is done for passes only since validation requires
-        // they exit before destruction while bundles do not.
-        mEncodingContext->EnsurePassExited(this);
-    }
+                // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
+                DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
+                                "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
+                                "minDepth was "
+                                "greater than maxDepth.",
+                                minDepth, maxDepth);
+            }
 
-    ObjectType RenderPassEncoder::GetType() const {
-        return ObjectType::RenderPassEncoder;
-    }
+            SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
+            cmd->x = x;
+            cmd->y = y;
+            cmd->width = width;
+            cmd->height = height;
+            cmd->minDepth = minDepth;
+            cmd->maxDepth = maxDepth;
 
-    void RenderPassEncoder::TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex) {
-        DAWN_ASSERT(querySet != nullptr);
+            return {};
+        },
+        "encoding %s.SetViewport(%f, %f, %f, %f, %f, %f).", this, x, y, width, height, minDepth,
+        maxDepth);
+}
 
-        // Track the query availability with true on render pass for rewrite validation and query
-        // reset on render pass on Vulkan
-        mUsageTracker.TrackQueryAvailability(querySet, queryIndex);
+void RenderPassEncoder::APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_INVALID_IF(
+                    width > mRenderTargetWidth || height > mRenderTargetHeight ||
+                        x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
+                    "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
+                    "the render target dimensions (%u x %u).",
+                    x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
+            }
 
-        // Track it again on command encoder for zero-initializing when resolving unused queries.
-        mCommandEncoder->TrackQueryAvailability(querySet, queryIndex);
-    }
+            SetScissorRectCmd* cmd =
+                allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
+            cmd->x = x;
+            cmd->y = y;
+            cmd->width = width;
+            cmd->height = height;
 
-    void RenderPassEncoder::APIEnd() {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(ValidateProgrammableEncoderEnd());
+            return {};
+        },
+        "encoding %s.SetScissorRect(%u, %u, %u, %u).", this, x, y, width, height);
+}
 
-                    DAWN_INVALID_IF(
-                        mOcclusionQueryActive,
-                        "Render pass %s ended with incomplete occlusion query index %u of %s.",
-                        this, mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
-                }
-
-                EndRenderPassCmd* cmd =
-                    allocator->Allocate<EndRenderPassCmd>(Command::EndRenderPass);
-                // The query availability has already been updated at the beginning of render
-                // pass, and no need to do update here.
-                cmd->timestampWrites = std::move(mTimestampWritesAtEnd);
-
-                DAWN_TRY(mEncodingContext->ExitRenderPass(this, std::move(mUsageTracker),
-                                                          mCommandEncoder.Get(),
-                                                          std::move(mIndirectDrawMetadata)));
-                return {};
-            },
-            "encoding %s.End().", this);
-    }
-
-    void RenderPassEncoder::APIEndPass() {
-        GetDevice()->EmitDeprecationWarning("endPass() has been deprecated. Use end() instead.");
-        APIEnd();
-    }
-
-    void RenderPassEncoder::APISetStencilReference(uint32_t reference) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                SetStencilReferenceCmd* cmd =
-                    allocator->Allocate<SetStencilReferenceCmd>(Command::SetStencilReference);
-                cmd->reference = reference;
-
-                return {};
-            },
-            "encoding %s.SetStencilReference(%u).", this, reference);
-    }
-
-    void RenderPassEncoder::APISetBlendConstant(const Color* color) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                SetBlendConstantCmd* cmd =
-                    allocator->Allocate<SetBlendConstantCmd>(Command::SetBlendConstant);
-                cmd->color = *color;
-
-                return {};
-            },
-            "encoding %s.SetBlendConstant(%s).", this, color);
-    }
-
-    void RenderPassEncoder::APISetViewport(float x,
-                                           float y,
-                                           float width,
-                                           float height,
-                                           float minDepth,
-                                           float maxDepth) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_INVALID_IF(
-                        (isnan(x) || isnan(y) || isnan(width) || isnan(height) || isnan(minDepth) ||
-                         isnan(maxDepth)),
-                        "A parameter of the viewport (x: %f, y: %f, width: %f, height: %f, "
-                        "minDepth: %f, maxDepth: %f) is NaN.",
-                        x, y, width, height, minDepth, maxDepth);
-
-                    DAWN_INVALID_IF(
-                        x < 0 || y < 0 || width < 0 || height < 0,
-                        "Viewport bounds (x: %f, y: %f, width: %f, height: %f) contains a negative "
-                        "value.",
-                        x, y, width, height);
-
-                    DAWN_INVALID_IF(
-                        x + width > mRenderTargetWidth || y + height > mRenderTargetHeight,
-                        "Viewport bounds (x: %f, y: %f, width: %f, height: %f) are not contained "
-                        "in "
-                        "the render target dimensions (%u x %u).",
-                        x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
-
-                    // Check for depths being in [0, 1] and min <= max in 3 checks instead of 5.
-                    DAWN_INVALID_IF(minDepth < 0 || minDepth > maxDepth || maxDepth > 1,
-                                    "Viewport minDepth (%f) and maxDepth (%f) are not in [0, 1] or "
-                                    "minDepth was "
-                                    "greater than maxDepth.",
-                                    minDepth, maxDepth);
-                }
-
-                SetViewportCmd* cmd = allocator->Allocate<SetViewportCmd>(Command::SetViewport);
-                cmd->x = x;
-                cmd->y = y;
-                cmd->width = width;
-                cmd->height = height;
-                cmd->minDepth = minDepth;
-                cmd->maxDepth = maxDepth;
-
-                return {};
-            },
-            "encoding %s.SetViewport(%f, %f, %f, %f, %f, %f).", this, x, y, width, height, minDepth,
-            maxDepth);
-    }
-
-    void RenderPassEncoder::APISetScissorRect(uint32_t x,
-                                              uint32_t y,
-                                              uint32_t width,
-                                              uint32_t height) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_INVALID_IF(
-                        width > mRenderTargetWidth || height > mRenderTargetHeight ||
-                            x > mRenderTargetWidth - width || y > mRenderTargetHeight - height,
-                        "Scissor rect (x: %u, y: %u, width: %u, height: %u) is not contained in "
-                        "the render target dimensions (%u x %u).",
-                        x, y, width, height, mRenderTargetWidth, mRenderTargetHeight);
-                }
-
-                SetScissorRectCmd* cmd =
-                    allocator->Allocate<SetScissorRectCmd>(Command::SetScissorRect);
-                cmd->x = x;
-                cmd->y = y;
-                cmd->width = width;
-                cmd->height = height;
-
-                return {};
-            },
-            "encoding %s.SetScissorRect(%u, %u, %u, %u).", this, x, y, width, height);
-    }
-
-    void RenderPassEncoder::APIExecuteBundles(uint32_t count,
-                                              RenderBundleBase* const* renderBundles) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    const AttachmentState* attachmentState = GetAttachmentState();
-                    bool depthReadOnlyInPass = IsDepthReadOnly();
-                    bool stencilReadOnlyInPass = IsStencilReadOnly();
-                    for (uint32_t i = 0; i < count; ++i) {
-                        DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
-
-                        DAWN_INVALID_IF(attachmentState != renderBundles[i]->GetAttachmentState(),
-                                        "Attachment state of renderBundles[%i] (%s) is not "
-                                        "compatible with %s.\n"
-                                        "%s expects an attachment state of %s.\n"
-                                        "renderBundles[%i] (%s) has an attachment state of %s.",
-                                        i, renderBundles[i], this, this, attachmentState, i,
-                                        renderBundles[i], renderBundles[i]->GetAttachmentState());
-
-                        bool depthReadOnlyInBundle = renderBundles[i]->IsDepthReadOnly();
-                        DAWN_INVALID_IF(
-                            depthReadOnlyInPass && !depthReadOnlyInBundle,
-                            "DepthReadOnly (%u) of renderBundle[%i] (%s) is not compatible "
-                            "with DepthReadOnly (%u) of %s.",
-                            depthReadOnlyInBundle, i, renderBundles[i], depthReadOnlyInPass, this);
-
-                        bool stencilReadOnlyInBundle = renderBundles[i]->IsStencilReadOnly();
-                        DAWN_INVALID_IF(stencilReadOnlyInPass && !stencilReadOnlyInBundle,
-                                        "StencilReadOnly (%u) of renderBundle[%i] (%s) is not "
-                                        "compatible with StencilReadOnly (%u) of %s.",
-                                        stencilReadOnlyInBundle, i, renderBundles[i],
-                                        stencilReadOnlyInPass, this);
-                    }
-                }
-
-                mCommandBufferState = CommandBufferStateTracker{};
-
-                ExecuteBundlesCmd* cmd =
-                    allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
-                cmd->count = count;
-
-                Ref<RenderBundleBase>* bundles =
-                    allocator->AllocateData<Ref<RenderBundleBase>>(count);
+void RenderPassEncoder::APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                const AttachmentState* attachmentState = GetAttachmentState();
+                bool depthReadOnlyInPass = IsDepthReadOnly();
+                bool stencilReadOnlyInPass = IsStencilReadOnly();
                 for (uint32_t i = 0; i < count; ++i) {
-                    bundles[i] = renderBundles[i];
+                    DAWN_TRY(GetDevice()->ValidateObject(renderBundles[i]));
 
-                    const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
-                    for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
-                        mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
-                    }
+                    DAWN_INVALID_IF(attachmentState != renderBundles[i]->GetAttachmentState(),
+                                    "Attachment state of renderBundles[%i] (%s) is not "
+                                    "compatible with %s.\n"
+                                    "%s expects an attachment state of %s.\n"
+                                    "renderBundles[%i] (%s) has an attachment state of %s.",
+                                    i, renderBundles[i], this, this, attachmentState, i,
+                                    renderBundles[i], renderBundles[i]->GetAttachmentState());
 
-                    for (uint32_t i = 0; i < usages.textures.size(); ++i) {
-                        mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
-                                                                  usages.textureUsages[i]);
-                    }
+                    bool depthReadOnlyInBundle = renderBundles[i]->IsDepthReadOnly();
+                    DAWN_INVALID_IF(depthReadOnlyInPass && !depthReadOnlyInBundle,
+                                    "DepthReadOnly (%u) of renderBundle[%i] (%s) is not compatible "
+                                    "with DepthReadOnly (%u) of %s.",
+                                    depthReadOnlyInBundle, i, renderBundles[i], depthReadOnlyInPass,
+                                    this);
 
-                    if (IsValidationEnabled()) {
-                        mIndirectDrawMetadata.AddBundle(renderBundles[i]);
-                    }
+                    bool stencilReadOnlyInBundle = renderBundles[i]->IsStencilReadOnly();
+                    DAWN_INVALID_IF(stencilReadOnlyInPass && !stencilReadOnlyInBundle,
+                                    "StencilReadOnly (%u) of renderBundle[%i] (%s) is not "
+                                    "compatible with StencilReadOnly (%u) of %s.",
+                                    stencilReadOnlyInBundle, i, renderBundles[i],
+                                    stencilReadOnlyInPass, this);
+                }
+            }
+
+            mCommandBufferState = CommandBufferStateTracker{};
+
+            ExecuteBundlesCmd* cmd =
+                allocator->Allocate<ExecuteBundlesCmd>(Command::ExecuteBundles);
+            cmd->count = count;
+
+            Ref<RenderBundleBase>* bundles = allocator->AllocateData<Ref<RenderBundleBase>>(count);
+            for (uint32_t i = 0; i < count; ++i) {
+                bundles[i] = renderBundles[i];
+
+                const RenderPassResourceUsage& usages = bundles[i]->GetResourceUsage();
+                for (uint32_t i = 0; i < usages.buffers.size(); ++i) {
+                    mUsageTracker.BufferUsedAs(usages.buffers[i], usages.bufferUsages[i]);
                 }
 
-                return {};
-            },
-            "encoding %s.ExecuteBundles(%u, ...).", this, count);
-    }
+                for (uint32_t i = 0; i < usages.textures.size(); ++i) {
+                    mUsageTracker.AddRenderBundleTextureUsage(usages.textures[i],
+                                                              usages.textureUsages[i]);
+                }
 
-    void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
                 if (IsValidationEnabled()) {
-                    DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
-                                    "The occlusionQuerySet in RenderPassDescriptor is not set.");
-
-                    // The type of querySet has been validated by ValidateRenderPassDescriptor
-
-                    DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
-                                    "Query index (%u) exceeds the number of queries (%u) in %s.",
-                                    queryIndex, mOcclusionQuerySet->GetQueryCount(),
-                                    mOcclusionQuerySet.Get());
-
-                    DAWN_INVALID_IF(mOcclusionQueryActive,
-                                    "An occlusion query (%u) in %s is already active.",
-                                    mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
-
-                    DAWN_TRY_CONTEXT(
-                        ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
-                                                    mUsageTracker.GetQueryAvailabilityMap()),
-                        "validating the occlusion query index (%u) in %s", queryIndex,
-                        mOcclusionQuerySet.Get());
+                    mIndirectDrawMetadata.AddBundle(renderBundles[i]);
                 }
+            }
 
-                // Record the current query index for endOcclusionQuery.
-                mCurrentOcclusionQueryIndex = queryIndex;
-                mOcclusionQueryActive = true;
+            return {};
+        },
+        "encoding %s.ExecuteBundles(%u, ...).", this, count);
+}
 
-                BeginOcclusionQueryCmd* cmd =
-                    allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
-                cmd->querySet = mOcclusionQuerySet.Get();
-                cmd->queryIndex = queryIndex;
+void RenderPassEncoder::APIBeginOcclusionQuery(uint32_t queryIndex) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_INVALID_IF(mOcclusionQuerySet.Get() == nullptr,
+                                "The occlusionQuerySet in RenderPassDescriptor is not set.");
 
-                return {};
-            },
-            "encoding %s.BeginOcclusionQuery(%u).", this, queryIndex);
-    }
+                // The type of querySet has been validated by ValidateRenderPassDescriptor
 
-    void RenderPassEncoder::APIEndOcclusionQuery() {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
-                }
+                DAWN_INVALID_IF(queryIndex >= mOcclusionQuerySet->GetQueryCount(),
+                                "Query index (%u) exceeds the number of queries (%u) in %s.",
+                                queryIndex, mOcclusionQuerySet->GetQueryCount(),
+                                mOcclusionQuerySet.Get());
 
-                TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
+                DAWN_INVALID_IF(mOcclusionQueryActive,
+                                "An occlusion query (%u) in %s is already active.",
+                                mCurrentOcclusionQueryIndex, mOcclusionQuerySet.Get());
 
-                mOcclusionQueryActive = false;
+                DAWN_TRY_CONTEXT(
+                    ValidateQueryIndexOverwrite(mOcclusionQuerySet.Get(), queryIndex,
+                                                mUsageTracker.GetQueryAvailabilityMap()),
+                    "validating the occlusion query index (%u) in %s", queryIndex,
+                    mOcclusionQuerySet.Get());
+            }
 
-                EndOcclusionQueryCmd* cmd =
-                    allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
-                cmd->querySet = mOcclusionQuerySet.Get();
-                cmd->queryIndex = mCurrentOcclusionQueryIndex;
+            // Record the current query index for endOcclusionQuery.
+            mCurrentOcclusionQueryIndex = queryIndex;
+            mOcclusionQueryActive = true;
 
-                return {};
-            },
-            "encoding %s.EndOcclusionQuery().", this);
-    }
+            BeginOcclusionQueryCmd* cmd =
+                allocator->Allocate<BeginOcclusionQueryCmd>(Command::BeginOcclusionQuery);
+            cmd->querySet = mOcclusionQuerySet.Get();
+            cmd->queryIndex = queryIndex;
 
-    void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
-        mEncodingContext->TryEncode(
-            this,
-            [&](CommandAllocator* allocator) -> MaybeError {
-                if (IsValidationEnabled()) {
-                    DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
-                    DAWN_TRY_CONTEXT(
-                        ValidateQueryIndexOverwrite(querySet, queryIndex,
-                                                    mUsageTracker.GetQueryAvailabilityMap()),
-                        "validating the timestamp query index (%u) of %s", queryIndex, querySet);
-                }
+            return {};
+        },
+        "encoding %s.BeginOcclusionQuery(%u).", this, queryIndex);
+}
 
-                TrackQueryAvailability(querySet, queryIndex);
+void RenderPassEncoder::APIEndOcclusionQuery() {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_INVALID_IF(!mOcclusionQueryActive, "No occlusion queries are active.");
+            }
 
-                WriteTimestampCmd* cmd =
-                    allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
-                cmd->querySet = querySet;
-                cmd->queryIndex = queryIndex;
+            TrackQueryAvailability(mOcclusionQuerySet.Get(), mCurrentOcclusionQueryIndex);
 
-                return {};
-            },
-            "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
-    }
+            mOcclusionQueryActive = false;
+
+            EndOcclusionQueryCmd* cmd =
+                allocator->Allocate<EndOcclusionQueryCmd>(Command::EndOcclusionQuery);
+            cmd->querySet = mOcclusionQuerySet.Get();
+            cmd->queryIndex = mCurrentOcclusionQueryIndex;
+
+            return {};
+        },
+        "encoding %s.EndOcclusionQuery().", this);
+}
+
+void RenderPassEncoder::APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex) {
+    mEncodingContext->TryEncode(
+        this,
+        [&](CommandAllocator* allocator) -> MaybeError {
+            if (IsValidationEnabled()) {
+                DAWN_TRY(ValidateTimestampQuery(GetDevice(), querySet, queryIndex));
+                DAWN_TRY_CONTEXT(ValidateQueryIndexOverwrite(
+                                     querySet, queryIndex, mUsageTracker.GetQueryAvailabilityMap()),
+                                 "validating the timestamp query index (%u) of %s", queryIndex,
+                                 querySet);
+            }
+
+            TrackQueryAvailability(querySet, queryIndex);
+
+            WriteTimestampCmd* cmd =
+                allocator->Allocate<WriteTimestampCmd>(Command::WriteTimestamp);
+            cmd->querySet = querySet;
+            cmd->queryIndex = queryIndex;
+
+            return {};
+        },
+        "encoding %s.WriteTimestamp(%s, %u).", this, querySet, queryIndex);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/RenderPassEncoder.h b/src/dawn/native/RenderPassEncoder.h
index c531caf..ad4c130 100644
--- a/src/dawn/native/RenderPassEncoder.h
+++ b/src/dawn/native/RenderPassEncoder.h
@@ -23,82 +23,82 @@
 
 namespace dawn::native {
 
-    class RenderBundleBase;
+class RenderBundleBase;
 
-    class RenderPassEncoder final : public RenderEncoderBase {
-      public:
-        static Ref<RenderPassEncoder> Create(DeviceBase* device,
-                                             const RenderPassDescriptor* descriptor,
-                                             CommandEncoder* commandEncoder,
-                                             EncodingContext* encodingContext,
-                                             RenderPassResourceUsageTracker usageTracker,
-                                             Ref<AttachmentState> attachmentState,
-                                             std::vector<TimestampWrite> timestampWritesAtEnd,
-                                             uint32_t renderTargetWidth,
-                                             uint32_t renderTargetHeight,
-                                             bool depthReadOnly,
-                                             bool stencilReadOnly);
-        static Ref<RenderPassEncoder> MakeError(DeviceBase* device,
-                                                CommandEncoder* commandEncoder,
-                                                EncodingContext* encodingContext);
+class RenderPassEncoder final : public RenderEncoderBase {
+  public:
+    static Ref<RenderPassEncoder> Create(DeviceBase* device,
+                                         const RenderPassDescriptor* descriptor,
+                                         CommandEncoder* commandEncoder,
+                                         EncodingContext* encodingContext,
+                                         RenderPassResourceUsageTracker usageTracker,
+                                         Ref<AttachmentState> attachmentState,
+                                         std::vector<TimestampWrite> timestampWritesAtEnd,
+                                         uint32_t renderTargetWidth,
+                                         uint32_t renderTargetHeight,
+                                         bool depthReadOnly,
+                                         bool stencilReadOnly);
+    static Ref<RenderPassEncoder> MakeError(DeviceBase* device,
+                                            CommandEncoder* commandEncoder,
+                                            EncodingContext* encodingContext);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        void APIEnd();
-        void APIEndPass();  // TODO(dawn:1286): Remove after deprecation period.
+    void APIEnd();
+    void APIEndPass();  // TODO(dawn:1286): Remove after deprecation period.
 
-        void APISetStencilReference(uint32_t reference);
-        void APISetBlendConstant(const Color* color);
-        void APISetViewport(float x,
-                            float y,
-                            float width,
-                            float height,
-                            float minDepth,
-                            float maxDepth);
-        void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
-        void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
+    void APISetStencilReference(uint32_t reference);
+    void APISetBlendConstant(const Color* color);
+    void APISetViewport(float x,
+                        float y,
+                        float width,
+                        float height,
+                        float minDepth,
+                        float maxDepth);
+    void APISetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+    void APIExecuteBundles(uint32_t count, RenderBundleBase* const* renderBundles);
 
-        void APIBeginOcclusionQuery(uint32_t queryIndex);
-        void APIEndOcclusionQuery();
+    void APIBeginOcclusionQuery(uint32_t queryIndex);
+    void APIEndOcclusionQuery();
 
-        void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
+    void APIWriteTimestamp(QuerySetBase* querySet, uint32_t queryIndex);
 
-      protected:
-        RenderPassEncoder(DeviceBase* device,
-                          const RenderPassDescriptor* descriptor,
-                          CommandEncoder* commandEncoder,
-                          EncodingContext* encodingContext,
-                          RenderPassResourceUsageTracker usageTracker,
-                          Ref<AttachmentState> attachmentState,
-                          std::vector<TimestampWrite> timestampWritesAtEnd,
-                          uint32_t renderTargetWidth,
-                          uint32_t renderTargetHeight,
-                          bool depthReadOnly,
-                          bool stencilReadOnly);
-        RenderPassEncoder(DeviceBase* device,
-                          CommandEncoder* commandEncoder,
-                          EncodingContext* encodingContext,
-                          ErrorTag errorTag);
+  protected:
+    RenderPassEncoder(DeviceBase* device,
+                      const RenderPassDescriptor* descriptor,
+                      CommandEncoder* commandEncoder,
+                      EncodingContext* encodingContext,
+                      RenderPassResourceUsageTracker usageTracker,
+                      Ref<AttachmentState> attachmentState,
+                      std::vector<TimestampWrite> timestampWritesAtEnd,
+                      uint32_t renderTargetWidth,
+                      uint32_t renderTargetHeight,
+                      bool depthReadOnly,
+                      bool stencilReadOnly);
+    RenderPassEncoder(DeviceBase* device,
+                      CommandEncoder* commandEncoder,
+                      EncodingContext* encodingContext,
+                      ErrorTag errorTag);
 
-      private:
-        void DestroyImpl() override;
+  private:
+    void DestroyImpl() override;
 
-        void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
+    void TrackQueryAvailability(QuerySetBase* querySet, uint32_t queryIndex);
 
-        // For render and compute passes, the encoding context is borrowed from the command encoder.
-        // Keep a reference to the encoder to make sure the context isn't freed.
-        Ref<CommandEncoder> mCommandEncoder;
+    // For render and compute passes, the encoding context is borrowed from the command encoder.
+    // Keep a reference to the encoder to make sure the context isn't freed.
+    Ref<CommandEncoder> mCommandEncoder;
 
-        uint32_t mRenderTargetWidth;
-        uint32_t mRenderTargetHeight;
+    uint32_t mRenderTargetWidth;
+    uint32_t mRenderTargetHeight;
 
-        // The resources for occlusion query
-        Ref<QuerySetBase> mOcclusionQuerySet;
-        uint32_t mCurrentOcclusionQueryIndex = 0;
-        bool mOcclusionQueryActive = false;
+    // The resources for occlusion query
+    Ref<QuerySetBase> mOcclusionQuerySet;
+    uint32_t mCurrentOcclusionQueryIndex = 0;
+    bool mOcclusionQueryActive = false;
 
-        std::vector<TimestampWrite> mTimestampWritesAtEnd;
-    };
+    std::vector<TimestampWrite> mTimestampWritesAtEnd;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/RenderPipeline.cpp b/src/dawn/native/RenderPipeline.cpp
index 35ee9a1..0b74bf3 100644
--- a/src/dawn/native/RenderPipeline.cpp
+++ b/src/dawn/native/RenderPipeline.cpp
@@ -30,989 +30,969 @@
 
 namespace dawn::native {
 
-    // Helper functions
-    namespace {
-        MaybeError ValidateVertexAttribute(
-            DeviceBase* device,
-            const VertexAttribute* attribute,
-            const EntryPointMetadata& metadata,
-            uint64_t vertexBufferStride,
-            ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
-            DAWN_TRY(ValidateVertexFormat(attribute->format));
-            const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
+// Helper functions
+namespace {
+MaybeError ValidateVertexAttribute(
+    DeviceBase* device,
+    const VertexAttribute* attribute,
+    const EntryPointMetadata& metadata,
+    uint64_t vertexBufferStride,
+    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+    DAWN_TRY(ValidateVertexFormat(attribute->format));
+    const VertexFormatInfo& formatInfo = GetVertexFormatInfo(attribute->format);
 
-            DAWN_INVALID_IF(
-                attribute->shaderLocation >= kMaxVertexAttributes,
-                "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
-                "(%u).",
-                attribute->shaderLocation, kMaxVertexAttributes);
+    DAWN_INVALID_IF(
+        attribute->shaderLocation >= kMaxVertexAttributes,
+        "Attribute shader location (%u) exceeds the maximum number of vertex attributes "
+        "(%u).",
+        attribute->shaderLocation, kMaxVertexAttributes);
 
-            VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
+    VertexAttributeLocation location(static_cast<uint8_t>(attribute->shaderLocation));
 
-            // No underflow is possible because the max vertex format size is smaller than
-            // kMaxVertexBufferArrayStride.
-            ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
-            DAWN_INVALID_IF(
-                attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
-                "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
-                "buffer stride (%u).",
-                attribute->offset, attribute->format, formatInfo.byteSize,
-                kMaxVertexBufferArrayStride);
+    // No underflow is possible because the max vertex format size is smaller than
+    // kMaxVertexBufferArrayStride.
+    ASSERT(kMaxVertexBufferArrayStride >= formatInfo.byteSize);
+    DAWN_INVALID_IF(
+        attribute->offset > kMaxVertexBufferArrayStride - formatInfo.byteSize,
+        "Attribute offset (%u) with format %s (size: %u) doesn't fit in the maximum vertex "
+        "buffer stride (%u).",
+        attribute->offset, attribute->format, formatInfo.byteSize, kMaxVertexBufferArrayStride);
 
-            // No overflow is possible because the offset is already validated to be less
-            // than kMaxVertexBufferArrayStride.
-            ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
-            DAWN_INVALID_IF(
-                vertexBufferStride > 0 &&
-                    attribute->offset + formatInfo.byteSize > vertexBufferStride,
-                "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
-                "stride (%u).",
-                attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
+    // No overflow is possible because the offset is already validated to be less
+    // than kMaxVertexBufferArrayStride.
+    ASSERT(attribute->offset < kMaxVertexBufferArrayStride);
+    DAWN_INVALID_IF(
+        vertexBufferStride > 0 && attribute->offset + formatInfo.byteSize > vertexBufferStride,
+        "Attribute offset (%u) with format %s (size: %u) doesn't fit in the vertex buffer "
+        "stride (%u).",
+        attribute->offset, attribute->format, formatInfo.byteSize, vertexBufferStride);
 
-            DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
-                            "Attribute offset (%u) in not a multiple of %u.", attribute->offset,
-                            std::min(4u, formatInfo.byteSize));
+    DAWN_INVALID_IF(attribute->offset % std::min(4u, formatInfo.byteSize) != 0,
+                    "Attribute offset (%u) in not a multiple of %u.", attribute->offset,
+                    std::min(4u, formatInfo.byteSize));
 
-            DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
-                                formatInfo.baseType != metadata.vertexInputBaseTypes[location],
-                            "Attribute base type (%s) does not match the "
-                            "shader's base type (%s) in location (%u).",
-                            formatInfo.baseType, metadata.vertexInputBaseTypes[location],
-                            attribute->shaderLocation);
+    DAWN_INVALID_IF(metadata.usedVertexInputs[location] &&
+                        formatInfo.baseType != metadata.vertexInputBaseTypes[location],
+                    "Attribute base type (%s) does not match the "
+                    "shader's base type (%s) in location (%u).",
+                    formatInfo.baseType, metadata.vertexInputBaseTypes[location],
+                    attribute->shaderLocation);
 
-            DAWN_INVALID_IF((*attributesSetMask)[location],
-                            "Attribute shader location (%u) is used more than once.",
-                            attribute->shaderLocation);
+    DAWN_INVALID_IF((*attributesSetMask)[location],
+                    "Attribute shader location (%u) is used more than once.",
+                    attribute->shaderLocation);
 
-            attributesSetMask->set(location);
-            return {};
-        }
+    attributesSetMask->set(location);
+    return {};
+}
 
-        MaybeError ValidateVertexBufferLayout(
-            DeviceBase* device,
-            const VertexBufferLayout* buffer,
-            const EntryPointMetadata& metadata,
-            ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
-            DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
-            DAWN_INVALID_IF(
-                buffer->arrayStride > kMaxVertexBufferArrayStride,
-                "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
-                buffer->arrayStride, kMaxVertexBufferArrayStride);
+MaybeError ValidateVertexBufferLayout(
+    DeviceBase* device,
+    const VertexBufferLayout* buffer,
+    const EntryPointMetadata& metadata,
+    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>* attributesSetMask) {
+    DAWN_TRY(ValidateVertexStepMode(buffer->stepMode));
+    DAWN_INVALID_IF(buffer->arrayStride > kMaxVertexBufferArrayStride,
+                    "Vertex buffer arrayStride (%u) is larger than the maximum array stride (%u).",
+                    buffer->arrayStride, kMaxVertexBufferArrayStride);
 
-            DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
-                            "Vertex buffer arrayStride (%u) is not a multiple of 4.",
-                            buffer->arrayStride);
+    DAWN_INVALID_IF(buffer->arrayStride % 4 != 0,
+                    "Vertex buffer arrayStride (%u) is not a multiple of 4.", buffer->arrayStride);
 
-            for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
-                DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
-                                                         buffer->arrayStride, attributesSetMask),
-                                 "validating attributes[%u].", i);
+    for (uint32_t i = 0; i < buffer->attributeCount; ++i) {
+        DAWN_TRY_CONTEXT(ValidateVertexAttribute(device, &buffer->attributes[i], metadata,
+                                                 buffer->arrayStride, attributesSetMask),
+                         "validating attributes[%u].", i);
+    }
+
+    return {};
+}
+
+MaybeError ValidateVertexState(DeviceBase* device,
+                               const VertexState* descriptor,
+                               const PipelineLayoutBase* layout) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    DAWN_INVALID_IF(descriptor->bufferCount > kMaxVertexBuffers,
+                    "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
+                    descriptor->bufferCount, kMaxVertexBuffers);
+
+    DAWN_TRY_CONTEXT(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+                                               descriptor->constantCount, descriptor->constants,
+                                               layout, SingleShaderStage::Vertex),
+                     "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
+                     descriptor->entryPoint);
+    const EntryPointMetadata& vertexMetadata =
+        descriptor->module->GetEntryPoint(descriptor->entryPoint);
+
+    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
+    uint32_t totalAttributesNum = 0;
+    for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
+        DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i], vertexMetadata,
+                                                    &attributesSetMask),
+                         "validating buffers[%u].", i);
+        totalAttributesNum += descriptor->buffers[i].attributeCount;
+    }
+
+    // Every vertex attribute has a member called shaderLocation, and there are some
+    // requirements for shaderLocation: 1) >=0, 2) values are different across different
+    // attributes, 3) can't exceed kMaxVertexAttributes. So it can ensure that total
+    // attribute number never exceed kMaxVertexAttributes.
+    ASSERT(totalAttributesNum <= kMaxVertexAttributes);
+
+    // TODO(dawn:563): Specify which inputs were not used in error message.
+    DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
+                    "Pipeline vertex stage uses vertex buffers not in the vertex state");
+
+    return {};
+}
+
+MaybeError ValidatePrimitiveState(const DeviceBase* device, const PrimitiveState* descriptor) {
+    DAWN_TRY(
+        ValidateSingleSType(descriptor->nextInChain, wgpu::SType::PrimitiveDepthClampingState));
+    const PrimitiveDepthClampingState* clampInfo = nullptr;
+    FindInChain(descriptor->nextInChain, &clampInfo);
+    if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
+        return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
+    }
+    DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
+    DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
+    DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
+    DAWN_TRY(ValidateCullMode(descriptor->cullMode));
+
+    // Pipeline descriptors must have stripIndexFormat == undefined if they are using
+    // non-strip topologies.
+    if (!IsStripPrimitiveTopology(descriptor->topology)) {
+        DAWN_INVALID_IF(descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
+                        "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
+                        "topology (%s).",
+                        descriptor->stripIndexFormat, descriptor->topology);
+    }
+
+    return {};
+}
+
+MaybeError ValidateDepthStencilState(const DeviceBase* device,
+                                     const DepthStencilState* descriptor) {
+    if (descriptor->nextInChain != nullptr) {
+        return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
+    }
+
+    DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
+    DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
+    DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
+    DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
+    DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
+    DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
+    DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
+    DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
+    DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
+
+    const Format* format;
+    DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+    DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
+                    "Depth stencil format (%s) is not depth-stencil renderable.",
+                    descriptor->format);
+
+    DAWN_INVALID_IF(
+        std::isnan(descriptor->depthBiasSlopeScale) || std::isnan(descriptor->depthBiasClamp),
+        "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
+        descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
+
+    DAWN_INVALID_IF(
+        !format->HasDepth() && (descriptor->depthCompare != wgpu::CompareFunction::Always ||
+                                descriptor->depthWriteEnabled),
+        "Depth stencil format (%s) doesn't have depth aspect while depthCompare (%s) is "
+        "not %s or depthWriteEnabled (%u) is true.",
+        descriptor->format, descriptor->depthCompare, wgpu::CompareFunction::Always,
+        descriptor->depthWriteEnabled);
+
+    DAWN_INVALID_IF(!format->HasStencil() && StencilTestEnabled(descriptor),
+                    "Depth stencil format (%s) doesn't have stencil aspect while stencil "
+                    "test or stencil write is enabled.",
+                    descriptor->format);
+
+    return {};
+}
+
+MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
+                    "Multisample count (%u) is not supported.", descriptor->count);
+
+    DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
+                    "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
+                    descriptor->count);
+
+    return {};
+}
+
+MaybeError ValidateBlendComponent(BlendComponent blendComponent) {
+    if (blendComponent.operation == wgpu::BlendOperation::Min ||
+        blendComponent.operation == wgpu::BlendOperation::Max) {
+        DAWN_INVALID_IF(blendComponent.srcFactor != wgpu::BlendFactor::One ||
+                            blendComponent.dstFactor != wgpu::BlendFactor::One,
+                        "Blend factor is not %s when blend operation is %s.",
+                        wgpu::BlendFactor::One, blendComponent.operation);
+    }
+
+    return {};
+}
+
+MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
+    DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
+    DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
+    DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
+    DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
+    DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
+    DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
+    DAWN_TRY(ValidateBlendComponent(descriptor->alpha));
+    DAWN_TRY(ValidateBlendComponent(descriptor->color));
+
+    return {};
+}
+
+bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
+    return blendFactor == wgpu::BlendFactor::SrcAlpha ||
+           blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
+           blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
+}
+
+MaybeError ValidateColorTargetState(
+    DeviceBase* device,
+    const ColorTargetState* descriptor,
+    bool fragmentWritten,
+    const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    if (descriptor->blend) {
+        DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend), "validating blend state.");
+    }
+
+    DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
+
+    const Format* format;
+    DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+    DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
+                    "Color format (%s) is not color renderable.", descriptor->format);
+
+    DAWN_INVALID_IF(
+        descriptor->blend &&
+            !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes & SampleTypeBit::Float),
+        "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
+
+    if (fragmentWritten) {
+        DAWN_INVALID_IF(
+            fragmentOutputVariable.baseType != format->GetAspectInfo(Aspect::Color).baseType,
+            "Color format (%s) base type (%s) doesn't match the fragment "
+            "module output type (%s).",
+            descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
+            fragmentOutputVariable.baseType);
+
+        DAWN_INVALID_IF(fragmentOutputVariable.componentCount < format->componentCount,
+                        "The fragment stage has fewer output components (%u) than the color format "
+                        "(%s) component count (%u).",
+                        fragmentOutputVariable.componentCount, descriptor->format,
+                        format->componentCount);
+
+        if (descriptor->blend) {
+            if (fragmentOutputVariable.componentCount < 4u) {
+                // No alpha channel output
+                // Make sure there's no alpha involved in the blending operation
+                DAWN_INVALID_IF(BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
+                                    BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
+                                "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha "
+                                "but it is missing from fragment output.",
+                                descriptor->blend->color.srcFactor,
+                                descriptor->blend->color.dstFactor);
             }
-
-            return {};
         }
+    } else {
+        DAWN_INVALID_IF(
+            descriptor->writeMask != wgpu::ColorWriteMask::None,
+            "Color target has no corresponding fragment stage output but writeMask (%s) is "
+            "not zero.",
+            descriptor->writeMask);
+    }
 
-        MaybeError ValidateVertexState(DeviceBase* device,
-                                       const VertexState* descriptor,
-                                       const PipelineLayoutBase* layout) {
-            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+    return {};
+}
 
-            DAWN_INVALID_IF(
-                descriptor->bufferCount > kMaxVertexBuffers,
-                "Vertex buffer count (%u) exceeds the maximum number of vertex buffers (%u).",
-                descriptor->bufferCount, kMaxVertexBuffers);
+MaybeError ValidateFragmentState(DeviceBase* device,
+                                 const FragmentState* descriptor,
+                                 const PipelineLayoutBase* layout) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
 
+    DAWN_TRY_CONTEXT(ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
+                                               descriptor->constantCount, descriptor->constants,
+                                               layout, SingleShaderStage::Fragment),
+                     "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
+                     descriptor->entryPoint);
+
+    DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
+                    "Number of targets (%u) exceeds the maximum (%u).", descriptor->targetCount,
+                    kMaxColorAttachments);
+
+    const EntryPointMetadata& fragmentMetadata =
+        descriptor->module->GetEntryPoint(descriptor->entryPoint);
+    for (ColorAttachmentIndex i(uint8_t(0));
+         i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
+        const ColorTargetState* target = &descriptor->targets[static_cast<uint8_t>(i)];
+        if (target->format != wgpu::TextureFormat::Undefined) {
             DAWN_TRY_CONTEXT(
-                ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
-                                          descriptor->constantCount, descriptor->constants, layout,
-                                          SingleShaderStage::Vertex),
-                "validating vertex stage (module: %s, entryPoint: %s).", descriptor->module,
-                descriptor->entryPoint);
-            const EntryPointMetadata& vertexMetadata =
-                descriptor->module->GetEntryPoint(descriptor->entryPoint);
-
-            ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> attributesSetMask;
-            uint32_t totalAttributesNum = 0;
-            for (uint32_t i = 0; i < descriptor->bufferCount; ++i) {
-                DAWN_TRY_CONTEXT(ValidateVertexBufferLayout(device, &descriptor->buffers[i],
-                                                            vertexMetadata, &attributesSetMask),
-                                 "validating buffers[%u].", i);
-                totalAttributesNum += descriptor->buffers[i].attributeCount;
-            }
-
-            // Every vertex attribute has a member called shaderLocation, and there are some
-            // requirements for shaderLocation: 1) >=0, 2) values are different across different
-            // attributes, 3) can't exceed kMaxVertexAttributes. So it can ensure that total
-            // attribute number never exceed kMaxVertexAttributes.
-            ASSERT(totalAttributesNum <= kMaxVertexAttributes);
-
-            // TODO(dawn:563): Specify which inputs were not used in error message.
-            DAWN_INVALID_IF(!IsSubset(vertexMetadata.usedVertexInputs, attributesSetMask),
-                            "Pipeline vertex stage uses vertex buffers not in the vertex state");
-
-            return {};
-        }
-
-        MaybeError ValidatePrimitiveState(const DeviceBase* device,
-                                          const PrimitiveState* descriptor) {
-            DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
-                                         wgpu::SType::PrimitiveDepthClampingState));
-            const PrimitiveDepthClampingState* clampInfo = nullptr;
-            FindInChain(descriptor->nextInChain, &clampInfo);
-            if (clampInfo && !device->IsFeatureEnabled(Feature::DepthClamping)) {
-                return DAWN_VALIDATION_ERROR("The depth clamping feature is not supported");
-            }
-            DAWN_TRY(ValidatePrimitiveTopology(descriptor->topology));
-            DAWN_TRY(ValidateIndexFormat(descriptor->stripIndexFormat));
-            DAWN_TRY(ValidateFrontFace(descriptor->frontFace));
-            DAWN_TRY(ValidateCullMode(descriptor->cullMode));
-
-            // Pipeline descriptors must have stripIndexFormat == undefined if they are using
-            // non-strip topologies.
-            if (!IsStripPrimitiveTopology(descriptor->topology)) {
-                DAWN_INVALID_IF(
-                    descriptor->stripIndexFormat != wgpu::IndexFormat::Undefined,
-                    "StripIndexFormat (%s) is not undefined when using a non-strip primitive "
-                    "topology (%s).",
-                    descriptor->stripIndexFormat, descriptor->topology);
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateDepthStencilState(const DeviceBase* device,
-                                             const DepthStencilState* descriptor) {
-            if (descriptor->nextInChain != nullptr) {
-                return DAWN_VALIDATION_ERROR("nextInChain must be nullptr");
-            }
-
-            DAWN_TRY(ValidateCompareFunction(descriptor->depthCompare));
-            DAWN_TRY(ValidateCompareFunction(descriptor->stencilFront.compare));
-            DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.failOp));
-            DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.depthFailOp));
-            DAWN_TRY(ValidateStencilOperation(descriptor->stencilFront.passOp));
-            DAWN_TRY(ValidateCompareFunction(descriptor->stencilBack.compare));
-            DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.failOp));
-            DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.depthFailOp));
-            DAWN_TRY(ValidateStencilOperation(descriptor->stencilBack.passOp));
-
-            const Format* format;
-            DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
-            DAWN_INVALID_IF(!format->HasDepthOrStencil() || !format->isRenderable,
-                            "Depth stencil format (%s) is not depth-stencil renderable.",
-                            descriptor->format);
-
-            DAWN_INVALID_IF(std::isnan(descriptor->depthBiasSlopeScale) ||
-                                std::isnan(descriptor->depthBiasClamp),
-                            "Either depthBiasSlopeScale (%f) or depthBiasClamp (%f) is NaN.",
-                            descriptor->depthBiasSlopeScale, descriptor->depthBiasClamp);
-
-            DAWN_INVALID_IF(
-                !format->HasDepth() && (descriptor->depthCompare != wgpu::CompareFunction::Always ||
-                                        descriptor->depthWriteEnabled),
-                "Depth stencil format (%s) doesn't have depth aspect while depthCompare (%s) is "
-                "not %s or depthWriteEnabled (%u) is true.",
-                descriptor->format, descriptor->depthCompare, wgpu::CompareFunction::Always,
-                descriptor->depthWriteEnabled);
-
-            DAWN_INVALID_IF(!format->HasStencil() && StencilTestEnabled(descriptor),
-                            "Depth stencil format (%s) doesn't have stencil aspect while stencil "
-                            "test or stencil write is enabled.",
-                            descriptor->format);
-
-            return {};
-        }
-
-        MaybeError ValidateMultisampleState(const MultisampleState* descriptor) {
-            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
-            DAWN_INVALID_IF(!IsValidSampleCount(descriptor->count),
-                            "Multisample count (%u) is not supported.", descriptor->count);
-
-            DAWN_INVALID_IF(descriptor->alphaToCoverageEnabled && descriptor->count <= 1,
-                            "Multisample count (%u) must be > 1 when alphaToCoverage is enabled.",
-                            descriptor->count);
-
-            return {};
-        }
-
-        MaybeError ValidateBlendComponent(BlendComponent blendComponent) {
-            if (blendComponent.operation == wgpu::BlendOperation::Min ||
-                blendComponent.operation == wgpu::BlendOperation::Max) {
-                DAWN_INVALID_IF(blendComponent.srcFactor != wgpu::BlendFactor::One ||
-                                    blendComponent.dstFactor != wgpu::BlendFactor::One,
-                                "Blend factor is not %s when blend operation is %s.",
-                                wgpu::BlendFactor::One, blendComponent.operation);
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateBlendState(DeviceBase* device, const BlendState* descriptor) {
-            DAWN_TRY(ValidateBlendOperation(descriptor->alpha.operation));
-            DAWN_TRY(ValidateBlendFactor(descriptor->alpha.srcFactor));
-            DAWN_TRY(ValidateBlendFactor(descriptor->alpha.dstFactor));
-            DAWN_TRY(ValidateBlendOperation(descriptor->color.operation));
-            DAWN_TRY(ValidateBlendFactor(descriptor->color.srcFactor));
-            DAWN_TRY(ValidateBlendFactor(descriptor->color.dstFactor));
-            DAWN_TRY(ValidateBlendComponent(descriptor->alpha));
-            DAWN_TRY(ValidateBlendComponent(descriptor->color));
-
-            return {};
-        }
-
-        bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
-            return blendFactor == wgpu::BlendFactor::SrcAlpha ||
-                   blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
-                   blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
-        }
-
-        MaybeError ValidateColorTargetState(
-            DeviceBase* device,
-            const ColorTargetState* descriptor,
-            bool fragmentWritten,
-            const EntryPointMetadata::FragmentOutputVariableInfo& fragmentOutputVariable) {
-            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
-            if (descriptor->blend) {
-                DAWN_TRY_CONTEXT(ValidateBlendState(device, descriptor->blend),
-                                 "validating blend state.");
-            }
-
-            DAWN_TRY(ValidateColorWriteMask(descriptor->writeMask));
-
-            const Format* format;
-            DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
-            DAWN_INVALID_IF(!format->IsColor() || !format->isRenderable,
-                            "Color format (%s) is not color renderable.", descriptor->format);
-
-            DAWN_INVALID_IF(
-                descriptor->blend && !(format->GetAspectInfo(Aspect::Color).supportedSampleTypes &
-                                       SampleTypeBit::Float),
-                "Blending is enabled but color format (%s) is not blendable.", descriptor->format);
-
-            if (fragmentWritten) {
-                DAWN_INVALID_IF(fragmentOutputVariable.baseType !=
-                                    format->GetAspectInfo(Aspect::Color).baseType,
-                                "Color format (%s) base type (%s) doesn't match the fragment "
-                                "module output type (%s).",
-                                descriptor->format, format->GetAspectInfo(Aspect::Color).baseType,
-                                fragmentOutputVariable.baseType);
-
-                DAWN_INVALID_IF(
-                    fragmentOutputVariable.componentCount < format->componentCount,
-                    "The fragment stage has fewer output components (%u) than the color format "
-                    "(%s) component count (%u).",
-                    fragmentOutputVariable.componentCount, descriptor->format,
-                    format->componentCount);
-
-                if (descriptor->blend) {
-                    if (fragmentOutputVariable.componentCount < 4u) {
-                        // No alpha channel output
-                        // Make sure there's no alpha involved in the blending operation
-                        DAWN_INVALID_IF(
-                            BlendFactorContainsSrcAlpha(descriptor->blend->color.srcFactor) ||
-                                BlendFactorContainsSrcAlpha(descriptor->blend->color.dstFactor),
-                            "Color blending srcfactor (%s) or dstFactor (%s) is reading alpha "
-                            "but it is missing from fragment output.",
-                            descriptor->blend->color.srcFactor, descriptor->blend->color.dstFactor);
-                    }
-                }
-            } else {
-                DAWN_INVALID_IF(
-                    descriptor->writeMask != wgpu::ColorWriteMask::None,
-                    "Color target has no corresponding fragment stage output but writeMask (%s) is "
-                    "not zero.",
-                    descriptor->writeMask);
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateFragmentState(DeviceBase* device,
-                                         const FragmentState* descriptor,
-                                         const PipelineLayoutBase* layout) {
-            DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
-            DAWN_TRY_CONTEXT(
-                ValidateProgrammableStage(device, descriptor->module, descriptor->entryPoint,
-                                          descriptor->constantCount, descriptor->constants, layout,
-                                          SingleShaderStage::Fragment),
-                "validating fragment stage (module: %s, entryPoint: %s).", descriptor->module,
-                descriptor->entryPoint);
-
-            DAWN_INVALID_IF(descriptor->targetCount > kMaxColorAttachments,
-                            "Number of targets (%u) exceeds the maximum (%u).",
-                            descriptor->targetCount, kMaxColorAttachments);
-
-            const EntryPointMetadata& fragmentMetadata =
-                descriptor->module->GetEntryPoint(descriptor->entryPoint);
-            for (ColorAttachmentIndex i(uint8_t(0));
-                 i < ColorAttachmentIndex(static_cast<uint8_t>(descriptor->targetCount)); ++i) {
-                const ColorTargetState* target = &descriptor->targets[static_cast<uint8_t>(i)];
-                if (target->format != wgpu::TextureFormat::Undefined) {
-                    DAWN_TRY_CONTEXT(ValidateColorTargetState(
-                                         device, target, fragmentMetadata.fragmentOutputsWritten[i],
+                ValidateColorTargetState(device, target, fragmentMetadata.fragmentOutputsWritten[i],
                                          fragmentMetadata.fragmentOutputVariables[i]),
-                                     "validating targets[%u].", static_cast<uint8_t>(i));
-                } else {
-                    DAWN_INVALID_IF(
-                        target->blend,
-                        "Color target[%u] blend state is set when the format is undefined.",
-                        static_cast<uint8_t>(i));
-                    DAWN_INVALID_IF(
-                        target->writeMask != wgpu::ColorWriteMask::None,
-                        "Color target[%u] write mask is set to (%s) when the format is undefined.",
-                        static_cast<uint8_t>(i), target->writeMask);
-                }
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateInterStageMatching(DeviceBase* device,
-                                              const VertexState& vertexState,
-                                              const FragmentState& fragmentState) {
-            const EntryPointMetadata& vertexMetadata =
-                vertexState.module->GetEntryPoint(vertexState.entryPoint);
-            const EntryPointMetadata& fragmentMetadata =
-                fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
-
-            // TODO(dawn:563): Can this message give more details?
-            DAWN_INVALID_IF(
-                vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
-                "One or more fragment inputs and vertex outputs are not one-to-one matching");
-
-            // TODO(dawn:802): Validate interpolation types and interpolition sampling types
-            for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
-                const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
-                const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
-                DAWN_INVALID_IF(
-                    vertexOutputInfo.baseType != fragmentInputInfo.baseType,
-                    "The base type (%s) of the vertex output at location %u is different from the "
-                    "base type (%s) of the fragment input at location %u.",
-                    vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
-
-                DAWN_INVALID_IF(
-                    vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
-                    "The component count (%u) of the vertex output at location %u is different "
-                    "from the component count (%u) of the fragment input at location %u.",
-                    vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
-
-                DAWN_INVALID_IF(
-                    vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
-                    "The interpolation type (%s) of the vertex output at location %u is different "
-                    "from the interpolation type (%s) of the fragment input at location %u.",
-                    vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
-
-                DAWN_INVALID_IF(
-                    vertexOutputInfo.interpolationSampling !=
-                        fragmentInputInfo.interpolationSampling,
-                    "The interpolation sampling (%s) of the vertex output at location %u is "
-                    "different from the interpolation sampling (%s) of the fragment input at "
-                    "location %u.",
-                    vertexOutputInfo.interpolationSampling, i,
-                    fragmentInputInfo.interpolationSampling, i);
-            }
-
-            return {};
-        }
-    }  // anonymous namespace
-
-    // Helper functions
-    size_t IndexFormatSize(wgpu::IndexFormat format) {
-        switch (format) {
-            case wgpu::IndexFormat::Uint16:
-                return sizeof(uint16_t);
-            case wgpu::IndexFormat::Uint32:
-                return sizeof(uint32_t);
-            case wgpu::IndexFormat::Undefined:
-                break;
-        }
-        UNREACHABLE();
-    }
-
-    bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
-        return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
-               primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
-    }
-
-    MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
-                                                const RenderPipelineDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
-
-        if (descriptor->layout != nullptr) {
-            DAWN_TRY(device->ValidateObject(descriptor->layout));
-        }
-
-        DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
-                         "validating vertex state.");
-
-        DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
-                         "validating primitive state.");
-
-        if (descriptor->depthStencil) {
-            DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
-                             "validating depthStencil state.");
-        }
-
-        DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
-                         "validating multisample state.");
-
-        if (descriptor->fragment != nullptr) {
-            DAWN_TRY_CONTEXT(
-                ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
-                "validating fragment state.");
-
-            DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
-                            "Must have at least one color or depthStencil target.");
-
-            DAWN_TRY(
-                ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
-        }
-
-        return {};
-    }
-
-    std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
-        DeviceBase* device,
-        const RenderPipelineDescriptor* descriptor) {
-        std::vector<StageAndDescriptor> stages;
-        stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
-                          descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
-                          descriptor->vertex.constants});
-        if (descriptor->fragment != nullptr) {
-            stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
-                              descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
-                              descriptor->fragment->constants});
-        } else if (device->IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
-            InternalPipelineStore* store = device->GetInternalPipelineStore();
-            // The placeholder fragment shader module should already be initialized
-            DAWN_ASSERT(store->placeholderFragmentShader != nullptr);
-            ShaderModuleBase* placeholderFragmentShader = store->placeholderFragmentShader.Get();
-            stages.push_back({SingleShaderStage::Fragment, placeholderFragmentShader,
-                              "fs_empty_main", 0, nullptr});
-        }
-        return stages;
-    }
-
-    bool StencilTestEnabled(const DepthStencilState* depthStencil) {
-        return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
-               depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
-               depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
-               depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
-               depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
-               depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
-               depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
-               depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
-    }
-
-    // RenderPipelineBase
-
-    RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
-                                           const RenderPipelineDescriptor* descriptor)
-        : PipelineBase(device,
-                       descriptor->layout,
-                       descriptor->label,
-                       GetRenderStagesAndSetPlaceholderShader(device, descriptor)),
-          mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
-        mVertexBufferCount = descriptor->vertex.bufferCount;
-        const VertexBufferLayout* buffers = descriptor->vertex.buffers;
-        for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
-            if (buffers[slot].attributeCount == 0) {
-                continue;
-            }
-
-            VertexBufferSlot typedSlot(slot);
-
-            mVertexBufferSlotsUsed.set(typedSlot);
-            mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
-            mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
-            mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
-            mVertexBufferInfos[typedSlot].lastStride = 0;
-            switch (buffers[slot].stepMode) {
-                case wgpu::VertexStepMode::Vertex:
-                    mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
-                    break;
-                case wgpu::VertexStepMode::Instance:
-                    mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
-                    break;
-                default:
-                    DAWN_UNREACHABLE();
-            }
-
-            for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
-                VertexAttributeLocation location = VertexAttributeLocation(
-                    static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
-                mAttributeLocationsUsed.set(location);
-                mAttributeInfos[location].shaderLocation = location;
-                mAttributeInfos[location].vertexBufferSlot = typedSlot;
-                mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
-                mAttributeInfos[location].format = buffers[slot].attributes[i].format;
-                // Compute the access boundary of this attribute by adding attribute format size to
-                // attribute offset. Although offset is in uint64_t, such sum must be no larger than
-                // maxVertexBufferArrayStride (2048), which is promised by the GPUVertexBufferLayout
-                // validation of creating render pipeline. Therefore, calculating in uint16_t will
-                // cause no overflow.
-                uint32_t formatByteSize =
-                    GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize;
-                DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
-                uint16_t accessBoundary =
-                    uint16_t(buffers[slot].attributes[i].offset) + uint16_t(formatByteSize);
-                mVertexBufferInfos[typedSlot].usedBytesInStride =
-                    std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
-                mVertexBufferInfos[typedSlot].lastStride =
-                    std::max(mVertexBufferInfos[typedSlot].lastStride,
-                             mAttributeInfos[location].offset + formatByteSize);
-            }
-        }
-
-        mPrimitive = descriptor->primitive;
-        const PrimitiveDepthClampingState* clampInfo = nullptr;
-        FindInChain(mPrimitive.nextInChain, &clampInfo);
-        if (clampInfo) {
-            mClampDepth = clampInfo->clampDepth;
-        }
-        mMultisample = descriptor->multisample;
-
-        if (mAttachmentState->HasDepthStencilAttachment()) {
-            mDepthStencil = *descriptor->depthStencil;
-            mWritesDepth = mDepthStencil.depthWriteEnabled;
-            if (mDepthStencil.stencilWriteMask) {
-                if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
-                     (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
-                      mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
-                      mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
-                    (mPrimitive.cullMode != wgpu::CullMode::Back &&
-                     (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
-                      mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
-                      mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
-                    mWritesStencil = true;
-                }
-            }
+                "validating targets[%u].", static_cast<uint8_t>(i));
         } else {
-            // These default values below are useful for backends to fill information.
-            // The values indicate that depth and stencil test are disabled when backends
-            // set their own depth stencil states/descriptors according to the values in
-            // mDepthStencil.
-            mDepthStencil.format = wgpu::TextureFormat::Undefined;
-            mDepthStencil.depthWriteEnabled = false;
-            mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
-            mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
-            mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
-            mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
-            mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
-            mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
-            mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
-            mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
-            mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
-            mDepthStencil.stencilReadMask = 0xff;
-            mDepthStencil.stencilWriteMask = 0xff;
-            mDepthStencil.depthBias = 0;
-            mDepthStencil.depthBiasSlopeScale = 0.0f;
-            mDepthStencil.depthBiasClamp = 0.0f;
+            DAWN_INVALID_IF(target->blend,
+                            "Color target[%u] blend state is set when the format is undefined.",
+                            static_cast<uint8_t>(i));
+            DAWN_INVALID_IF(
+                target->writeMask != wgpu::ColorWriteMask::None,
+                "Color target[%u] write mask is set to (%s) when the format is undefined.",
+                static_cast<uint8_t>(i), target->writeMask);
+        }
+    }
+
+    return {};
+}
+
+MaybeError ValidateInterStageMatching(DeviceBase* device,
+                                      const VertexState& vertexState,
+                                      const FragmentState& fragmentState) {
+    const EntryPointMetadata& vertexMetadata =
+        vertexState.module->GetEntryPoint(vertexState.entryPoint);
+    const EntryPointMetadata& fragmentMetadata =
+        fragmentState.module->GetEntryPoint(fragmentState.entryPoint);
+
+    // TODO(dawn:563): Can this message give more details?
+    DAWN_INVALID_IF(
+        vertexMetadata.usedInterStageVariables != fragmentMetadata.usedInterStageVariables,
+        "One or more fragment inputs and vertex outputs are not one-to-one matching");
+
+    // TODO(dawn:802): Validate interpolation types and interpolition sampling types
+    for (size_t i : IterateBitSet(vertexMetadata.usedInterStageVariables)) {
+        const auto& vertexOutputInfo = vertexMetadata.interStageVariables[i];
+        const auto& fragmentInputInfo = fragmentMetadata.interStageVariables[i];
+        DAWN_INVALID_IF(
+            vertexOutputInfo.baseType != fragmentInputInfo.baseType,
+            "The base type (%s) of the vertex output at location %u is different from the "
+            "base type (%s) of the fragment input at location %u.",
+            vertexOutputInfo.baseType, i, fragmentInputInfo.baseType, i);
+
+        DAWN_INVALID_IF(vertexOutputInfo.componentCount != fragmentInputInfo.componentCount,
+                        "The component count (%u) of the vertex output at location %u is different "
+                        "from the component count (%u) of the fragment input at location %u.",
+                        vertexOutputInfo.componentCount, i, fragmentInputInfo.componentCount, i);
+
+        DAWN_INVALID_IF(
+            vertexOutputInfo.interpolationType != fragmentInputInfo.interpolationType,
+            "The interpolation type (%s) of the vertex output at location %u is different "
+            "from the interpolation type (%s) of the fragment input at location %u.",
+            vertexOutputInfo.interpolationType, i, fragmentInputInfo.interpolationType, i);
+
+        DAWN_INVALID_IF(
+            vertexOutputInfo.interpolationSampling != fragmentInputInfo.interpolationSampling,
+            "The interpolation sampling (%s) of the vertex output at location %u is "
+            "different from the interpolation sampling (%s) of the fragment input at "
+            "location %u.",
+            vertexOutputInfo.interpolationSampling, i, fragmentInputInfo.interpolationSampling, i);
+    }
+
+    return {};
+}
+}  // anonymous namespace
+
+// Helper functions
+size_t IndexFormatSize(wgpu::IndexFormat format) {
+    switch (format) {
+        case wgpu::IndexFormat::Uint16:
+            return sizeof(uint16_t);
+        case wgpu::IndexFormat::Uint32:
+            return sizeof(uint32_t);
+        case wgpu::IndexFormat::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+    return primitiveTopology == wgpu::PrimitiveTopology::LineStrip ||
+           primitiveTopology == wgpu::PrimitiveTopology::TriangleStrip;
+}
+
+MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+                                            const RenderPipelineDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    if (descriptor->layout != nullptr) {
+        DAWN_TRY(device->ValidateObject(descriptor->layout));
+    }
+
+    DAWN_TRY_CONTEXT(ValidateVertexState(device, &descriptor->vertex, descriptor->layout),
+                     "validating vertex state.");
+
+    DAWN_TRY_CONTEXT(ValidatePrimitiveState(device, &descriptor->primitive),
+                     "validating primitive state.");
+
+    if (descriptor->depthStencil) {
+        DAWN_TRY_CONTEXT(ValidateDepthStencilState(device, descriptor->depthStencil),
+                         "validating depthStencil state.");
+    }
+
+    DAWN_TRY_CONTEXT(ValidateMultisampleState(&descriptor->multisample),
+                     "validating multisample state.");
+
+    if (descriptor->fragment != nullptr) {
+        DAWN_TRY_CONTEXT(ValidateFragmentState(device, descriptor->fragment, descriptor->layout),
+                         "validating fragment state.");
+
+        DAWN_INVALID_IF(descriptor->fragment->targetCount == 0 && !descriptor->depthStencil,
+                        "Must have at least one color or depthStencil target.");
+
+        DAWN_TRY(ValidateInterStageMatching(device, descriptor->vertex, *(descriptor->fragment)));
+    }
+
+    return {};
+}
+
+std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
+    DeviceBase* device,
+    const RenderPipelineDescriptor* descriptor) {
+    std::vector<StageAndDescriptor> stages;
+    stages.push_back({SingleShaderStage::Vertex, descriptor->vertex.module,
+                      descriptor->vertex.entryPoint, descriptor->vertex.constantCount,
+                      descriptor->vertex.constants});
+    if (descriptor->fragment != nullptr) {
+        stages.push_back({SingleShaderStage::Fragment, descriptor->fragment->module,
+                          descriptor->fragment->entryPoint, descriptor->fragment->constantCount,
+                          descriptor->fragment->constants});
+    } else if (device->IsToggleEnabled(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline)) {
+        InternalPipelineStore* store = device->GetInternalPipelineStore();
+        // The placeholder fragment shader module should already be initialized.
+        DAWN_ASSERT(store->placeholderFragmentShader != nullptr);
+        ShaderModuleBase* placeholderFragmentShader = store->placeholderFragmentShader.Get();
+        stages.push_back(
+            {SingleShaderStage::Fragment, placeholderFragmentShader, "fs_empty_main", 0, nullptr});
+    }
+    return stages;
+}
+
+bool StencilTestEnabled(const DepthStencilState* depthStencil) {
+    return depthStencil->stencilBack.compare != wgpu::CompareFunction::Always ||
+           depthStencil->stencilBack.failOp != wgpu::StencilOperation::Keep ||
+           depthStencil->stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+           depthStencil->stencilBack.passOp != wgpu::StencilOperation::Keep ||
+           depthStencil->stencilFront.compare != wgpu::CompareFunction::Always ||
+           depthStencil->stencilFront.failOp != wgpu::StencilOperation::Keep ||
+           depthStencil->stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+           depthStencil->stencilFront.passOp != wgpu::StencilOperation::Keep;
+}
+
+// RenderPipelineBase
+
+RenderPipelineBase::RenderPipelineBase(DeviceBase* device,
+                                       const RenderPipelineDescriptor* descriptor)
+    : PipelineBase(device,
+                   descriptor->layout,
+                   descriptor->label,
+                   GetRenderStagesAndSetPlaceholderShader(device, descriptor)),
+      mAttachmentState(device->GetOrCreateAttachmentState(descriptor)) {
+    mVertexBufferCount = descriptor->vertex.bufferCount;
+    const VertexBufferLayout* buffers = descriptor->vertex.buffers;
+    for (uint8_t slot = 0; slot < mVertexBufferCount; ++slot) {
+        if (buffers[slot].attributeCount == 0) {
+            continue;
         }
 
-        for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
-            // Vertex-only render pipeline have no color attachment. For a render pipeline with
-            // color attachments, there must be a valid FragmentState.
-            ASSERT(descriptor->fragment != nullptr);
-            const ColorTargetState* target =
-                &descriptor->fragment->targets[static_cast<uint8_t>(i)];
-            mTargets[i] = *target;
+        VertexBufferSlot typedSlot(slot);
 
-            if (target->blend != nullptr) {
-                mTargetBlend[i] = *target->blend;
-                mTargets[i].blend = &mTargetBlend[i];
+        mVertexBufferSlotsUsed.set(typedSlot);
+        mVertexBufferInfos[typedSlot].arrayStride = buffers[slot].arrayStride;
+        mVertexBufferInfos[typedSlot].stepMode = buffers[slot].stepMode;
+        mVertexBufferInfos[typedSlot].usedBytesInStride = 0;
+        mVertexBufferInfos[typedSlot].lastStride = 0;
+        switch (buffers[slot].stepMode) {
+            case wgpu::VertexStepMode::Vertex:
+                mVertexBufferSlotsUsedAsVertexBuffer.set(typedSlot);
+                break;
+            case wgpu::VertexStepMode::Instance:
+                mVertexBufferSlotsUsedAsInstanceBuffer.set(typedSlot);
+                break;
+            default:
+                DAWN_UNREACHABLE();
+        }
+
+        for (uint32_t i = 0; i < buffers[slot].attributeCount; ++i) {
+            VertexAttributeLocation location = VertexAttributeLocation(
+                static_cast<uint8_t>(buffers[slot].attributes[i].shaderLocation));
+            mAttributeLocationsUsed.set(location);
+            mAttributeInfos[location].shaderLocation = location;
+            mAttributeInfos[location].vertexBufferSlot = typedSlot;
+            mAttributeInfos[location].offset = buffers[slot].attributes[i].offset;
+            mAttributeInfos[location].format = buffers[slot].attributes[i].format;
+            // Compute the access boundary of this attribute by adding attribute format size to
+            // attribute offset. Although offset is in uint64_t, such sum must be no larger than
+            // maxVertexBufferArrayStride (2048), which is promised by the GPUVertexBufferLayout
+            // validation of creating render pipeline. Therefore, calculating in uint16_t will
+            // cause no overflow.
+            uint32_t formatByteSize =
+                GetVertexFormatInfo(buffers[slot].attributes[i].format).byteSize;
+            DAWN_ASSERT(buffers[slot].attributes[i].offset <= 2048);
+            uint16_t accessBoundary =
+                uint16_t(buffers[slot].attributes[i].offset) + uint16_t(formatByteSize);
+            mVertexBufferInfos[typedSlot].usedBytesInStride =
+                std::max(mVertexBufferInfos[typedSlot].usedBytesInStride, accessBoundary);
+            mVertexBufferInfos[typedSlot].lastStride =
+                std::max(mVertexBufferInfos[typedSlot].lastStride,
+                         mAttributeInfos[location].offset + formatByteSize);
+        }
+    }
+
+    mPrimitive = descriptor->primitive;
+    const PrimitiveDepthClampingState* clampInfo = nullptr;
+    FindInChain(mPrimitive.nextInChain, &clampInfo);
+    if (clampInfo) {
+        mClampDepth = clampInfo->clampDepth;
+    }
+    mMultisample = descriptor->multisample;
+
+    if (mAttachmentState->HasDepthStencilAttachment()) {
+        mDepthStencil = *descriptor->depthStencil;
+        mWritesDepth = mDepthStencil.depthWriteEnabled;
+        if (mDepthStencil.stencilWriteMask) {
+            if ((mPrimitive.cullMode != wgpu::CullMode::Front &&
+                 (mDepthStencil.stencilFront.failOp != wgpu::StencilOperation::Keep ||
+                  mDepthStencil.stencilFront.depthFailOp != wgpu::StencilOperation::Keep ||
+                  mDepthStencil.stencilFront.passOp != wgpu::StencilOperation::Keep)) ||
+                (mPrimitive.cullMode != wgpu::CullMode::Back &&
+                 (mDepthStencil.stencilBack.failOp != wgpu::StencilOperation::Keep ||
+                  mDepthStencil.stencilBack.depthFailOp != wgpu::StencilOperation::Keep ||
+                  mDepthStencil.stencilBack.passOp != wgpu::StencilOperation::Keep))) {
+                mWritesStencil = true;
             }
         }
-
-        SetContentHash(ComputeContentHash());
-        TrackInDevice();
-
-        // Initialize the cache key to include the cache type and device information.
-        GetCacheKey()->Record(CacheKey::Type::RenderPipeline, device->GetCacheKey());
+    } else {
+        // These default values below are useful for backends to fill information.
+        // The values indicate that depth and stencil test are disabled when backends
+        // set their own depth stencil states/descriptors according to the values in
+        // mDepthStencil.
+        mDepthStencil.format = wgpu::TextureFormat::Undefined;
+        mDepthStencil.depthWriteEnabled = false;
+        mDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+        mDepthStencil.stencilBack.compare = wgpu::CompareFunction::Always;
+        mDepthStencil.stencilBack.failOp = wgpu::StencilOperation::Keep;
+        mDepthStencil.stencilBack.depthFailOp = wgpu::StencilOperation::Keep;
+        mDepthStencil.stencilBack.passOp = wgpu::StencilOperation::Keep;
+        mDepthStencil.stencilFront.compare = wgpu::CompareFunction::Always;
+        mDepthStencil.stencilFront.failOp = wgpu::StencilOperation::Keep;
+        mDepthStencil.stencilFront.depthFailOp = wgpu::StencilOperation::Keep;
+        mDepthStencil.stencilFront.passOp = wgpu::StencilOperation::Keep;
+        mDepthStencil.stencilReadMask = 0xff;
+        mDepthStencil.stencilWriteMask = 0xff;
+        mDepthStencil.depthBias = 0;
+        mDepthStencil.depthBiasSlopeScale = 0.0f;
+        mDepthStencil.depthBiasClamp = 0.0f;
     }
 
-    RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
-        TrackInDevice();
-    }
+    for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+        // Vertex-only render pipelines have no color attachment. For a render pipeline with
+        // color attachments, there must be a valid FragmentState.
+        ASSERT(descriptor->fragment != nullptr);
+        const ColorTargetState* target = &descriptor->fragment->targets[static_cast<uint8_t>(i)];
+        mTargets[i] = *target;
 
-    RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : PipelineBase(device, tag) {
-    }
-
-    RenderPipelineBase::~RenderPipelineBase() = default;
-
-    void RenderPipelineBase::DestroyImpl() {
-        if (IsCachedReference()) {
-            // Do not uncache the actual cached object if we are a blueprint.
-            GetDevice()->UncacheRenderPipeline(this);
+        if (target->blend != nullptr) {
+            mTargetBlend[i] = *target->blend;
+            mTargets[i].blend = &mTargetBlend[i];
         }
-
-        // Remove reference to the attachment state so that we don't have lingering references to
-        // it preventing it from being uncached in the device.
-        mAttachmentState = nullptr;
     }
 
-    // static
-    RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
-        class ErrorRenderPipeline final : public RenderPipelineBase {
-          public:
-            explicit ErrorRenderPipeline(DeviceBase* device)
-                : RenderPipelineBase(device, ObjectBase::kError) {
+    SetContentHash(ComputeContentHash());
+    TrackInDevice();
+
+    // Initialize the cache key to include the cache type and device information.
+    GetCacheKey()->Record(CacheKey::Type::RenderPipeline, device->GetCacheKey());
+}
+
+RenderPipelineBase::RenderPipelineBase(DeviceBase* device) : PipelineBase(device) {
+    TrackInDevice();
+}
+
+RenderPipelineBase::RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : PipelineBase(device, tag) {}
+
+RenderPipelineBase::~RenderPipelineBase() = default;
+
+void RenderPipelineBase::DestroyImpl() {
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheRenderPipeline(this);
+    }
+
+    // Remove reference to the attachment state so that we don't have lingering references to
+    // it preventing it from being uncached in the device.
+    mAttachmentState = nullptr;
+}
+
+// static
+RenderPipelineBase* RenderPipelineBase::MakeError(DeviceBase* device) {
+    class ErrorRenderPipeline final : public RenderPipelineBase {
+      public:
+        explicit ErrorRenderPipeline(DeviceBase* device)
+            : RenderPipelineBase(device, ObjectBase::kError) {}
+
+        MaybeError Initialize() override {
+            UNREACHABLE();
+            return {};
+        }
+    };
+
+    return new ErrorRenderPipeline(device);
+}
+
+ObjectType RenderPipelineBase::GetType() const {
+    return ObjectType::RenderPipeline;
+}
+
+const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
+RenderPipelineBase::GetAttributeLocationsUsed() const {
+    ASSERT(!IsError());
+    return mAttributeLocationsUsed;
+}
+
+const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
+    VertexAttributeLocation location) const {
+    ASSERT(!IsError());
+    ASSERT(mAttributeLocationsUsed[location]);
+    return mAttributeInfos[location];
+}
+
+const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+RenderPipelineBase::GetVertexBufferSlotsUsed() const {
+    ASSERT(!IsError());
+    return mVertexBufferSlotsUsed;
+}
+
+const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
+    ASSERT(!IsError());
+    return mVertexBufferSlotsUsedAsVertexBuffer;
+}
+
+const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
+    ASSERT(!IsError());
+    return mVertexBufferSlotsUsedAsInstanceBuffer;
+}
+
+const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
+    ASSERT(!IsError());
+    ASSERT(mVertexBufferSlotsUsed[slot]);
+    return mVertexBufferInfos[slot];
+}
+
+uint32_t RenderPipelineBase::GetVertexBufferCount() const {
+    ASSERT(!IsError());
+    return mVertexBufferCount;
+}
+
+const ColorTargetState* RenderPipelineBase::GetColorTargetState(
+    ColorAttachmentIndex attachmentSlot) const {
+    ASSERT(!IsError());
+    ASSERT(attachmentSlot < mTargets.size());
+    return &mTargets[attachmentSlot];
+}
+
+const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
+    ASSERT(!IsError());
+    return &mDepthStencil;
+}
+
+wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
+    ASSERT(!IsError());
+    return mPrimitive.topology;
+}
+
+wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
+    ASSERT(!IsError());
+    return mPrimitive.stripIndexFormat;
+}
+
+wgpu::CullMode RenderPipelineBase::GetCullMode() const {
+    ASSERT(!IsError());
+    return mPrimitive.cullMode;
+}
+
+wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
+    ASSERT(!IsError());
+    return mPrimitive.frontFace;
+}
+
+bool RenderPipelineBase::IsDepthBiasEnabled() const {
+    ASSERT(!IsError());
+    return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
+}
+
+int32_t RenderPipelineBase::GetDepthBias() const {
+    ASSERT(!IsError());
+    return mDepthStencil.depthBias;
+}
+
+float RenderPipelineBase::GetDepthBiasSlopeScale() const {
+    ASSERT(!IsError());
+    return mDepthStencil.depthBiasSlopeScale;
+}
+
+float RenderPipelineBase::GetDepthBiasClamp() const {
+    ASSERT(!IsError());
+    return mDepthStencil.depthBiasClamp;
+}
+
+bool RenderPipelineBase::ShouldClampDepth() const {
+    ASSERT(!IsError());
+    return mClampDepth;
+}
+
+ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
+RenderPipelineBase::GetColorAttachmentsMask() const {
+    ASSERT(!IsError());
+    return mAttachmentState->GetColorAttachmentsMask();
+}
+
+bool RenderPipelineBase::HasDepthStencilAttachment() const {
+    ASSERT(!IsError());
+    return mAttachmentState->HasDepthStencilAttachment();
+}
+
+wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
+    ColorAttachmentIndex attachment) const {
+    ASSERT(!IsError());
+    return mTargets[attachment].format;
+}
+
+wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
+    ASSERT(!IsError());
+    ASSERT(mAttachmentState->HasDepthStencilAttachment());
+    return mDepthStencil.format;
+}
+
+uint32_t RenderPipelineBase::GetSampleCount() const {
+    ASSERT(!IsError());
+    return mAttachmentState->GetSampleCount();
+}
+
+uint32_t RenderPipelineBase::GetSampleMask() const {
+    ASSERT(!IsError());
+    return mMultisample.mask;
+}
+
+bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
+    ASSERT(!IsError());
+    return mMultisample.alphaToCoverageEnabled;
+}
+
+const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
+    ASSERT(!IsError());
+
+    return mAttachmentState.Get();
+}
+
+bool RenderPipelineBase::WritesDepth() const {
+    ASSERT(!IsError());
+
+    return mWritesDepth;
+}
+
+bool RenderPipelineBase::WritesStencil() const {
+    ASSERT(!IsError());
+
+    return mWritesStencil;
+}
+
+size_t RenderPipelineBase::ComputeContentHash() {
+    ObjectContentHasher recorder;
+
+    // Record modules and layout
+    recorder.Record(PipelineBase::ComputeContentHash());
+
+    // Hierarchically record the attachment state.
+    // It contains the attachments set, texture formats, and sample count.
+    recorder.Record(mAttachmentState->GetContentHash());
+
+    // Record attachments
+    for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
+        const ColorTargetState& desc = *GetColorTargetState(i);
+        recorder.Record(desc.writeMask);
+        if (desc.blend != nullptr) {
+            recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
+                            desc.blend->color.dstFactor);
+            recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
+                            desc.blend->alpha.dstFactor);
+        }
+    }
+
+    if (mAttachmentState->HasDepthStencilAttachment()) {
+        const DepthStencilState& desc = mDepthStencil;
+        recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
+        recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
+        recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
+                        desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
+        recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
+                        desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
+        recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
+    }
+
+    // Record vertex state
+    recorder.Record(mAttributeLocationsUsed);
+    for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) {
+        const VertexAttributeInfo& desc = GetAttribute(location);
+        recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format);
+    }
+
+    recorder.Record(mVertexBufferSlotsUsed);
+    for (VertexBufferSlot slot : IterateBitSet(mVertexBufferSlotsUsed)) {
+        const VertexBufferInfo& desc = GetVertexBuffer(slot);
+        recorder.Record(desc.arrayStride, desc.stepMode);
+    }
+
+    // Record primitive state
+    recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
+                    mPrimitive.cullMode, mClampDepth);
+
+    // Record multisample state
+    // Sample count hashed as part of the attachment state
+    recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
+
+    return recorder.GetContentHash();
+}
+
+bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a,
+                                                  const RenderPipelineBase* b) const {
+    // Check the layout and shader stages.
+    if (!PipelineBase::EqualForCache(a, b)) {
+        return false;
+    }
+
+    // Check the attachment state.
+    // It contains the attachments set, texture formats, and sample count.
+    if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) {
+        return false;
+    }
+
+    if (a->mAttachmentState.Get() != nullptr) {
+        for (ColorAttachmentIndex i :
+             IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
+            const ColorTargetState& descA = *a->GetColorTargetState(i);
+            const ColorTargetState& descB = *b->GetColorTargetState(i);
+            if (descA.writeMask != descB.writeMask) {
+                return false;
             }
-
-            MaybeError Initialize() override {
-                UNREACHABLE();
-                return {};
+            if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
+                return false;
             }
-        };
-
-        return new ErrorRenderPipeline(device);
-    }
-
-    ObjectType RenderPipelineBase::GetType() const {
-        return ObjectType::RenderPipeline;
-    }
-
-    const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
-    RenderPipelineBase::GetAttributeLocationsUsed() const {
-        ASSERT(!IsError());
-        return mAttributeLocationsUsed;
-    }
-
-    const VertexAttributeInfo& RenderPipelineBase::GetAttribute(
-        VertexAttributeLocation location) const {
-        ASSERT(!IsError());
-        ASSERT(mAttributeLocationsUsed[location]);
-        return mAttributeInfos[location];
-    }
-
-    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
-    RenderPipelineBase::GetVertexBufferSlotsUsed() const {
-        ASSERT(!IsError());
-        return mVertexBufferSlotsUsed;
-    }
-
-    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
-    RenderPipelineBase::GetVertexBufferSlotsUsedAsVertexBuffer() const {
-        ASSERT(!IsError());
-        return mVertexBufferSlotsUsedAsVertexBuffer;
-    }
-
-    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
-    RenderPipelineBase::GetVertexBufferSlotsUsedAsInstanceBuffer() const {
-        ASSERT(!IsError());
-        return mVertexBufferSlotsUsedAsInstanceBuffer;
-    }
-
-    const VertexBufferInfo& RenderPipelineBase::GetVertexBuffer(VertexBufferSlot slot) const {
-        ASSERT(!IsError());
-        ASSERT(mVertexBufferSlotsUsed[slot]);
-        return mVertexBufferInfos[slot];
-    }
-
-    uint32_t RenderPipelineBase::GetVertexBufferCount() const {
-        ASSERT(!IsError());
-        return mVertexBufferCount;
-    }
-
-    const ColorTargetState* RenderPipelineBase::GetColorTargetState(
-        ColorAttachmentIndex attachmentSlot) const {
-        ASSERT(!IsError());
-        ASSERT(attachmentSlot < mTargets.size());
-        return &mTargets[attachmentSlot];
-    }
-
-    const DepthStencilState* RenderPipelineBase::GetDepthStencilState() const {
-        ASSERT(!IsError());
-        return &mDepthStencil;
-    }
-
-    wgpu::PrimitiveTopology RenderPipelineBase::GetPrimitiveTopology() const {
-        ASSERT(!IsError());
-        return mPrimitive.topology;
-    }
-
-    wgpu::IndexFormat RenderPipelineBase::GetStripIndexFormat() const {
-        ASSERT(!IsError());
-        return mPrimitive.stripIndexFormat;
-    }
-
-    wgpu::CullMode RenderPipelineBase::GetCullMode() const {
-        ASSERT(!IsError());
-        return mPrimitive.cullMode;
-    }
-
-    wgpu::FrontFace RenderPipelineBase::GetFrontFace() const {
-        ASSERT(!IsError());
-        return mPrimitive.frontFace;
-    }
-
-    bool RenderPipelineBase::IsDepthBiasEnabled() const {
-        ASSERT(!IsError());
-        return mDepthStencil.depthBias != 0 || mDepthStencil.depthBiasSlopeScale != 0;
-    }
-
-    int32_t RenderPipelineBase::GetDepthBias() const {
-        ASSERT(!IsError());
-        return mDepthStencil.depthBias;
-    }
-
-    float RenderPipelineBase::GetDepthBiasSlopeScale() const {
-        ASSERT(!IsError());
-        return mDepthStencil.depthBiasSlopeScale;
-    }
-
-    float RenderPipelineBase::GetDepthBiasClamp() const {
-        ASSERT(!IsError());
-        return mDepthStencil.depthBiasClamp;
-    }
-
-    bool RenderPipelineBase::ShouldClampDepth() const {
-        ASSERT(!IsError());
-        return mClampDepth;
-    }
-
-    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments>
-    RenderPipelineBase::GetColorAttachmentsMask() const {
-        ASSERT(!IsError());
-        return mAttachmentState->GetColorAttachmentsMask();
-    }
-
-    bool RenderPipelineBase::HasDepthStencilAttachment() const {
-        ASSERT(!IsError());
-        return mAttachmentState->HasDepthStencilAttachment();
-    }
-
-    wgpu::TextureFormat RenderPipelineBase::GetColorAttachmentFormat(
-        ColorAttachmentIndex attachment) const {
-        ASSERT(!IsError());
-        return mTargets[attachment].format;
-    }
-
-    wgpu::TextureFormat RenderPipelineBase::GetDepthStencilFormat() const {
-        ASSERT(!IsError());
-        ASSERT(mAttachmentState->HasDepthStencilAttachment());
-        return mDepthStencil.format;
-    }
-
-    uint32_t RenderPipelineBase::GetSampleCount() const {
-        ASSERT(!IsError());
-        return mAttachmentState->GetSampleCount();
-    }
-
-    uint32_t RenderPipelineBase::GetSampleMask() const {
-        ASSERT(!IsError());
-        return mMultisample.mask;
-    }
-
-    bool RenderPipelineBase::IsAlphaToCoverageEnabled() const {
-        ASSERT(!IsError());
-        return mMultisample.alphaToCoverageEnabled;
-    }
-
-    const AttachmentState* RenderPipelineBase::GetAttachmentState() const {
-        ASSERT(!IsError());
-
-        return mAttachmentState.Get();
-    }
-
-    bool RenderPipelineBase::WritesDepth() const {
-        ASSERT(!IsError());
-
-        return mWritesDepth;
-    }
-
-    bool RenderPipelineBase::WritesStencil() const {
-        ASSERT(!IsError());
-
-        return mWritesStencil;
-    }
-
-    size_t RenderPipelineBase::ComputeContentHash() {
-        ObjectContentHasher recorder;
-
-        // Record modules and layout
-        recorder.Record(PipelineBase::ComputeContentHash());
-
-        // Hierarchically record the attachment state.
-        // It contains the attachments set, texture formats, and sample count.
-        recorder.Record(mAttachmentState->GetContentHash());
-
-        // Record attachments
-        for (ColorAttachmentIndex i : IterateBitSet(mAttachmentState->GetColorAttachmentsMask())) {
-            const ColorTargetState& desc = *GetColorTargetState(i);
-            recorder.Record(desc.writeMask);
-            if (desc.blend != nullptr) {
-                recorder.Record(desc.blend->color.operation, desc.blend->color.srcFactor,
-                                desc.blend->color.dstFactor);
-                recorder.Record(desc.blend->alpha.operation, desc.blend->alpha.srcFactor,
-                                desc.blend->alpha.dstFactor);
-            }
-        }
-
-        if (mAttachmentState->HasDepthStencilAttachment()) {
-            const DepthStencilState& desc = mDepthStencil;
-            recorder.Record(desc.depthWriteEnabled, desc.depthCompare);
-            recorder.Record(desc.stencilReadMask, desc.stencilWriteMask);
-            recorder.Record(desc.stencilFront.compare, desc.stencilFront.failOp,
-                            desc.stencilFront.depthFailOp, desc.stencilFront.passOp);
-            recorder.Record(desc.stencilBack.compare, desc.stencilBack.failOp,
-                            desc.stencilBack.depthFailOp, desc.stencilBack.passOp);
-            recorder.Record(desc.depthBias, desc.depthBiasSlopeScale, desc.depthBiasClamp);
-        }
-
-        // Record vertex state
-        recorder.Record(mAttributeLocationsUsed);
-        for (VertexAttributeLocation location : IterateBitSet(mAttributeLocationsUsed)) {
-            const VertexAttributeInfo& desc = GetAttribute(location);
-            recorder.Record(desc.shaderLocation, desc.vertexBufferSlot, desc.offset, desc.format);
-        }
-
-        recorder.Record(mVertexBufferSlotsUsed);
-        for (VertexBufferSlot slot : IterateBitSet(mVertexBufferSlotsUsed)) {
-            const VertexBufferInfo& desc = GetVertexBuffer(slot);
-            recorder.Record(desc.arrayStride, desc.stepMode);
-        }
-
-        // Record primitive state
-        recorder.Record(mPrimitive.topology, mPrimitive.stripIndexFormat, mPrimitive.frontFace,
-                        mPrimitive.cullMode, mClampDepth);
-
-        // Record multisample state
-        // Sample count hashed as part of the attachment state
-        recorder.Record(mMultisample.mask, mMultisample.alphaToCoverageEnabled);
-
-        return recorder.GetContentHash();
-    }
-
-    bool RenderPipelineBase::EqualityFunc::operator()(const RenderPipelineBase* a,
-                                                      const RenderPipelineBase* b) const {
-        // Check the layout and shader stages.
-        if (!PipelineBase::EqualForCache(a, b)) {
-            return false;
-        }
-
-        // Check the attachment state.
-        // It contains the attachments set, texture formats, and sample count.
-        if (a->mAttachmentState.Get() != b->mAttachmentState.Get()) {
-            return false;
-        }
-
-        if (a->mAttachmentState.Get() != nullptr) {
-            for (ColorAttachmentIndex i :
-                 IterateBitSet(a->mAttachmentState->GetColorAttachmentsMask())) {
-                const ColorTargetState& descA = *a->GetColorTargetState(i);
-                const ColorTargetState& descB = *b->GetColorTargetState(i);
-                if (descA.writeMask != descB.writeMask) {
+            if (descA.blend != nullptr) {
+                if (descA.blend->color.operation != descB.blend->color.operation ||
+                    descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
+                    descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
                     return false;
                 }
-                if ((descA.blend == nullptr) != (descB.blend == nullptr)) {
-                    return false;
-                }
-                if (descA.blend != nullptr) {
-                    if (descA.blend->color.operation != descB.blend->color.operation ||
-                        descA.blend->color.srcFactor != descB.blend->color.srcFactor ||
-                        descA.blend->color.dstFactor != descB.blend->color.dstFactor) {
-                        return false;
-                    }
-                    if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
-                        descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
-                        descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
-                        return false;
-                    }
-                }
-            }
-
-            // Check depth/stencil state
-            if (a->mAttachmentState->HasDepthStencilAttachment()) {
-                const DepthStencilState& stateA = a->mDepthStencil;
-                const DepthStencilState& stateB = b->mDepthStencil;
-
-                ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
-                ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
-                ASSERT(!std::isnan(stateA.depthBiasClamp));
-                ASSERT(!std::isnan(stateB.depthBiasClamp));
-
-                if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
-                    stateA.depthCompare != stateB.depthCompare ||
-                    stateA.depthBias != stateB.depthBias ||
-                    stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
-                    stateA.depthBiasClamp != stateB.depthBiasClamp) {
-                    return false;
-                }
-                if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
-                    stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
-                    stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
-                    stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
-                    return false;
-                }
-                if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
-                    stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
-                    stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
-                    stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
-                    return false;
-                }
-                if (stateA.stencilReadMask != stateB.stencilReadMask ||
-                    stateA.stencilWriteMask != stateB.stencilWriteMask) {
+                if (descA.blend->alpha.operation != descB.blend->alpha.operation ||
+                    descA.blend->alpha.srcFactor != descB.blend->alpha.srcFactor ||
+                    descA.blend->alpha.dstFactor != descB.blend->alpha.dstFactor) {
                     return false;
                 }
             }
         }
 
-        // Check vertex state
-        if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
-            return false;
-        }
+        // Check depth/stencil state
+        if (a->mAttachmentState->HasDepthStencilAttachment()) {
+            const DepthStencilState& stateA = a->mDepthStencil;
+            const DepthStencilState& stateB = b->mDepthStencil;
 
-        for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) {
-            const VertexAttributeInfo& descA = a->GetAttribute(loc);
-            const VertexAttributeInfo& descB = b->GetAttribute(loc);
-            if (descA.shaderLocation != descB.shaderLocation ||
-                descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
-                descA.format != descB.format) {
+            ASSERT(!std::isnan(stateA.depthBiasSlopeScale));
+            ASSERT(!std::isnan(stateB.depthBiasSlopeScale));
+            ASSERT(!std::isnan(stateA.depthBiasClamp));
+            ASSERT(!std::isnan(stateB.depthBiasClamp));
+
+            if (stateA.depthWriteEnabled != stateB.depthWriteEnabled ||
+                stateA.depthCompare != stateB.depthCompare ||
+                stateA.depthBias != stateB.depthBias ||
+                stateA.depthBiasSlopeScale != stateB.depthBiasSlopeScale ||
+                stateA.depthBiasClamp != stateB.depthBiasClamp) {
+                return false;
+            }
+            if (stateA.stencilFront.compare != stateB.stencilFront.compare ||
+                stateA.stencilFront.failOp != stateB.stencilFront.failOp ||
+                stateA.stencilFront.depthFailOp != stateB.stencilFront.depthFailOp ||
+                stateA.stencilFront.passOp != stateB.stencilFront.passOp) {
+                return false;
+            }
+            if (stateA.stencilBack.compare != stateB.stencilBack.compare ||
+                stateA.stencilBack.failOp != stateB.stencilBack.failOp ||
+                stateA.stencilBack.depthFailOp != stateB.stencilBack.depthFailOp ||
+                stateA.stencilBack.passOp != stateB.stencilBack.passOp) {
+                return false;
+            }
+            if (stateA.stencilReadMask != stateB.stencilReadMask ||
+                stateA.stencilWriteMask != stateB.stencilWriteMask) {
                 return false;
             }
         }
+    }
 
-        if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
+    // Check vertex state
+    if (a->mAttributeLocationsUsed != b->mAttributeLocationsUsed) {
+        return false;
+    }
+
+    for (VertexAttributeLocation loc : IterateBitSet(a->mAttributeLocationsUsed)) {
+        const VertexAttributeInfo& descA = a->GetAttribute(loc);
+        const VertexAttributeInfo& descB = b->GetAttribute(loc);
+        if (descA.shaderLocation != descB.shaderLocation ||
+            descA.vertexBufferSlot != descB.vertexBufferSlot || descA.offset != descB.offset ||
+            descA.format != descB.format) {
             return false;
         }
-
-        for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) {
-            const VertexBufferInfo& descA = a->GetVertexBuffer(slot);
-            const VertexBufferInfo& descB = b->GetVertexBuffer(slot);
-            if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
-                return false;
-            }
-        }
-
-        // Check primitive state
-        {
-            const PrimitiveState& stateA = a->mPrimitive;
-            const PrimitiveState& stateB = b->mPrimitive;
-            if (stateA.topology != stateB.topology ||
-                stateA.stripIndexFormat != stateB.stripIndexFormat ||
-                stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
-                a->mClampDepth != b->mClampDepth) {
-                return false;
-            }
-        }
-
-        // Check multisample state
-        {
-            const MultisampleState& stateA = a->mMultisample;
-            const MultisampleState& stateB = b->mMultisample;
-            // Sample count already checked as part of the attachment state.
-            if (stateA.mask != stateB.mask ||
-                stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
-                return false;
-            }
-        }
-
-        return true;
     }
 
+    if (a->mVertexBufferSlotsUsed != b->mVertexBufferSlotsUsed) {
+        return false;
+    }
+
+    for (VertexBufferSlot slot : IterateBitSet(a->mVertexBufferSlotsUsed)) {
+        const VertexBufferInfo& descA = a->GetVertexBuffer(slot);
+        const VertexBufferInfo& descB = b->GetVertexBuffer(slot);
+        if (descA.arrayStride != descB.arrayStride || descA.stepMode != descB.stepMode) {
+            return false;
+        }
+    }
+
+    // Check primitive state
+    {
+        const PrimitiveState& stateA = a->mPrimitive;
+        const PrimitiveState& stateB = b->mPrimitive;
+        if (stateA.topology != stateB.topology ||
+            stateA.stripIndexFormat != stateB.stripIndexFormat ||
+            stateA.frontFace != stateB.frontFace || stateA.cullMode != stateB.cullMode ||
+            a->mClampDepth != b->mClampDepth) {
+            return false;
+        }
+    }
+
+    // Check multisample state
+    {
+        const MultisampleState& stateA = a->mMultisample;
+        const MultisampleState& stateB = b->mMultisample;
+        // Sample count already checked as part of the attachment state.
+        if (stateA.mask != stateB.mask ||
+            stateA.alphaToCoverageEnabled != stateB.alphaToCoverageEnabled) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/RenderPipeline.h b/src/dawn/native/RenderPipeline.h
index 54fe939..f904f8a 100644
--- a/src/dawn/native/RenderPipeline.h
+++ b/src/dawn/native/RenderPipeline.h
@@ -29,119 +29,118 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
-                                                const RenderPipelineDescriptor* descriptor);
+MaybeError ValidateRenderPipelineDescriptor(DeviceBase* device,
+                                            const RenderPipelineDescriptor* descriptor);
 
-    std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
-        DeviceBase* device,
-        const RenderPipelineDescriptor* descriptor);
+std::vector<StageAndDescriptor> GetRenderStagesAndSetPlaceholderShader(
+    DeviceBase* device,
+    const RenderPipelineDescriptor* descriptor);
 
-    size_t IndexFormatSize(wgpu::IndexFormat format);
+size_t IndexFormatSize(wgpu::IndexFormat format);
 
-    bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
+bool IsStripPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology);
 
-    bool StencilTestEnabled(const DepthStencilState* depthStencil);
+bool StencilTestEnabled(const DepthStencilState* depthStencil);
 
-    struct VertexAttributeInfo {
-        wgpu::VertexFormat format;
-        uint64_t offset;
-        VertexAttributeLocation shaderLocation;
-        VertexBufferSlot vertexBufferSlot;
+struct VertexAttributeInfo {
+    wgpu::VertexFormat format;
+    uint64_t offset;
+    VertexAttributeLocation shaderLocation;
+    VertexBufferSlot vertexBufferSlot;
+};
+
+struct VertexBufferInfo {
+    uint64_t arrayStride;
+    wgpu::VertexStepMode stepMode;
+    uint16_t usedBytesInStride;
+    // As indicated in the spec, the lastStride is max(attribute.offset +
+    // sizeof(attribute.format)) for each attribute in the buffer[slot]
+    uint64_t lastStride;
+};
+
+class RenderPipelineBase : public PipelineBase {
+  public:
+    RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
+    ~RenderPipelineBase() override;
+
+    static RenderPipelineBase* MakeError(DeviceBase* device);
+
+    ObjectType GetType() const override;
+
+    const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>& GetAttributeLocationsUsed()
+        const;
+    const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+    GetVertexBufferSlotsUsedAsVertexBuffer() const;
+    const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
+    GetVertexBufferSlotsUsedAsInstanceBuffer() const;
+    const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
+    uint32_t GetVertexBufferCount() const;
+
+    const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
+    const DepthStencilState* GetDepthStencilState() const;
+    wgpu::PrimitiveTopology GetPrimitiveTopology() const;
+    wgpu::IndexFormat GetStripIndexFormat() const;
+    wgpu::CullMode GetCullMode() const;
+    wgpu::FrontFace GetFrontFace() const;
+    bool IsDepthBiasEnabled() const;
+    int32_t GetDepthBias() const;
+    float GetDepthBiasSlopeScale() const;
+    float GetDepthBiasClamp() const;
+    bool ShouldClampDepth() const;
+
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
+    bool HasDepthStencilAttachment() const;
+    wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex attachment) const;
+    wgpu::TextureFormat GetDepthStencilFormat() const;
+    uint32_t GetSampleCount() const;
+    uint32_t GetSampleMask() const;
+    bool IsAlphaToCoverageEnabled() const;
+    bool WritesDepth() const;
+    bool WritesStencil() const;
+
+    const AttachmentState* GetAttachmentState() const;
+
+    // Functions necessary for the unordered_set<RenderPipelineBase*>-based cache.
+    size_t ComputeContentHash() override;
+
+    struct EqualityFunc {
+        bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
     };
 
-    struct VertexBufferInfo {
-        uint64_t arrayStride;
-        wgpu::VertexStepMode stepMode;
-        uint16_t usedBytesInStride;
-        // As indicated in the spec, the lastStride is max(attribute.offset +
-        // sizeof(attribute.format)) for each attribute in the buffer[slot]
-        uint64_t lastStride;
-    };
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit RenderPipelineBase(DeviceBase* device);
+    void DestroyImpl() override;
 
-    class RenderPipelineBase : public PipelineBase {
-      public:
-        RenderPipelineBase(DeviceBase* device, const RenderPipelineDescriptor* descriptor);
-        ~RenderPipelineBase() override;
+  private:
+    RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        static RenderPipelineBase* MakeError(DeviceBase* device);
+    // Vertex state
+    uint32_t mVertexBufferCount;
+    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
+    ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes> mAttributeInfos;
+    ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
+    ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsVertexBuffer;
+    ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsInstanceBuffer;
+    ityp::array<VertexBufferSlot, VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
 
-        ObjectType GetType() const override;
+    // Attachments
+    Ref<AttachmentState> mAttachmentState;
+    ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
+    ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
 
-        const ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>&
-        GetAttributeLocationsUsed() const;
-        const VertexAttributeInfo& GetAttribute(VertexAttributeLocation location) const;
-        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>& GetVertexBufferSlotsUsed() const;
-        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
-        GetVertexBufferSlotsUsedAsVertexBuffer() const;
-        const ityp::bitset<VertexBufferSlot, kMaxVertexBuffers>&
-        GetVertexBufferSlotsUsedAsInstanceBuffer() const;
-        const VertexBufferInfo& GetVertexBuffer(VertexBufferSlot slot) const;
-        uint32_t GetVertexBufferCount() const;
-
-        const ColorTargetState* GetColorTargetState(ColorAttachmentIndex attachmentSlot) const;
-        const DepthStencilState* GetDepthStencilState() const;
-        wgpu::PrimitiveTopology GetPrimitiveTopology() const;
-        wgpu::IndexFormat GetStripIndexFormat() const;
-        wgpu::CullMode GetCullMode() const;
-        wgpu::FrontFace GetFrontFace() const;
-        bool IsDepthBiasEnabled() const;
-        int32_t GetDepthBias() const;
-        float GetDepthBiasSlopeScale() const;
-        float GetDepthBiasClamp() const;
-        bool ShouldClampDepth() const;
-
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> GetColorAttachmentsMask() const;
-        bool HasDepthStencilAttachment() const;
-        wgpu::TextureFormat GetColorAttachmentFormat(ColorAttachmentIndex attachment) const;
-        wgpu::TextureFormat GetDepthStencilFormat() const;
-        uint32_t GetSampleCount() const;
-        uint32_t GetSampleMask() const;
-        bool IsAlphaToCoverageEnabled() const;
-        bool WritesDepth() const;
-        bool WritesStencil() const;
-
-        const AttachmentState* GetAttachmentState() const;
-
-        // Functions necessary for the unordered_set<RenderPipelineBase*>-based cache.
-        size_t ComputeContentHash() override;
-
-        struct EqualityFunc {
-            bool operator()(const RenderPipelineBase* a, const RenderPipelineBase* b) const;
-        };
-
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit RenderPipelineBase(DeviceBase* device);
-        void DestroyImpl() override;
-
-      private:
-        RenderPipelineBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
-        // Vertex state
-        uint32_t mVertexBufferCount;
-        ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> mAttributeLocationsUsed;
-        ityp::array<VertexAttributeLocation, VertexAttributeInfo, kMaxVertexAttributes>
-            mAttributeInfos;
-        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsed;
-        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsVertexBuffer;
-        ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mVertexBufferSlotsUsedAsInstanceBuffer;
-        ityp::array<VertexBufferSlot, VertexBufferInfo, kMaxVertexBuffers> mVertexBufferInfos;
-
-        // Attachments
-        Ref<AttachmentState> mAttachmentState;
-        ityp::array<ColorAttachmentIndex, ColorTargetState, kMaxColorAttachments> mTargets;
-        ityp::array<ColorAttachmentIndex, BlendState, kMaxColorAttachments> mTargetBlend;
-
-        // Other state
-        PrimitiveState mPrimitive;
-        DepthStencilState mDepthStencil;
-        MultisampleState mMultisample;
-        bool mClampDepth = false;
-        bool mWritesDepth = false;
-        bool mWritesStencil = false;
-    };
+    // Other state
+    PrimitiveState mPrimitive;
+    DepthStencilState mDepthStencil;
+    MultisampleState mMultisample;
+    bool mClampDepth = false;
+    bool mWritesDepth = false;
+    bool mWritesStencil = false;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ResourceHeap.h b/src/dawn/native/ResourceHeap.h
index b0ea037..1e5347b 100644
--- a/src/dawn/native/ResourceHeap.h
+++ b/src/dawn/native/ResourceHeap.h
@@ -19,12 +19,12 @@
 
 namespace dawn::native {
 
-    // Wrapper for a resource backed by a heap.
-    class ResourceHeapBase {
-      public:
-        ResourceHeapBase() = default;
-        virtual ~ResourceHeapBase() = default;
-    };
+// Wrapper for a resource backed by a heap.
+class ResourceHeapBase {
+  public:
+    ResourceHeapBase() = default;
+    virtual ~ResourceHeapBase() = default;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ResourceHeapAllocator.h b/src/dawn/native/ResourceHeapAllocator.h
index cbeb0f7..1d77f7e 100644
--- a/src/dawn/native/ResourceHeapAllocator.h
+++ b/src/dawn/native/ResourceHeapAllocator.h
@@ -22,15 +22,15 @@
 
 namespace dawn::native {
 
-    // Interface for backend allocators that create memory heaps resoruces can be suballocated in.
-    class ResourceHeapAllocator {
-      public:
-        virtual ~ResourceHeapAllocator() = default;
+// Interface for backend allocators that create memory heaps resources can be suballocated in.
+class ResourceHeapAllocator {
+  public:
+    virtual ~ResourceHeapAllocator() = default;
 
-        virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-            uint64_t size) = 0;
-        virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
-    };
+    virtual ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
+        uint64_t size) = 0;
+    virtual void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) = 0;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ResourceMemoryAllocation.cpp b/src/dawn/native/ResourceMemoryAllocation.cpp
index 8848c18..58a315e 100644
--- a/src/dawn/native/ResourceMemoryAllocation.cpp
+++ b/src/dawn/native/ResourceMemoryAllocation.cpp
@@ -17,37 +17,35 @@
 
 namespace dawn::native {
 
-    ResourceMemoryAllocation::ResourceMemoryAllocation()
-        : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {
-    }
+ResourceMemoryAllocation::ResourceMemoryAllocation()
+    : mOffset(0), mResourceHeap(nullptr), mMappedPointer(nullptr) {}
 
-    ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
-                                                       uint64_t offset,
-                                                       ResourceHeapBase* resourceHeap,
-                                                       uint8_t* mappedPointer)
-        : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {
-    }
+ResourceMemoryAllocation::ResourceMemoryAllocation(const AllocationInfo& info,
+                                                   uint64_t offset,
+                                                   ResourceHeapBase* resourceHeap,
+                                                   uint8_t* mappedPointer)
+    : mInfo(info), mOffset(offset), mResourceHeap(resourceHeap), mMappedPointer(mappedPointer) {}
 
-    ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
-        ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
-        return mResourceHeap;
-    }
+ResourceHeapBase* ResourceMemoryAllocation::GetResourceHeap() const {
+    ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+    return mResourceHeap;
+}
 
-    uint64_t ResourceMemoryAllocation::GetOffset() const {
-        ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
-        return mOffset;
-    }
+uint64_t ResourceMemoryAllocation::GetOffset() const {
+    ASSERT(mInfo.mMethod != AllocationMethod::kInvalid);
+    return mOffset;
+}
 
-    AllocationInfo ResourceMemoryAllocation::GetInfo() const {
-        return mInfo;
-    }
+AllocationInfo ResourceMemoryAllocation::GetInfo() const {
+    return mInfo;
+}
 
-    uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
-        return mMappedPointer;
-    }
+uint8_t* ResourceMemoryAllocation::GetMappedPointer() const {
+    return mMappedPointer;
+}
 
-    void ResourceMemoryAllocation::Invalidate() {
-        mResourceHeap = nullptr;
-        mInfo = {};
-    }
+void ResourceMemoryAllocation::Invalidate() {
+    mResourceHeap = nullptr;
+    mInfo = {};
+}
 }  // namespace dawn::native
diff --git a/src/dawn/native/ResourceMemoryAllocation.h b/src/dawn/native/ResourceMemoryAllocation.h
index fee117e..7a05d10 100644
--- a/src/dawn/native/ResourceMemoryAllocation.h
+++ b/src/dawn/native/ResourceMemoryAllocation.h
@@ -19,61 +19,61 @@
 
 namespace dawn::native {
 
-    class ResourceHeapBase;
+class ResourceHeapBase;
 
-    // Allocation method determines how memory was sub-divided.
-    // Used by the device to get the allocator that was responsible for the allocation.
-    enum class AllocationMethod {
-        // Memory not sub-divided.
-        kDirect,
+// Allocation method determines how memory was sub-divided.
+// Used by the device to get the allocator that was responsible for the allocation.
+enum class AllocationMethod {
+    // Memory not sub-divided.
+    kDirect,
 
-        // Memory sub-divided using one or more blocks of various sizes.
-        kSubAllocated,
+    // Memory sub-divided using one or more blocks of various sizes.
+    kSubAllocated,
 
-        // Memory was allocated outside of Dawn.
-        kExternal,
+    // Memory was allocated outside of Dawn.
+    kExternal,
 
-        // Memory not allocated or freed.
-        kInvalid
-    };
+    // Memory not allocated or freed.
+    kInvalid
+};
 
-    // Metadata that describes how the allocation was allocated.
-    struct AllocationInfo {
-        // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
-        // The block offset is within the entire allocator memory range and only required by the
-        // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
-        // allocation offset is always local to the memory.
-        uint64_t mBlockOffset = 0;
+// Metadata that describes how the allocation was allocated.
+struct AllocationInfo {
+    // AllocationInfo contains a separate offset to not confuse block vs memory offsets.
+    // The block offset is within the entire allocator memory range and only required by the
+    // buddy sub-allocator to get the corresponding memory. Unlike the block offset, the
+    // allocation offset is always local to the memory.
+    uint64_t mBlockOffset = 0;
 
-        AllocationMethod mMethod = AllocationMethod::kInvalid;
-    };
+    AllocationMethod mMethod = AllocationMethod::kInvalid;
+};
 
-    // Handle into a resource heap pool.
-    class ResourceMemoryAllocation {
-      public:
-        ResourceMemoryAllocation();
-        ResourceMemoryAllocation(const AllocationInfo& info,
-                                 uint64_t offset,
-                                 ResourceHeapBase* resourceHeap,
-                                 uint8_t* mappedPointer = nullptr);
-        virtual ~ResourceMemoryAllocation() = default;
+// Handle into a resource heap pool.
+class ResourceMemoryAllocation {
+  public:
+    ResourceMemoryAllocation();
+    ResourceMemoryAllocation(const AllocationInfo& info,
+                             uint64_t offset,
+                             ResourceHeapBase* resourceHeap,
+                             uint8_t* mappedPointer = nullptr);
+    virtual ~ResourceMemoryAllocation() = default;
 
-        ResourceMemoryAllocation(const ResourceMemoryAllocation&) = default;
-        ResourceMemoryAllocation& operator=(const ResourceMemoryAllocation&) = default;
+    ResourceMemoryAllocation(const ResourceMemoryAllocation&) = default;
+    ResourceMemoryAllocation& operator=(const ResourceMemoryAllocation&) = default;
 
-        ResourceHeapBase* GetResourceHeap() const;
-        uint64_t GetOffset() const;
-        uint8_t* GetMappedPointer() const;
-        AllocationInfo GetInfo() const;
+    ResourceHeapBase* GetResourceHeap() const;
+    uint64_t GetOffset() const;
+    uint8_t* GetMappedPointer() const;
+    AllocationInfo GetInfo() const;
 
-        virtual void Invalidate();
+    virtual void Invalidate();
 
-      private:
-        AllocationInfo mInfo;
-        uint64_t mOffset;
-        ResourceHeapBase* mResourceHeap;
-        uint8_t* mMappedPointer;
-    };
+  private:
+    AllocationInfo mInfo;
+    uint64_t mOffset;
+    ResourceHeapBase* mResourceHeap;
+    uint8_t* mMappedPointer;
+};
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_RESOURCEMEMORYALLOCATION_H_
diff --git a/src/dawn/native/RingBufferAllocator.cpp b/src/dawn/native/RingBufferAllocator.cpp
index 20e6505..d384383 100644
--- a/src/dawn/native/RingBufferAllocator.cpp
+++ b/src/dawn/native/RingBufferAllocator.cpp
@@ -30,94 +30,93 @@
 // used bytes.
 namespace dawn::native {
 
-    RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {
+RingBufferAllocator::RingBufferAllocator(uint64_t maxSize) : mMaxBlockSize(maxSize) {}
+
+void RingBufferAllocator::Deallocate(ExecutionSerial lastCompletedSerial) {
+    // Reclaim memory from previously recorded blocks.
+    for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
+        mUsedStartOffset = request.endOffset;
+        mUsedSize -= request.size;
     }
 
-    void RingBufferAllocator::Deallocate(ExecutionSerial lastCompletedSerial) {
-        // Reclaim memory from previously recorded blocks.
-        for (Request& request : mInflightRequests.IterateUpTo(lastCompletedSerial)) {
-            mUsedStartOffset = request.endOffset;
-            mUsedSize -= request.size;
-        }
+    // Dequeue previously recorded requests.
+    mInflightRequests.ClearUpTo(lastCompletedSerial);
+}
 
-        // Dequeue previously recorded requests.
-        mInflightRequests.ClearUpTo(lastCompletedSerial);
+uint64_t RingBufferAllocator::GetSize() const {
+    return mMaxBlockSize;
+}
+
+uint64_t RingBufferAllocator::GetUsedSize() const {
+    return mUsedSize;
+}
+
+bool RingBufferAllocator::Empty() const {
+    return mInflightRequests.Empty();
+}
+
+// Sub-allocate the ring-buffer by requesting a chunk of the specified size.
+// This is a serial-based resource scheme, the life-span of resources (and the allocations) get
+// tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
+// completed up to a given serial. Each sub-allocation request is tracked in the serial offset
+// queue, which identifies an existing (or new) frames-worth of resources. Internally, the
+// ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
+// in FIFO order as older frames would free resources before newer ones.
+uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, ExecutionSerial serial) {
+    // Check if the buffer is full by comparing the used size.
+    // If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
+    // subsequent sub-alloc could fail where the used size was previously adjusted to include
+    // the wasted space.
+    if (mUsedSize >= mMaxBlockSize) {
+        return kInvalidOffset;
     }
 
-    uint64_t RingBufferAllocator::GetSize() const {
-        return mMaxBlockSize;
+    // Ensure adding allocationSize does not overflow.
+    const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
+    if (allocationSize > remainingSize) {
+        return kInvalidOffset;
     }
 
-    uint64_t RingBufferAllocator::GetUsedSize() const {
-        return mUsedSize;
-    }
+    uint64_t startOffset = kInvalidOffset;
 
-    bool RingBufferAllocator::Empty() const {
-        return mInflightRequests.Empty();
-    }
-
-    // Sub-allocate the ring-buffer by requesting a chunk of the specified size.
-    // This is a serial-based resource scheme, the life-span of resources (and the allocations) get
-    // tracked by GPU progress via serials. Memory can be reused by determining if the GPU has
-    // completed up to a given serial. Each sub-allocation request is tracked in the serial offset
-    // queue, which identifies an existing (or new) frames-worth of resources. Internally, the
-    // ring-buffer maintains offsets of 3 "memory" states: Free, Reclaimed, and Used. This is done
-    // in FIFO order as older frames would free resources before newer ones.
-    uint64_t RingBufferAllocator::Allocate(uint64_t allocationSize, ExecutionSerial serial) {
-        // Check if the buffer is full by comparing the used size.
-        // If the buffer is not split where waste occurs (e.g. cannot fit new sub-alloc in front), a
-        // subsequent sub-alloc could fail where the used size was previously adjusted to include
-        // the wasted.
-        if (mUsedSize >= mMaxBlockSize) {
-            return kInvalidOffset;
-        }
-
-        // Ensure adding allocationSize does not overflow.
-        const uint64_t remainingSize = (mMaxBlockSize - mUsedSize);
-        if (allocationSize > remainingSize) {
-            return kInvalidOffset;
-        }
-
-        uint64_t startOffset = kInvalidOffset;
-
-        // Check if the buffer is NOT split (i.e sub-alloc on ends)
-        if (mUsedStartOffset <= mUsedEndOffset) {
-            // Order is important (try to sub-alloc at end first).
-            // This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
-            // wrapped).
-            if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
-                startOffset = mUsedEndOffset;
-                mUsedEndOffset += allocationSize;
-                mUsedSize += allocationSize;
-                mCurrentRequestSize += allocationSize;
-            } else if (allocationSize <= mUsedStartOffset) {  // Try to sub-alloc at front.
-                // Count the space at the end so that a subsequent
-                // sub-alloc cannot not succeed when the buffer is full.
-                const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
-
-                startOffset = 0;
-                mUsedEndOffset = allocationSize;
-                mUsedSize += requestSize;
-                mCurrentRequestSize += requestSize;
-            }
-        } else if (mUsedEndOffset + allocationSize <=
-                   mUsedStartOffset) {  // Otherwise, buffer is split where sub-alloc must be
-                                        // in-between.
+    // Check if the buffer is NOT split (i.e. sub-alloc on ends)
+    if (mUsedStartOffset <= mUsedEndOffset) {
+        // Order is important (try to sub-alloc at end first).
+        // This is due to FIFO order where sub-allocs are inserted from left-to-right (when not
+        // wrapped).
+        if (mUsedEndOffset + allocationSize <= mMaxBlockSize) {
             startOffset = mUsedEndOffset;
             mUsedEndOffset += allocationSize;
             mUsedSize += allocationSize;
             mCurrentRequestSize += allocationSize;
+        } else if (allocationSize <= mUsedStartOffset) {  // Try to sub-alloc at front.
+            // Count the space at the end so that a subsequent
+            // sub-alloc cannot succeed when the buffer is full.
+            const uint64_t requestSize = (mMaxBlockSize - mUsedEndOffset) + allocationSize;
+
+            startOffset = 0;
+            mUsedEndOffset = allocationSize;
+            mUsedSize += requestSize;
+            mCurrentRequestSize += requestSize;
         }
-
-        if (startOffset != kInvalidOffset) {
-            Request request;
-            request.endOffset = mUsedEndOffset;
-            request.size = mCurrentRequestSize;
-
-            mInflightRequests.Enqueue(std::move(request), serial);
-            mCurrentRequestSize = 0;  // reset
-        }
-
-        return startOffset;
+    } else if (mUsedEndOffset + allocationSize <=
+               mUsedStartOffset) {  // Otherwise, buffer is split where sub-alloc must be
+                                    // in-between.
+        startOffset = mUsedEndOffset;
+        mUsedEndOffset += allocationSize;
+        mUsedSize += allocationSize;
+        mCurrentRequestSize += allocationSize;
     }
+
+    if (startOffset != kInvalidOffset) {
+        Request request;
+        request.endOffset = mUsedEndOffset;
+        request.size = mCurrentRequestSize;
+
+        mInflightRequests.Enqueue(std::move(request), serial);
+        mCurrentRequestSize = 0;  // reset
+    }
+
+    return startOffset;
+}
 }  // namespace dawn::native
diff --git a/src/dawn/native/RingBufferAllocator.h b/src/dawn/native/RingBufferAllocator.h
index e39555b..cbb01b7 100644
--- a/src/dawn/native/RingBufferAllocator.h
+++ b/src/dawn/native/RingBufferAllocator.h
@@ -24,40 +24,39 @@
 // RingBufferAllocator is the front-end implementation used to manage a ring buffer in GPU memory.
 namespace dawn::native {
 
-    class RingBufferAllocator {
-      public:
-        RingBufferAllocator() = default;
-        explicit RingBufferAllocator(uint64_t maxSize);
-        ~RingBufferAllocator() = default;
-        RingBufferAllocator(const RingBufferAllocator&) = default;
-        RingBufferAllocator& operator=(const RingBufferAllocator&) = default;
+class RingBufferAllocator {
+  public:
+    RingBufferAllocator() = default;
+    explicit RingBufferAllocator(uint64_t maxSize);
+    ~RingBufferAllocator() = default;
+    RingBufferAllocator(const RingBufferAllocator&) = default;
+    RingBufferAllocator& operator=(const RingBufferAllocator&) = default;
 
-        uint64_t Allocate(uint64_t allocationSize, ExecutionSerial serial);
-        void Deallocate(ExecutionSerial lastCompletedSerial);
+    uint64_t Allocate(uint64_t allocationSize, ExecutionSerial serial);
+    void Deallocate(ExecutionSerial lastCompletedSerial);
 
-        uint64_t GetSize() const;
-        bool Empty() const;
-        uint64_t GetUsedSize() const;
+    uint64_t GetSize() const;
+    bool Empty() const;
+    uint64_t GetUsedSize() const;
 
-        static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
+    static constexpr uint64_t kInvalidOffset = std::numeric_limits<uint64_t>::max();
 
-      private:
-        struct Request {
-            uint64_t endOffset;
-            uint64_t size;
-        };
-
-        SerialQueue<ExecutionSerial, Request>
-            mInflightRequests;  // Queue of the recorded sub-alloc requests
-                                // (e.g. frame of resources).
-
-        uint64_t mUsedEndOffset = 0;    // Tail of used sub-alloc requests (in bytes).
-        uint64_t mUsedStartOffset = 0;  // Head of used sub-alloc requests (in bytes).
-        uint64_t mMaxBlockSize = 0;     // Max size of the ring buffer (in bytes).
-        uint64_t mUsedSize = 0;  // Size of the sub-alloc requests (in bytes) of the ring buffer.
-        uint64_t mCurrentRequestSize =
-            0;  // Size of the sub-alloc requests (in bytes) of the current serial.
+  private:
+    struct Request {
+        uint64_t endOffset;
+        uint64_t size;
     };
+
+    SerialQueue<ExecutionSerial, Request> mInflightRequests;  // Queue of the recorded sub-alloc
+                                                              // requests (e.g. frame of resources).
+
+    uint64_t mUsedEndOffset = 0;    // Tail of used sub-alloc requests (in bytes).
+    uint64_t mUsedStartOffset = 0;  // Head of used sub-alloc requests (in bytes).
+    uint64_t mMaxBlockSize = 0;     // Max size of the ring buffer (in bytes).
+    uint64_t mUsedSize = 0;         // Size of the sub-alloc requests (in bytes) of the ring buffer.
+    uint64_t mCurrentRequestSize =
+        0;  // Size of the sub-alloc requests (in bytes) of the current serial.
+};
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_RINGBUFFERALLOCATOR_H_
diff --git a/src/dawn/native/Sampler.cpp b/src/dawn/native/Sampler.cpp
index d5861c3..7e436fc 100644
--- a/src/dawn/native/Sampler.cpp
+++ b/src/dawn/native/Sampler.cpp
@@ -22,132 +22,129 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
+MaybeError ValidateSamplerDescriptor(DeviceBase*, const SamplerDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr");
 
-        DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
-                        "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
-                        descriptor->lodMaxClamp);
+    DAWN_INVALID_IF(std::isnan(descriptor->lodMinClamp) || std::isnan(descriptor->lodMaxClamp),
+                    "LOD clamp bounds [%f, %f] contain a NaN.", descriptor->lodMinClamp,
+                    descriptor->lodMaxClamp);
 
-        DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
-                        "LOD clamp bounds [%f, %f] contain contain a negative number.",
-                        descriptor->lodMinClamp, descriptor->lodMaxClamp);
+    DAWN_INVALID_IF(descriptor->lodMinClamp < 0 || descriptor->lodMaxClamp < 0,
+                    "LOD clamp bounds [%f, %f] contain contain a negative number.",
+                    descriptor->lodMinClamp, descriptor->lodMaxClamp);
 
-        DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
-                        "LOD min clamp (%f) is larger than the max clamp (%f).",
-                        descriptor->lodMinClamp, descriptor->lodMaxClamp);
+    DAWN_INVALID_IF(descriptor->lodMinClamp > descriptor->lodMaxClamp,
+                    "LOD min clamp (%f) is larger than the max clamp (%f).",
+                    descriptor->lodMinClamp, descriptor->lodMaxClamp);
 
-        if (descriptor->maxAnisotropy > 1) {
-            DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
-                                descriptor->magFilter != wgpu::FilterMode::Linear ||
-                                descriptor->mipmapFilter != wgpu::FilterMode::Linear,
-                            "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
-                            "while using anisotropic filter (maxAnisotropy is %f)",
-                            descriptor->magFilter, descriptor->minFilter, descriptor->mipmapFilter,
-                            wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
-        } else if (descriptor->maxAnisotropy == 0u) {
-            return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
-                                                descriptor->maxAnisotropy);
-        }
-
-        DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
-        DAWN_TRY(ValidateFilterMode(descriptor->magFilter));
-        DAWN_TRY(ValidateFilterMode(descriptor->mipmapFilter));
-        DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
-        DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
-        DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
-
-        // CompareFunction::Undefined is tagged as invalid because it can't be used, except for the
-        // SamplerDescriptor where it is a special value that means the sampler is not a
-        // comparison-sampler.
-        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
-            DAWN_TRY(ValidateCompareFunction(descriptor->compare));
-        }
-
-        return {};
+    if (descriptor->maxAnisotropy > 1) {
+        DAWN_INVALID_IF(descriptor->minFilter != wgpu::FilterMode::Linear ||
+                            descriptor->magFilter != wgpu::FilterMode::Linear ||
+                            descriptor->mipmapFilter != wgpu::FilterMode::Linear,
+                        "One of minFilter (%s), magFilter (%s) or mipmapFilter (%s) is not %s "
+                        "while using anisotropic filter (maxAnisotropy is %f)",
+                        descriptor->magFilter, descriptor->minFilter, descriptor->mipmapFilter,
+                        wgpu::FilterMode::Linear, descriptor->maxAnisotropy);
+    } else if (descriptor->maxAnisotropy == 0u) {
+        return DAWN_FORMAT_VALIDATION_ERROR("Max anisotropy (%f) is less than 1.",
+                                            descriptor->maxAnisotropy);
     }
 
-    // SamplerBase
+    DAWN_TRY(ValidateFilterMode(descriptor->minFilter));
+    DAWN_TRY(ValidateFilterMode(descriptor->magFilter));
+    DAWN_TRY(ValidateFilterMode(descriptor->mipmapFilter));
+    DAWN_TRY(ValidateAddressMode(descriptor->addressModeU));
+    DAWN_TRY(ValidateAddressMode(descriptor->addressModeV));
+    DAWN_TRY(ValidateAddressMode(descriptor->addressModeW));
 
-    SamplerBase::SamplerBase(DeviceBase* device,
-                             const SamplerDescriptor* descriptor,
-                             ApiObjectBase::UntrackedByDeviceTag tag)
-        : ApiObjectBase(device, descriptor->label),
-          mAddressModeU(descriptor->addressModeU),
-          mAddressModeV(descriptor->addressModeV),
-          mAddressModeW(descriptor->addressModeW),
-          mMagFilter(descriptor->magFilter),
-          mMinFilter(descriptor->minFilter),
-          mMipmapFilter(descriptor->mipmapFilter),
-          mLodMinClamp(descriptor->lodMinClamp),
-          mLodMaxClamp(descriptor->lodMaxClamp),
-          mCompareFunction(descriptor->compare),
-          mMaxAnisotropy(descriptor->maxAnisotropy) {
+    // CompareFunction::Undefined is tagged as invalid because it can't be used, except for the
+    // SamplerDescriptor where it is a special value that means the sampler is not a
+    // comparison-sampler.
+    if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+        DAWN_TRY(ValidateCompareFunction(descriptor->compare));
     }
 
-    SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
-        : SamplerBase(device, descriptor, kUntrackedByDevice) {
-        TrackInDevice();
+    return {};
+}
+
+// SamplerBase
+
+SamplerBase::SamplerBase(DeviceBase* device,
+                         const SamplerDescriptor* descriptor,
+                         ApiObjectBase::UntrackedByDeviceTag tag)
+    : ApiObjectBase(device, descriptor->label),
+      mAddressModeU(descriptor->addressModeU),
+      mAddressModeV(descriptor->addressModeV),
+      mAddressModeW(descriptor->addressModeW),
+      mMagFilter(descriptor->magFilter),
+      mMinFilter(descriptor->minFilter),
+      mMipmapFilter(descriptor->mipmapFilter),
+      mLodMinClamp(descriptor->lodMinClamp),
+      mLodMaxClamp(descriptor->lodMaxClamp),
+      mCompareFunction(descriptor->compare),
+      mMaxAnisotropy(descriptor->maxAnisotropy) {}
+
+SamplerBase::SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor)
+    : SamplerBase(device, descriptor, kUntrackedByDevice) {
+    TrackInDevice();
+}
+
+SamplerBase::SamplerBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+SamplerBase::~SamplerBase() = default;
+
+void SamplerBase::DestroyImpl() {
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheSampler(this);
+    }
+}
+
+// static
+SamplerBase* SamplerBase::MakeError(DeviceBase* device) {
+    return new SamplerBase(device, ObjectBase::kError);
+}
+
+ObjectType SamplerBase::GetType() const {
+    return ObjectType::Sampler;
+}
+
+bool SamplerBase::IsComparison() const {
+    return mCompareFunction != wgpu::CompareFunction::Undefined;
+}
+
+bool SamplerBase::IsFiltering() const {
+    return mMinFilter == wgpu::FilterMode::Linear || mMagFilter == wgpu::FilterMode::Linear ||
+           mMipmapFilter == wgpu::FilterMode::Linear;
+}
+
+size_t SamplerBase::ComputeContentHash() {
+    ObjectContentHasher recorder;
+    recorder.Record(mAddressModeU, mAddressModeV, mAddressModeW, mMagFilter, mMinFilter,
+                    mMipmapFilter, mLodMinClamp, mLodMaxClamp, mCompareFunction, mMaxAnisotropy);
+    return recorder.GetContentHash();
+}
+
+bool SamplerBase::EqualityFunc::operator()(const SamplerBase* a, const SamplerBase* b) const {
+    if (a == b) {
+        return true;
     }
 
-    SamplerBase::SamplerBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
+    ASSERT(!std::isnan(a->mLodMinClamp));
+    ASSERT(!std::isnan(b->mLodMinClamp));
+    ASSERT(!std::isnan(a->mLodMaxClamp));
+    ASSERT(!std::isnan(b->mLodMaxClamp));
 
-    SamplerBase::SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
-    }
-
-    SamplerBase::~SamplerBase() = default;
-
-    void SamplerBase::DestroyImpl() {
-        if (IsCachedReference()) {
-            // Do not uncache the actual cached object if we are a blueprint.
-            GetDevice()->UncacheSampler(this);
-        }
-    }
-
-    // static
-    SamplerBase* SamplerBase::MakeError(DeviceBase* device) {
-        return new SamplerBase(device, ObjectBase::kError);
-    }
-
-    ObjectType SamplerBase::GetType() const {
-        return ObjectType::Sampler;
-    }
-
-    bool SamplerBase::IsComparison() const {
-        return mCompareFunction != wgpu::CompareFunction::Undefined;
-    }
-
-    bool SamplerBase::IsFiltering() const {
-        return mMinFilter == wgpu::FilterMode::Linear || mMagFilter == wgpu::FilterMode::Linear ||
-               mMipmapFilter == wgpu::FilterMode::Linear;
-    }
-
-    size_t SamplerBase::ComputeContentHash() {
-        ObjectContentHasher recorder;
-        recorder.Record(mAddressModeU, mAddressModeV, mAddressModeW, mMagFilter, mMinFilter,
-                        mMipmapFilter, mLodMinClamp, mLodMaxClamp, mCompareFunction,
-                        mMaxAnisotropy);
-        return recorder.GetContentHash();
-    }
-
-    bool SamplerBase::EqualityFunc::operator()(const SamplerBase* a, const SamplerBase* b) const {
-        if (a == b) {
-            return true;
-        }
-
-        ASSERT(!std::isnan(a->mLodMinClamp));
-        ASSERT(!std::isnan(b->mLodMinClamp));
-        ASSERT(!std::isnan(a->mLodMaxClamp));
-        ASSERT(!std::isnan(b->mLodMaxClamp));
-
-        return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
-               a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
-               a->mMinFilter == b->mMinFilter && a->mMipmapFilter == b->mMipmapFilter &&
-               a->mLodMinClamp == b->mLodMinClamp && a->mLodMaxClamp == b->mLodMaxClamp &&
-               a->mCompareFunction == b->mCompareFunction && a->mMaxAnisotropy == b->mMaxAnisotropy;
-    }
+    return a->mAddressModeU == b->mAddressModeU && a->mAddressModeV == b->mAddressModeV &&
+           a->mAddressModeW == b->mAddressModeW && a->mMagFilter == b->mMagFilter &&
+           a->mMinFilter == b->mMinFilter && a->mMipmapFilter == b->mMipmapFilter &&
+           a->mLodMinClamp == b->mLodMinClamp && a->mLodMaxClamp == b->mLodMaxClamp &&
+           a->mCompareFunction == b->mCompareFunction && a->mMaxAnisotropy == b->mMaxAnisotropy;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Sampler.h b/src/dawn/native/Sampler.h
index 73391a1..eac3446 100644
--- a/src/dawn/native/Sampler.h
+++ b/src/dawn/native/Sampler.h
@@ -24,57 +24,55 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
+MaybeError ValidateSamplerDescriptor(DeviceBase* device, const SamplerDescriptor* descriptor);
 
-    class SamplerBase : public ApiObjectBase, public CachedObject {
-      public:
-        SamplerBase(DeviceBase* device,
-                    const SamplerDescriptor* descriptor,
-                    ApiObjectBase::UntrackedByDeviceTag tag);
-        SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
-        ~SamplerBase() override;
+class SamplerBase : public ApiObjectBase, public CachedObject {
+  public:
+    SamplerBase(DeviceBase* device,
+                const SamplerDescriptor* descriptor,
+                ApiObjectBase::UntrackedByDeviceTag tag);
+    SamplerBase(DeviceBase* device, const SamplerDescriptor* descriptor);
+    ~SamplerBase() override;
 
-        static SamplerBase* MakeError(DeviceBase* device);
+    static SamplerBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        bool IsComparison() const;
-        bool IsFiltering() const;
+    bool IsComparison() const;
+    bool IsFiltering() const;
 
-        // Functions necessary for the unordered_set<SamplerBase*>-based cache.
-        size_t ComputeContentHash() override;
+    // Functions necessary for the unordered_set<SamplerBase*>-based cache.
+    size_t ComputeContentHash() override;
 
-        struct EqualityFunc {
-            bool operator()(const SamplerBase* a, const SamplerBase* b) const;
-        };
-
-        uint16_t GetMaxAnisotropy() const {
-            return mMaxAnisotropy;
-        }
-
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit SamplerBase(DeviceBase* device);
-        void DestroyImpl() override;
-
-      private:
-        SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
-        // TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
-        wgpu::AddressMode mAddressModeU;
-        wgpu::AddressMode mAddressModeV;
-        wgpu::AddressMode mAddressModeW;
-        wgpu::FilterMode mMagFilter;
-        wgpu::FilterMode mMinFilter;
-        wgpu::FilterMode mMipmapFilter;
-        float mLodMinClamp;
-        float mLodMaxClamp;
-        wgpu::CompareFunction mCompareFunction;
-        uint16_t mMaxAnisotropy;
+    struct EqualityFunc {
+        bool operator()(const SamplerBase* a, const SamplerBase* b) const;
     };
 
+    uint16_t GetMaxAnisotropy() const { return mMaxAnisotropy; }
+
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit SamplerBase(DeviceBase* device);
+    void DestroyImpl() override;
+
+  private:
+    SamplerBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+
+    // TODO(cwallez@chromium.org): Store a crypto hash of the items instead?
+    wgpu::AddressMode mAddressModeU;
+    wgpu::AddressMode mAddressModeV;
+    wgpu::AddressMode mAddressModeW;
+    wgpu::FilterMode mMagFilter;
+    wgpu::FilterMode mMinFilter;
+    wgpu::FilterMode mMipmapFilter;
+    float mLodMinClamp;
+    float mLodMaxClamp;
+    wgpu::CompareFunction mCompareFunction;
+    uint16_t mMaxAnisotropy;
+};
+
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_SAMPLER_H_
diff --git a/src/dawn/native/ScratchBuffer.cpp b/src/dawn/native/ScratchBuffer.cpp
index be53683..7902555 100644
--- a/src/dawn/native/ScratchBuffer.cpp
+++ b/src/dawn/native/ScratchBuffer.cpp
@@ -18,30 +18,29 @@
 
 namespace dawn::native {
 
-    ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
-        : mDevice(device), mUsage(usage) {
-    }
+ScratchBuffer::ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage)
+    : mDevice(device), mUsage(usage) {}
 
-    ScratchBuffer::~ScratchBuffer() = default;
+ScratchBuffer::~ScratchBuffer() = default;
 
-    void ScratchBuffer::Reset() {
-        mBuffer = nullptr;
-    }
+void ScratchBuffer::Reset() {
+    mBuffer = nullptr;
+}
 
-    MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
-        if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
-            BufferDescriptor descriptor;
-            descriptor.size = capacity;
-            descriptor.usage = mUsage;
-            DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
-            mBuffer->SetIsDataInitialized();
-        }
-        return {};
+MaybeError ScratchBuffer::EnsureCapacity(uint64_t capacity) {
+    if (!mBuffer.Get() || mBuffer->GetSize() < capacity) {
+        BufferDescriptor descriptor;
+        descriptor.size = capacity;
+        descriptor.usage = mUsage;
+        DAWN_TRY_ASSIGN(mBuffer, mDevice->CreateBuffer(&descriptor));
+        mBuffer->SetIsDataInitialized();
     }
+    return {};
+}
 
-    BufferBase* ScratchBuffer::GetBuffer() const {
-        ASSERT(mBuffer.Get() != nullptr);
-        return mBuffer.Get();
-    }
+BufferBase* ScratchBuffer::GetBuffer() const {
+    ASSERT(mBuffer.Get() != nullptr);
+    return mBuffer.Get();
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/ScratchBuffer.h b/src/dawn/native/ScratchBuffer.h
index 9ecd81c..4cfd1b9 100644
--- a/src/dawn/native/ScratchBuffer.h
+++ b/src/dawn/native/ScratchBuffer.h
@@ -22,33 +22,33 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    // A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
-    // commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
-    // be careful not to exposed uninitialized bytes to client shaders.
-    class ScratchBuffer {
-      public:
-        // Note that this object does not retain a reference to `device`, so `device` MUST outlive
-        // this object.
-        ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
-        ~ScratchBuffer();
+// A ScratchBuffer is a lazily allocated and lazily grown GPU buffer for intermittent use by
+// commands in the GPU queue. Note that scratch buffers are not zero-initialized, so users must
+// be careful not to expose uninitialized bytes to client shaders.
+class ScratchBuffer {
+  public:
+    // Note that this object does not retain a reference to `device`, so `device` MUST outlive
+    // this object.
+    ScratchBuffer(DeviceBase* device, wgpu::BufferUsage usage);
+    ~ScratchBuffer();
 
-        // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
-        // fresh buffer.
-        void Reset();
+    // Resets this ScratchBuffer, guaranteeing that the next EnsureCapacity call allocates a
+    // fresh buffer.
+    void Reset();
 
-        // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
-        // `capacity` bytes of storage.
-        MaybeError EnsureCapacity(uint64_t capacity);
+    // Ensures that this ScratchBuffer is backed by a buffer on `device` with at least
+    // `capacity` bytes of storage.
+    MaybeError EnsureCapacity(uint64_t capacity);
 
-        BufferBase* GetBuffer() const;
+    BufferBase* GetBuffer() const;
 
-      private:
-        DeviceBase* const mDevice;
-        const wgpu::BufferUsage mUsage;
-        Ref<BufferBase> mBuffer;
-    };
+  private:
+    DeviceBase* const mDevice;
+    const wgpu::BufferUsage mUsage;
+    Ref<BufferBase> mBuffer;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ShaderModule.cpp b/src/dawn/native/ShaderModule.cpp
index fe8984d..20d2728 100644
--- a/src/dawn/native/ShaderModule.cpp
+++ b/src/dawn/native/ShaderModule.cpp
@@ -35,590 +35,572 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
-            switch (format) {
-                case wgpu::VertexFormat::Uint8x2:
-                    return tint::transform::VertexFormat::kUint8x2;
-                case wgpu::VertexFormat::Uint8x4:
-                    return tint::transform::VertexFormat::kUint8x4;
-                case wgpu::VertexFormat::Sint8x2:
-                    return tint::transform::VertexFormat::kSint8x2;
-                case wgpu::VertexFormat::Sint8x4:
-                    return tint::transform::VertexFormat::kSint8x4;
-                case wgpu::VertexFormat::Unorm8x2:
-                    return tint::transform::VertexFormat::kUnorm8x2;
-                case wgpu::VertexFormat::Unorm8x4:
-                    return tint::transform::VertexFormat::kUnorm8x4;
-                case wgpu::VertexFormat::Snorm8x2:
-                    return tint::transform::VertexFormat::kSnorm8x2;
-                case wgpu::VertexFormat::Snorm8x4:
-                    return tint::transform::VertexFormat::kSnorm8x4;
-                case wgpu::VertexFormat::Uint16x2:
-                    return tint::transform::VertexFormat::kUint16x2;
-                case wgpu::VertexFormat::Uint16x4:
-                    return tint::transform::VertexFormat::kUint16x4;
-                case wgpu::VertexFormat::Sint16x2:
-                    return tint::transform::VertexFormat::kSint16x2;
-                case wgpu::VertexFormat::Sint16x4:
-                    return tint::transform::VertexFormat::kSint16x4;
-                case wgpu::VertexFormat::Unorm16x2:
-                    return tint::transform::VertexFormat::kUnorm16x2;
-                case wgpu::VertexFormat::Unorm16x4:
-                    return tint::transform::VertexFormat::kUnorm16x4;
-                case wgpu::VertexFormat::Snorm16x2:
-                    return tint::transform::VertexFormat::kSnorm16x2;
-                case wgpu::VertexFormat::Snorm16x4:
-                    return tint::transform::VertexFormat::kSnorm16x4;
-                case wgpu::VertexFormat::Float16x2:
-                    return tint::transform::VertexFormat::kFloat16x2;
-                case wgpu::VertexFormat::Float16x4:
-                    return tint::transform::VertexFormat::kFloat16x4;
-                case wgpu::VertexFormat::Float32:
-                    return tint::transform::VertexFormat::kFloat32;
-                case wgpu::VertexFormat::Float32x2:
-                    return tint::transform::VertexFormat::kFloat32x2;
-                case wgpu::VertexFormat::Float32x3:
-                    return tint::transform::VertexFormat::kFloat32x3;
-                case wgpu::VertexFormat::Float32x4:
-                    return tint::transform::VertexFormat::kFloat32x4;
-                case wgpu::VertexFormat::Uint32:
-                    return tint::transform::VertexFormat::kUint32;
-                case wgpu::VertexFormat::Uint32x2:
-                    return tint::transform::VertexFormat::kUint32x2;
-                case wgpu::VertexFormat::Uint32x3:
-                    return tint::transform::VertexFormat::kUint32x3;
-                case wgpu::VertexFormat::Uint32x4:
-                    return tint::transform::VertexFormat::kUint32x4;
-                case wgpu::VertexFormat::Sint32:
-                    return tint::transform::VertexFormat::kSint32;
-                case wgpu::VertexFormat::Sint32x2:
-                    return tint::transform::VertexFormat::kSint32x2;
-                case wgpu::VertexFormat::Sint32x3:
-                    return tint::transform::VertexFormat::kSint32x3;
-                case wgpu::VertexFormat::Sint32x4:
-                    return tint::transform::VertexFormat::kSint32x4;
+tint::transform::VertexFormat ToTintVertexFormat(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Uint8x2:
+            return tint::transform::VertexFormat::kUint8x2;
+        case wgpu::VertexFormat::Uint8x4:
+            return tint::transform::VertexFormat::kUint8x4;
+        case wgpu::VertexFormat::Sint8x2:
+            return tint::transform::VertexFormat::kSint8x2;
+        case wgpu::VertexFormat::Sint8x4:
+            return tint::transform::VertexFormat::kSint8x4;
+        case wgpu::VertexFormat::Unorm8x2:
+            return tint::transform::VertexFormat::kUnorm8x2;
+        case wgpu::VertexFormat::Unorm8x4:
+            return tint::transform::VertexFormat::kUnorm8x4;
+        case wgpu::VertexFormat::Snorm8x2:
+            return tint::transform::VertexFormat::kSnorm8x2;
+        case wgpu::VertexFormat::Snorm8x4:
+            return tint::transform::VertexFormat::kSnorm8x4;
+        case wgpu::VertexFormat::Uint16x2:
+            return tint::transform::VertexFormat::kUint16x2;
+        case wgpu::VertexFormat::Uint16x4:
+            return tint::transform::VertexFormat::kUint16x4;
+        case wgpu::VertexFormat::Sint16x2:
+            return tint::transform::VertexFormat::kSint16x2;
+        case wgpu::VertexFormat::Sint16x4:
+            return tint::transform::VertexFormat::kSint16x4;
+        case wgpu::VertexFormat::Unorm16x2:
+            return tint::transform::VertexFormat::kUnorm16x2;
+        case wgpu::VertexFormat::Unorm16x4:
+            return tint::transform::VertexFormat::kUnorm16x4;
+        case wgpu::VertexFormat::Snorm16x2:
+            return tint::transform::VertexFormat::kSnorm16x2;
+        case wgpu::VertexFormat::Snorm16x4:
+            return tint::transform::VertexFormat::kSnorm16x4;
+        case wgpu::VertexFormat::Float16x2:
+            return tint::transform::VertexFormat::kFloat16x2;
+        case wgpu::VertexFormat::Float16x4:
+            return tint::transform::VertexFormat::kFloat16x4;
+        case wgpu::VertexFormat::Float32:
+            return tint::transform::VertexFormat::kFloat32;
+        case wgpu::VertexFormat::Float32x2:
+            return tint::transform::VertexFormat::kFloat32x2;
+        case wgpu::VertexFormat::Float32x3:
+            return tint::transform::VertexFormat::kFloat32x3;
+        case wgpu::VertexFormat::Float32x4:
+            return tint::transform::VertexFormat::kFloat32x4;
+        case wgpu::VertexFormat::Uint32:
+            return tint::transform::VertexFormat::kUint32;
+        case wgpu::VertexFormat::Uint32x2:
+            return tint::transform::VertexFormat::kUint32x2;
+        case wgpu::VertexFormat::Uint32x3:
+            return tint::transform::VertexFormat::kUint32x3;
+        case wgpu::VertexFormat::Uint32x4:
+            return tint::transform::VertexFormat::kUint32x4;
+        case wgpu::VertexFormat::Sint32:
+            return tint::transform::VertexFormat::kSint32;
+        case wgpu::VertexFormat::Sint32x2:
+            return tint::transform::VertexFormat::kSint32x2;
+        case wgpu::VertexFormat::Sint32x3:
+            return tint::transform::VertexFormat::kSint32x3;
+        case wgpu::VertexFormat::Sint32x4:
+            return tint::transform::VertexFormat::kSint32x4;
 
-                case wgpu::VertexFormat::Undefined:
-                    break;
-            }
+        case wgpu::VertexFormat::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+tint::transform::VertexStepMode ToTintVertexStepMode(wgpu::VertexStepMode mode) {
+    switch (mode) {
+        case wgpu::VertexStepMode::Vertex:
+            return tint::transform::VertexStepMode::kVertex;
+        case wgpu::VertexStepMode::Instance:
+            return tint::transform::VertexStepMode::kInstance;
+    }
+    UNREACHABLE();
+}
+
+ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(tint::ast::PipelineStage stage) {
+    switch (stage) {
+        case tint::ast::PipelineStage::kVertex:
+            return SingleShaderStage::Vertex;
+        case tint::ast::PipelineStage::kFragment:
+            return SingleShaderStage::Fragment;
+        case tint::ast::PipelineStage::kCompute:
+            return SingleShaderStage::Compute;
+        case tint::ast::PipelineStage::kNone:
+            break;
+    }
+    UNREACHABLE();
+}
+
+BindingInfoType TintResourceTypeToBindingInfoType(
+    tint::inspector::ResourceBinding::ResourceType type) {
+    switch (type) {
+        case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+        case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+        case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+            return BindingInfoType::Buffer;
+        case tint::inspector::ResourceBinding::ResourceType::kSampler:
+        case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+            return BindingInfoType::Sampler;
+        case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
+        case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
+        case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
+        case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
+            return BindingInfoType::Texture;
+        case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+            return BindingInfoType::StorageTexture;
+        case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
+            return BindingInfoType::ExternalTexture;
+
+        default:
             UNREACHABLE();
-        }
+            return BindingInfoType::Buffer;
+    }
+}
 
-        tint::transform::VertexStepMode ToTintVertexStepMode(wgpu::VertexStepMode mode) {
-            switch (mode) {
-                case wgpu::VertexStepMode::Vertex:
-                    return tint::transform::VertexStepMode::kVertex;
-                case wgpu::VertexStepMode::Instance:
-                    return tint::transform::VertexStepMode::kInstance;
-            }
+wgpu::TextureFormat TintImageFormatToTextureFormat(
+    tint::inspector::ResourceBinding::TexelFormat format) {
+    switch (format) {
+        case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
+            return wgpu::TextureFormat::R32Uint;
+        case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
+            return wgpu::TextureFormat::R32Sint;
+        case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
+            return wgpu::TextureFormat::R32Float;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
+            return wgpu::TextureFormat::RGBA8Unorm;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
+            return wgpu::TextureFormat::RGBA8Snorm;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
+            return wgpu::TextureFormat::RGBA8Uint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
+            return wgpu::TextureFormat::RGBA8Sint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
+            return wgpu::TextureFormat::RG32Uint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
+            return wgpu::TextureFormat::RG32Sint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
+            return wgpu::TextureFormat::RG32Float;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
+            return wgpu::TextureFormat::RGBA16Uint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
+            return wgpu::TextureFormat::RGBA16Sint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
+            return wgpu::TextureFormat::RGBA16Float;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
+            return wgpu::TextureFormat::RGBA32Uint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
+            return wgpu::TextureFormat::RGBA32Sint;
+        case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
+            return wgpu::TextureFormat::RGBA32Float;
+        case tint::inspector::ResourceBinding::TexelFormat::kNone:
+            return wgpu::TextureFormat::Undefined;
+
+        default:
             UNREACHABLE();
-        }
+            return wgpu::TextureFormat::Undefined;
+    }
+}
 
-        ResultOrError<SingleShaderStage> TintPipelineStageToShaderStage(
-            tint::ast::PipelineStage stage) {
-            switch (stage) {
-                case tint::ast::PipelineStage::kVertex:
-                    return SingleShaderStage::Vertex;
-                case tint::ast::PipelineStage::kFragment:
-                    return SingleShaderStage::Fragment;
-                case tint::ast::PipelineStage::kCompute:
-                    return SingleShaderStage::Compute;
-                case tint::ast::PipelineStage::kNone:
-                    break;
-            }
-            UNREACHABLE();
-        }
+wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
+    tint::inspector::ResourceBinding::TextureDimension dim) {
+    switch (dim) {
+        case tint::inspector::ResourceBinding::TextureDimension::k1d:
+            return wgpu::TextureViewDimension::e1D;
+        case tint::inspector::ResourceBinding::TextureDimension::k2d:
+            return wgpu::TextureViewDimension::e2D;
+        case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
+            return wgpu::TextureViewDimension::e2DArray;
+        case tint::inspector::ResourceBinding::TextureDimension::k3d:
+            return wgpu::TextureViewDimension::e3D;
+        case tint::inspector::ResourceBinding::TextureDimension::kCube:
+            return wgpu::TextureViewDimension::Cube;
+        case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
+            return wgpu::TextureViewDimension::CubeArray;
+        case tint::inspector::ResourceBinding::TextureDimension::kNone:
+            return wgpu::TextureViewDimension::Undefined;
+    }
+    UNREACHABLE();
+}
 
-        BindingInfoType TintResourceTypeToBindingInfoType(
-            tint::inspector::ResourceBinding::ResourceType type) {
-            switch (type) {
-                case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
-                case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
-                case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
-                    return BindingInfoType::Buffer;
-                case tint::inspector::ResourceBinding::ResourceType::kSampler:
-                case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
-                    return BindingInfoType::Sampler;
-                case tint::inspector::ResourceBinding::ResourceType::kSampledTexture:
-                case tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture:
-                case tint::inspector::ResourceBinding::ResourceType::kDepthTexture:
-                case tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture:
-                    return BindingInfoType::Texture;
-                case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
-                    return BindingInfoType::StorageTexture;
-                case tint::inspector::ResourceBinding::ResourceType::kExternalTexture:
-                    return BindingInfoType::ExternalTexture;
+SampleTypeBit TintSampledKindToSampleTypeBit(tint::inspector::ResourceBinding::SampledKind s) {
+    switch (s) {
+        case tint::inspector::ResourceBinding::SampledKind::kSInt:
+            return SampleTypeBit::Sint;
+        case tint::inspector::ResourceBinding::SampledKind::kUInt:
+            return SampleTypeBit::Uint;
+        case tint::inspector::ResourceBinding::SampledKind::kFloat:
+            return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
+        case tint::inspector::ResourceBinding::SampledKind::kUnknown:
+            return SampleTypeBit::None;
+    }
+    UNREACHABLE();
+}
 
-                default:
-                    UNREACHABLE();
-                    return BindingInfoType::Buffer;
-            }
-        }
+ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
+    tint::inspector::ComponentType type) {
+    switch (type) {
+        case tint::inspector::ComponentType::kFloat:
+            return wgpu::TextureComponentType::Float;
+        case tint::inspector::ComponentType::kSInt:
+            return wgpu::TextureComponentType::Sint;
+        case tint::inspector::ComponentType::kUInt:
+            return wgpu::TextureComponentType::Uint;
+        case tint::inspector::ComponentType::kUnknown:
+            return DAWN_VALIDATION_ERROR("Attempted to convert 'Unknown' component type from Tint");
+    }
+    UNREACHABLE();
+}
 
-        wgpu::TextureFormat TintImageFormatToTextureFormat(
-            tint::inspector::ResourceBinding::TexelFormat format) {
-            switch (format) {
-                case tint::inspector::ResourceBinding::TexelFormat::kR32Uint:
-                    return wgpu::TextureFormat::R32Uint;
-                case tint::inspector::ResourceBinding::TexelFormat::kR32Sint:
-                    return wgpu::TextureFormat::R32Sint;
-                case tint::inspector::ResourceBinding::TexelFormat::kR32Float:
-                    return wgpu::TextureFormat::R32Float;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Unorm:
-                    return wgpu::TextureFormat::RGBA8Unorm;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Snorm:
-                    return wgpu::TextureFormat::RGBA8Snorm;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Uint:
-                    return wgpu::TextureFormat::RGBA8Uint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba8Sint:
-                    return wgpu::TextureFormat::RGBA8Sint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRg32Uint:
-                    return wgpu::TextureFormat::RG32Uint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRg32Sint:
-                    return wgpu::TextureFormat::RG32Sint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRg32Float:
-                    return wgpu::TextureFormat::RG32Float;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba16Uint:
-                    return wgpu::TextureFormat::RGBA16Uint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba16Sint:
-                    return wgpu::TextureFormat::RGBA16Sint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba16Float:
-                    return wgpu::TextureFormat::RGBA16Float;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba32Uint:
-                    return wgpu::TextureFormat::RGBA32Uint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba32Sint:
-                    return wgpu::TextureFormat::RGBA32Sint;
-                case tint::inspector::ResourceBinding::TexelFormat::kRgba32Float:
-                    return wgpu::TextureFormat::RGBA32Float;
-                case tint::inspector::ResourceBinding::TexelFormat::kNone:
-                    return wgpu::TextureFormat::Undefined;
+ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
+    tint::inspector::ComponentType type) {
+    switch (type) {
+        case tint::inspector::ComponentType::kFloat:
+            return VertexFormatBaseType::Float;
+        case tint::inspector::ComponentType::kSInt:
+            return VertexFormatBaseType::Sint;
+        case tint::inspector::ComponentType::kUInt:
+            return VertexFormatBaseType::Uint;
+        case tint::inspector::ComponentType::kUnknown:
+            return DAWN_VALIDATION_ERROR("Attempted to convert 'Unknown' component type from Tint");
+    }
+    UNREACHABLE();
+}
 
-                default:
-                    UNREACHABLE();
-                    return wgpu::TextureFormat::Undefined;
-            }
-        }
+ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
+    tint::inspector::ResourceBinding::ResourceType resource_type) {
+    switch (resource_type) {
+        case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
+            return wgpu::BufferBindingType::Uniform;
+        case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
+            return wgpu::BufferBindingType::Storage;
+        case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
+            return wgpu::BufferBindingType::ReadOnlyStorage;
+        default:
+            return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
+    }
+    UNREACHABLE();
+}
 
-        wgpu::TextureViewDimension TintTextureDimensionToTextureViewDimension(
-            tint::inspector::ResourceBinding::TextureDimension dim) {
-            switch (dim) {
-                case tint::inspector::ResourceBinding::TextureDimension::k1d:
-                    return wgpu::TextureViewDimension::e1D;
-                case tint::inspector::ResourceBinding::TextureDimension::k2d:
-                    return wgpu::TextureViewDimension::e2D;
-                case tint::inspector::ResourceBinding::TextureDimension::k2dArray:
-                    return wgpu::TextureViewDimension::e2DArray;
-                case tint::inspector::ResourceBinding::TextureDimension::k3d:
-                    return wgpu::TextureViewDimension::e3D;
-                case tint::inspector::ResourceBinding::TextureDimension::kCube:
-                    return wgpu::TextureViewDimension::Cube;
-                case tint::inspector::ResourceBinding::TextureDimension::kCubeArray:
-                    return wgpu::TextureViewDimension::CubeArray;
-                case tint::inspector::ResourceBinding::TextureDimension::kNone:
-                    return wgpu::TextureViewDimension::Undefined;
-            }
-            UNREACHABLE();
-        }
+ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
+    tint::inspector::ResourceBinding::ResourceType resource_type) {
+    switch (resource_type) {
+        case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
+            return wgpu::StorageTextureAccess::WriteOnly;
+        default:
+            return DAWN_VALIDATION_ERROR("Attempted to convert non-storage texture resource type");
+    }
+    UNREACHABLE();
+}
 
-        SampleTypeBit TintSampledKindToSampleTypeBit(
-            tint::inspector::ResourceBinding::SampledKind s) {
-            switch (s) {
-                case tint::inspector::ResourceBinding::SampledKind::kSInt:
-                    return SampleTypeBit::Sint;
-                case tint::inspector::ResourceBinding::SampledKind::kUInt:
-                    return SampleTypeBit::Uint;
-                case tint::inspector::ResourceBinding::SampledKind::kFloat:
-                    return SampleTypeBit::Float | SampleTypeBit::UnfilterableFloat;
-                case tint::inspector::ResourceBinding::SampledKind::kUnknown:
-                    return SampleTypeBit::None;
-            }
-            UNREACHABLE();
-        }
+ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
+    tint::inspector::ComponentType type) {
+    switch (type) {
+        case tint::inspector::ComponentType::kFloat:
+            return InterStageComponentType::Float;
+        case tint::inspector::ComponentType::kSInt:
+            return InterStageComponentType::Sint;
+        case tint::inspector::ComponentType::kUInt:
+            return InterStageComponentType::Uint;
+        case tint::inspector::ComponentType::kUnknown:
+            return DAWN_VALIDATION_ERROR("Attempted to convert 'Unknown' component type from Tint");
+    }
+    UNREACHABLE();
+}
 
-        ResultOrError<wgpu::TextureComponentType> TintComponentTypeToTextureComponentType(
-            tint::inspector::ComponentType type) {
-            switch (type) {
-                case tint::inspector::ComponentType::kFloat:
-                    return wgpu::TextureComponentType::Float;
-                case tint::inspector::ComponentType::kSInt:
-                    return wgpu::TextureComponentType::Sint;
-                case tint::inspector::ComponentType::kUInt:
-                    return wgpu::TextureComponentType::Uint;
-                case tint::inspector::ComponentType::kUnknown:
-                    return DAWN_VALIDATION_ERROR(
-                        "Attempted to convert 'Unknown' component type from Tint");
-            }
-            UNREACHABLE();
-        }
+ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
+    tint::inspector::CompositionType type) {
+    switch (type) {
+        case tint::inspector::CompositionType::kScalar:
+            return 1u;
+        case tint::inspector::CompositionType::kVec2:
+            return 2u;
+        case tint::inspector::CompositionType::kVec3:
+            return 3u;
+        case tint::inspector::CompositionType::kVec4:
+            return 4u;
+        case tint::inspector::CompositionType::kUnknown:
+            return DAWN_VALIDATION_ERROR("Attempt to convert 'Unknown' composition type from Tint");
+    }
+    UNREACHABLE();
+}
 
-        ResultOrError<VertexFormatBaseType> TintComponentTypeToVertexFormatBaseType(
-            tint::inspector::ComponentType type) {
-            switch (type) {
-                case tint::inspector::ComponentType::kFloat:
-                    return VertexFormatBaseType::Float;
-                case tint::inspector::ComponentType::kSInt:
-                    return VertexFormatBaseType::Sint;
-                case tint::inspector::ComponentType::kUInt:
-                    return VertexFormatBaseType::Uint;
-                case tint::inspector::ComponentType::kUnknown:
-                    return DAWN_VALIDATION_ERROR(
-                        "Attempted to convert 'Unknown' component type from Tint");
-            }
-            UNREACHABLE();
-        }
+ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
+    tint::inspector::InterpolationType type) {
+    switch (type) {
+        case tint::inspector::InterpolationType::kPerspective:
+            return InterpolationType::Perspective;
+        case tint::inspector::InterpolationType::kLinear:
+            return InterpolationType::Linear;
+        case tint::inspector::InterpolationType::kFlat:
+            return InterpolationType::Flat;
+        case tint::inspector::InterpolationType::kUnknown:
+            return DAWN_VALIDATION_ERROR(
+                "Attempted to convert 'Unknown' interpolation type from Tint");
+    }
+    UNREACHABLE();
+}
 
-        ResultOrError<wgpu::BufferBindingType> TintResourceTypeToBufferBindingType(
-            tint::inspector::ResourceBinding::ResourceType resource_type) {
-            switch (resource_type) {
-                case tint::inspector::ResourceBinding::ResourceType::kUniformBuffer:
-                    return wgpu::BufferBindingType::Uniform;
-                case tint::inspector::ResourceBinding::ResourceType::kStorageBuffer:
-                    return wgpu::BufferBindingType::Storage;
-                case tint::inspector::ResourceBinding::ResourceType::kReadOnlyStorageBuffer:
-                    return wgpu::BufferBindingType::ReadOnlyStorage;
-                default:
-                    return DAWN_VALIDATION_ERROR("Attempted to convert non-buffer resource type");
-            }
-            UNREACHABLE();
-        }
+ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
+    tint::inspector::InterpolationSampling type) {
+    switch (type) {
+        case tint::inspector::InterpolationSampling::kNone:
+            return InterpolationSampling::None;
+        case tint::inspector::InterpolationSampling::kCenter:
+            return InterpolationSampling::Center;
+        case tint::inspector::InterpolationSampling::kCentroid:
+            return InterpolationSampling::Centroid;
+        case tint::inspector::InterpolationSampling::kSample:
+            return InterpolationSampling::Sample;
+        case tint::inspector::InterpolationSampling::kUnknown:
+            return DAWN_VALIDATION_ERROR(
+                "Attempted to convert 'Unknown' interpolation sampling type from Tint");
+    }
+    UNREACHABLE();
+}
 
-        ResultOrError<wgpu::StorageTextureAccess> TintResourceTypeToStorageTextureAccess(
-            tint::inspector::ResourceBinding::ResourceType resource_type) {
-            switch (resource_type) {
-                case tint::inspector::ResourceBinding::ResourceType::kWriteOnlyStorageTexture:
-                    return wgpu::StorageTextureAccess::WriteOnly;
-                default:
-                    return DAWN_VALIDATION_ERROR(
-                        "Attempted to convert non-storage texture resource type");
-            }
-            UNREACHABLE();
-        }
+EntryPointMetadata::OverridableConstant::Type FromTintOverridableConstantType(
+    tint::inspector::OverridableConstant::Type type) {
+    switch (type) {
+        case tint::inspector::OverridableConstant::Type::kBool:
+            return EntryPointMetadata::OverridableConstant::Type::Boolean;
+        case tint::inspector::OverridableConstant::Type::kFloat32:
+            return EntryPointMetadata::OverridableConstant::Type::Float32;
+        case tint::inspector::OverridableConstant::Type::kInt32:
+            return EntryPointMetadata::OverridableConstant::Type::Int32;
+        case tint::inspector::OverridableConstant::Type::kUint32:
+            return EntryPointMetadata::OverridableConstant::Type::Uint32;
+    }
+    UNREACHABLE();
+}
 
-        ResultOrError<InterStageComponentType> TintComponentTypeToInterStageComponentType(
-            tint::inspector::ComponentType type) {
-            switch (type) {
-                case tint::inspector::ComponentType::kFloat:
-                    return InterStageComponentType::Float;
-                case tint::inspector::ComponentType::kSInt:
-                    return InterStageComponentType::Sint;
-                case tint::inspector::ComponentType::kUInt:
-                    return InterStageComponentType::Uint;
-                case tint::inspector::ComponentType::kUnknown:
-                    return DAWN_VALIDATION_ERROR(
-                        "Attempted to convert 'Unknown' component type from Tint");
-            }
-            UNREACHABLE();
-        }
-
-        ResultOrError<uint32_t> TintCompositionTypeToInterStageComponentCount(
-            tint::inspector::CompositionType type) {
-            switch (type) {
-                case tint::inspector::CompositionType::kScalar:
-                    return 1u;
-                case tint::inspector::CompositionType::kVec2:
-                    return 2u;
-                case tint::inspector::CompositionType::kVec3:
-                    return 3u;
-                case tint::inspector::CompositionType::kVec4:
-                    return 4u;
-                case tint::inspector::CompositionType::kUnknown:
-                    return DAWN_VALIDATION_ERROR(
-                        "Attempt to convert 'Unknown' composition type from Tint");
-            }
-            UNREACHABLE();
-        }
-
-        ResultOrError<InterpolationType> TintInterpolationTypeToInterpolationType(
-            tint::inspector::InterpolationType type) {
-            switch (type) {
-                case tint::inspector::InterpolationType::kPerspective:
-                    return InterpolationType::Perspective;
-                case tint::inspector::InterpolationType::kLinear:
-                    return InterpolationType::Linear;
-                case tint::inspector::InterpolationType::kFlat:
-                    return InterpolationType::Flat;
-                case tint::inspector::InterpolationType::kUnknown:
-                    return DAWN_VALIDATION_ERROR(
-                        "Attempted to convert 'Unknown' interpolation type from Tint");
-            }
-            UNREACHABLE();
-        }
-
-        ResultOrError<InterpolationSampling> TintInterpolationSamplingToInterpolationSamplingType(
-            tint::inspector::InterpolationSampling type) {
-            switch (type) {
-                case tint::inspector::InterpolationSampling::kNone:
-                    return InterpolationSampling::None;
-                case tint::inspector::InterpolationSampling::kCenter:
-                    return InterpolationSampling::Center;
-                case tint::inspector::InterpolationSampling::kCentroid:
-                    return InterpolationSampling::Centroid;
-                case tint::inspector::InterpolationSampling::kSample:
-                    return InterpolationSampling::Sample;
-                case tint::inspector::InterpolationSampling::kUnknown:
-                    return DAWN_VALIDATION_ERROR(
-                        "Attempted to convert 'Unknown' interpolation sampling type from Tint");
-            }
-            UNREACHABLE();
-        }
-
-        EntryPointMetadata::OverridableConstant::Type FromTintOverridableConstantType(
-            tint::inspector::OverridableConstant::Type type) {
-            switch (type) {
-                case tint::inspector::OverridableConstant::Type::kBool:
-                    return EntryPointMetadata::OverridableConstant::Type::Boolean;
-                case tint::inspector::OverridableConstant::Type::kFloat32:
-                    return EntryPointMetadata::OverridableConstant::Type::Float32;
-                case tint::inspector::OverridableConstant::Type::kInt32:
-                    return EntryPointMetadata::OverridableConstant::Type::Int32;
-                case tint::inspector::OverridableConstant::Type::kUint32:
-                    return EntryPointMetadata::OverridableConstant::Type::Uint32;
-            }
-            UNREACHABLE();
-        }
-
-        ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
-                                               OwnedCompilationMessages* outMessages) {
+ResultOrError<tint::Program> ParseWGSL(const tint::Source::File* file,
+                                       OwnedCompilationMessages* outMessages) {
 #if TINT_BUILD_WGSL_READER
-            tint::Program program = tint::reader::wgsl::Parse(file);
-            if (outMessages != nullptr) {
-                outMessages->AddMessages(program.Diagnostics());
-            }
-            if (!program.IsValid()) {
-                return DAWN_FORMAT_VALIDATION_ERROR(
-                    "Tint WGSL reader failure:\nParser: %s\nShader:\n%s\n",
-                    program.Diagnostics().str(), file->content.data);
-            }
+    tint::Program program = tint::reader::wgsl::Parse(file);
+    if (outMessages != nullptr) {
+        outMessages->AddMessages(program.Diagnostics());
+    }
+    if (!program.IsValid()) {
+        return DAWN_FORMAT_VALIDATION_ERROR("Tint WGSL reader failure:\nParser: %s\nShader:\n%s\n",
+                                            program.Diagnostics().str(), file->content.data);
+    }
 
-            return std::move(program);
+    return std::move(program);
 #else
-            return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_WGSL_READER is not defined.");
+    return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_WGSL_READER is not defined.");
 #endif
-        }
+}
 
-        ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
-                                                OwnedCompilationMessages* outMessages) {
+ResultOrError<tint::Program> ParseSPIRV(const std::vector<uint32_t>& spirv,
+                                        OwnedCompilationMessages* outMessages) {
 #if TINT_BUILD_SPV_READER
-            tint::Program program = tint::reader::spirv::Parse(spirv);
-            if (outMessages != nullptr) {
-                outMessages->AddMessages(program.Diagnostics());
-            }
-            if (!program.IsValid()) {
-                return DAWN_FORMAT_VALIDATION_ERROR("Tint SPIR-V reader failure:\nParser: %s\n",
-                                                    program.Diagnostics().str());
-            }
+    tint::Program program = tint::reader::spirv::Parse(spirv);
+    if (outMessages != nullptr) {
+        outMessages->AddMessages(program.Diagnostics());
+    }
+    if (!program.IsValid()) {
+        return DAWN_FORMAT_VALIDATION_ERROR("Tint SPIR-V reader failure:\nParser: %s\n",
+                                            program.Diagnostics().str());
+    }
 
-            return std::move(program);
+    return std::move(program);
 #else
-            return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_SPV_READER is not defined.");
+    return DAWN_FORMAT_VALIDATION_ERROR("TINT_BUILD_SPV_READER is not defined.");
 
 #endif
+}
+
+std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
+                                                 const BindGroupLayoutBase* layout) {
+    std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
+    uint32_t packedIdx = 0;
+
+    for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount(); ++bindingIndex) {
+        const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
+        if (bindingInfo.buffer.minBindingSize != 0) {
+            // Skip bindings that have minimum buffer size set in the layout
+            continue;
         }
 
-        std::vector<uint64_t> GetBindGroupMinBufferSizes(const BindingGroupInfoMap& shaderBindings,
-                                                         const BindGroupLayoutBase* layout) {
-            std::vector<uint64_t> requiredBufferSizes(layout->GetUnverifiedBufferCount());
-            uint32_t packedIdx = 0;
-
-            for (BindingIndex bindingIndex{0}; bindingIndex < layout->GetBufferCount();
-                 ++bindingIndex) {
-                const BindingInfo& bindingInfo = layout->GetBindingInfo(bindingIndex);
-                if (bindingInfo.buffer.minBindingSize != 0) {
-                    // Skip bindings that have minimum buffer size set in the layout
-                    continue;
-                }
-
-                ASSERT(packedIdx < requiredBufferSizes.size());
-                const auto& shaderInfo = shaderBindings.find(bindingInfo.binding);
-                if (shaderInfo != shaderBindings.end()) {
-                    requiredBufferSizes[packedIdx] = shaderInfo->second.buffer.minBindingSize;
-                } else {
-                    // We have to include buffers if they are included in the bind group's
-                    // packed vector. We don't actually need to check these at draw time, so
-                    // if this is a problem in the future we can optimize it further.
-                    requiredBufferSizes[packedIdx] = 0;
-                }
-                ++packedIdx;
-            }
-
-            return requiredBufferSizes;
+        ASSERT(packedIdx < requiredBufferSizes.size());
+        const auto& shaderInfo = shaderBindings.find(bindingInfo.binding);
+        if (shaderInfo != shaderBindings.end()) {
+            requiredBufferSizes[packedIdx] = shaderInfo->second.buffer.minBindingSize;
+        } else {
+            // We have to include buffers if they are included in the bind group's
+            // packed vector. We don't actually need to check these at draw time, so
+            // if this is a problem in the future we can optimize it further.
+            requiredBufferSizes[packedIdx] = 0;
         }
+        ++packedIdx;
+    }
 
-        MaybeError ValidateCompatibilityOfSingleBindingWithLayout(
-            const DeviceBase* device,
-            const BindGroupLayoutBase* layout,
-            SingleShaderStage entryPointStage,
-            BindingNumber bindingNumber,
-            const ShaderBindingInfo& shaderInfo) {
-            const BindGroupLayoutBase::BindingMap& layoutBindings = layout->GetBindingMap();
+    return requiredBufferSizes;
+}
 
-            // An external texture binding found in the shader will later be expanded into multiple
-            // bindings at compile time. This expansion will have already happened in the bgl - so
-            // the shader and bgl will always mismatch at this point. Expansion info is contained in
-            // the bgl object, so we can still verify the bgl used to have an external texture in
-            // the slot corresponding to the shader reflection.
-            if (shaderInfo.bindingType == BindingInfoType::ExternalTexture) {
-                // If an external texture binding used to exist in the bgl, it will be found as a
-                // key in the ExternalTextureBindingExpansions map.
-                ExternalTextureBindingExpansionMap expansions =
-                    layout->GetExternalTextureBindingExpansionMap();
-                std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
-                    expansions.find(bindingNumber);
-                // TODO(dawn:563): Provide info about the binding types.
-                DAWN_INVALID_IF(it == expansions.end(),
-                                "Binding type in the shader (texture_external) doesn't match the "
-                                "type in the layout.");
+MaybeError ValidateCompatibilityOfSingleBindingWithLayout(const DeviceBase* device,
+                                                          const BindGroupLayoutBase* layout,
+                                                          SingleShaderStage entryPointStage,
+                                                          BindingNumber bindingNumber,
+                                                          const ShaderBindingInfo& shaderInfo) {
+    const BindGroupLayoutBase::BindingMap& layoutBindings = layout->GetBindingMap();
 
-                return {};
-            }
+    // An external texture binding found in the shader will later be expanded into multiple
+    // bindings at compile time. This expansion will have already happened in the bgl - so
+    // the shader and bgl will always mismatch at this point. Expansion info is contained in
+    // the bgl object, so we can still verify the bgl used to have an external texture in
+    // the slot corresponding to the shader reflection.
+    if (shaderInfo.bindingType == BindingInfoType::ExternalTexture) {
+        // If an external texture binding used to exist in the bgl, it will be found as a
+        // key in the ExternalTextureBindingExpansions map.
+        ExternalTextureBindingExpansionMap expansions =
+            layout->GetExternalTextureBindingExpansionMap();
+        std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+            expansions.find(bindingNumber);
+        // TODO(dawn:563): Provide info about the binding types.
+        DAWN_INVALID_IF(it == expansions.end(),
+                        "Binding type in the shader (texture_external) doesn't match the "
+                        "type in the layout.");
 
-            const auto& bindingIt = layoutBindings.find(bindingNumber);
-            DAWN_INVALID_IF(bindingIt == layoutBindings.end(), "Binding doesn't exist in %s.",
-                            layout);
+        return {};
+    }
 
-            BindingIndex bindingIndex(bindingIt->second);
-            const BindingInfo& layoutInfo = layout->GetBindingInfo(bindingIndex);
+    const auto& bindingIt = layoutBindings.find(bindingNumber);
+    DAWN_INVALID_IF(bindingIt == layoutBindings.end(), "Binding doesn't exist in %s.", layout);
 
-            // TODO(dawn:563): Provide info about the binding types.
+    BindingIndex bindingIndex(bindingIt->second);
+    const BindingInfo& layoutInfo = layout->GetBindingInfo(bindingIndex);
+
+    // TODO(dawn:563): Provide info about the binding types.
+    DAWN_INVALID_IF(
+        layoutInfo.bindingType != shaderInfo.bindingType,
+        "Binding type (buffer vs. texture vs. sampler vs. external) doesn't match the type "
+        "in the layout.");
+
+    ExternalTextureBindingExpansionMap expansions = layout->GetExternalTextureBindingExpansionMap();
+    DAWN_INVALID_IF(expansions.find(bindingNumber) != expansions.end(),
+                    "Binding type (buffer vs. texture vs. sampler vs. external) doesn't "
+                    "match the type in the layout.");
+
+    // TODO(dawn:563): Provide info about the visibility.
+    DAWN_INVALID_IF((layoutInfo.visibility & StageBit(entryPointStage)) == 0,
+                    "Entry point's stage is not in the binding visibility in the layout (%s)",
+                    layoutInfo.visibility);
+
+    switch (layoutInfo.bindingType) {
+        case BindingInfoType::Texture: {
             DAWN_INVALID_IF(
-                layoutInfo.bindingType != shaderInfo.bindingType,
-                "Binding type (buffer vs. texture vs. sampler vs. external) doesn't match the type "
-                "in the layout.");
+                layoutInfo.texture.multisampled != shaderInfo.texture.multisampled,
+                "Binding multisampled flag (%u) doesn't match the layout's multisampled "
+                "flag (%u)",
+                layoutInfo.texture.multisampled, shaderInfo.texture.multisampled);
 
-            ExternalTextureBindingExpansionMap expansions =
-                layout->GetExternalTextureBindingExpansionMap();
-            DAWN_INVALID_IF(expansions.find(bindingNumber) != expansions.end(),
-                            "Binding type (buffer vs. texture vs. sampler vs. external) doesn't "
-                            "match the type in the layout.");
+            // TODO(dawn:563): Provide info about the sample types.
+            DAWN_INVALID_IF((SampleTypeToSampleTypeBit(layoutInfo.texture.sampleType) &
+                             shaderInfo.texture.compatibleSampleTypes) == 0,
+                            "The sample type in the shader is not compatible with the "
+                            "sample type of the layout.");
 
-            // TODO(dawn:563): Provide info about the visibility.
             DAWN_INVALID_IF(
-                (layoutInfo.visibility & StageBit(entryPointStage)) == 0,
-                "Entry point's stage is not in the binding visibility in the layout (%s)",
-                layoutInfo.visibility);
-
-            switch (layoutInfo.bindingType) {
-                case BindingInfoType::Texture: {
-                    DAWN_INVALID_IF(
-                        layoutInfo.texture.multisampled != shaderInfo.texture.multisampled,
-                        "Binding multisampled flag (%u) doesn't match the layout's multisampled "
-                        "flag (%u)",
-                        layoutInfo.texture.multisampled, shaderInfo.texture.multisampled);
-
-                    // TODO(dawn:563): Provide info about the sample types.
-                    DAWN_INVALID_IF((SampleTypeToSampleTypeBit(layoutInfo.texture.sampleType) &
-                                     shaderInfo.texture.compatibleSampleTypes) == 0,
-                                    "The sample type in the shader is not compatible with the "
-                                    "sample type of the layout.");
-
-                    DAWN_INVALID_IF(
-                        layoutInfo.texture.viewDimension != shaderInfo.texture.viewDimension,
-                        "The shader's binding dimension (%s) doesn't match the shader's binding "
-                        "dimension (%s).",
-                        layoutInfo.texture.viewDimension, shaderInfo.texture.viewDimension);
-                    break;
-                }
-
-                case BindingInfoType::StorageTexture: {
-                    ASSERT(layoutInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
-                    ASSERT(shaderInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
-
-                    DAWN_INVALID_IF(
-                        layoutInfo.storageTexture.access != shaderInfo.storageTexture.access,
-                        "The layout's binding access (%s) isn't compatible with the shader's "
-                        "binding access (%s).",
-                        layoutInfo.storageTexture.access, shaderInfo.storageTexture.access);
-
-                    DAWN_INVALID_IF(
-                        layoutInfo.storageTexture.format != shaderInfo.storageTexture.format,
-                        "The layout's binding format (%s) doesn't match the shader's binding "
-                        "format (%s).",
-                        layoutInfo.storageTexture.format, shaderInfo.storageTexture.format);
-
-                    DAWN_INVALID_IF(layoutInfo.storageTexture.viewDimension !=
-                                        shaderInfo.storageTexture.viewDimension,
-                                    "The layout's binding dimension (%s) doesn't match the "
-                                    "shader's binding dimension (%s).",
-                                    layoutInfo.storageTexture.viewDimension,
-                                    shaderInfo.storageTexture.viewDimension);
-                    break;
-                }
-
-                case BindingInfoType::Buffer: {
-                    // Binding mismatch between shader and bind group is invalid. For example, a
-                    // writable binding in the shader with a readonly storage buffer in the bind
-                    // group layout is invalid. For internal usage with internal shaders, a storage
-                    // binding in the shader with an internal storage buffer in the bind group
-                    // layout is also valid.
-                    bool validBindingConversion =
-                        (layoutInfo.buffer.type == kInternalStorageBufferBinding &&
-                         shaderInfo.buffer.type == wgpu::BufferBindingType::Storage);
-
-                    DAWN_INVALID_IF(
-                        layoutInfo.buffer.type != shaderInfo.buffer.type && !validBindingConversion,
-                        "The buffer type in the shader (%s) is not compatible with the type in the "
-                        "layout (%s).",
-                        shaderInfo.buffer.type, layoutInfo.buffer.type);
-
-                    DAWN_INVALID_IF(
-                        layoutInfo.buffer.minBindingSize != 0 &&
-                            shaderInfo.buffer.minBindingSize > layoutInfo.buffer.minBindingSize,
-                        "The shader uses more bytes of the buffer (%u) than the layout's "
-                        "minBindingSize (%u).",
-                        shaderInfo.buffer.minBindingSize, layoutInfo.buffer.minBindingSize);
-                    break;
-                }
-
-                case BindingInfoType::Sampler:
-                    DAWN_INVALID_IF(
-                        (layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison) !=
-                            shaderInfo.sampler.isComparison,
-                        "The sampler type in the shader (comparison: %u) doesn't match the type in "
-                        "the layout (comparison: %u).",
-                        shaderInfo.sampler.isComparison,
-                        layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison);
-                    break;
-
-                case BindingInfoType::ExternalTexture: {
-                    UNREACHABLE();
-                    break;
-                }
-            }
-
-            return {};
-        }
-        MaybeError ValidateCompatibilityWithBindGroupLayout(DeviceBase* device,
-                                                            BindGroupIndex group,
-                                                            const EntryPointMetadata& entryPoint,
-                                                            const BindGroupLayoutBase* layout) {
-            // Iterate over all bindings used by this group in the shader, and find the
-            // corresponding binding in the BindGroupLayout, if it exists.
-            for (const auto& [bindingId, bindingInfo] : entryPoint.bindings[group]) {
-                DAWN_TRY_CONTEXT(ValidateCompatibilityOfSingleBindingWithLayout(
-                                     device, layout, entryPoint.stage, bindingId, bindingInfo),
-                                 "validating that the entry-point's declaration for @group(%u) "
-                                 "@binding(%u) matches %s",
-                                 static_cast<uint32_t>(group), static_cast<uint32_t>(bindingId),
-                                 layout);
-            }
-
-            return {};
+                layoutInfo.texture.viewDimension != shaderInfo.texture.viewDimension,
+                "The layout's binding dimension (%s) doesn't match the shader's binding "
+                "dimension (%s).",
+                layoutInfo.texture.viewDimension, shaderInfo.texture.viewDimension);
+            break;
         }
 
-        ResultOrError<std::unique_ptr<EntryPointMetadata>> ReflectEntryPointUsingTint(
-            const DeviceBase* device,
-            tint::inspector::Inspector* inspector,
-            const tint::inspector::EntryPoint& entryPoint) {
-            const CombinedLimits& limits = device->GetLimits();
-            constexpr uint32_t kMaxInterStageShaderLocation = kMaxInterStageShaderVariables - 1;
+        case BindingInfoType::StorageTexture: {
+            ASSERT(layoutInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
+            ASSERT(shaderInfo.storageTexture.format != wgpu::TextureFormat::Undefined);
 
-            std::unique_ptr<EntryPointMetadata> metadata = std::make_unique<EntryPointMetadata>();
+            DAWN_INVALID_IF(layoutInfo.storageTexture.access != shaderInfo.storageTexture.access,
+                            "The layout's binding access (%s) isn't compatible with the shader's "
+                            "binding access (%s).",
+                            layoutInfo.storageTexture.access, shaderInfo.storageTexture.access);
 
-            // Returns the invalid argument, and if it is true additionally store the formatted
-            // error in metadata.infringedLimits. This is to delay the emission of these validation
-            // errors until the entry point is used.
+            DAWN_INVALID_IF(layoutInfo.storageTexture.format != shaderInfo.storageTexture.format,
+                            "The layout's binding format (%s) doesn't match the shader's binding "
+                            "format (%s).",
+                            layoutInfo.storageTexture.format, shaderInfo.storageTexture.format);
+
+            DAWN_INVALID_IF(
+                layoutInfo.storageTexture.viewDimension != shaderInfo.storageTexture.viewDimension,
+                "The layout's binding dimension (%s) doesn't match the "
+                "shader's binding dimension (%s).",
+                layoutInfo.storageTexture.viewDimension, shaderInfo.storageTexture.viewDimension);
+            break;
+        }
+
+        case BindingInfoType::Buffer: {
+            // Binding mismatch between shader and bind group is invalid. For example, a
+            // writable binding in the shader with a readonly storage buffer in the bind
+            // group layout is invalid. For internal usage with internal shaders, a storage
+            // binding in the shader with an internal storage buffer in the bind group
+            // layout is also valid.
+            bool validBindingConversion =
+                (layoutInfo.buffer.type == kInternalStorageBufferBinding &&
+                 shaderInfo.buffer.type == wgpu::BufferBindingType::Storage);
+
+            DAWN_INVALID_IF(
+                layoutInfo.buffer.type != shaderInfo.buffer.type && !validBindingConversion,
+                "The buffer type in the shader (%s) is not compatible with the type in the "
+                "layout (%s).",
+                shaderInfo.buffer.type, layoutInfo.buffer.type);
+
+            DAWN_INVALID_IF(layoutInfo.buffer.minBindingSize != 0 &&
+                                shaderInfo.buffer.minBindingSize > layoutInfo.buffer.minBindingSize,
+                            "The shader uses more bytes of the buffer (%u) than the layout's "
+                            "minBindingSize (%u).",
+                            shaderInfo.buffer.minBindingSize, layoutInfo.buffer.minBindingSize);
+            break;
+        }
+
+        case BindingInfoType::Sampler:
+            DAWN_INVALID_IF(
+                (layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison) !=
+                    shaderInfo.sampler.isComparison,
+                "The sampler type in the shader (comparison: %u) doesn't match the type in "
+                "the layout (comparison: %u).",
+                shaderInfo.sampler.isComparison,
+                layoutInfo.sampler.type == wgpu::SamplerBindingType::Comparison);
+            break;
+
+        case BindingInfoType::ExternalTexture: {
+            UNREACHABLE();
+            break;
+        }
+    }
+
+    return {};
+}
+MaybeError ValidateCompatibilityWithBindGroupLayout(DeviceBase* device,
+                                                    BindGroupIndex group,
+                                                    const EntryPointMetadata& entryPoint,
+                                                    const BindGroupLayoutBase* layout) {
+    // Iterate over all bindings used by this group in the shader, and find the
+    // corresponding binding in the BindGroupLayout, if it exists.
+    for (const auto& [bindingId, bindingInfo] : entryPoint.bindings[group]) {
+        DAWN_TRY_CONTEXT(ValidateCompatibilityOfSingleBindingWithLayout(
+                             device, layout, entryPoint.stage, bindingId, bindingInfo),
+                         "validating that the entry-point's declaration for @group(%u) "
+                         "@binding(%u) matches %s",
+                         static_cast<uint32_t>(group), static_cast<uint32_t>(bindingId), layout);
+    }
+
+    return {};
+}
+
+ResultOrError<std::unique_ptr<EntryPointMetadata>> ReflectEntryPointUsingTint(
+    const DeviceBase* device,
+    tint::inspector::Inspector* inspector,
+    const tint::inspector::EntryPoint& entryPoint) {
+    const CombinedLimits& limits = device->GetLimits();
+    constexpr uint32_t kMaxInterStageShaderLocation = kMaxInterStageShaderVariables - 1;
+
+    std::unique_ptr<EntryPointMetadata> metadata = std::make_unique<EntryPointMetadata>();
+
+    // Returns the `invalid` argument, and if it is true additionally stores the formatted
+    // error in metadata.infringedLimits. This is to delay the emission of these validation
+    // errors until the entry point is used.
 #define DelayedInvalidIf(invalid, ...)                                              \
     ([&]() {                                                                        \
         if (invalid) {                                                              \
@@ -627,719 +609,697 @@
         return invalid;                                                             \
     })()
 
-            if (!entryPoint.overridable_constants.empty()) {
-                DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
-                                "Pipeline overridable constants are disallowed because they "
-                                "are partially implemented.");
+    if (!entryPoint.overridable_constants.empty()) {
+        DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs),
+                        "Pipeline overridable constants are disallowed because they "
+                        "are partially implemented.");
 
-                const auto& name2Id = inspector->GetConstantNameToIdMap();
-                const auto& id2Scalar = inspector->GetConstantIDs();
+        const auto& name2Id = inspector->GetConstantNameToIdMap();
+        const auto& id2Scalar = inspector->GetConstantIDs();
 
-                for (auto& c : entryPoint.overridable_constants) {
-                    uint32_t id = name2Id.at(c.name);
-                    OverridableConstantScalar defaultValue;
-                    if (c.is_initialized) {
-                        // if it is initialized, the scalar must exist
-                        const auto& scalar = id2Scalar.at(id);
-                        if (scalar.IsBool()) {
-                            defaultValue.b = scalar.AsBool();
-                        } else if (scalar.IsU32()) {
-                            defaultValue.u32 = scalar.AsU32();
-                        } else if (scalar.IsI32()) {
-                            defaultValue.i32 = scalar.AsI32();
-                        } else if (scalar.IsFloat()) {
-                            defaultValue.f32 = scalar.AsFloat();
-                        } else {
-                            UNREACHABLE();
-                        }
-                    }
-                    EntryPointMetadata::OverridableConstant constant = {
-                        id, FromTintOverridableConstantType(c.type), c.is_initialized,
-                        defaultValue};
-
-                    std::string identifier =
-                        c.is_numeric_id_specified ? std::to_string(constant.id) : c.name;
-                    metadata->overridableConstants[identifier] = constant;
-
-                    if (!c.is_initialized) {
-                        auto [_, inserted] = metadata->uninitializedOverridableConstants.emplace(
-                            std::move(identifier));
-                        // The insertion should have taken place
-                        ASSERT(inserted);
-                    } else {
-                        auto [_, inserted] = metadata->initializedOverridableConstants.emplace(
-                            std::move(identifier));
-                        // The insertion should have taken place
-                        ASSERT(inserted);
-                    }
+        for (auto& c : entryPoint.overridable_constants) {
+            uint32_t id = name2Id.at(c.name);
+            OverridableConstantScalar defaultValue;
+            if (c.is_initialized) {
+                // if it is initialized, the scalar must exist
+                const auto& scalar = id2Scalar.at(id);
+                if (scalar.IsBool()) {
+                    defaultValue.b = scalar.AsBool();
+                } else if (scalar.IsU32()) {
+                    defaultValue.u32 = scalar.AsU32();
+                } else if (scalar.IsI32()) {
+                    defaultValue.i32 = scalar.AsI32();
+                } else if (scalar.IsFloat()) {
+                    defaultValue.f32 = scalar.AsFloat();
+                } else {
+                    UNREACHABLE();
                 }
             }
+            EntryPointMetadata::OverridableConstant constant = {
+                id, FromTintOverridableConstantType(c.type), c.is_initialized, defaultValue};
 
-            DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
+            std::string identifier =
+                c.is_numeric_id_specified ? std::to_string(constant.id) : c.name;
+            metadata->overridableConstants[identifier] = constant;
 
-            if (metadata->stage == SingleShaderStage::Compute) {
-                DelayedInvalidIf(
-                    entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
-                        entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
-                        entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
-                    "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
-                    "maximum allowed (%u, %u, %u).",
-                    entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
-                    entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
-                    limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
+            if (!c.is_initialized) {
+                auto [_, inserted] =
+                    metadata->uninitializedOverridableConstants.emplace(std::move(identifier));
+                // The insertion should have taken place
+                ASSERT(inserted);
+            } else {
+                auto [_, inserted] =
+                    metadata->initializedOverridableConstants.emplace(std::move(identifier));
+                // The insertion should have taken place
+                ASSERT(inserted);
+            }
+        }
+    }
 
-                // Dimensions have already been validated against their individual limits above.
-                // Cast to uint64_t to avoid overflow in this multiplication.
-                uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
-                                          entryPoint.workgroup_size_y * entryPoint.workgroup_size_z;
-                DelayedInvalidIf(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
-                                 "The total number of workgroup invocations (%u) exceeds the "
-                                 "maximum allowed (%u).",
-                                 numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
+    DAWN_TRY_ASSIGN(metadata->stage, TintPipelineStageToShaderStage(entryPoint.stage));
 
-                const size_t workgroupStorageSize =
-                    inspector->GetWorkgroupStorageSize(entryPoint.name);
-                DelayedInvalidIf(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
-                                 "The total use of workgroup storage (%u bytes) is larger than "
-                                 "the maximum allowed (%u bytes).",
-                                 workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
+    if (metadata->stage == SingleShaderStage::Compute) {
+        DelayedInvalidIf(entryPoint.workgroup_size_x > limits.v1.maxComputeWorkgroupSizeX ||
+                             entryPoint.workgroup_size_y > limits.v1.maxComputeWorkgroupSizeY ||
+                             entryPoint.workgroup_size_z > limits.v1.maxComputeWorkgroupSizeZ,
+                         "Entry-point uses workgroup_size(%u, %u, %u) that exceeds the "
+                         "maximum allowed (%u, %u, %u).",
+                         entryPoint.workgroup_size_x, entryPoint.workgroup_size_y,
+                         entryPoint.workgroup_size_z, limits.v1.maxComputeWorkgroupSizeX,
+                         limits.v1.maxComputeWorkgroupSizeY, limits.v1.maxComputeWorkgroupSizeZ);
 
-                metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
-                metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
-                metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
+        // Dimensions have already been validated against their individual limits above.
+        // Cast to uint64_t to avoid overflow in this multiplication.
+        uint64_t numInvocations = static_cast<uint64_t>(entryPoint.workgroup_size_x) *
+                                  entryPoint.workgroup_size_y * entryPoint.workgroup_size_z;
+        DelayedInvalidIf(numInvocations > limits.v1.maxComputeInvocationsPerWorkgroup,
+                         "The total number of workgroup invocations (%u) exceeds the "
+                         "maximum allowed (%u).",
+                         numInvocations, limits.v1.maxComputeInvocationsPerWorkgroup);
 
-                metadata->usesNumWorkgroups = entryPoint.num_workgroups_used;
+        const size_t workgroupStorageSize = inspector->GetWorkgroupStorageSize(entryPoint.name);
+        DelayedInvalidIf(workgroupStorageSize > limits.v1.maxComputeWorkgroupStorageSize,
+                         "The total use of workgroup storage (%u bytes) is larger than "
+                         "the maximum allowed (%u bytes).",
+                         workgroupStorageSize, limits.v1.maxComputeWorkgroupStorageSize);
+
+        metadata->localWorkgroupSize.x = entryPoint.workgroup_size_x;
+        metadata->localWorkgroupSize.y = entryPoint.workgroup_size_y;
+        metadata->localWorkgroupSize.z = entryPoint.workgroup_size_z;
+
+        metadata->usesNumWorkgroups = entryPoint.num_workgroups_used;
+    }
+
+    if (metadata->stage == SingleShaderStage::Vertex) {
+        for (const auto& inputVar : entryPoint.input_variables) {
+            uint32_t unsanitizedLocation = inputVar.location_decoration;
+            if (DelayedInvalidIf(unsanitizedLocation >= kMaxVertexAttributes,
+                                 "Vertex input variable \"%s\" has a location (%u) that "
+                                 "exceeds the maximum (%u)",
+                                 inputVar.name, unsanitizedLocation, kMaxVertexAttributes)) {
+                continue;
             }
 
-            if (metadata->stage == SingleShaderStage::Vertex) {
-                for (const auto& inputVar : entryPoint.input_variables) {
-                    uint32_t unsanitizedLocation = inputVar.location_decoration;
-                    if (DelayedInvalidIf(unsanitizedLocation >= kMaxVertexAttributes,
-                                         "Vertex input variable \"%s\" has a location (%u) that "
-                                         "exceeds the maximum (%u)",
-                                         inputVar.name, unsanitizedLocation,
-                                         kMaxVertexAttributes)) {
-                        continue;
-                    }
+            VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
+            DAWN_TRY_ASSIGN(metadata->vertexInputBaseTypes[location],
+                            TintComponentTypeToVertexFormatBaseType(inputVar.component_type));
+            metadata->usedVertexInputs.set(location);
+        }
 
-                    VertexAttributeLocation location(static_cast<uint8_t>(unsanitizedLocation));
-                    DAWN_TRY_ASSIGN(
-                        metadata->vertexInputBaseTypes[location],
-                        TintComponentTypeToVertexFormatBaseType(inputVar.component_type));
-                    metadata->usedVertexInputs.set(location);
-                }
+        // [[position]] must be declared in a vertex shader but is not exposed as an
+        // output variable by Tint so we directly add its components to the total.
+        uint32_t totalInterStageShaderComponents = 4;
+        for (const auto& outputVar : entryPoint.output_variables) {
+            EntryPointMetadata::InterStageVariableInfo variable;
+            DAWN_TRY_ASSIGN(variable.baseType,
+                            TintComponentTypeToInterStageComponentType(outputVar.component_type));
+            DAWN_TRY_ASSIGN(variable.componentCount, TintCompositionTypeToInterStageComponentCount(
+                                                         outputVar.composition_type));
+            DAWN_TRY_ASSIGN(variable.interpolationType,
+                            TintInterpolationTypeToInterpolationType(outputVar.interpolation_type));
+            DAWN_TRY_ASSIGN(variable.interpolationSampling,
+                            TintInterpolationSamplingToInterpolationSamplingType(
+                                outputVar.interpolation_sampling));
+            totalInterStageShaderComponents += variable.componentCount;
 
-                // [[position]] must be declared in a vertex shader but is not exposed as an
-                // output variable by Tint so we directly add its components to the total.
-                uint32_t totalInterStageShaderComponents = 4;
-                for (const auto& outputVar : entryPoint.output_variables) {
-                    EntryPointMetadata::InterStageVariableInfo variable;
-                    DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToInterStageComponentType(
-                                                           outputVar.component_type));
-                    DAWN_TRY_ASSIGN(
-                        variable.componentCount,
-                        TintCompositionTypeToInterStageComponentCount(outputVar.composition_type));
-                    DAWN_TRY_ASSIGN(
-                        variable.interpolationType,
-                        TintInterpolationTypeToInterpolationType(outputVar.interpolation_type));
-                    DAWN_TRY_ASSIGN(variable.interpolationSampling,
-                                    TintInterpolationSamplingToInterpolationSamplingType(
-                                        outputVar.interpolation_sampling));
-                    totalInterStageShaderComponents += variable.componentCount;
-
-                    uint32_t location = outputVar.location_decoration;
-                    if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
-                                         "Vertex output variable \"%s\" has a location (%u) that "
-                                         "exceeds the maximum (%u).",
-                                         outputVar.name, location, kMaxInterStageShaderLocation)) {
-                        continue;
-                    }
-
-                    metadata->usedInterStageVariables.set(location);
-                    metadata->interStageVariables[location] = variable;
-                }
-
-                DelayedInvalidIf(
-                    totalInterStageShaderComponents > kMaxInterStageShaderComponents,
-                    "Total vertex output components count (%u) exceeds the maximum (%u).",
-                    totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+            uint32_t location = outputVar.location_decoration;
+            if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
+                                 "Vertex output variable \"%s\" has a location (%u) that "
+                                 "exceeds the maximum (%u).",
+                                 outputVar.name, location, kMaxInterStageShaderLocation)) {
+                continue;
             }
 
-            if (metadata->stage == SingleShaderStage::Fragment) {
-                uint32_t totalInterStageShaderComponents = 0;
-                for (const auto& inputVar : entryPoint.input_variables) {
-                    EntryPointMetadata::InterStageVariableInfo variable;
-                    DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToInterStageComponentType(
-                                                           inputVar.component_type));
-                    DAWN_TRY_ASSIGN(
-                        variable.componentCount,
-                        TintCompositionTypeToInterStageComponentCount(inputVar.composition_type));
-                    DAWN_TRY_ASSIGN(
-                        variable.interpolationType,
-                        TintInterpolationTypeToInterpolationType(inputVar.interpolation_type));
-                    DAWN_TRY_ASSIGN(variable.interpolationSampling,
-                                    TintInterpolationSamplingToInterpolationSamplingType(
-                                        inputVar.interpolation_sampling));
-                    totalInterStageShaderComponents += variable.componentCount;
+            metadata->usedInterStageVariables.set(location);
+            metadata->interStageVariables[location] = variable;
+        }
 
-                    uint32_t location = inputVar.location_decoration;
-                    if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
-                                         "Fragment input variable \"%s\" has a location (%u) that "
-                                         "exceeds the maximum (%u).",
-                                         inputVar.name, location, kMaxInterStageShaderLocation)) {
-                        continue;
-                    }
+        DelayedInvalidIf(totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+                         "Total vertex output components count (%u) exceeds the maximum (%u).",
+                         totalInterStageShaderComponents, kMaxInterStageShaderComponents);
+    }
 
-                    metadata->usedInterStageVariables.set(location);
-                    metadata->interStageVariables[location] = variable;
-                }
+    if (metadata->stage == SingleShaderStage::Fragment) {
+        uint32_t totalInterStageShaderComponents = 0;
+        for (const auto& inputVar : entryPoint.input_variables) {
+            EntryPointMetadata::InterStageVariableInfo variable;
+            DAWN_TRY_ASSIGN(variable.baseType,
+                            TintComponentTypeToInterStageComponentType(inputVar.component_type));
+            DAWN_TRY_ASSIGN(variable.componentCount, TintCompositionTypeToInterStageComponentCount(
+                                                         inputVar.composition_type));
+            DAWN_TRY_ASSIGN(variable.interpolationType,
+                            TintInterpolationTypeToInterpolationType(inputVar.interpolation_type));
+            DAWN_TRY_ASSIGN(variable.interpolationSampling,
+                            TintInterpolationSamplingToInterpolationSamplingType(
+                                inputVar.interpolation_sampling));
+            totalInterStageShaderComponents += variable.componentCount;
 
-                if (entryPoint.front_facing_used) {
-                    totalInterStageShaderComponents += 1;
-                }
-                if (entryPoint.input_sample_mask_used) {
-                    totalInterStageShaderComponents += 1;
-                }
-                if (entryPoint.sample_index_used) {
-                    totalInterStageShaderComponents += 1;
-                }
-                if (entryPoint.input_position_used) {
-                    totalInterStageShaderComponents += 4;
-                }
-
-                DelayedInvalidIf(
-                    totalInterStageShaderComponents > kMaxInterStageShaderComponents,
-                    "Total fragment input components count (%u) exceeds the maximum (%u).",
-                    totalInterStageShaderComponents, kMaxInterStageShaderComponents);
-
-                for (const auto& outputVar : entryPoint.output_variables) {
-                    EntryPointMetadata::FragmentOutputVariableInfo variable;
-                    DAWN_TRY_ASSIGN(variable.baseType, TintComponentTypeToTextureComponentType(
-                                                           outputVar.component_type));
-                    DAWN_TRY_ASSIGN(
-                        variable.componentCount,
-                        TintCompositionTypeToInterStageComponentCount(outputVar.composition_type));
-                    ASSERT(variable.componentCount <= 4);
-
-                    uint32_t unsanitizedAttachment = outputVar.location_decoration;
-                    if (DelayedInvalidIf(unsanitizedAttachment >= kMaxColorAttachments,
-                                         "Fragment output variable \"%s\" has a location (%u) that "
-                                         "exceeds the maximum (%u).",
-                                         outputVar.name, unsanitizedAttachment,
-                                         kMaxColorAttachments)) {
-                        continue;
-                    }
-
-                    ColorAttachmentIndex attachment(static_cast<uint8_t>(unsanitizedAttachment));
-                    metadata->fragmentOutputVariables[attachment] = variable;
-                    metadata->fragmentOutputsWritten.set(attachment);
-                }
+            uint32_t location = inputVar.location_decoration;
+            if (DelayedInvalidIf(location > kMaxInterStageShaderLocation,
+                                 "Fragment input variable \"%s\" has a location (%u) that "
+                                 "exceeds the maximum (%u).",
+                                 inputVar.name, location, kMaxInterStageShaderLocation)) {
+                continue;
             }
 
-            for (const tint::inspector::ResourceBinding& resource :
-                 inspector->GetResourceBindings(entryPoint.name)) {
-                ShaderBindingInfo info;
+            metadata->usedInterStageVariables.set(location);
+            metadata->interStageVariables[location] = variable;
+        }
 
-                info.bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
+        if (entryPoint.front_facing_used) {
+            totalInterStageShaderComponents += 1;
+        }
+        if (entryPoint.input_sample_mask_used) {
+            totalInterStageShaderComponents += 1;
+        }
+        if (entryPoint.sample_index_used) {
+            totalInterStageShaderComponents += 1;
+        }
+        if (entryPoint.input_position_used) {
+            totalInterStageShaderComponents += 4;
+        }
 
-                switch (info.bindingType) {
-                    case BindingInfoType::Buffer:
-                        info.buffer.minBindingSize = resource.size_no_padding;
-                        DAWN_TRY_ASSIGN(info.buffer.type, TintResourceTypeToBufferBindingType(
-                                                              resource.resource_type));
-                        break;
-                    case BindingInfoType::Sampler:
-                        switch (resource.resource_type) {
-                            case tint::inspector::ResourceBinding::ResourceType::kSampler:
-                                info.sampler.isComparison = false;
-                                break;
-                            case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
-                                info.sampler.isComparison = true;
-                                break;
-                            default:
-                                UNREACHABLE();
-                        }
-                        break;
-                    case BindingInfoType::Texture:
-                        info.texture.viewDimension =
-                            TintTextureDimensionToTextureViewDimension(resource.dim);
-                        if (resource.resource_type ==
-                                tint::inspector::ResourceBinding::ResourceType::kDepthTexture ||
-                            resource.resource_type == tint::inspector::ResourceBinding::
-                                                          ResourceType::kDepthMultisampledTexture) {
-                            info.texture.compatibleSampleTypes = SampleTypeBit::Depth;
-                        } else {
-                            info.texture.compatibleSampleTypes =
-                                TintSampledKindToSampleTypeBit(resource.sampled_kind);
-                        }
-                        info.texture.multisampled =
-                            resource.resource_type == tint::inspector::ResourceBinding::
-                                                          ResourceType::kMultisampledTexture ||
-                            resource.resource_type == tint::inspector::ResourceBinding::
-                                                          ResourceType::kDepthMultisampledTexture;
+        DelayedInvalidIf(totalInterStageShaderComponents > kMaxInterStageShaderComponents,
+                         "Total fragment input components count (%u) exceeds the maximum (%u).",
+                         totalInterStageShaderComponents, kMaxInterStageShaderComponents);
 
-                        break;
-                    case BindingInfoType::StorageTexture:
-                        DAWN_TRY_ASSIGN(
-                            info.storageTexture.access,
-                            TintResourceTypeToStorageTextureAccess(resource.resource_type));
-                        info.storageTexture.format =
-                            TintImageFormatToTextureFormat(resource.image_format);
-                        info.storageTexture.viewDimension =
-                            TintTextureDimensionToTextureViewDimension(resource.dim);
+        for (const auto& outputVar : entryPoint.output_variables) {
+            EntryPointMetadata::FragmentOutputVariableInfo variable;
+            DAWN_TRY_ASSIGN(variable.baseType,
+                            TintComponentTypeToTextureComponentType(outputVar.component_type));
+            DAWN_TRY_ASSIGN(variable.componentCount, TintCompositionTypeToInterStageComponentCount(
+                                                         outputVar.composition_type));
+            ASSERT(variable.componentCount <= 4);
 
+            uint32_t unsanitizedAttachment = outputVar.location_decoration;
+            if (DelayedInvalidIf(unsanitizedAttachment >= kMaxColorAttachments,
+                                 "Fragment output variable \"%s\" has a location (%u) that "
+                                 "exceeds the maximum (%u).",
+                                 outputVar.name, unsanitizedAttachment, kMaxColorAttachments)) {
+                continue;
+            }
+
+            ColorAttachmentIndex attachment(static_cast<uint8_t>(unsanitizedAttachment));
+            metadata->fragmentOutputVariables[attachment] = variable;
+            metadata->fragmentOutputsWritten.set(attachment);
+        }
+    }
+
+    for (const tint::inspector::ResourceBinding& resource :
+         inspector->GetResourceBindings(entryPoint.name)) {
+        ShaderBindingInfo info;
+
+        info.bindingType = TintResourceTypeToBindingInfoType(resource.resource_type);
+
+        switch (info.bindingType) {
+            case BindingInfoType::Buffer:
+                info.buffer.minBindingSize = resource.size_no_padding;
+                DAWN_TRY_ASSIGN(info.buffer.type,
+                                TintResourceTypeToBufferBindingType(resource.resource_type));
+                break;
+            case BindingInfoType::Sampler:
+                switch (resource.resource_type) {
+                    case tint::inspector::ResourceBinding::ResourceType::kSampler:
+                        info.sampler.isComparison = false;
                         break;
-                    case BindingInfoType::ExternalTexture:
+                    case tint::inspector::ResourceBinding::ResourceType::kComparisonSampler:
+                        info.sampler.isComparison = true;
                         break;
                     default:
-                        return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
+                        UNREACHABLE();
                 }
-
-                BindingNumber bindingNumber(resource.binding);
-                BindGroupIndex bindGroupIndex(resource.bind_group);
-
-                if (DelayedInvalidIf(bindGroupIndex >= kMaxBindGroupsTyped,
-                                     "The entry-point uses a binding with a group decoration (%u) "
-                                     "that exceeds the maximum (%u).",
-                                     resource.bind_group, kMaxBindGroups) ||
-                    DelayedInvalidIf(bindingNumber > kMaxBindingNumberTyped,
-                                     "Binding number (%u) exceeds the maximum binding number (%u).",
-                                     uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped))) {
-                    continue;
+                break;
+            case BindingInfoType::Texture:
+                info.texture.viewDimension =
+                    TintTextureDimensionToTextureViewDimension(resource.dim);
+                if (resource.resource_type ==
+                        tint::inspector::ResourceBinding::ResourceType::kDepthTexture ||
+                    resource.resource_type ==
+                        tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture) {
+                    info.texture.compatibleSampleTypes = SampleTypeBit::Depth;
+                } else {
+                    info.texture.compatibleSampleTypes =
+                        TintSampledKindToSampleTypeBit(resource.sampled_kind);
                 }
+                info.texture.multisampled =
+                    resource.resource_type ==
+                        tint::inspector::ResourceBinding::ResourceType::kMultisampledTexture ||
+                    resource.resource_type ==
+                        tint::inspector::ResourceBinding::ResourceType::kDepthMultisampledTexture;
 
-                const auto& [binding, inserted] =
-                    metadata->bindings[bindGroupIndex].emplace(bindingNumber, info);
-                DAWN_INVALID_IF(!inserted,
-                                "Entry-point has a duplicate binding for (group:%u, binding:%u).",
-                                resource.binding, resource.bind_group);
-            }
+                break;
+            case BindingInfoType::StorageTexture:
+                DAWN_TRY_ASSIGN(info.storageTexture.access,
+                                TintResourceTypeToStorageTextureAccess(resource.resource_type));
+                info.storageTexture.format = TintImageFormatToTextureFormat(resource.image_format);
+                info.storageTexture.viewDimension =
+                    TintTextureDimensionToTextureViewDimension(resource.dim);
 
-            std::vector<tint::inspector::SamplerTexturePair> samplerTextureUses =
-                inspector->GetSamplerTextureUses(entryPoint.name);
-            metadata->samplerTexturePairs.reserve(samplerTextureUses.size());
-            std::transform(samplerTextureUses.begin(), samplerTextureUses.end(),
-                           std::back_inserter(metadata->samplerTexturePairs),
-                           [](const tint::inspector::SamplerTexturePair& pair) {
-                               EntryPointMetadata::SamplerTexturePair result;
-                               result.sampler = {BindGroupIndex(pair.sampler_binding_point.group),
-                                                 BindingNumber(pair.sampler_binding_point.binding)};
-                               result.texture = {BindGroupIndex(pair.texture_binding_point.group),
-                                                 BindingNumber(pair.texture_binding_point.binding)};
-                               return result;
-                           });
+                break;
+            case BindingInfoType::ExternalTexture:
+                break;
+            default:
+                return DAWN_VALIDATION_ERROR("Unknown binding type in Shader");
+        }
+
+        BindingNumber bindingNumber(resource.binding);
+        BindGroupIndex bindGroupIndex(resource.bind_group);
+
+        if (DelayedInvalidIf(bindGroupIndex >= kMaxBindGroupsTyped,
+                             "The entry-point uses a binding with a group decoration (%u) "
+                             "that exceeds the maximum (%u).",
+                             resource.bind_group, kMaxBindGroups) ||
+            DelayedInvalidIf(bindingNumber > kMaxBindingNumberTyped,
+                             "Binding number (%u) exceeds the maximum binding number (%u).",
+                             uint32_t(bindingNumber), uint32_t(kMaxBindingNumberTyped))) {
+            continue;
+        }
+
+        const auto& [binding, inserted] =
+            metadata->bindings[bindGroupIndex].emplace(bindingNumber, info);
+        DAWN_INVALID_IF(!inserted,
+                        "Entry-point has a duplicate binding for (group:%u, binding:%u).",
+                        resource.binding, resource.bind_group);
+    }
+
+    std::vector<tint::inspector::SamplerTexturePair> samplerTextureUses =
+        inspector->GetSamplerTextureUses(entryPoint.name);
+    metadata->samplerTexturePairs.reserve(samplerTextureUses.size());
+    std::transform(samplerTextureUses.begin(), samplerTextureUses.end(),
+                   std::back_inserter(metadata->samplerTexturePairs),
+                   [](const tint::inspector::SamplerTexturePair& pair) {
+                       EntryPointMetadata::SamplerTexturePair result;
+                       result.sampler = {BindGroupIndex(pair.sampler_binding_point.group),
+                                         BindingNumber(pair.sampler_binding_point.binding)};
+                       result.texture = {BindGroupIndex(pair.texture_binding_point.group),
+                                         BindingNumber(pair.texture_binding_point.binding)};
+                       return result;
+                   });
 
 #undef DelayedInvalidIf
-            return std::move(metadata);
-        }
+    return std::move(metadata);
+}
 
-        ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(
-            const DeviceBase* device,
-            const tint::Program* program) {
-            ASSERT(program->IsValid());
+ResultOrError<EntryPointMetadataTable> ReflectShaderUsingTint(const DeviceBase* device,
+                                                              const tint::Program* program) {
+    ASSERT(program->IsValid());
 
-            tint::inspector::Inspector inspector(program);
-            std::vector<tint::inspector::EntryPoint> entryPoints = inspector.GetEntryPoints();
-            DAWN_INVALID_IF(inspector.has_error(), "Tint Reflection failure: Inspector: %s\n",
-                            inspector.error());
+    tint::inspector::Inspector inspector(program);
+    std::vector<tint::inspector::EntryPoint> entryPoints = inspector.GetEntryPoints();
+    DAWN_INVALID_IF(inspector.has_error(), "Tint Reflection failure: Inspector: %s\n",
+                    inspector.error());
 
-            EntryPointMetadataTable result;
+    EntryPointMetadataTable result;
 
-            for (const tint::inspector::EntryPoint& entryPoint : entryPoints) {
-                std::unique_ptr<EntryPointMetadata> metadata;
-                DAWN_TRY_ASSIGN_CONTEXT(metadata,
-                                        ReflectEntryPointUsingTint(device, &inspector, entryPoint),
-                                        "processing entry point \"%s\".", entryPoint.name);
+    for (const tint::inspector::EntryPoint& entryPoint : entryPoints) {
+        std::unique_ptr<EntryPointMetadata> metadata;
+        DAWN_TRY_ASSIGN_CONTEXT(metadata,
+                                ReflectEntryPointUsingTint(device, &inspector, entryPoint),
+                                "processing entry point \"%s\".", entryPoint.name);
 
-                ASSERT(result.count(entryPoint.name) == 0);
-                result[entryPoint.name] = std::move(metadata);
-            }
-            return std::move(result);
-        }
-    }  // anonymous namespace
-
-    ShaderModuleParseResult::ShaderModuleParseResult() = default;
-    ShaderModuleParseResult::~ShaderModuleParseResult() = default;
-
-    ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
-
-    ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
-        default;
-
-    bool ShaderModuleParseResult::HasParsedShader() const {
-        return tintProgram != nullptr;
+        ASSERT(result.count(entryPoint.name) == 0);
+        result[entryPoint.name] = std::move(metadata);
     }
+    return std::move(result);
+}
+}  // anonymous namespace
 
-    // TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
-    // long as tint diagnostics are inspected / printed.
-    class TintSource {
-      public:
-        template <typename... ARGS>
-        explicit TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {
-        }
+ShaderModuleParseResult::ShaderModuleParseResult() = default;
+ShaderModuleParseResult::~ShaderModuleParseResult() = default;
 
-        tint::Source::File file;
-    };
+ShaderModuleParseResult::ShaderModuleParseResult(ShaderModuleParseResult&& rhs) = default;
 
-    MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
-                                              const ShaderModuleDescriptor* descriptor,
-                                              ShaderModuleParseResult* parseResult,
-                                              OwnedCompilationMessages* outMessages) {
-        ASSERT(parseResult != nullptr);
+ShaderModuleParseResult& ShaderModuleParseResult::operator=(ShaderModuleParseResult&& rhs) =
+    default;
 
-        const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
-        DAWN_INVALID_IF(chainedDescriptor == nullptr,
-                        "Shader module descriptor missing chained descriptor");
+bool ShaderModuleParseResult::HasParsedShader() const {
+    return tintProgram != nullptr;
+}
 
-        // For now only a single SPIRV or WGSL subdescriptor is allowed.
-        DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
-                                     wgpu::SType::ShaderModuleWGSLDescriptor));
+// TintSource is a PIMPL container for a tint::Source::File, which needs to be kept alive for as
+// long as tint diagnostics are inspected / printed.
+class TintSource {
+  public:
+    template <typename... ARGS>
+    explicit TintSource(ARGS&&... args) : file(std::forward<ARGS>(args)...) {}
 
-        ScopedTintICEHandler scopedICEHandler(device);
+    tint::Source::File file;
+};
 
-        const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
-        FindInChain(chainedDescriptor, &spirvDesc);
-        const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
-        FindInChain(chainedDescriptor, &wgslDesc);
+MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+                                          const ShaderModuleDescriptor* descriptor,
+                                          ShaderModuleParseResult* parseResult,
+                                          OwnedCompilationMessages* outMessages) {
+    ASSERT(parseResult != nullptr);
 
-        // We have a temporary toggle to force the SPIRV ingestion to go through a WGSL
-        // intermediate step. It is done by switching the spirvDesc for a wgslDesc below.
-        ShaderModuleWGSLDescriptor newWgslDesc;
-        std::string newWgslCode;
-        if (spirvDesc && device->IsToggleEnabled(Toggle::ForceWGSLStep)) {
+    const ChainedStruct* chainedDescriptor = descriptor->nextInChain;
+    DAWN_INVALID_IF(chainedDescriptor == nullptr,
+                    "Shader module descriptor missing chained descriptor");
+
+    // For now only a single SPIRV or WGSL subdescriptor is allowed.
+    DAWN_TRY(ValidateSingleSType(chainedDescriptor, wgpu::SType::ShaderModuleSPIRVDescriptor,
+                                 wgpu::SType::ShaderModuleWGSLDescriptor));
+
+    ScopedTintICEHandler scopedICEHandler(device);
+
+    const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+    FindInChain(chainedDescriptor, &spirvDesc);
+    const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+    FindInChain(chainedDescriptor, &wgslDesc);
+
+    // We have a temporary toggle to force the SPIRV ingestion to go through a WGSL
+    // intermediate step. It is done by switching the spirvDesc for a wgslDesc below.
+    ShaderModuleWGSLDescriptor newWgslDesc;
+    std::string newWgslCode;
+    if (spirvDesc && device->IsToggleEnabled(Toggle::ForceWGSLStep)) {
 #if TINT_BUILD_WGSL_WRITER
-            std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
-            tint::Program program;
-            DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+        std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+        tint::Program program;
+        DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
 
-            tint::writer::wgsl::Options options;
-            auto result = tint::writer::wgsl::Generate(&program, options);
-            DAWN_INVALID_IF(!result.success, "Tint WGSL failure: Generator: %s", result.error);
+        tint::writer::wgsl::Options options;
+        auto result = tint::writer::wgsl::Generate(&program, options);
+        DAWN_INVALID_IF(!result.success, "Tint WGSL failure: Generator: %s", result.error);
 
-            newWgslCode = std::move(result.wgsl);
-            newWgslDesc.source = newWgslCode.c_str();
+        newWgslCode = std::move(result.wgsl);
+        newWgslDesc.source = newWgslCode.c_str();
 
-            spirvDesc = nullptr;
-            wgslDesc = &newWgslDesc;
+        spirvDesc = nullptr;
+        wgslDesc = &newWgslDesc;
 #else
-            device->EmitLog(
-                WGPULoggingType_Info,
-                "Toggle::ForceWGSLStep skipped because TINT_BUILD_WGSL_WRITER is not defined\n");
+        device->EmitLog(
+            WGPULoggingType_Info,
+            "Toggle::ForceWGSLStep skipped because TINT_BUILD_WGSL_WRITER is not defined\n");
 #endif
-        }
-
-        if (spirvDesc) {
-            DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowSpirv),
-                            "SPIR-V is disallowed.");
-
-            std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
-            tint::Program program;
-            DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
-            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
-        } else if (wgslDesc) {
-            auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
-
-            if (device->IsToggleEnabled(Toggle::DumpShaders)) {
-                std::ostringstream dumpedMsg;
-                dumpedMsg << "// Dumped WGSL:" << std::endl << wgslDesc->source;
-                device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
-            }
-
-            tint::Program program;
-            DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
-            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
-            parseResult->tintSource = std::move(tintSource);
-        }
-
-        return {};
     }
 
-    RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
-                                                            const PipelineLayoutBase* layout) {
-        RequiredBufferSizes bufferSizes;
-        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            bufferSizes[group] = GetBindGroupMinBufferSizes(entryPoint.bindings[group],
-                                                            layout->GetBindGroupLayout(group));
+    if (spirvDesc) {
+        DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowSpirv), "SPIR-V is disallowed.");
+
+        std::vector<uint32_t> spirv(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+        tint::Program program;
+        DAWN_TRY_ASSIGN(program, ParseSPIRV(spirv, outMessages));
+        parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+    } else if (wgslDesc) {
+        auto tintSource = std::make_unique<TintSource>("", wgslDesc->source);
+
+        if (device->IsToggleEnabled(Toggle::DumpShaders)) {
+            std::ostringstream dumpedMsg;
+            dumpedMsg << "// Dumped WGSL:" << std::endl << wgslDesc->source;
+            device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
         }
 
-        return bufferSizes;
+        tint::Program program;
+        DAWN_TRY_ASSIGN(program, ParseWGSL(&tintSource->file, outMessages));
+        parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+        parseResult->tintSource = std::move(tintSource);
     }
 
-    ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
-                                               const tint::Program* program,
-                                               const tint::transform::DataMap& inputs,
-                                               tint::transform::DataMap* outputs,
-                                               OwnedCompilationMessages* outMessages) {
-        tint::transform::Output output = transform->Run(program, inputs);
-        if (outMessages != nullptr) {
-            outMessages->AddMessages(output.program.Diagnostics());
-        }
-        DAWN_INVALID_IF(!output.program.IsValid(), "Tint program failure: %s\n",
-                        output.program.Diagnostics().str());
-        if (outputs != nullptr) {
-            *outputs = std::move(output.data);
-        }
-        return std::move(output.program);
+    return {};
+}
+
+RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+                                                        const PipelineLayoutBase* layout) {
+    RequiredBufferSizes bufferSizes;
+    for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        bufferSizes[group] = GetBindGroupMinBufferSizes(entryPoint.bindings[group],
+                                                        layout->GetBindGroupLayout(group));
     }
 
-    void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
-                                         const std::string& entryPoint,
-                                         BindGroupIndex pullingBufferBindingSet,
-                                         tint::transform::DataMap* transformInputs) {
-        tint::transform::VertexPulling::Config cfg;
-        cfg.entry_point_name = entryPoint;
-        cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
+    return bufferSizes;
+}
 
-        cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
-        for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
-            const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
-            tint::transform::VertexBufferLayoutDescriptor* tintInfo =
-                &cfg.vertex_state[static_cast<uint8_t>(slot)];
+ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+                                           const tint::Program* program,
+                                           const tint::transform::DataMap& inputs,
+                                           tint::transform::DataMap* outputs,
+                                           OwnedCompilationMessages* outMessages) {
+    tint::transform::Output output = transform->Run(program, inputs);
+    if (outMessages != nullptr) {
+        outMessages->AddMessages(output.program.Diagnostics());
+    }
+    DAWN_INVALID_IF(!output.program.IsValid(), "Tint program failure: %s\n",
+                    output.program.Diagnostics().str());
+    if (outputs != nullptr) {
+        *outputs = std::move(output.data);
+    }
+    return std::move(output.program);
+}
 
-            tintInfo->array_stride = dawnInfo.arrayStride;
-            tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
-        }
+void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+                                     const std::string& entryPoint,
+                                     BindGroupIndex pullingBufferBindingSet,
+                                     tint::transform::DataMap* transformInputs) {
+    tint::transform::VertexPulling::Config cfg;
+    cfg.entry_point_name = entryPoint;
+    cfg.pulling_group = static_cast<uint32_t>(pullingBufferBindingSet);
 
-        for (VertexAttributeLocation location :
-             IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
-            const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
-            tint::transform::VertexAttributeDescriptor tintInfo;
-            tintInfo.format = ToTintVertexFormat(dawnInfo.format);
-            tintInfo.offset = dawnInfo.offset;
-            tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
+    cfg.vertex_state.resize(renderPipeline.GetVertexBufferCount());
+    for (VertexBufferSlot slot : IterateBitSet(renderPipeline.GetVertexBufferSlotsUsed())) {
+        const VertexBufferInfo& dawnInfo = renderPipeline.GetVertexBuffer(slot);
+        tint::transform::VertexBufferLayoutDescriptor* tintInfo =
+            &cfg.vertex_state[static_cast<uint8_t>(slot)];
 
-            uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
-            cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
-        }
-
-        transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
+        tintInfo->array_stride = dawnInfo.arrayStride;
+        tintInfo->step_mode = ToTintVertexStepMode(dawnInfo.stepMode);
     }
 
-    MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
-                                                       const EntryPointMetadata& entryPoint,
-                                                       const PipelineLayoutBase* layout) {
-        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            DAWN_TRY_CONTEXT(ValidateCompatibilityWithBindGroupLayout(
-                                 device, group, entryPoint, layout->GetBindGroupLayout(group)),
-                             "validating the entry-point's compatibility for group %u with %s",
-                             static_cast<uint32_t>(group), layout->GetBindGroupLayout(group));
-        }
+    for (VertexAttributeLocation location :
+         IterateBitSet(renderPipeline.GetAttributeLocationsUsed())) {
+        const VertexAttributeInfo& dawnInfo = renderPipeline.GetAttribute(location);
+        tint::transform::VertexAttributeDescriptor tintInfo;
+        tintInfo.format = ToTintVertexFormat(dawnInfo.format);
+        tintInfo.offset = dawnInfo.offset;
+        tintInfo.shader_location = static_cast<uint32_t>(static_cast<uint8_t>(location));
 
-        for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
-            DAWN_INVALID_IF(entryPoint.bindings[group].size() > 0,
-                            "The entry-point uses bindings in group %u but %s doesn't have a "
-                            "BindGroupLayout for this index",
-                            static_cast<uint32_t>(group), layout);
-        }
-
-        // Validate that filtering samplers are not used with unfilterable textures.
-        for (const auto& pair : entryPoint.samplerTexturePairs) {
-            const BindGroupLayoutBase* samplerBGL = layout->GetBindGroupLayout(pair.sampler.group);
-            const BindingInfo& samplerInfo =
-                samplerBGL->GetBindingInfo(samplerBGL->GetBindingIndex(pair.sampler.binding));
-            if (samplerInfo.sampler.type != wgpu::SamplerBindingType::Filtering) {
-                continue;
-            }
-            const BindGroupLayoutBase* textureBGL = layout->GetBindGroupLayout(pair.texture.group);
-            const BindingInfo& textureInfo =
-                textureBGL->GetBindingInfo(textureBGL->GetBindingIndex(pair.texture.binding));
-
-            ASSERT(textureInfo.bindingType != BindingInfoType::Buffer &&
-                   textureInfo.bindingType != BindingInfoType::Sampler &&
-                   textureInfo.bindingType != BindingInfoType::StorageTexture);
-
-            if (textureInfo.bindingType != BindingInfoType::Texture) {
-                continue;
-            }
-
-            // Uint/sint can't be statically used with a sampler, so they any
-            // texture bindings reflected must be float or depth textures. If
-            // the shader uses a float/depth texture but the bind group layout
-            // specifies a uint/sint texture binding,
-            // |ValidateCompatibilityWithBindGroupLayout| will fail since the
-            // sampleType does not match.
-            ASSERT(textureInfo.texture.sampleType != wgpu::TextureSampleType::Undefined &&
-                   textureInfo.texture.sampleType != wgpu::TextureSampleType::Uint &&
-                   textureInfo.texture.sampleType != wgpu::TextureSampleType::Sint);
-
-            DAWN_INVALID_IF(
-                textureInfo.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat,
-                "Texture binding (group:%u, binding:%u) is %s but used statically with a sampler "
-                "(group:%u, binding:%u) that's %s",
-                static_cast<uint32_t>(pair.texture.group),
-                static_cast<uint32_t>(pair.texture.binding),
-                wgpu::TextureSampleType::UnfilterableFloat,
-                static_cast<uint32_t>(pair.sampler.group),
-                static_cast<uint32_t>(pair.sampler.binding), wgpu::SamplerBindingType::Filtering);
-        }
-
-        return {};
+        uint8_t vertexBufferSlot = static_cast<uint8_t>(dawnInfo.vertexBufferSlot);
+        cfg.vertex_state[vertexBufferSlot].attributes.push_back(tintInfo);
     }
 
-    // ShaderModuleBase
+    transformInputs->Add<tint::transform::VertexPulling::Config>(cfg);
+}
 
-    ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
-                                       const ShaderModuleDescriptor* descriptor,
-                                       ApiObjectBase::UntrackedByDeviceTag tag)
-        : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
-        ASSERT(descriptor->nextInChain != nullptr);
-        const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &spirvDesc);
-        const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &wgslDesc);
-        ASSERT(spirvDesc || wgslDesc);
+MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+                                                   const EntryPointMetadata& entryPoint,
+                                                   const PipelineLayoutBase* layout) {
+    for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        DAWN_TRY_CONTEXT(ValidateCompatibilityWithBindGroupLayout(
+                             device, group, entryPoint, layout->GetBindGroupLayout(group)),
+                         "validating the entry-point's compatibility for group %u with %s",
+                         static_cast<uint32_t>(group), layout->GetBindGroupLayout(group));
+    }
 
-        if (spirvDesc) {
-            mType = Type::Spirv;
-            mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
-        } else if (wgslDesc) {
-            mType = Type::Wgsl;
-            mWgsl = std::string(wgslDesc->source);
+    for (BindGroupIndex group : IterateBitSet(~layout->GetBindGroupLayoutsMask())) {
+        DAWN_INVALID_IF(entryPoint.bindings[group].size() > 0,
+                        "The entry-point uses bindings in group %u but %s doesn't have a "
+                        "BindGroupLayout for this index",
+                        static_cast<uint32_t>(group), layout);
+    }
+
+    // Validate that filtering samplers are not used with unfilterable textures.
+    for (const auto& pair : entryPoint.samplerTexturePairs) {
+        const BindGroupLayoutBase* samplerBGL = layout->GetBindGroupLayout(pair.sampler.group);
+        const BindingInfo& samplerInfo =
+            samplerBGL->GetBindingInfo(samplerBGL->GetBindingIndex(pair.sampler.binding));
+        if (samplerInfo.sampler.type != wgpu::SamplerBindingType::Filtering) {
+            continue;
+        }
+        const BindGroupLayoutBase* textureBGL = layout->GetBindGroupLayout(pair.texture.group);
+        const BindingInfo& textureInfo =
+            textureBGL->GetBindingInfo(textureBGL->GetBindingIndex(pair.texture.binding));
+
+        ASSERT(textureInfo.bindingType != BindingInfoType::Buffer &&
+               textureInfo.bindingType != BindingInfoType::Sampler &&
+               textureInfo.bindingType != BindingInfoType::StorageTexture);
+
+        if (textureInfo.bindingType != BindingInfoType::Texture) {
+            continue;
+        }
+
+        // Uint/sint can't be statically used with a sampler, so any
+        // texture bindings reflected must be float or depth textures. If
+        // the shader uses a float/depth texture but the bind group layout
+        // specifies a uint/sint texture binding,
+        // |ValidateCompatibilityWithBindGroupLayout| will fail since the
+        // sampleType does not match.
+        ASSERT(textureInfo.texture.sampleType != wgpu::TextureSampleType::Undefined &&
+               textureInfo.texture.sampleType != wgpu::TextureSampleType::Uint &&
+               textureInfo.texture.sampleType != wgpu::TextureSampleType::Sint);
+
+        DAWN_INVALID_IF(
+            textureInfo.texture.sampleType == wgpu::TextureSampleType::UnfilterableFloat,
+            "Texture binding (group:%u, binding:%u) is %s but used statically with a sampler "
+            "(group:%u, binding:%u) that's %s",
+            static_cast<uint32_t>(pair.texture.group), static_cast<uint32_t>(pair.texture.binding),
+            wgpu::TextureSampleType::UnfilterableFloat, static_cast<uint32_t>(pair.sampler.group),
+            static_cast<uint32_t>(pair.sampler.binding), wgpu::SamplerBindingType::Filtering);
+    }
+
+    return {};
+}
+
+// ShaderModuleBase
+
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device,
+                                   const ShaderModuleDescriptor* descriptor,
+                                   ApiObjectBase::UntrackedByDeviceTag tag)
+    : ApiObjectBase(device, descriptor->label), mType(Type::Undefined) {
+    ASSERT(descriptor->nextInChain != nullptr);
+    const ShaderModuleSPIRVDescriptor* spirvDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &spirvDesc);
+    const ShaderModuleWGSLDescriptor* wgslDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &wgslDesc);
+    ASSERT(spirvDesc || wgslDesc);
+
+    if (spirvDesc) {
+        mType = Type::Spirv;
+        mOriginalSpirv.assign(spirvDesc->code, spirvDesc->code + spirvDesc->codeSize);
+    } else if (wgslDesc) {
+        mType = Type::Wgsl;
+        mWgsl = std::string(wgslDesc->source);
+    }
+}
+
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
+    : ShaderModuleBase(device, descriptor, kUntrackedByDevice) {
+    TrackInDevice();
+}
+
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device)
+    : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag), mType(Type::Undefined) {}
+
+ShaderModuleBase::~ShaderModuleBase() = default;
+
+void ShaderModuleBase::DestroyImpl() {
+    if (IsCachedReference()) {
+        // Do not uncache the actual cached object if we are a blueprint.
+        GetDevice()->UncacheShaderModule(this);
+    }
+}
+
+// static
+Ref<ShaderModuleBase> ShaderModuleBase::MakeError(DeviceBase* device) {
+    return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
+}
+
+ObjectType ShaderModuleBase::GetType() const {
+    return ObjectType::ShaderModule;
+}
+
+bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
+    return mEntryPoints.count(entryPoint) > 0;
+}
+
+const EntryPointMetadata& ShaderModuleBase::GetEntryPoint(const std::string& entryPoint) const {
+    ASSERT(HasEntryPoint(entryPoint));
+    return *mEntryPoints.at(entryPoint);
+}
+
+size_t ShaderModuleBase::ComputeContentHash() {
+    ObjectContentHasher recorder;
+    recorder.Record(mType);
+    recorder.Record(mOriginalSpirv);
+    recorder.Record(mWgsl);
+    return recorder.GetContentHash();
+}
+
+bool ShaderModuleBase::EqualityFunc::operator()(const ShaderModuleBase* a,
+                                                const ShaderModuleBase* b) const {
+    return a->mType == b->mType && a->mOriginalSpirv == b->mOriginalSpirv && a->mWgsl == b->mWgsl;
+}
+
+const tint::Program* ShaderModuleBase::GetTintProgram() const {
+    ASSERT(mTintProgram);
+    return mTintProgram.get();
+}
+
+void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
+                                             void* userdata) {
+    if (callback == nullptr) {
+        return;
+    }
+
+    callback(WGPUCompilationInfoRequestStatus_Success, mCompilationMessages->GetCompilationInfo(),
+             userdata);
+}
+
+void ShaderModuleBase::InjectCompilationMessages(
+    std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
+    // TODO(dawn:944): ensure the InjectCompilationMessages is properly handled for shader
+    // module returned from cache.
+    // InjectCompilationMessages should be called only once for a shader module, after it is
+    // created. However currently InjectCompilationMessages may be called on a shader module
+    // returned from cache rather than newly created, and violate the rule. We just skip the
+    // injection in this case for now, but a proper solution including ensuring the cache goes
+    // before the validation is required.
+    if (mCompilationMessages != nullptr) {
+        return;
+    }
+    // Move the compilationMessages into the shader module and emit the tint errors and warnings
+    mCompilationMessages = std::move(compilationMessages);
+
+    // Emit the formatted Tint errors and warnings within the moved compilationMessages
+    const std::vector<std::string>& formattedTintMessages =
+        mCompilationMessages->GetFormattedTintMessages();
+    if (formattedTintMessages.empty()) {
+        return;
+    }
+    std::ostringstream t;
+    for (auto pMessage = formattedTintMessages.begin(); pMessage != formattedTintMessages.end();
+         pMessage++) {
+        if (pMessage != formattedTintMessages.begin()) {
+            t << std::endl;
+        }
+        t << *pMessage;
+    }
+    this->GetDevice()->EmitLog(WGPULoggingType_Warning, t.str().c_str());
+}
+
+OwnedCompilationMessages* ShaderModuleBase::GetCompilationMessages() const {
+    return mCompilationMessages.get();
+}
+
+// static
+void ShaderModuleBase::AddExternalTextureTransform(const PipelineLayoutBase* layout,
+                                                   tint::transform::Manager* transformManager,
+                                                   tint::transform::DataMap* transformInputs) {
+    tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+    for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+        for (const auto& expansion : bgl->GetExternalTextureBindingExpansionMap()) {
+            newBindingsMap[{static_cast<uint32_t>(i),
+                            static_cast<uint32_t>(expansion.second.plane0)}] = {
+                {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.plane1)},
+                {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.params)}};
         }
     }
 
-    ShaderModuleBase::ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor)
-        : ShaderModuleBase(device, descriptor, kUntrackedByDevice) {
-        TrackInDevice();
+    if (!newBindingsMap.empty()) {
+        transformManager->Add<tint::transform::MultiplanarExternalTexture>();
+        transformInputs->Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+            newBindingsMap);
     }
+}
 
-    ShaderModuleBase::ShaderModuleBase(DeviceBase* device)
-        : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
-    }
+MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
+    mTintProgram = std::move(parseResult->tintProgram);
+    mTintSource = std::move(parseResult->tintSource);
 
-    ShaderModuleBase::ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag), mType(Type::Undefined) {
-    }
+    DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingTint(GetDevice(), mTintProgram.get()));
+    return {};
+}
 
-    ShaderModuleBase::~ShaderModuleBase() = default;
-
-    void ShaderModuleBase::DestroyImpl() {
-        if (IsCachedReference()) {
-            // Do not uncache the actual cached object if we are a blueprint.
-            GetDevice()->UncacheShaderModule(this);
-        }
-    }
-
-    // static
-    Ref<ShaderModuleBase> ShaderModuleBase::MakeError(DeviceBase* device) {
-        return AcquireRef(new ShaderModuleBase(device, ObjectBase::kError));
-    }
-
-    ObjectType ShaderModuleBase::GetType() const {
-        return ObjectType::ShaderModule;
-    }
-
-    bool ShaderModuleBase::HasEntryPoint(const std::string& entryPoint) const {
-        return mEntryPoints.count(entryPoint) > 0;
-    }
-
-    const EntryPointMetadata& ShaderModuleBase::GetEntryPoint(const std::string& entryPoint) const {
-        ASSERT(HasEntryPoint(entryPoint));
-        return *mEntryPoints.at(entryPoint);
-    }
-
-    size_t ShaderModuleBase::ComputeContentHash() {
-        ObjectContentHasher recorder;
-        recorder.Record(mType);
-        recorder.Record(mOriginalSpirv);
-        recorder.Record(mWgsl);
-        return recorder.GetContentHash();
-    }
-
-    bool ShaderModuleBase::EqualityFunc::operator()(const ShaderModuleBase* a,
-                                                    const ShaderModuleBase* b) const {
-        return a->mType == b->mType && a->mOriginalSpirv == b->mOriginalSpirv &&
-               a->mWgsl == b->mWgsl;
-    }
-
-    const tint::Program* ShaderModuleBase::GetTintProgram() const {
-        ASSERT(mTintProgram);
-        return mTintProgram.get();
-    }
-
-    void ShaderModuleBase::APIGetCompilationInfo(wgpu::CompilationInfoCallback callback,
-                                                 void* userdata) {
-        if (callback == nullptr) {
-            return;
-        }
-
-        callback(WGPUCompilationInfoRequestStatus_Success,
-                 mCompilationMessages->GetCompilationInfo(), userdata);
-    }
-
-    void ShaderModuleBase::InjectCompilationMessages(
-        std::unique_ptr<OwnedCompilationMessages> compilationMessages) {
-        // TODO(dawn:944): ensure the InjectCompilationMessages is properly handled for shader
-        // module returned from cache.
-        // InjectCompilationMessages should be called only once for a shader module, after it is
-        // created. However currently InjectCompilationMessages may be called on a shader module
-        // returned from cache rather than newly created, and violate the rule. We just skip the
-        // injection in this case for now, but a proper solution including ensure the cache goes
-        // before the validation is required.
-        if (mCompilationMessages != nullptr) {
-            return;
-        }
-        // Move the compilationMessages into the shader module and emit the tint errors and warnings
-        mCompilationMessages = std::move(compilationMessages);
-
-        // Emit the formatted Tint errors and warnings within the moved compilationMessages
-        const std::vector<std::string>& formattedTintMessages =
-            mCompilationMessages->GetFormattedTintMessages();
-        if (formattedTintMessages.empty()) {
-            return;
-        }
-        std::ostringstream t;
-        for (auto pMessage = formattedTintMessages.begin(); pMessage != formattedTintMessages.end();
-             pMessage++) {
-            if (pMessage != formattedTintMessages.begin()) {
-                t << std::endl;
-            }
-            t << *pMessage;
-        }
-        this->GetDevice()->EmitLog(WGPULoggingType_Warning, t.str().c_str());
-    }
-
-    OwnedCompilationMessages* ShaderModuleBase::GetCompilationMessages() const {
-        return mCompilationMessages.get();
-    }
-
-    // static
-    void ShaderModuleBase::AddExternalTextureTransform(const PipelineLayoutBase* layout,
-                                                       tint::transform::Manager* transformManager,
-                                                       tint::transform::DataMap* transformInputs) {
-        tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
-        for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            const BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
-
-            for (const auto& expansion : bgl->GetExternalTextureBindingExpansionMap()) {
-                newBindingsMap[{static_cast<uint32_t>(i),
-                                static_cast<uint32_t>(expansion.second.plane0)}] = {
-                    {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.plane1)},
-                    {static_cast<uint32_t>(i), static_cast<uint32_t>(expansion.second.params)}};
-            }
-        }
-
-        if (!newBindingsMap.empty()) {
-            transformManager->Add<tint::transform::MultiplanarExternalTexture>();
-            transformInputs->Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
-                newBindingsMap);
-        }
-    }
-
-    MaybeError ShaderModuleBase::InitializeBase(ShaderModuleParseResult* parseResult) {
-        mTintProgram = std::move(parseResult->tintProgram);
-        mTintSource = std::move(parseResult->tintSource);
-
-        DAWN_TRY_ASSIGN(mEntryPoints, ReflectShaderUsingTint(GetDevice(), mTintProgram.get()));
-        return {};
-    }
-
-    size_t PipelineLayoutEntryPointPairHashFunc::operator()(
-        const PipelineLayoutEntryPointPair& pair) const {
-        size_t hash = 0;
-        HashCombine(&hash, pair.first, pair.second);
-        return hash;
-    }
+size_t PipelineLayoutEntryPointPairHashFunc::operator()(
+    const PipelineLayoutEntryPointPair& pair) const {
+    size_t hash = 0;
+    HashCombine(&hash, pair.first, pair.second);
+    return hash;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/ShaderModule.h b/src/dawn/native/ShaderModule.h
index a080fa2..70fe74d 100644
--- a/src/dawn/native/ShaderModule.h
+++ b/src/dawn/native/ShaderModule.h
@@ -40,277 +40,276 @@
 
 namespace tint {
 
-    class Program;
+class Program;
 
-    namespace transform {
-        class DataMap;
-        class Manager;
-        class Transform;
-        class VertexPulling;
-    }  // namespace transform
+namespace transform {
+class DataMap;
+class Manager;
+class Transform;
+class VertexPulling;
+}  // namespace transform
 
 }  // namespace tint
 
 namespace dawn::native {
 
-    struct EntryPointMetadata;
+struct EntryPointMetadata;
 
-    // Base component type of an inter-stage variable
-    enum class InterStageComponentType {
-        Sint,
-        Uint,
-        Float,
+// Base component type of an inter-stage variable
+enum class InterStageComponentType {
+    Sint,
+    Uint,
+    Float,
+};
+
+enum class InterpolationType {
+    Perspective,
+    Linear,
+    Flat,
+};
+
+enum class InterpolationSampling {
+    None,
+    Center,
+    Centroid,
+    Sample,
+};
+
+using PipelineLayoutEntryPointPair = std::pair<PipelineLayoutBase*, std::string>;
+struct PipelineLayoutEntryPointPairHashFunc {
+    size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
+};
+
+// A map from name to EntryPointMetadata.
+using EntryPointMetadataTable =
+    std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
+
+// Source for a tint program
+class TintSource;
+
+struct ShaderModuleParseResult {
+    ShaderModuleParseResult();
+    ~ShaderModuleParseResult();
+    ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
+    ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
+
+    bool HasParsedShader() const;
+
+    std::unique_ptr<tint::Program> tintProgram;
+    std::unique_ptr<TintSource> tintSource;
+};
+
+MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
+                                          const ShaderModuleDescriptor* descriptor,
+                                          ShaderModuleParseResult* parseResult,
+                                          OwnedCompilationMessages* outMessages);
+MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
+                                                   const EntryPointMetadata& entryPoint,
+                                                   const PipelineLayoutBase* layout);
+
+RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
+                                                        const PipelineLayoutBase* layout);
+ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
+                                           const tint::Program* program,
+                                           const tint::transform::DataMap& inputs,
+                                           tint::transform::DataMap* outputs,
+                                           OwnedCompilationMessages* messages);
+
+/// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
+void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
+                                     const std::string& entryPoint,
+                                     BindGroupIndex pullingBufferBindingSet,
+                                     tint::transform::DataMap* transformInputs);
+
+// Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
+// for isComparison instead of a wgpu::SamplerBindingType enum.
+struct ShaderSamplerBindingInfo {
+    bool isComparison;
+};
+
+// Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
+// instead of a single enum.
+struct ShaderTextureBindingInfo {
+    SampleTypeBit compatibleSampleTypes;
+    wgpu::TextureViewDimension viewDimension;
+    bool multisampled;
+};
+
+// Per-binding shader metadata contains some SPIRV specific information in addition to
+// most of the frontend per-binding information.
+struct ShaderBindingInfo {
+    // The SPIRV ID of the resource.
+    uint32_t id;
+    uint32_t base_type_id;
+
+    BindingNumber binding;
+    BindingInfoType bindingType;
+
+    BufferBindingLayout buffer;
+    ShaderSamplerBindingInfo sampler;
+    ShaderTextureBindingInfo texture;
+    StorageTextureBindingLayout storageTexture;
+};
+
+using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
+using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
+
+// The WebGPU overridable constants only support these scalar types
+union OverridableConstantScalar {
+    // Use int32_t for boolean to initialize the full 32bit
+    int32_t b;
+    float f32;
+    int32_t i32;
+    uint32_t u32;
+};
+
+// Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
+// stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
+// pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
+// ShaderModuleBase.
+struct EntryPointMetadata {
+    // It is valid for a shader to contain entry points that go over limits. To keep this
+    // structure with packed arrays and bitsets, we still validate against limits when
+    // doing reflection, but store the errors in this vector, for later use if the application
+    // tries to use the entry point.
+    std::vector<std::string> infringedLimitErrors;
+
+    // bindings[G][B] is the reflection data for the binding defined with
+    // @group(G) @binding(B) in WGSL / SPIRV.
+    BindingInfoArray bindings;
+
+    struct SamplerTexturePair {
+        BindingSlot sampler;
+        BindingSlot texture;
     };
+    std::vector<SamplerTexturePair> samplerTexturePairs;
 
-    enum class InterpolationType {
-        Perspective,
-        Linear,
-        Flat,
+    // The set of vertex attributes this entryPoint uses.
+    ityp::array<VertexAttributeLocation, VertexFormatBaseType, kMaxVertexAttributes>
+        vertexInputBaseTypes;
+    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> usedVertexInputs;
+
+    // An array to record the basic types (float, int and uint) of the fragment shader outputs.
+    struct FragmentOutputVariableInfo {
+        wgpu::TextureComponentType baseType;
+        uint8_t componentCount;
     };
+    ityp::array<ColorAttachmentIndex, FragmentOutputVariableInfo, kMaxColorAttachments>
+        fragmentOutputVariables;
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> fragmentOutputsWritten;
 
-    enum class InterpolationSampling {
-        None,
-        Center,
-        Centroid,
-        Sample,
+    struct InterStageVariableInfo {
+        InterStageComponentType baseType;
+        uint32_t componentCount;
+        InterpolationType interpolationType;
+        InterpolationSampling interpolationSampling;
     };
+    // Now that we only support vertex and fragment stages, there can't be both inter-stage
+    // inputs and outputs in one shader stage.
+    std::bitset<kMaxInterStageShaderVariables> usedInterStageVariables;
+    std::array<InterStageVariableInfo, kMaxInterStageShaderVariables> interStageVariables;
 
-    using PipelineLayoutEntryPointPair = std::pair<PipelineLayoutBase*, std::string>;
-    struct PipelineLayoutEntryPointPairHashFunc {
-        size_t operator()(const PipelineLayoutEntryPointPair& pair) const;
-    };
+    // The local workgroup size declared for a compute entry point (or 0s otherwise).
+    Origin3D localWorkgroupSize;
 
-    // A map from name to EntryPointMetadata.
-    using EntryPointMetadataTable =
-        std::unordered_map<std::string, std::unique_ptr<EntryPointMetadata>>;
+    // The shader stage for this binding.
+    SingleShaderStage stage;
 
-    // Source for a tint program
-    class TintSource;
-
-    struct ShaderModuleParseResult {
-        ShaderModuleParseResult();
-        ~ShaderModuleParseResult();
-        ShaderModuleParseResult(ShaderModuleParseResult&& rhs);
-        ShaderModuleParseResult& operator=(ShaderModuleParseResult&& rhs);
-
-        bool HasParsedShader() const;
-
-        std::unique_ptr<tint::Program> tintProgram;
-        std::unique_ptr<TintSource> tintSource;
-    };
-
-    MaybeError ValidateShaderModuleDescriptor(DeviceBase* device,
-                                              const ShaderModuleDescriptor* descriptor,
-                                              ShaderModuleParseResult* parseResult,
-                                              OwnedCompilationMessages* outMessages);
-    MaybeError ValidateCompatibilityWithPipelineLayout(DeviceBase* device,
-                                                       const EntryPointMetadata& entryPoint,
-                                                       const PipelineLayoutBase* layout);
-
-    RequiredBufferSizes ComputeRequiredBufferSizesForLayout(const EntryPointMetadata& entryPoint,
-                                                            const PipelineLayoutBase* layout);
-    ResultOrError<tint::Program> RunTransforms(tint::transform::Transform* transform,
-                                               const tint::Program* program,
-                                               const tint::transform::DataMap& inputs,
-                                               tint::transform::DataMap* outputs,
-                                               OwnedCompilationMessages* messages);
-
-    /// Creates and adds the tint::transform::VertexPulling::Config to transformInputs.
-    void AddVertexPullingTransformConfig(const RenderPipelineBase& renderPipeline,
-                                         const std::string& entryPoint,
-                                         BindGroupIndex pullingBufferBindingSet,
-                                         tint::transform::DataMap* transformInputs);
-
-    // Mirrors wgpu::SamplerBindingLayout but instead stores a single boolean
-    // for isComparison instead of a wgpu::SamplerBindingType enum.
-    struct ShaderSamplerBindingInfo {
-        bool isComparison;
-    };
-
-    // Mirrors wgpu::TextureBindingLayout but instead has a set of compatible sampleTypes
-    // instead of a single enum.
-    struct ShaderTextureBindingInfo {
-        SampleTypeBit compatibleSampleTypes;
-        wgpu::TextureViewDimension viewDimension;
-        bool multisampled;
-    };
-
-    // Per-binding shader metadata contains some SPIRV specific information in addition to
-    // most of the frontend per-binding information.
-    struct ShaderBindingInfo {
-        // The SPIRV ID of the resource.
+    struct OverridableConstant {
         uint32_t id;
-        uint32_t base_type_id;
+        // Match tint::inspector::OverridableConstant::Type
+        // Bool is defined as a macro on linux X11 and cannot compile
+        enum class Type { Boolean, Float32, Uint32, Int32 } type;
 
-        BindingNumber binding;
-        BindingInfoType bindingType;
+        // If the constant doesn't have an initializer in the shader,
+        // then the pipeline stage is required to provide a constant record to
+        // initialize its value.
+        bool isInitialized;
 
-        BufferBindingLayout buffer;
-        ShaderSamplerBindingInfo sampler;
-        ShaderTextureBindingInfo texture;
-        StorageTextureBindingLayout storageTexture;
+        // Store the default initialized value in shader
+        // This is used by metal backend as the function_constant does not have default values
+        // Initialized when isInitialized == true
+        OverridableConstantScalar defaultValue;
     };
 
-    using BindingGroupInfoMap = std::map<BindingNumber, ShaderBindingInfo>;
-    using BindingInfoArray = ityp::array<BindGroupIndex, BindingGroupInfoMap, kMaxBindGroups>;
+    using OverridableConstantsMap = std::unordered_map<std::string, OverridableConstant>;
 
-    // The WebGPU overridable constants only support these scalar types
-    union OverridableConstantScalar {
-        // Use int32_t for boolean to initialize the full 32bit
-        int32_t b;
-        float f32;
-        int32_t i32;
-        uint32_t u32;
+    // Map identifier to overridable constant
+    // Identifier is unique: either the variable name or the numeric ID if specified
+    OverridableConstantsMap overridableConstants;
+
+    // Overridable constants that are not initialized in shaders
+    // They need value initialization from pipeline stage or it is a validation error
+    std::unordered_set<std::string> uninitializedOverridableConstants;
+
+    // Store constants with shader initialized values as well
+    // This is used by metal backend to set values with default initializers that are not
+    // overridden
+    std::unordered_set<std::string> initializedOverridableConstants;
+
+    bool usesNumWorkgroups = false;
+};
+
+class ShaderModuleBase : public ApiObjectBase, public CachedObject {
+  public:
+    ShaderModuleBase(DeviceBase* device,
+                     const ShaderModuleDescriptor* descriptor,
+                     ApiObjectBase::UntrackedByDeviceTag tag);
+    ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
+    ~ShaderModuleBase() override;
+
+    static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
+
+    ObjectType GetType() const override;
+
+    // Return true iff the program has an entrypoint called `entryPoint`.
+    bool HasEntryPoint(const std::string& entryPoint) const;
+
+    // Return the metadata for the given `entryPoint`. HasEntryPoint with the same argument
+    // must be true.
+    const EntryPointMetadata& GetEntryPoint(const std::string& entryPoint) const;
+
+    // Functions necessary for the unordered_set<ShaderModuleBase*>-based cache.
+    size_t ComputeContentHash() override;
+
+    struct EqualityFunc {
+        bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
     };
 
-    // Contains all the reflection data for a valid (ShaderModule, entryPoint, stage). They are
-    // stored in the ShaderModuleBase and destroyed only when the shader program is destroyed so
-    // pointers to EntryPointMetadata are safe to store as long as you also keep a Ref to the
-    // ShaderModuleBase.
-    struct EntryPointMetadata {
-        // It is valid for a shader to contain entry points that go over limits. To keep this
-        // structure with packed arrays and bitsets, we still validate against limits when
-        // doing reflection, but store the errors in this vector, for later use if the application
-        // tries to use the entry point.
-        std::vector<std::string> infringedLimitErrors;
+    const tint::Program* GetTintProgram() const;
 
-        // bindings[G][B] is the reflection data for the binding defined with
-        // @group(G) @binding(B) in WGSL / SPIRV.
-        BindingInfoArray bindings;
+    void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
 
-        struct SamplerTexturePair {
-            BindingSlot sampler;
-            BindingSlot texture;
-        };
-        std::vector<SamplerTexturePair> samplerTexturePairs;
+    void InjectCompilationMessages(std::unique_ptr<OwnedCompilationMessages> compilationMessages);
 
-        // The set of vertex attributes this entryPoint uses.
-        ityp::array<VertexAttributeLocation, VertexFormatBaseType, kMaxVertexAttributes>
-            vertexInputBaseTypes;
-        ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> usedVertexInputs;
+    OwnedCompilationMessages* GetCompilationMessages() const;
 
-        // An array to record the basic types (float, int and uint) of the fragment shader outputs.
-        struct FragmentOutputVariableInfo {
-            wgpu::TextureComponentType baseType;
-            uint8_t componentCount;
-        };
-        ityp::array<ColorAttachmentIndex, FragmentOutputVariableInfo, kMaxColorAttachments>
-            fragmentOutputVariables;
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> fragmentOutputsWritten;
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit ShaderModuleBase(DeviceBase* device);
+    void DestroyImpl() override;
 
-        struct InterStageVariableInfo {
-            InterStageComponentType baseType;
-            uint32_t componentCount;
-            InterpolationType interpolationType;
-            InterpolationSampling interpolationSampling;
-        };
-        // Now that we only support vertex and fragment stages, there can't be both inter-stage
-        // inputs and outputs in one shader stage.
-        std::bitset<kMaxInterStageShaderVariables> usedInterStageVariables;
-        std::array<InterStageVariableInfo, kMaxInterStageShaderVariables> interStageVariables;
+    MaybeError InitializeBase(ShaderModuleParseResult* parseResult);
 
-        // The local workgroup size declared for a compute entry point (or 0s otehrwise).
-        Origin3D localWorkgroupSize;
+    static void AddExternalTextureTransform(const PipelineLayoutBase* layout,
+                                            tint::transform::Manager* transformManager,
+                                            tint::transform::DataMap* transformInputs);
 
-        // The shader stage for this binding.
-        SingleShaderStage stage;
+  private:
+    ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        struct OverridableConstant {
-            uint32_t id;
-            // Match tint::inspector::OverridableConstant::Type
-            // Bool is defined as a macro on linux X11 and cannot compile
-            enum class Type { Boolean, Float32, Uint32, Int32 } type;
+    // The original data in the descriptor for caching.
+    enum class Type { Undefined, Spirv, Wgsl };
+    Type mType;
+    std::vector<uint32_t> mOriginalSpirv;
+    std::string mWgsl;
 
-            // If the constant doesn't not have an initializer in the shader
-            // Then it is required for the pipeline stage to have a constant record to initialize a
-            // value
-            bool isInitialized;
+    EntryPointMetadataTable mEntryPoints;
+    std::unique_ptr<tint::Program> mTintProgram;
+    std::unique_ptr<TintSource> mTintSource;  // Keep the tint::Source::File alive
 
-            // Store the default initialized value in shader
-            // This is used by metal backend as the function_constant does not have dafault values
-            // Initialized when isInitialized == true
-            OverridableConstantScalar defaultValue;
-        };
-
-        using OverridableConstantsMap = std::unordered_map<std::string, OverridableConstant>;
-
-        // Map identifier to overridable constant
-        // Identifier is unique: either the variable name or the numeric ID if specified
-        OverridableConstantsMap overridableConstants;
-
-        // Overridable constants that are not initialized in shaders
-        // They need value initialization from pipeline stage or it is a validation error
-        std::unordered_set<std::string> uninitializedOverridableConstants;
-
-        // Store constants with shader initialized values as well
-        // This is used by metal backend to set values with default initializers that are not
-        // overridden
-        std::unordered_set<std::string> initializedOverridableConstants;
-
-        bool usesNumWorkgroups = false;
-    };
-
-    class ShaderModuleBase : public ApiObjectBase, public CachedObject {
-      public:
-        ShaderModuleBase(DeviceBase* device,
-                         const ShaderModuleDescriptor* descriptor,
-                         ApiObjectBase::UntrackedByDeviceTag tag);
-        ShaderModuleBase(DeviceBase* device, const ShaderModuleDescriptor* descriptor);
-        ~ShaderModuleBase() override;
-
-        static Ref<ShaderModuleBase> MakeError(DeviceBase* device);
-
-        ObjectType GetType() const override;
-
-        // Return true iff the program has an entrypoint called `entryPoint`.
-        bool HasEntryPoint(const std::string& entryPoint) const;
-
-        // Return the metadata for the given `entryPoint`. HasEntryPoint with the same argument
-        // must be true.
-        const EntryPointMetadata& GetEntryPoint(const std::string& entryPoint) const;
-
-        // Functions necessary for the unordered_set<ShaderModuleBase*>-based cache.
-        size_t ComputeContentHash() override;
-
-        struct EqualityFunc {
-            bool operator()(const ShaderModuleBase* a, const ShaderModuleBase* b) const;
-        };
-
-        const tint::Program* GetTintProgram() const;
-
-        void APIGetCompilationInfo(wgpu::CompilationInfoCallback callback, void* userdata);
-
-        void InjectCompilationMessages(
-            std::unique_ptr<OwnedCompilationMessages> compilationMessages);
-
-        OwnedCompilationMessages* GetCompilationMessages() const;
-
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit ShaderModuleBase(DeviceBase* device);
-        void DestroyImpl() override;
-
-        MaybeError InitializeBase(ShaderModuleParseResult* parseResult);
-
-        static void AddExternalTextureTransform(const PipelineLayoutBase* layout,
-                                                tint::transform::Manager* transformManager,
-                                                tint::transform::DataMap* transformInputs);
-
-      private:
-        ShaderModuleBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-
-        // The original data in the descriptor for caching.
-        enum class Type { Undefined, Spirv, Wgsl };
-        Type mType;
-        std::vector<uint32_t> mOriginalSpirv;
-        std::string mWgsl;
-
-        EntryPointMetadataTable mEntryPoints;
-        std::unique_ptr<tint::Program> mTintProgram;
-        std::unique_ptr<TintSource> mTintSource;  // Keep the tint::Source::File alive
-
-        std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
-    };
+    std::unique_ptr<OwnedCompilationMessages> mCompilationMessages;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/SpirvValidation.cpp b/src/dawn/native/SpirvValidation.cpp
index 116e648..b40a803 100644
--- a/src/dawn/native/SpirvValidation.cpp
+++ b/src/dawn/native/SpirvValidation.cpp
@@ -23,54 +23,50 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateSpirv(DeviceBase* device,
-                             const std::vector<uint32_t>& spirv,
-                             bool dumpSpirv) {
-        spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
-        spirvTools.SetMessageConsumer([device](spv_message_level_t level, const char*,
-                                               const spv_position_t& position,
-                                               const char* message) {
-            WGPULoggingType wgpuLogLevel;
-            switch (level) {
-                case SPV_MSG_FATAL:
-                case SPV_MSG_INTERNAL_ERROR:
-                case SPV_MSG_ERROR:
-                    wgpuLogLevel = WGPULoggingType_Error;
-                    break;
-                case SPV_MSG_WARNING:
-                    wgpuLogLevel = WGPULoggingType_Warning;
-                    break;
-                case SPV_MSG_INFO:
-                    wgpuLogLevel = WGPULoggingType_Info;
-                    break;
-                default:
-                    wgpuLogLevel = WGPULoggingType_Error;
-                    break;
-            }
-
-            std::ostringstream ss;
-            ss << "SPIRV line " << position.index << ": " << message << std::endl;
-            device->EmitLog(wgpuLogLevel, ss.str().c_str());
-        });
-
-        const bool valid = spirvTools.Validate(spirv);
-        if (dumpSpirv || !valid) {
-            std::ostringstream dumpedMsg;
-            std::string disassembly;
-            if (spirvTools.Disassemble(
-                    spirv, &disassembly,
-                    SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT)) {
-                dumpedMsg << "/* Dumped generated SPIRV disassembly */" << std::endl << disassembly;
-            } else {
-                dumpedMsg << "/* Failed to disassemble generated SPIRV */";
-            }
-            device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+MaybeError ValidateSpirv(DeviceBase* device, const std::vector<uint32_t>& spirv, bool dumpSpirv) {
+    spvtools::SpirvTools spirvTools(SPV_ENV_VULKAN_1_1);
+    spirvTools.SetMessageConsumer([device](spv_message_level_t level, const char*,
+                                           const spv_position_t& position, const char* message) {
+        WGPULoggingType wgpuLogLevel;
+        switch (level) {
+            case SPV_MSG_FATAL:
+            case SPV_MSG_INTERNAL_ERROR:
+            case SPV_MSG_ERROR:
+                wgpuLogLevel = WGPULoggingType_Error;
+                break;
+            case SPV_MSG_WARNING:
+                wgpuLogLevel = WGPULoggingType_Warning;
+                break;
+            case SPV_MSG_INFO:
+                wgpuLogLevel = WGPULoggingType_Info;
+                break;
+            default:
+                wgpuLogLevel = WGPULoggingType_Error;
+                break;
         }
 
-        DAWN_INVALID_IF(!valid,
-                        "Produced invalid SPIRV. Please file a bug at https://crbug.com/tint.");
+        std::ostringstream ss;
+        ss << "SPIRV line " << position.index << ": " << message << std::endl;
+        device->EmitLog(wgpuLogLevel, ss.str().c_str());
+    });
 
-        return {};
+    const bool valid = spirvTools.Validate(spirv);
+    if (dumpSpirv || !valid) {
+        std::ostringstream dumpedMsg;
+        std::string disassembly;
+        if (spirvTools.Disassemble(
+                spirv, &disassembly,
+                SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT)) {
+            dumpedMsg << "/* Dumped generated SPIRV disassembly */" << std::endl << disassembly;
+        } else {
+            dumpedMsg << "/* Failed to disassemble generated SPIRV */";
+        }
+        device->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
     }
 
+    DAWN_INVALID_IF(!valid, "Produced invalid SPIRV. Please file a bug at https://crbug.com/tint.");
+
+    return {};
+}
+
 }  // namespace dawn::native
diff --git a/src/dawn/native/SpirvValidation.h b/src/dawn/native/SpirvValidation.h
index 3583990..b50d38a 100644
--- a/src/dawn/native/SpirvValidation.h
+++ b/src/dawn/native/SpirvValidation.h
@@ -21,11 +21,9 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    MaybeError ValidateSpirv(DeviceBase* device,
-                             const std::vector<uint32_t>& spirv,
-                             bool dumpSpirv);
+MaybeError ValidateSpirv(DeviceBase* device, const std::vector<uint32_t>& spirv, bool dumpSpirv);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/StagingBuffer.cpp b/src/dawn/native/StagingBuffer.cpp
index a6c258c..2b40323 100644
--- a/src/dawn/native/StagingBuffer.cpp
+++ b/src/dawn/native/StagingBuffer.cpp
@@ -16,14 +16,13 @@
 
 namespace dawn::native {
 
-    StagingBufferBase::StagingBufferBase(size_t size) : mBufferSize(size) {
-    }
+StagingBufferBase::StagingBufferBase(size_t size) : mBufferSize(size) {}
 
-    size_t StagingBufferBase::GetSize() const {
-        return mBufferSize;
-    }
+size_t StagingBufferBase::GetSize() const {
+    return mBufferSize;
+}
 
-    void* StagingBufferBase::GetMappedPointer() const {
-        return mMappedPointer;
-    }
+void* StagingBufferBase::GetMappedPointer() const {
+    return mMappedPointer;
+}
 }  // namespace dawn::native
diff --git a/src/dawn/native/StagingBuffer.h b/src/dawn/native/StagingBuffer.h
index 4bda9c6..741d213 100644
--- a/src/dawn/native/StagingBuffer.h
+++ b/src/dawn/native/StagingBuffer.h
@@ -19,22 +19,22 @@
 
 namespace dawn::native {
 
-    class StagingBufferBase {
-      public:
-        explicit StagingBufferBase(size_t size);
-        virtual ~StagingBufferBase() = default;
+class StagingBufferBase {
+  public:
+    explicit StagingBufferBase(size_t size);
+    virtual ~StagingBufferBase() = default;
 
-        virtual MaybeError Initialize() = 0;
+    virtual MaybeError Initialize() = 0;
 
-        void* GetMappedPointer() const;
-        size_t GetSize() const;
+    void* GetMappedPointer() const;
+    size_t GetSize() const;
 
-      protected:
-        void* mMappedPointer = nullptr;
+  protected:
+    void* mMappedPointer = nullptr;
 
-      private:
-        const size_t mBufferSize;
-    };
+  private:
+    const size_t mBufferSize;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Subresource.cpp b/src/dawn/native/Subresource.cpp
index 6ebba9f..e8fce4d 100644
--- a/src/dawn/native/Subresource.cpp
+++ b/src/dawn/native/Subresource.cpp
@@ -19,118 +19,116 @@
 
 namespace dawn::native {
 
-    Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
-        Aspect aspectMask = ConvertAspect(format, aspect);
-        ASSERT(HasOneBit(aspectMask));
-        return aspectMask;
-    }
+Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect) {
+    Aspect aspectMask = ConvertAspect(format, aspect);
+    ASSERT(HasOneBit(aspectMask));
+    return aspectMask;
+}
 
-    Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
-        Aspect aspectMask = SelectFormatAspects(format, aspect);
-        ASSERT(aspectMask != Aspect::None);
-        return aspectMask;
-    }
+Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect) {
+    Aspect aspectMask = SelectFormatAspects(format, aspect);
+    ASSERT(aspectMask != Aspect::None);
+    return aspectMask;
+}
 
-    Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect) {
-        // Color view |format| must be treated as the same plane |aspect|.
-        if (format.aspects == Aspect::Color) {
-            switch (aspect) {
-                case wgpu::TextureAspect::Plane0Only:
-                    return Aspect::Plane0;
-                case wgpu::TextureAspect::Plane1Only:
-                    return Aspect::Plane1;
-                default:
-                    break;
-            }
-        }
-        return ConvertAspect(format, aspect);
-    }
-
-    Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect) {
+Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect) {
+    // Color view |format| must be treated as the same plane |aspect|.
+    if (format.aspects == Aspect::Color) {
         switch (aspect) {
-            case wgpu::TextureAspect::All:
-                return format.aspects;
-            case wgpu::TextureAspect::DepthOnly:
-                return format.aspects & Aspect::Depth;
-            case wgpu::TextureAspect::StencilOnly:
-                return format.aspects & Aspect::Stencil;
             case wgpu::TextureAspect::Plane0Only:
-                return format.aspects & Aspect::Plane0;
+                return Aspect::Plane0;
             case wgpu::TextureAspect::Plane1Only:
-                return format.aspects & Aspect::Plane1;
-        }
-        UNREACHABLE();
-    }
-
-    uint8_t GetAspectIndex(Aspect aspect) {
-        ASSERT(HasOneBit(aspect));
-        switch (aspect) {
-            case Aspect::Color:
-            case Aspect::Depth:
-            case Aspect::Plane0:
-            case Aspect::CombinedDepthStencil:
-                return 0;
-            case Aspect::Plane1:
-            case Aspect::Stencil:
-                return 1;
+                return Aspect::Plane1;
             default:
-                UNREACHABLE();
+                break;
         }
     }
+    return ConvertAspect(format, aspect);
+}
 
-    uint8_t GetAspectCount(Aspect aspects) {
-        // TODO(crbug.com/dawn/829): This should use popcount once Dawn has such a function.
-        // Note that we can't do a switch because compilers complain that Depth | Stencil is not
-        // a valid enum value.
-        if (aspects == Aspect::Color || aspects == Aspect::Depth ||
-            aspects == Aspect::CombinedDepthStencil) {
+Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect) {
+    switch (aspect) {
+        case wgpu::TextureAspect::All:
+            return format.aspects;
+        case wgpu::TextureAspect::DepthOnly:
+            return format.aspects & Aspect::Depth;
+        case wgpu::TextureAspect::StencilOnly:
+            return format.aspects & Aspect::Stencil;
+        case wgpu::TextureAspect::Plane0Only:
+            return format.aspects & Aspect::Plane0;
+        case wgpu::TextureAspect::Plane1Only:
+            return format.aspects & Aspect::Plane1;
+    }
+    UNREACHABLE();
+}
+
+uint8_t GetAspectIndex(Aspect aspect) {
+    ASSERT(HasOneBit(aspect));
+    switch (aspect) {
+        case Aspect::Color:
+        case Aspect::Depth:
+        case Aspect::Plane0:
+        case Aspect::CombinedDepthStencil:
+            return 0;
+        case Aspect::Plane1:
+        case Aspect::Stencil:
             return 1;
-        } else if (aspects == (Aspect::Plane0 | Aspect::Plane1)) {
-            return 2;
-        } else if (aspects == Aspect::Stencil) {
-            // Fake a the existence of a depth aspect so that the stencil data stays at index 1.
-            ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
-            return 2;
-        } else {
-            ASSERT(aspects == (Aspect::Depth | Aspect::Stencil));
-            return 2;
-        }
+        default:
+            UNREACHABLE();
     }
+}
 
-    SubresourceRange::SubresourceRange(Aspect aspects,
-                                       FirstAndCountRange<uint32_t> arrayLayerParam,
-                                       FirstAndCountRange<uint32_t> mipLevelParams)
-        : aspects(aspects),
-          baseArrayLayer(arrayLayerParam.first),
-          layerCount(arrayLayerParam.count),
-          baseMipLevel(mipLevelParams.first),
-          levelCount(mipLevelParams.count) {
+uint8_t GetAspectCount(Aspect aspects) {
+    // TODO(crbug.com/dawn/829): This should use popcount once Dawn has such a function.
+    // Note that we can't do a switch because compilers complain that Depth | Stencil is not
+    // a valid enum value.
+    if (aspects == Aspect::Color || aspects == Aspect::Depth ||
+        aspects == Aspect::CombinedDepthStencil) {
+        return 1;
+    } else if (aspects == (Aspect::Plane0 | Aspect::Plane1)) {
+        return 2;
+    } else if (aspects == Aspect::Stencil) {
+        // Fake the existence of a depth aspect so that the stencil data stays at index 1.
+        ASSERT(GetAspectIndex(Aspect::Stencil) == 1);
+        return 2;
+    } else {
+        ASSERT(aspects == (Aspect::Depth | Aspect::Stencil));
+        return 2;
     }
+}
 
-    SubresourceRange::SubresourceRange()
-        : aspects(Aspect::None), baseArrayLayer(0), layerCount(0), baseMipLevel(0), levelCount(0) {
-    }
+SubresourceRange::SubresourceRange(Aspect aspects,
+                                   FirstAndCountRange<uint32_t> arrayLayerParam,
+                                   FirstAndCountRange<uint32_t> mipLevelParams)
+    : aspects(aspects),
+      baseArrayLayer(arrayLayerParam.first),
+      layerCount(arrayLayerParam.count),
+      baseMipLevel(mipLevelParams.first),
+      levelCount(mipLevelParams.count) {}
 
-    // static
-    SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
-                                                         uint32_t baseArrayLayer,
-                                                         Aspect aspects) {
-        return {aspects, {baseArrayLayer, 1}, {baseMipLevel, 1}};
-    }
+SubresourceRange::SubresourceRange()
+    : aspects(Aspect::None), baseArrayLayer(0), layerCount(0), baseMipLevel(0), levelCount(0) {}
 
-    // static
-    SubresourceRange SubresourceRange::MakeSingle(Aspect aspect,
-                                                  uint32_t baseArrayLayer,
-                                                  uint32_t baseMipLevel) {
-        ASSERT(HasOneBit(aspect));
-        return {aspect, {baseArrayLayer, 1}, {baseMipLevel, 1}};
-    }
+// static
+SubresourceRange SubresourceRange::SingleMipAndLayer(uint32_t baseMipLevel,
+                                                     uint32_t baseArrayLayer,
+                                                     Aspect aspects) {
+    return {aspects, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+}
 
-    // static
-    SubresourceRange SubresourceRange::MakeFull(Aspect aspects,
-                                                uint32_t layerCount,
-                                                uint32_t levelCount) {
-        return {aspects, {0, layerCount}, {0, levelCount}};
-    }
+// static
+SubresourceRange SubresourceRange::MakeSingle(Aspect aspect,
+                                              uint32_t baseArrayLayer,
+                                              uint32_t baseMipLevel) {
+    ASSERT(HasOneBit(aspect));
+    return {aspect, {baseArrayLayer, 1}, {baseMipLevel, 1}};
+}
+
+// static
+SubresourceRange SubresourceRange::MakeFull(Aspect aspects,
+                                            uint32_t layerCount,
+                                            uint32_t levelCount) {
+    return {aspects, {0, layerCount}, {0, levelCount}};
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Subresource.h b/src/dawn/native/Subresource.h
index f985c68..473631a 100644
--- a/src/dawn/native/Subresource.h
+++ b/src/dawn/native/Subresource.h
@@ -20,92 +20,92 @@
 
 namespace dawn::native {
 
-    // Note: Subresource indices are computed by iterating the aspects in increasing order.
-    // D3D12 uses these directly, so the order much match D3D12's indices.
-    //  - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
-    enum class Aspect : uint8_t {
-        None = 0x0,
-        Color = 0x1,
-        Depth = 0x2,
-        Stencil = 0x4,
+// Note: Subresource indices are computed by iterating the aspects in increasing order.
+// D3D12 uses these directly, so the order must match D3D12's indices.
+//  - Depth/Stencil textures have Depth as Plane 0, and Stencil as Plane 1.
+enum class Aspect : uint8_t {
+    None = 0x0,
+    Color = 0x1,
+    Depth = 0x2,
+    Stencil = 0x4,
 
-        // Aspects used to select individual planes in a multi-planar format.
-        Plane0 = 0x8,
-        Plane1 = 0x10,
+    // Aspects used to select individual planes in a multi-planar format.
+    Plane0 = 0x8,
+    Plane1 = 0x10,
 
-        // An aspect for that represents the combination of both the depth and stencil aspects. It
-        // can be ignored outside of the Vulkan backend.
-        CombinedDepthStencil = 0x20,
-    };
+    // An aspect that represents the combination of both the depth and stencil aspects. It
+    // can be ignored outside of the Vulkan backend.
+    CombinedDepthStencil = 0x20,
+};
 
-    template <>
-    struct EnumBitmaskSize<Aspect> {
-        static constexpr unsigned value = 6;
-    };
+template <>
+struct EnumBitmaskSize<Aspect> {
+    static constexpr unsigned value = 6;
+};
 
-    // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
-    // does not exist in the format.
-    // Also ASSERTs if "All" is selected and results in more than one aspect.
-    Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
+// Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+// does not exist in the format.
+// Also ASSERTs if "All" is selected and results in more than one aspect.
+Aspect ConvertSingleAspect(const Format& format, wgpu::TextureAspect aspect);
 
-    // Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
-    // does not exist in the format.
-    Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
+// Convert the TextureAspect to an Aspect mask for the format. ASSERTs if the aspect
+// does not exist in the format.
+Aspect ConvertAspect(const Format& format, wgpu::TextureAspect aspect);
 
-    // Returns the Aspects of the Format that are selected by the wgpu::TextureAspect.
-    // Note that this can return Aspect::None if the Format doesn't have any of the
-    // selected aspects.
-    Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect);
+// Returns the Aspects of the Format that are selected by the wgpu::TextureAspect.
+// Note that this can return Aspect::None if the Format doesn't have any of the
+// selected aspects.
+Aspect SelectFormatAspects(const Format& format, wgpu::TextureAspect aspect);
 
-    // Convert TextureAspect to the aspect which corresponds to the view format. This
-    // special cases per plane view formats before calling ConvertAspect.
-    Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect);
+// Convert TextureAspect to the aspect which corresponds to the view format. This
+// special cases per plane view formats before calling ConvertAspect.
+Aspect ConvertViewAspect(const Format& format, wgpu::TextureAspect aspect);
 
-    // Helper struct to make it clear that what the parameters of a range mean.
-    template <typename T>
-    struct FirstAndCountRange {
-        T first;
-        T count;
-    };
+// Helper struct to make it clear what the parameters of a range mean.
+template <typename T>
+struct FirstAndCountRange {
+    T first;
+    T count;
+};
 
-    struct SubresourceRange {
-        SubresourceRange(Aspect aspects,
-                         FirstAndCountRange<uint32_t> arrayLayerParam,
-                         FirstAndCountRange<uint32_t> mipLevelParams);
-        SubresourceRange();
+struct SubresourceRange {
+    SubresourceRange(Aspect aspects,
+                     FirstAndCountRange<uint32_t> arrayLayerParam,
+                     FirstAndCountRange<uint32_t> mipLevelParams);
+    SubresourceRange();
 
-        Aspect aspects;
-        uint32_t baseArrayLayer;
-        uint32_t layerCount;
-        uint32_t baseMipLevel;
-        uint32_t levelCount;
+    Aspect aspects;
+    uint32_t baseArrayLayer;
+    uint32_t layerCount;
+    uint32_t baseMipLevel;
+    uint32_t levelCount;
 
-        static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
-                                                  uint32_t baseArrayLayer,
-                                                  Aspect aspects);
-        static SubresourceRange MakeSingle(Aspect aspect,
-                                           uint32_t baseArrayLayer,
-                                           uint32_t baseMipLevel);
+    static SubresourceRange SingleMipAndLayer(uint32_t baseMipLevel,
+                                              uint32_t baseArrayLayer,
+                                              Aspect aspects);
+    static SubresourceRange MakeSingle(Aspect aspect,
+                                       uint32_t baseArrayLayer,
+                                       uint32_t baseMipLevel);
 
-        static SubresourceRange MakeFull(Aspect aspects, uint32_t layerCount, uint32_t levelCount);
-    };
+    static SubresourceRange MakeFull(Aspect aspects, uint32_t layerCount, uint32_t levelCount);
+};
 
-    // Helper function to use aspects as linear indices in arrays.
-    uint8_t GetAspectIndex(Aspect aspect);
-    uint8_t GetAspectCount(Aspect aspects);
+// Helper function to use aspects as linear indices in arrays.
+uint8_t GetAspectIndex(Aspect aspect);
+uint8_t GetAspectCount(Aspect aspects);
 
-    // The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
-    // the per plane index does not exceed the known maximum plane count.
-    static constexpr uint32_t kMaxPlanesPerFormat = 3;
+// The maximum number of planes per format Dawn knows about. Asserts in BuildFormatTable that
+// the per plane index does not exceed the known maximum plane count.
+static constexpr uint32_t kMaxPlanesPerFormat = 3;
 
 }  // namespace dawn::native
 
 namespace dawn {
 
-    template <>
-    struct IsDawnBitmask<dawn::native::Aspect> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<dawn::native::Aspect> {
+    static constexpr bool enable = true;
+};
 
 }  // namespace dawn
 
diff --git a/src/dawn/native/SubresourceStorage.h b/src/dawn/native/SubresourceStorage.h
index b2d2cbb..cc3d10d 100644
--- a/src/dawn/native/SubresourceStorage.h
+++ b/src/dawn/native/SubresourceStorage.h
@@ -27,528 +27,521 @@
 
 namespace dawn::native {
 
-    // SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
-    // value of type T except that it tries to compress similar subresources so that algorithms
-    // can act on a whole range of subresources at once if they have the same state.
+// SubresourceStorage<T> acts like a simple map from subresource (aspect, layer, level) to a
+// value of type T except that it tries to compress similar subresources so that algorithms
+// can act on a whole range of subresources at once if they have the same state.
+//
+// For example a very common case to optimize for is the tracking of the usage of texture
+// subresources inside a render pass: the vast majority of texture views will select the whole
+// texture while a small minority will select a sub-range. We want to optimize the common case
+// by setting and checking a single "usage" value when a full subresource is used but at the
+// same time allow per-subresource data when needed.
+//
+// Another example is barrier tracking per-subresource in the backends: it will often happen
+// that during texture upload each mip level will have a different "barrier state". However
+// when the texture is fully uploaded and after it is used for sampling (with a full view) for
+// the first time, the barrier state will likely be the same across all the subresources.
+// That's why some form of "recompression" of subresource state must be possible.
+//
+// In order to keep the implementation details private and to avoid iterator-hell, this
+// container uses a more functional approach of calling a closure on the interesting ranges.
+// This is for example how to look at the state of all subresources.
+//
+//   subresources.Iterate([](const SubresourceRange& range, const T& data) {
+//      // Do something with the knowledge that all the subresources in `range` have value
+//      // `data`.
+//   });
+//
+// SubresourceStorage internally tracks compression state per aspect and then per layer of each
+// aspect. This means that a 2-aspect texture can have the following compression state:
+//
+//  - Aspect 0 is fully compressed.
+//  - Aspect 1 is partially compressed:
+//    - Aspect 1 layer 3 is decompressed.
+//    - Aspect 1 layer 0-2 and 4-42 are compressed.
+//
+// A useful model to reason about SubresourceStorage is to represent it as a tree:
+//
+//  - SubresourceStorage is the root.
+//    |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
+//       any children because the data is constant across all of the subtree.
+//      |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
+//         its node doesn't have any children because the data is constant across all of the
+//         subtree.
+//        |-> Nodes 3 deep represent individual mip levels (for uncompressed layers).
+//
+// The concept of recompression is the removal of all child nodes of a non-leaf node when the
+// data is constant across them. Decompression is the addition of child nodes to a leaf node
+// and copying of its data to all its children.
+//
+// The choice of having secondary compression for array layers is to optimize for the cases
+// where transfer operations are used to update specific layers of texture with render or
+// transfer operations, while the rest is untouched. It seems much less likely that there
+// would be operations that touch all Nth mips of a 2D array texture without touching the
+// others.
+//
+// There are several hot code paths that create new SubresourceStorage like the tracking of
+// resource usage per-pass. We don't want to allocate a container for the decompressed data
+// unless we have to because it would dramatically lower performance. Instead
+// SubresourceStorage contains an inline array that contains the per-aspect compressed data
+// and only allocates a per-subresource on aspect decompression.
+//
+// T must be a copyable type that supports equality comparison with ==.
+//
+// The implementation of functions in this file can have a lot of control flow and corner cases
+// so each modification should come with extensive tests and ensure 100% code coverage of the
+// modified functions. See instructions at
+// https://chromium.googlesource.com/chromium/src/+/main/docs/testing/code_coverage.md#local-coverage-script
+// to run the test with code coverage. A command line that worked in the past (with the right
+// GN args for the out/coverage directory in a Chromium checkout) is:
+//
+/*
+   python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
+       "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
+       third_party/dawn/src/dawn/native
+*/
+//
+// TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
+// if recompression can happen or not in Update() and Merge()
+template <typename T>
+class SubresourceStorage {
+  public:
+    static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
+    static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");
+
+    // Creates the storage with the given "dimensions" and all subresources starting with the
+    // initial value.
+    SubresourceStorage(Aspect aspects,
+                       uint32_t arrayLayerCount,
+                       uint32_t mipLevelCount,
+                       T initialValue = {});
+
+    // Returns the data for a single subresource. Note that the reference returned might be the
+    // same for multiple subresources.
+    const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
+
+    // Given an iterateFunc that's a function or function-like object that can be called with
+    // arguments of type (const SubresourceRange& range, const T& data) and returns void,
+    // calls it with aggregate ranges if possible, such that each subresource is part of
+    // exactly one of the ranges iterateFunc is called with (and obviously data is the value
+    // stored for that subresource). For example:
     //
-    // For example a very common case to optimize for is the tracking of the usage of texture
-    // subresources inside a render pass: the vast majority of texture views will select the whole
-    // texture while a small minority will select a sub-range. We want to optimize the common case
-    // by setting and checking a single "usage" value when a full subresource is used but at the
-    // same time allow per-subresource data when needed.
+    //   subresources.Iterate([&](const SubresourceRange& range, const T& data) {
+    //       // Do something with range and data.
+    //   });
+    template <typename F>
+    void Iterate(F&& iterateFunc) const;
+
+    // Given an updateFunc that's a function or function-like object that can be called with
+    // arguments of type (const SubresourceRange& range, T* data) and returns void,
+    // calls it with ranges that in aggregate form `range` and pass for each of the
+    // sub-ranges a pointer to modify the value for that sub-range. For example:
     //
-    // Another example is barrier tracking per-subresource in the backends: it will often happen
-    // that during texture upload each mip level will have a different "barrier state". However
-    // when the texture is fully uploaded and after it is used for sampling (with a full view) for
-    // the first time, the barrier state will likely be the same across all the subresources.
-    // That's why some form of "recompression" of subresource state must be possibe.
-    //
-    // In order to keep the implementation details private and to avoid iterator-hell, this
-    // container uses a more functional approach of calling a closure on the interesting ranges.
-    // This is for example how to look at the state of all subresources.
-    //
-    //   subresources.Iterate([](const SubresourceRange& range, const T& data) {
-    //      // Do something with the knowledge that all the subresources in `range` have value
-    //      // `data`.
+    //   subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
+    //       *data |= wgpu::TextureUsage::Stuff;
     //   });
     //
-    // SubresourceStorage internally tracks compression state per aspect and then per layer of each
-    // aspect. This means that a 2-aspect texture can have the following compression state:
-    //
-    //  - Aspect 0 is fully compressed.
-    //  - Aspect 1 is partially compressed:
-    //    - Aspect 1 layer 3 is decompressed.
-    //    - Aspect 1 layer 0-2 and 4-42 are compressed.
-    //
-    // A useful model to reason about SubresourceStorage is to represent is as a tree:
-    //
-    //  - SubresourceStorage is the root.
-    //    |-> Nodes 1 deep represent each aspect. If an aspect is compressed, its node doesn't have
-    //       any children because the data is constant across all of the subtree.
-    //      |-> Nodes 2 deep represent layers (for uncompressed aspects). If a layer is compressed,
-    //         its node doesn't have any children because the data is constant across all of the
-    //         subtree.
-    //        |-> Nodes 3 deep represent individial mip levels (for uncompressed layers).
-    //
-    // The concept of recompression is the removal of all child nodes of a non-leaf node when the
-    // data is constant across them. Decompression is the addition of child nodes to a leaf node
-    // and copying of its data to all its children.
-    //
-    // The choice of having secondary compression for array layers is to optimize for the cases
-    // where transfer operations are used to update specific layers of texture with render or
-    // transfer operations, while the rest is untouched. It seems much less likely that there
-    // would be operations that touch all Nth mips of a 2D array texture without touching the
-    // others.
-    //
-    // There are several hot code paths that create new SubresourceStorage like the tracking of
-    // resource usage per-pass. We don't want to allocate a container for the decompressed data
-    // unless we have to because it would dramatically lower performance. Instead
-    // SubresourceStorage contains an inline array that contains the per-aspect compressed data
-    // and only allocates a per-subresource on aspect decompression.
-    //
-    // T must be a copyable type that supports equality comparison with ==.
-    //
-    // The implementation of functions in this file can have a lot of control flow and corner cases
-    // so each modification should come with extensive tests and ensure 100% code coverage of the
-    // modified functions. See instructions at
-    // https://chromium.googlesource.com/chromium/src/+/main/docs/testing/code_coverage.md#local-coverage-script
-    // to run the test with code coverage. A command line that worked in the past (with the right
-    // GN args for the out/coverage directory in a Chromium checkout) is:
-    //
-    /*
-       python tools/code_coverage/coverage.py dawn_unittests -b out/coverage -o out/report -c \
-           "out/coverage/dawn_unittests --gtest_filter=SubresourceStorage\*" -f \
-           third_party/dawn/src/dawn/native
-    */
-    //
-    // TODO(crbug.com/dawn/836): Make the recompression optional, the calling code should know
-    // if recompression can happen or not in Update() and Merge()
-    template <typename T>
-    class SubresourceStorage {
-      public:
-        static_assert(std::is_copy_assignable<T>::value, "T must be copyable");
-        static_assert(HasEqualityOperator<T>::value, "T requires bool operator == (T, T)");
-
-        // Creates the storage with the given "dimensions" and all subresources starting with the
-        // initial value.
-        SubresourceStorage(Aspect aspects,
-                           uint32_t arrayLayerCount,
-                           uint32_t mipLevelCount,
-                           T initialValue = {});
-
-        // Returns the data for a single subresource. Note that the reference returned might be the
-        // same for multiple subresources.
-        const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const;
-
-        // Given an iterateFunc that's a function or function-like objet that can be called with
-        // arguments of type (const SubresourceRange& range, const T& data) and returns void,
-        // calls it with aggregate ranges if possible, such that each subresource is part of
-        // exactly one of the ranges iterateFunc is called with (and obviously data is the value
-        // stored for that subresource). For example:
-        //
-        //   subresources.Iterate([&](const SubresourceRange& range, const T& data) {
-        //       // Do something with range and data.
-        //   });
-        template <typename F>
-        void Iterate(F&& iterateFunc) const;
-
-        // Given an updateFunc that's a function or function-like objet that can be called with
-        // arguments of type (const SubresourceRange& range, T* data) and returns void,
-        // calls it with ranges that in aggregate form `range` and pass for each of the
-        // sub-ranges a pointer to modify the value for that sub-range. For example:
-        //
-        //   subresources.Update(view->GetRange(), [](const SubresourceRange&, T* data) {
-        //       *data |= wgpu::TextureUsage::Stuff;
-        //   });
-        //
-        // /!\ WARNING: updateFunc should never use range to compute the update to data otherwise
-        // your code is likely to break when compression happens. Range should only be used for
-        // side effects like using it to compute a Vulkan pipeline barrier.
-        template <typename F>
-        void Update(const SubresourceRange& range, F&& updateFunc);
-
-        // Given a mergeFunc that's a function or a function-like object that can be called with
-        // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
-        // returns void, calls it with ranges that in aggregate form the full resources and pass
-        // for each of the sub-ranges a pointer to modify the value for that sub-range and the
-        // corresponding value from other for that sub-range. For example:
-        //
-        //   subresources.Merge(otherUsages,
-        //       [](const SubresourceRange&, T* data, const T& otherData) {
-        //          *data |= otherData;
-        //       });
-        //
-        // /!\ WARNING: mergeFunc should never use range to compute the update to data otherwise
-        // your code is likely to break when compression happens. Range should only be used for
-        // side effects like using it to compute a Vulkan pipeline barrier.
-        template <typename U, typename F>
-        void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);
-
-        // Other operations to consider:
-        //
-        //  - UpdateTo(Range, T) that updates the range to a constant value.
-
-        // Methods to query the internal state of SubresourceStorage for testing.
-        Aspect GetAspectsForTesting() const;
-        uint32_t GetArrayLayerCountForTesting() const;
-        uint32_t GetMipLevelCountForTesting() const;
-        bool IsAspectCompressedForTesting(Aspect aspect) const;
-        bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;
-
-      private:
-        template <typename U>
-        friend class SubresourceStorage;
-
-        void DecompressAspect(uint32_t aspectIndex);
-        void RecompressAspect(uint32_t aspectIndex);
-
-        void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
-        void RecompressLayer(uint32_t aspectIndex, uint32_t layer);
-
-        SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;
-
-        // LayerCompressed should never be called when the aspect is compressed otherwise it would
-        // need to check that mLayerCompressed is not null before indexing it.
-        bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
-        bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;
-
-        // Return references to the data for a compressed plane / layer or subresource.
-        // Each variant should be called exactly under the correct compression level.
-        T& DataInline(uint32_t aspectIndex);
-        T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
-        const T& DataInline(uint32_t aspectIndex) const;
-        const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;
-
-        Aspect mAspects;
-        uint8_t mMipLevelCount;
-        uint16_t mArrayLayerCount;
-
-        // Invariant: if an aspect is marked compressed, then all it's layers are marked as
-        // compressed.
-        static constexpr size_t kMaxAspects = 2;
-        std::array<bool, kMaxAspects> mAspectCompressed;
-        std::array<T, kMaxAspects> mInlineAspectData;
-
-        // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
-        std::unique_ptr<bool[]> mLayerCompressed;
-
-        // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
-        // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
-        // the data for a compressed layer of aspect if in the slot for (aspect, layer, 0).
-        std::unique_ptr<T[]> mData;
-    };
-
-    template <typename T>
-    SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
-                                              uint32_t arrayLayerCount,
-                                              uint32_t mipLevelCount,
-                                              T initialValue)
-        : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
-        ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
-        ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());
-
-        uint32_t aspectCount = GetAspectCount(aspects);
-        ASSERT(aspectCount <= kMaxAspects);
-
-        for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
-            mAspectCompressed[aspectIndex] = true;
-            DataInline(aspectIndex) = initialValue;
-        }
-    }
-
-    template <typename T>
+    // /!\ WARNING: updateFunc should never use range to compute the update to data otherwise
+    // your code is likely to break when compression happens. Range should only be used for
+    // side effects like using it to compute a Vulkan pipeline barrier.
     template <typename F>
-    void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
-        bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
-        bool fullAspects =
-            range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
+    void Update(const SubresourceRange& range, F&& updateFunc);
 
-        for (Aspect aspect : IterateEnumMask(range.aspects)) {
-            uint32_t aspectIndex = GetAspectIndex(aspect);
+    // Given a mergeFunc that's a function or a function-like object that can be called with
+    // arguments of type (const SubresourceRange& range, T* data, const U& otherData) and
+    // returns void, calls it with ranges that in aggregate form the full resources and pass
+    // for each of the sub-ranges a pointer to modify the value for that sub-range and the
+    // corresponding value from other for that sub-range. For example:
+    //
+    //   subresources.Merge(otherUsages,
+    //       [](const SubresourceRange&, T* data, const T& otherData) {
+    //          *data |= otherData;
+    //       });
+    //
+    // /!\ WARNING: mergeFunc should never use range to compute the update to data otherwise
+    // your code is likely to break when compression happens. Range should only be used for
+    // side effects like using it to compute a Vulkan pipeline barrier.
+    template <typename U, typename F>
+    void Merge(const SubresourceStorage<U>& other, F&& mergeFunc);
 
-            // Call the updateFunc once for the whole aspect if possible or decompress and fallback
-            // to per-layer handling.
-            if (mAspectCompressed[aspectIndex]) {
-                if (fullAspects) {
-                    SubresourceRange updateRange =
-                        SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
-                    updateFunc(updateRange, &DataInline(aspectIndex));
+    // Other operations to consider:
+    //
+    //  - UpdateTo(Range, T) that updates the range to a constant value.
+
+    // Methods to query the internal state of SubresourceStorage for testing.
+    Aspect GetAspectsForTesting() const;
+    uint32_t GetArrayLayerCountForTesting() const;
+    uint32_t GetMipLevelCountForTesting() const;
+    bool IsAspectCompressedForTesting(Aspect aspect) const;
+    bool IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const;
+
+  private:
+    template <typename U>
+    friend class SubresourceStorage;
+
+    void DecompressAspect(uint32_t aspectIndex);
+    void RecompressAspect(uint32_t aspectIndex);
+
+    void DecompressLayer(uint32_t aspectIndex, uint32_t layer);
+    void RecompressLayer(uint32_t aspectIndex, uint32_t layer);
+
+    SubresourceRange GetFullLayerRange(Aspect aspect, uint32_t layer) const;
+
+    // LayerCompressed should never be called when the aspect is compressed otherwise it would
+    // need to check that mLayerCompressed is not null before indexing it.
+    bool& LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex);
+    bool LayerCompressed(uint32_t aspectIndex, uint32_t layerIndex) const;
+
+    // Return references to the data for a compressed plane / layer or subresource.
+    // Each variant should be called exactly under the correct compression level.
+    T& DataInline(uint32_t aspectIndex);
+    T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0);
+    const T& DataInline(uint32_t aspectIndex) const;
+    const T& Data(uint32_t aspectIndex, uint32_t layer, uint32_t level = 0) const;
+
+    Aspect mAspects;
+    uint8_t mMipLevelCount;
+    uint16_t mArrayLayerCount;
+
+    // Invariant: if an aspect is marked compressed, then all its layers are marked as
+    // compressed.
+    static constexpr size_t kMaxAspects = 2;
+    std::array<bool, kMaxAspects> mAspectCompressed;
+    std::array<T, kMaxAspects> mInlineAspectData;
+
+    // Indexed as mLayerCompressed[aspectIndex * mArrayLayerCount + layer].
+    std::unique_ptr<bool[]> mLayerCompressed;
+
+    // Indexed as mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level].
+    // The data for a compressed aspect is stored in the slot for (aspect, 0, 0). Similarly
+    // the data for a compressed layer of aspect is in the slot for (aspect, layer, 0).
+    std::unique_ptr<T[]> mData;
+};
+
+template <typename T>
+SubresourceStorage<T>::SubresourceStorage(Aspect aspects,
+                                          uint32_t arrayLayerCount,
+                                          uint32_t mipLevelCount,
+                                          T initialValue)
+    : mAspects(aspects), mMipLevelCount(mipLevelCount), mArrayLayerCount(arrayLayerCount) {
+    ASSERT(arrayLayerCount <= std::numeric_limits<decltype(mArrayLayerCount)>::max());
+    ASSERT(mipLevelCount <= std::numeric_limits<decltype(mMipLevelCount)>::max());
+
+    uint32_t aspectCount = GetAspectCount(aspects);
+    ASSERT(aspectCount <= kMaxAspects);
+
+    for (uint32_t aspectIndex = 0; aspectIndex < aspectCount; aspectIndex++) {
+        mAspectCompressed[aspectIndex] = true;
+        DataInline(aspectIndex) = initialValue;
+    }
+}
+
+template <typename T>
+template <typename F>
+void SubresourceStorage<T>::Update(const SubresourceRange& range, F&& updateFunc) {
+    bool fullLayers = range.baseMipLevel == 0 && range.levelCount == mMipLevelCount;
+    bool fullAspects =
+        range.baseArrayLayer == 0 && range.layerCount == mArrayLayerCount && fullLayers;
+
+    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+        uint32_t aspectIndex = GetAspectIndex(aspect);
+
+        // Call the updateFunc once for the whole aspect if possible or decompress and fallback
+        // to per-layer handling.
+        if (mAspectCompressed[aspectIndex]) {
+            if (fullAspects) {
+                SubresourceRange updateRange =
+                    SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+                updateFunc(updateRange, &DataInline(aspectIndex));
+                continue;
+            }
+            DecompressAspect(aspectIndex);
+        }
+
+        uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
+        for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
+            // Call the updateFunc once for the whole layer if possible or decompress and
+            // fall back to per-level handling.
+            if (LayerCompressed(aspectIndex, layer)) {
+                if (fullLayers) {
+                    SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
+                    updateFunc(updateRange, &Data(aspectIndex, layer));
                     continue;
                 }
-                DecompressAspect(aspectIndex);
+                DecompressLayer(aspectIndex, layer);
             }
 
-            uint32_t layerEnd = range.baseArrayLayer + range.layerCount;
-            for (uint32_t layer = range.baseArrayLayer; layer < layerEnd; layer++) {
-                // Call the updateFunc once for the whole layer if possible or decompress and
-                // fallback to per-level handling.
-                if (LayerCompressed(aspectIndex, layer)) {
-                    if (fullLayers) {
-                        SubresourceRange updateRange = GetFullLayerRange(aspect, layer);
-                        updateFunc(updateRange, &Data(aspectIndex, layer));
-                        continue;
-                    }
-                    DecompressLayer(aspectIndex, layer);
-                }
-
-                // Worst case: call updateFunc per level.
-                uint32_t levelEnd = range.baseMipLevel + range.levelCount;
-                for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
-                    SubresourceRange updateRange =
-                        SubresourceRange::MakeSingle(aspect, layer, level);
-                    updateFunc(updateRange, &Data(aspectIndex, layer, level));
-                }
-
-                // If the range has fullLayers then it is likely we can recompress after the calls
-                // to updateFunc (this branch is skipped if updateFunc was called for the whole
-                // layer).
-                if (fullLayers) {
-                    RecompressLayer(aspectIndex, layer);
-                }
+            // Worst case: call updateFunc per level.
+            uint32_t levelEnd = range.baseMipLevel + range.levelCount;
+            for (uint32_t level = range.baseMipLevel; level < levelEnd; level++) {
+                SubresourceRange updateRange = SubresourceRange::MakeSingle(aspect, layer, level);
+                updateFunc(updateRange, &Data(aspectIndex, layer, level));
             }
 
-            // If the range has fullAspects then it is likely we can recompress after the calls to
-            // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
-            if (fullAspects) {
-                RecompressAspect(aspectIndex);
+            // If the range has fullLayers then it is likely we can recompress after the calls
+            // to updateFunc (this branch is skipped if updateFunc was called for the whole
+            // layer).
+            if (fullLayers) {
+                RecompressLayer(aspectIndex, layer);
             }
         }
+
+        // If the range has fullAspects then it is likely we can recompress after the calls to
+        // updateFunc (this branch is skipped if updateFunc was called for the whole aspect).
+        if (fullAspects) {
+            RecompressAspect(aspectIndex);
+        }
     }
+}
 
-    template <typename T>
-    template <typename U, typename F>
-    void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
-        ASSERT(mAspects == other.mAspects);
-        ASSERT(mArrayLayerCount == other.mArrayLayerCount);
-        ASSERT(mMipLevelCount == other.mMipLevelCount);
+template <typename T>
+template <typename U, typename F>
+void SubresourceStorage<T>::Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
+    ASSERT(mAspects == other.mAspects);
+    ASSERT(mArrayLayerCount == other.mArrayLayerCount);
+    ASSERT(mMipLevelCount == other.mMipLevelCount);
 
-        for (Aspect aspect : IterateEnumMask(mAspects)) {
-            uint32_t aspectIndex = GetAspectIndex(aspect);
+    for (Aspect aspect : IterateEnumMask(mAspects)) {
+        uint32_t aspectIndex = GetAspectIndex(aspect);
 
-            // If the other storage's aspect is compressed we don't need to decompress anything
-            // in `this` and can just iterate through it, merging with `other`'s constant value for
-            // the aspect. For code simplicity this can be done with a call to Update().
-            if (other.mAspectCompressed[aspectIndex]) {
-                const U& otherData = other.DataInline(aspectIndex);
-                Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
+        // If the other storage's aspect is compressed we don't need to decompress anything
+        // in `this` and can just iterate through it, merging with `other`'s constant value for
+        // the aspect. For code simplicity this can be done with a call to Update().
+        if (other.mAspectCompressed[aspectIndex]) {
+            const U& otherData = other.DataInline(aspectIndex);
+            Update(SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount),
+                   [&](const SubresourceRange& subrange, T* data) {
+                       mergeFunc(subrange, data, otherData);
+                   });
+            continue;
+        }
+
+        // Other doesn't have the aspect compressed so we must do at least per-layer merging.
+        if (mAspectCompressed[aspectIndex]) {
+            DecompressAspect(aspectIndex);
+        }
+
+        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+            // Similarly to above, use a fast path if other's layer is compressed.
+            if (other.LayerCompressed(aspectIndex, layer)) {
+                const U& otherData = other.Data(aspectIndex, layer);
+                Update(GetFullLayerRange(aspect, layer),
                        [&](const SubresourceRange& subrange, T* data) {
                            mergeFunc(subrange, data, otherData);
                        });
                 continue;
             }
 
-            // Other doesn't have the aspect compressed so we must do at least per-layer merging.
-            if (mAspectCompressed[aspectIndex]) {
-                DecompressAspect(aspectIndex);
+            // Sad case, other is decompressed for this layer, do per-level merging.
+            if (LayerCompressed(aspectIndex, layer)) {
+                DecompressLayer(aspectIndex, layer);
             }
 
-            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
-                // Similarly to above, use a fast path if other's layer is compressed.
-                if (other.LayerCompressed(aspectIndex, layer)) {
-                    const U& otherData = other.Data(aspectIndex, layer);
-                    Update(GetFullLayerRange(aspect, layer),
-                           [&](const SubresourceRange& subrange, T* data) {
-                               mergeFunc(subrange, data, otherData);
-                           });
-                    continue;
-                }
-
-                // Sad case, other is decompressed for this layer, do per-level merging.
-                if (LayerCompressed(aspectIndex, layer)) {
-                    DecompressLayer(aspectIndex, layer);
-                }
-
-                for (uint32_t level = 0; level < mMipLevelCount; level++) {
-                    SubresourceRange updateRange =
-                        SubresourceRange::MakeSingle(aspect, layer, level);
-                    mergeFunc(updateRange, &Data(aspectIndex, layer, level),
-                              other.Data(aspectIndex, layer, level));
-                }
-
-                RecompressLayer(aspectIndex, layer);
+            for (uint32_t level = 0; level < mMipLevelCount; level++) {
+                SubresourceRange updateRange = SubresourceRange::MakeSingle(aspect, layer, level);
+                mergeFunc(updateRange, &Data(aspectIndex, layer, level),
+                          other.Data(aspectIndex, layer, level));
             }
 
-            RecompressAspect(aspectIndex);
+            RecompressLayer(aspectIndex, layer);
         }
+
+        RecompressAspect(aspectIndex);
     }
+}
 
-    template <typename T>
-    template <typename F>
-    void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
-        for (Aspect aspect : IterateEnumMask(mAspects)) {
-            uint32_t aspectIndex = GetAspectIndex(aspect);
+template <typename T>
+template <typename F>
+void SubresourceStorage<T>::Iterate(F&& iterateFunc) const {
+    for (Aspect aspect : IterateEnumMask(mAspects)) {
+        uint32_t aspectIndex = GetAspectIndex(aspect);
 
-            // Fastest path, call iterateFunc on the whole aspect at once.
-            if (mAspectCompressed[aspectIndex]) {
-                SubresourceRange range =
-                    SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
-                iterateFunc(range, DataInline(aspectIndex));
+        // Fastest path, call iterateFunc on the whole aspect at once.
+        if (mAspectCompressed[aspectIndex]) {
+            SubresourceRange range =
+                SubresourceRange::MakeFull(aspect, mArrayLayerCount, mMipLevelCount);
+            iterateFunc(range, DataInline(aspectIndex));
+            continue;
+        }
+
+        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+            // Fast path, call iterateFunc on the whole array layer at once.
+            if (LayerCompressed(aspectIndex, layer)) {
+                SubresourceRange range = GetFullLayerRange(aspect, layer);
+                iterateFunc(range, Data(aspectIndex, layer));
                 continue;
             }
 
-            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
-                // Fast path, call iterateFunc on the whole array layer at once.
-                if (LayerCompressed(aspectIndex, layer)) {
-                    SubresourceRange range = GetFullLayerRange(aspect, layer);
-                    iterateFunc(range, Data(aspectIndex, layer));
-                    continue;
-                }
-
-                // Slow path, call iterateFunc for each mip level.
-                for (uint32_t level = 0; level < mMipLevelCount; level++) {
-                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
-                    iterateFunc(range, Data(aspectIndex, layer, level));
-                }
+            // Slow path, call iterateFunc for each mip level.
+            for (uint32_t level = 0; level < mMipLevelCount; level++) {
+                SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                iterateFunc(range, Data(aspectIndex, layer, level));
             }
         }
     }
+}
 
-    template <typename T>
-    const T& SubresourceStorage<T>::Get(Aspect aspect,
-                                        uint32_t arrayLayer,
-                                        uint32_t mipLevel) const {
-        uint32_t aspectIndex = GetAspectIndex(aspect);
-        ASSERT(aspectIndex < GetAspectCount(mAspects));
-        ASSERT(arrayLayer < mArrayLayerCount);
-        ASSERT(mipLevel < mMipLevelCount);
+template <typename T>
+const T& SubresourceStorage<T>::Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const {
+    uint32_t aspectIndex = GetAspectIndex(aspect);
+    ASSERT(aspectIndex < GetAspectCount(mAspects));
+    ASSERT(arrayLayer < mArrayLayerCount);
+    ASSERT(mipLevel < mMipLevelCount);
 
-        // Fastest path, the aspect is compressed!
-        if (mAspectCompressed[aspectIndex]) {
-            return DataInline(aspectIndex);
-        }
-
-        // Fast path, the array layer is compressed.
-        if (LayerCompressed(aspectIndex, arrayLayer)) {
-            return Data(aspectIndex, arrayLayer);
-        }
-
-        return Data(aspectIndex, arrayLayer, mipLevel);
+    // Fastest path, the aspect is compressed!
+    if (mAspectCompressed[aspectIndex]) {
+        return DataInline(aspectIndex);
     }
 
-    template <typename T>
-    Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
-        return mAspects;
+    // Fast path, the array layer is compressed.
+    if (LayerCompressed(aspectIndex, arrayLayer)) {
+        return Data(aspectIndex, arrayLayer);
     }
 
-    template <typename T>
-    uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
-        return mArrayLayerCount;
-    }
+    return Data(aspectIndex, arrayLayer, mipLevel);
+}
 
-    template <typename T>
-    uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
-        return mMipLevelCount;
-    }
+template <typename T>
+Aspect SubresourceStorage<T>::GetAspectsForTesting() const {
+    return mAspects;
+}
 
-    template <typename T>
-    bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
-        return mAspectCompressed[GetAspectIndex(aspect)];
-    }
+template <typename T>
+uint32_t SubresourceStorage<T>::GetArrayLayerCountForTesting() const {
+    return mArrayLayerCount;
+}
 
-    template <typename T>
-    bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
-        return mAspectCompressed[GetAspectIndex(aspect)] ||
-               mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
-    }
+template <typename T>
+uint32_t SubresourceStorage<T>::GetMipLevelCountForTesting() const {
+    return mMipLevelCount;
+}
 
-    template <typename T>
-    void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
-        ASSERT(mAspectCompressed[aspectIndex]);
-        const T& aspectData = DataInline(aspectIndex);
-        mAspectCompressed[aspectIndex] = false;
+template <typename T>
+bool SubresourceStorage<T>::IsAspectCompressedForTesting(Aspect aspect) const {
+    return mAspectCompressed[GetAspectIndex(aspect)];
+}
 
-        // Extra allocations are only needed when aspects are decompressed. Create them lazily.
-        if (mData == nullptr) {
-            ASSERT(mLayerCompressed == nullptr);
+template <typename T>
+bool SubresourceStorage<T>::IsLayerCompressedForTesting(Aspect aspect, uint32_t layer) const {
+    return mAspectCompressed[GetAspectIndex(aspect)] ||
+           mLayerCompressed[GetAspectIndex(aspect) * mArrayLayerCount + layer];
+}
 
-            uint32_t aspectCount = GetAspectCount(mAspects);
-            mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
-            mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);
+template <typename T>
+void SubresourceStorage<T>::DecompressAspect(uint32_t aspectIndex) {
+    ASSERT(mAspectCompressed[aspectIndex]);
+    const T& aspectData = DataInline(aspectIndex);
+    mAspectCompressed[aspectIndex] = false;
 
-            for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount;
-                 layerIndex++) {
-                mLayerCompressed[layerIndex] = true;
-            }
-        }
+    // Extra allocations are only needed when aspects are decompressed. Create them lazily.
+    if (mData == nullptr) {
+        ASSERT(mLayerCompressed == nullptr);
 
-        ASSERT(LayerCompressed(aspectIndex, 0));
-        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
-            Data(aspectIndex, layer) = aspectData;
-            ASSERT(LayerCompressed(aspectIndex, layer));
+        uint32_t aspectCount = GetAspectCount(mAspects);
+        mLayerCompressed = std::make_unique<bool[]>(aspectCount * mArrayLayerCount);
+        mData = std::make_unique<T[]>(aspectCount * mArrayLayerCount * mMipLevelCount);
+
+        for (uint32_t layerIndex = 0; layerIndex < aspectCount * mArrayLayerCount; layerIndex++) {
+            mLayerCompressed[layerIndex] = true;
         }
     }
 
-    template <typename T>
-    void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
-        ASSERT(!mAspectCompressed[aspectIndex]);
-        // All layers of the aspect must be compressed for the aspect to possibly recompress.
-        for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
-            if (!LayerCompressed(aspectIndex, layer)) {
-                return;
-            }
-        }
-
-        T layer0Data = Data(aspectIndex, 0);
-        for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
-            if (!(Data(aspectIndex, layer) == layer0Data)) {
-                return;
-            }
-        }
-
-        mAspectCompressed[aspectIndex] = true;
-        DataInline(aspectIndex) = layer0Data;
-    }
-
-    template <typename T>
-    void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+    ASSERT(LayerCompressed(aspectIndex, 0));
+    for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+        Data(aspectIndex, layer) = aspectData;
         ASSERT(LayerCompressed(aspectIndex, layer));
-        ASSERT(!mAspectCompressed[aspectIndex]);
-        const T& layerData = Data(aspectIndex, layer);
-        LayerCompressed(aspectIndex, layer) = false;
+    }
+}
 
-        // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
-        // allows starting the iteration at level 1.
-        for (uint32_t level = 1; level < mMipLevelCount; level++) {
-            Data(aspectIndex, layer, level) = layerData;
+template <typename T>
+void SubresourceStorage<T>::RecompressAspect(uint32_t aspectIndex) {
+    ASSERT(!mAspectCompressed[aspectIndex]);
+    // All layers of the aspect must be compressed for the aspect to possibly recompress.
+    for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+        if (!LayerCompressed(aspectIndex, layer)) {
+            return;
         }
     }
 
-    template <typename T>
-    void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
-        ASSERT(!LayerCompressed(aspectIndex, layer));
-        ASSERT(!mAspectCompressed[aspectIndex]);
-        const T& level0Data = Data(aspectIndex, layer, 0);
-
-        for (uint32_t level = 1; level < mMipLevelCount; level++) {
-            if (!(Data(aspectIndex, layer, level) == level0Data)) {
-                return;
-            }
+    T layer0Data = Data(aspectIndex, 0);
+    for (uint32_t layer = 1; layer < mArrayLayerCount; layer++) {
+        if (!(Data(aspectIndex, layer) == layer0Data)) {
+            return;
         }
-
-        LayerCompressed(aspectIndex, layer) = true;
     }
 
-    template <typename T>
-    SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
-        return {aspect, {layer, 1}, {0, mMipLevelCount}};
+    mAspectCompressed[aspectIndex] = true;
+    DataInline(aspectIndex) = layer0Data;
+}
+
+template <typename T>
+void SubresourceStorage<T>::DecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+    ASSERT(LayerCompressed(aspectIndex, layer));
+    ASSERT(!mAspectCompressed[aspectIndex]);
+    const T& layerData = Data(aspectIndex, layer);
+    LayerCompressed(aspectIndex, layer) = false;
+
+    // We assume that (aspect, layer, 0) is stored at the same place as (aspect, layer) which
+    // allows starting the iteration at level 1.
+    for (uint32_t level = 1; level < mMipLevelCount; level++) {
+        Data(aspectIndex, layer, level) = layerData;
+    }
+}
+
+template <typename T>
+void SubresourceStorage<T>::RecompressLayer(uint32_t aspectIndex, uint32_t layer) {
+    ASSERT(!LayerCompressed(aspectIndex, layer));
+    ASSERT(!mAspectCompressed[aspectIndex]);
+    const T& level0Data = Data(aspectIndex, layer, 0);
+
+    for (uint32_t level = 1; level < mMipLevelCount; level++) {
+        if (!(Data(aspectIndex, layer, level) == level0Data)) {
+            return;
+        }
     }
 
-    template <typename T>
-    bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
-        ASSERT(!mAspectCompressed[aspectIndex]);
-        return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
-    }
+    LayerCompressed(aspectIndex, layer) = true;
+}
 
-    template <typename T>
-    bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
-        ASSERT(!mAspectCompressed[aspectIndex]);
-        return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
-    }
+template <typename T>
+SubresourceRange SubresourceStorage<T>::GetFullLayerRange(Aspect aspect, uint32_t layer) const {
+    return {aspect, {layer, 1}, {0, mMipLevelCount}};
+}
 
-    template <typename T>
-    T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
-        ASSERT(mAspectCompressed[aspectIndex]);
-        return mInlineAspectData[aspectIndex];
-    }
-    template <typename T>
-    T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
-        ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
-        ASSERT(!mAspectCompressed[aspectIndex]);
-        return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
-    }
-    template <typename T>
-    const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
-        ASSERT(mAspectCompressed[aspectIndex]);
-        return mInlineAspectData[aspectIndex];
-    }
-    template <typename T>
-    const T& SubresourceStorage<T>::Data(uint32_t aspectIndex,
-                                         uint32_t layer,
-                                         uint32_t level) const {
-        ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
-        ASSERT(!mAspectCompressed[aspectIndex]);
-        return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
-    }
+template <typename T>
+bool& SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) {
+    ASSERT(!mAspectCompressed[aspectIndex]);
+    return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+}
+
+template <typename T>
+bool SubresourceStorage<T>::LayerCompressed(uint32_t aspectIndex, uint32_t layer) const {
+    ASSERT(!mAspectCompressed[aspectIndex]);
+    return mLayerCompressed[aspectIndex * mArrayLayerCount + layer];
+}
+
+template <typename T>
+T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) {
+    ASSERT(mAspectCompressed[aspectIndex]);
+    return mInlineAspectData[aspectIndex];
+}
+template <typename T>
+T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) {
+    ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+    ASSERT(!mAspectCompressed[aspectIndex]);
+    return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+}
+template <typename T>
+const T& SubresourceStorage<T>::DataInline(uint32_t aspectIndex) const {
+    ASSERT(mAspectCompressed[aspectIndex]);
+    return mInlineAspectData[aspectIndex];
+}
+template <typename T>
+const T& SubresourceStorage<T>::Data(uint32_t aspectIndex, uint32_t layer, uint32_t level) const {
+    ASSERT(level == 0 || !LayerCompressed(aspectIndex, layer));
+    ASSERT(!mAspectCompressed[aspectIndex]);
+    return mData[(aspectIndex * mArrayLayerCount + layer) * mMipLevelCount + level];
+}
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Surface.cpp b/src/dawn/native/Surface.cpp
index ff6fd07..6fc1e80 100644
--- a/src/dawn/native/Surface.cpp
+++ b/src/dawn/native/Surface.cpp
@@ -20,251 +20,247 @@
 #include "dawn/native/SwapChain.h"
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include <windows.ui.core.h>
-#    include <windows.ui.xaml.controls.h>
+#include <windows.ui.core.h>
+#include <windows.ui.xaml.controls.h>
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 #if defined(DAWN_USE_X11)
-#    include "dawn/common/xlib_with_undefs.h"
+#include "dawn/common/xlib_with_undefs.h"
 #endif  // defined(DAWN_USE_X11)
 
 namespace dawn::native {
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        Surface::Type value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        switch (value) {
-            case Surface::Type::AndroidWindow:
-                s->Append("AndroidWindow");
-                break;
-            case Surface::Type::MetalLayer:
-                s->Append("MetalLayer");
-                break;
-            case Surface::Type::WindowsHWND:
-                s->Append("WindowsHWND");
-                break;
-            case Surface::Type::WindowsCoreWindow:
-                s->Append("WindowsCoreWindow");
-                break;
-            case Surface::Type::WindowsSwapChainPanel:
-                s->Append("WindowsSwapChainPanel");
-                break;
-            case Surface::Type::XlibWindow:
-                s->Append("XlibWindow");
-                break;
-        }
-        return {true};
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    Surface::Type value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    switch (value) {
+        case Surface::Type::AndroidWindow:
+            s->Append("AndroidWindow");
+            break;
+        case Surface::Type::MetalLayer:
+            s->Append("MetalLayer");
+            break;
+        case Surface::Type::WindowsHWND:
+            s->Append("WindowsHWND");
+            break;
+        case Surface::Type::WindowsCoreWindow:
+            s->Append("WindowsCoreWindow");
+            break;
+        case Surface::Type::WindowsSwapChainPanel:
+            s->Append("WindowsSwapChainPanel");
+            break;
+        case Surface::Type::XlibWindow:
+            s->Append("XlibWindow");
+            break;
     }
+    return {true};
+}
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-    bool InheritsFromCAMetalLayer(void* obj);
+bool InheritsFromCAMetalLayer(void* obj);
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
-    MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
-                                         const SurfaceDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain == nullptr,
-                        "Surface cannot be created with %s. nextInChain is not specified.",
-                        descriptor);
+MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+                                     const SurfaceDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain == nullptr,
+                    "Surface cannot be created with %s. nextInChain is not specified.", descriptor);
 
-        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
-                                     wgpu::SType::SurfaceDescriptorFromAndroidNativeWindow,
-                                     wgpu::SType::SurfaceDescriptorFromMetalLayer,
-                                     wgpu::SType::SurfaceDescriptorFromWindowsHWND,
-                                     wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
-                                     wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel,
-                                     wgpu::SType::SurfaceDescriptorFromXlibWindow));
+    DAWN_TRY(ValidateSingleSType(
+        descriptor->nextInChain, wgpu::SType::SurfaceDescriptorFromAndroidNativeWindow,
+        wgpu::SType::SurfaceDescriptorFromMetalLayer, wgpu::SType::SurfaceDescriptorFromWindowsHWND,
+        wgpu::SType::SurfaceDescriptorFromWindowsCoreWindow,
+        wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel,
+        wgpu::SType::SurfaceDescriptorFromXlibWindow));
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-        const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &metalDesc);
-        if (metalDesc) {
-            // Check that the layer is a CAMetalLayer (or a derived class).
-            DAWN_INVALID_IF(!InheritsFromCAMetalLayer(metalDesc->layer),
-                            "Layer must be a CAMetalLayer");
-            return {};
-        }
+    const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &metalDesc);
+    if (metalDesc) {
+        // Check that the layer is a CAMetalLayer (or a derived class).
+        DAWN_INVALID_IF(!InheritsFromCAMetalLayer(metalDesc->layer),
+                        "Layer must be a CAMetalLayer");
+        return {};
+    }
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_PLATFORM_ANDROID)
-        const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &androidDesc);
-        // Currently the best validation we can do since it's not possible to check if the pointer
-        // to a ANativeWindow is valid.
-        if (androidDesc) {
-            DAWN_INVALID_IF(androidDesc->window == nullptr, "Android window is not set.");
-            return {};
-        }
+    const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &androidDesc);
+    // Currently the best validation we can do since it's not possible to check if the pointer
+    // to an ANativeWindow is valid.
+    if (androidDesc) {
+        DAWN_INVALID_IF(androidDesc->window == nullptr, "Android window is not set.");
+        return {};
+    }
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    if defined(DAWN_PLATFORM_WIN32)
-        const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &hwndDesc);
-        if (hwndDesc) {
-            DAWN_INVALID_IF(IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0, "Invalid HWND");
-            return {};
-        }
-#    endif  // defined(DAWN_PLATFORM_WIN32)
-        const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &coreWindowDesc);
-        if (coreWindowDesc) {
-            // Validate the coreWindow by query for ICoreWindow interface
-            ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
-            DAWN_INVALID_IF(coreWindowDesc->coreWindow == nullptr ||
-                                FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
-                                           ->QueryInterface(IID_PPV_ARGS(&coreWindow))),
-                            "Invalid CoreWindow");
-            return {};
-        }
-        const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
-        if (swapChainPanelDesc) {
-            // Validate the swapChainPanel by querying for ISwapChainPanel interface
-            ComPtr<ABI::Windows::UI::Xaml::Controls::ISwapChainPanel> swapChainPanel;
-            DAWN_INVALID_IF(swapChainPanelDesc->swapChainPanel == nullptr ||
-                                FAILED(static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel)
-                                           ->QueryInterface(IID_PPV_ARGS(&swapChainPanel))),
-                            "Invalid SwapChainPanel");
-            return {};
-        }
+#if defined(DAWN_PLATFORM_WIN32)
+    const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &hwndDesc);
+    if (hwndDesc) {
+        DAWN_INVALID_IF(IsWindow(static_cast<HWND>(hwndDesc->hwnd)) == 0, "Invalid HWND");
+        return {};
+    }
+#endif  // defined(DAWN_PLATFORM_WIN32)
+    const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &coreWindowDesc);
+    if (coreWindowDesc) {
+        // Validate the coreWindow by querying for ICoreWindow interface
+        ComPtr<ABI::Windows::UI::Core::ICoreWindow> coreWindow;
+        DAWN_INVALID_IF(coreWindowDesc->coreWindow == nullptr ||
+                            FAILED(static_cast<IUnknown*>(coreWindowDesc->coreWindow)
+                                       ->QueryInterface(IID_PPV_ARGS(&coreWindow))),
+                        "Invalid CoreWindow");
+        return {};
+    }
+    const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+    if (swapChainPanelDesc) {
+        // Validate the swapChainPanel by querying for ISwapChainPanel interface
+        ComPtr<ABI::Windows::UI::Xaml::Controls::ISwapChainPanel> swapChainPanel;
+        DAWN_INVALID_IF(swapChainPanelDesc->swapChainPanel == nullptr ||
+                            FAILED(static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel)
+                                       ->QueryInterface(IID_PPV_ARGS(&swapChainPanel))),
+                        "Invalid SwapChainPanel");
+        return {};
+    }
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 #if defined(DAWN_USE_X11)
-        const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &xDesc);
-        if (xDesc) {
-            // Check the validity of the window by calling a getter function on the window that
-            // returns a status code. If the window is bad the call return a status of zero. We
-            // need to set a temporary X11 error handler while doing this because the default
-            // X11 error handler exits the program on any error.
-            XErrorHandler oldErrorHandler =
-                XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
-            XWindowAttributes attributes;
-            int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display),
-                                              xDesc->window, &attributes);
-            XSetErrorHandler(oldErrorHandler);
+    const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &xDesc);
+    if (xDesc) {
+        // Check the validity of the window by calling a getter function on the window that
+        // returns a status code. If the window is bad the call return a status of zero. We
+        // need to set a temporary X11 error handler while doing this because the default
+        // X11 error handler exits the program on any error.
+        XErrorHandler oldErrorHandler = XSetErrorHandler([](Display*, XErrorEvent*) { return 0; });
+        XWindowAttributes attributes;
+        int status = XGetWindowAttributes(reinterpret_cast<Display*>(xDesc->display), xDesc->window,
+                                          &attributes);
+        XSetErrorHandler(oldErrorHandler);
 
-            DAWN_INVALID_IF(status == 0, "Invalid X Window");
-            return {};
-        }
+        DAWN_INVALID_IF(status == 0, "Invalid X Window");
+        return {};
+    }
 #endif  // defined(DAWN_USE_X11)
 
-        return DAWN_FORMAT_VALIDATION_ERROR("Unsupported sType (%s)",
-                                            descriptor->nextInChain->sType);
-    }
+    return DAWN_FORMAT_VALIDATION_ERROR("Unsupported sType (%s)", descriptor->nextInChain->sType);
+}
 
-    Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
-        : mInstance(instance) {
-        ASSERT(descriptor->nextInChain != nullptr);
-        const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
-        const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
-        const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
-        const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
-        const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
-        const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &androidDesc);
-        FindInChain(descriptor->nextInChain, &metalDesc);
-        FindInChain(descriptor->nextInChain, &hwndDesc);
-        FindInChain(descriptor->nextInChain, &coreWindowDesc);
-        FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
-        FindInChain(descriptor->nextInChain, &xDesc);
-        if (metalDesc) {
-            mType = Type::MetalLayer;
-            mMetalLayer = metalDesc->layer;
-        } else if (androidDesc) {
-            mType = Type::AndroidWindow;
-            mAndroidNativeWindow = androidDesc->window;
-        } else if (hwndDesc) {
-            mType = Type::WindowsHWND;
-            mHInstance = hwndDesc->hinstance;
-            mHWND = hwndDesc->hwnd;
-        } else if (coreWindowDesc) {
+Surface::Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor)
+    : mInstance(instance) {
+    ASSERT(descriptor->nextInChain != nullptr);
+    const SurfaceDescriptorFromAndroidNativeWindow* androidDesc = nullptr;
+    const SurfaceDescriptorFromMetalLayer* metalDesc = nullptr;
+    const SurfaceDescriptorFromWindowsHWND* hwndDesc = nullptr;
+    const SurfaceDescriptorFromWindowsCoreWindow* coreWindowDesc = nullptr;
+    const SurfaceDescriptorFromWindowsSwapChainPanel* swapChainPanelDesc = nullptr;
+    const SurfaceDescriptorFromXlibWindow* xDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &androidDesc);
+    FindInChain(descriptor->nextInChain, &metalDesc);
+    FindInChain(descriptor->nextInChain, &hwndDesc);
+    FindInChain(descriptor->nextInChain, &coreWindowDesc);
+    FindInChain(descriptor->nextInChain, &swapChainPanelDesc);
+    FindInChain(descriptor->nextInChain, &xDesc);
+    if (metalDesc) {
+        mType = Type::MetalLayer;
+        mMetalLayer = metalDesc->layer;
+    } else if (androidDesc) {
+        mType = Type::AndroidWindow;
+        mAndroidNativeWindow = androidDesc->window;
+    } else if (hwndDesc) {
+        mType = Type::WindowsHWND;
+        mHInstance = hwndDesc->hinstance;
+        mHWND = hwndDesc->hwnd;
+    } else if (coreWindowDesc) {
 #if defined(DAWN_PLATFORM_WINDOWS)
-            mType = Type::WindowsCoreWindow;
-            mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
+        mType = Type::WindowsCoreWindow;
+        mCoreWindow = static_cast<IUnknown*>(coreWindowDesc->coreWindow);
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
-        } else if (swapChainPanelDesc) {
+    } else if (swapChainPanelDesc) {
 #if defined(DAWN_PLATFORM_WINDOWS)
-            mType = Type::WindowsSwapChainPanel;
-            mSwapChainPanel = static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel);
+        mType = Type::WindowsSwapChainPanel;
+        mSwapChainPanel = static_cast<IUnknown*>(swapChainPanelDesc->swapChainPanel);
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
-        } else if (xDesc) {
-            mType = Type::XlibWindow;
-            mXDisplay = xDesc->display;
-            mXWindow = xDesc->window;
-        } else {
-            UNREACHABLE();
-        }
+    } else if (xDesc) {
+        mType = Type::XlibWindow;
+        mXDisplay = xDesc->display;
+        mXWindow = xDesc->window;
+    } else {
+        UNREACHABLE();
     }
+}
 
-    Surface::~Surface() {
-        if (mSwapChain != nullptr) {
-            mSwapChain->DetachFromSurface();
-            mSwapChain = nullptr;
-        }
+Surface::~Surface() {
+    if (mSwapChain != nullptr) {
+        mSwapChain->DetachFromSurface();
+        mSwapChain = nullptr;
     }
+}
 
-    NewSwapChainBase* Surface::GetAttachedSwapChain() {
-        return mSwapChain.Get();
-    }
+NewSwapChainBase* Surface::GetAttachedSwapChain() {
+    return mSwapChain.Get();
+}
 
-    void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
-        mSwapChain = swapChain;
-    }
+void Surface::SetAttachedSwapChain(NewSwapChainBase* swapChain) {
+    mSwapChain = swapChain;
+}
 
-    InstanceBase* Surface::GetInstance() {
-        return mInstance.Get();
-    }
+InstanceBase* Surface::GetInstance() {
+    return mInstance.Get();
+}
 
-    Surface::Type Surface::GetType() const {
-        return mType;
-    }
+Surface::Type Surface::GetType() const {
+    return mType;
+}
 
-    void* Surface::GetAndroidNativeWindow() const {
-        ASSERT(mType == Type::AndroidWindow);
-        return mAndroidNativeWindow;
-    }
+void* Surface::GetAndroidNativeWindow() const {
+    ASSERT(mType == Type::AndroidWindow);
+    return mAndroidNativeWindow;
+}
 
-    void* Surface::GetMetalLayer() const {
-        ASSERT(mType == Type::MetalLayer);
-        return mMetalLayer;
-    }
+void* Surface::GetMetalLayer() const {
+    ASSERT(mType == Type::MetalLayer);
+    return mMetalLayer;
+}
 
-    void* Surface::GetHInstance() const {
-        ASSERT(mType == Type::WindowsHWND);
-        return mHInstance;
-    }
-    void* Surface::GetHWND() const {
-        ASSERT(mType == Type::WindowsHWND);
-        return mHWND;
-    }
+void* Surface::GetHInstance() const {
+    ASSERT(mType == Type::WindowsHWND);
+    return mHInstance;
+}
+void* Surface::GetHWND() const {
+    ASSERT(mType == Type::WindowsHWND);
+    return mHWND;
+}
 
-    IUnknown* Surface::GetCoreWindow() const {
-        ASSERT(mType == Type::WindowsCoreWindow);
+IUnknown* Surface::GetCoreWindow() const {
+    ASSERT(mType == Type::WindowsCoreWindow);
 #if defined(DAWN_PLATFORM_WINDOWS)
-        return mCoreWindow.Get();
+    return mCoreWindow.Get();
 #else
-        return nullptr;
+    return nullptr;
 #endif
-    }
+}
 
-    IUnknown* Surface::GetSwapChainPanel() const {
-        ASSERT(mType == Type::WindowsSwapChainPanel);
+IUnknown* Surface::GetSwapChainPanel() const {
+    ASSERT(mType == Type::WindowsSwapChainPanel);
 #if defined(DAWN_PLATFORM_WINDOWS)
-        return mSwapChainPanel.Get();
+    return mSwapChainPanel.Get();
 #else
-        return nullptr;
+    return nullptr;
 #endif
-    }
+}
 
-    void* Surface::GetXDisplay() const {
-        ASSERT(mType == Type::XlibWindow);
-        return mXDisplay;
-    }
-    uint32_t Surface::GetXWindow() const {
-        ASSERT(mType == Type::XlibWindow);
-        return mXWindow;
-    }
+void* Surface::GetXDisplay() const {
+    ASSERT(mType == Type::XlibWindow);
+    return mXDisplay;
+}
+uint32_t Surface::GetXWindow() const {
+    ASSERT(mType == Type::XlibWindow);
+    return mXWindow;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Surface.h b/src/dawn/native/Surface.h
index c66253a..b3430f8 100644
--- a/src/dawn/native/Surface.h
+++ b/src/dawn/native/Surface.h
@@ -24,7 +24,7 @@
 #include "dawn/common/Platform.h"
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include "dawn/native/d3d12/d3d12_platform.h"
+#include "dawn/native/d3d12/d3d12_platform.h"
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 // Forward declare IUnknown
@@ -34,90 +34,88 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
-                                         const SurfaceDescriptor* descriptor);
+MaybeError ValidateSurfaceDescriptor(const InstanceBase* instance,
+                                     const SurfaceDescriptor* descriptor);
 
-    // A surface is a sum types of all the kind of windows Dawn supports. The OS-specific types
-    // aren't used because they would cause compilation errors on other OSes (or require
-    // ObjectiveC).
-    // The surface is also used to store the current swapchain so that we can detach it when it is
-    // replaced.
-    class Surface final : public RefCounted {
-      public:
-        Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
+// A surface is a sum types of all the kind of windows Dawn supports. The OS-specific types
+// aren't used because they would cause compilation errors on other OSes (or require
+// ObjectiveC).
+// The surface is also used to store the current swapchain so that we can detach it when it is
+// replaced.
+class Surface final : public RefCounted {
+  public:
+    Surface(InstanceBase* instance, const SurfaceDescriptor* descriptor);
 
-        void SetAttachedSwapChain(NewSwapChainBase* swapChain);
-        NewSwapChainBase* GetAttachedSwapChain();
+    void SetAttachedSwapChain(NewSwapChainBase* swapChain);
+    NewSwapChainBase* GetAttachedSwapChain();
 
-        // These are valid to call on all Surfaces.
-        enum class Type {
-            AndroidWindow,
-            MetalLayer,
-            WindowsHWND,
-            WindowsCoreWindow,
-            WindowsSwapChainPanel,
-            XlibWindow,
-        };
-        Type GetType() const;
-        InstanceBase* GetInstance();
+    // These are valid to call on all Surfaces.
+    enum class Type {
+        AndroidWindow,
+        MetalLayer,
+        WindowsHWND,
+        WindowsCoreWindow,
+        WindowsSwapChainPanel,
+        XlibWindow,
+    };
+    Type GetType() const;
+    InstanceBase* GetInstance();
 
-        // Valid to call if the type is MetalLayer
-        void* GetMetalLayer() const;
+    // Valid to call if the type is MetalLayer
+    void* GetMetalLayer() const;
 
-        // Valid to call if the type is Android
-        void* GetAndroidNativeWindow() const;
+    // Valid to call if the type is Android
+    void* GetAndroidNativeWindow() const;
 
-        // Valid to call if the type is WindowsHWND
-        void* GetHInstance() const;
-        void* GetHWND() const;
+    // Valid to call if the type is WindowsHWND
+    void* GetHInstance() const;
+    void* GetHWND() const;
 
-        // Valid to call if the type is WindowsCoreWindow
-        IUnknown* GetCoreWindow() const;
+    // Valid to call if the type is WindowsCoreWindow
+    IUnknown* GetCoreWindow() const;
 
-        // Valid to call if the type is WindowsSwapChainPanel
-        IUnknown* GetSwapChainPanel() const;
+    // Valid to call if the type is WindowsSwapChainPanel
+    IUnknown* GetSwapChainPanel() const;
 
-        // Valid to call if the type is WindowsXlib
-        void* GetXDisplay() const;
-        uint32_t GetXWindow() const;
+    // Valid to call if the type is WindowsXlib
+    void* GetXDisplay() const;
+    uint32_t GetXWindow() const;
 
-      private:
-        ~Surface() override;
+  private:
+    ~Surface() override;
 
-        Ref<InstanceBase> mInstance;
-        Type mType;
+    Ref<InstanceBase> mInstance;
+    Type mType;
 
-        // The swapchain will set this to null when it is destroyed.
-        Ref<NewSwapChainBase> mSwapChain;
+    // The swapchain will set this to null when it is destroyed.
+    Ref<NewSwapChainBase> mSwapChain;
 
-        // MetalLayer
-        void* mMetalLayer = nullptr;
+    // MetalLayer
+    void* mMetalLayer = nullptr;
 
-        // ANativeWindow
-        void* mAndroidNativeWindow = nullptr;
+    // ANativeWindow
+    void* mAndroidNativeWindow = nullptr;
 
-        // WindowsHwnd
-        void* mHInstance = nullptr;
-        void* mHWND = nullptr;
+    // WindowsHwnd
+    void* mHInstance = nullptr;
+    void* mHWND = nullptr;
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-        // WindowsCoreWindow
-        ComPtr<IUnknown> mCoreWindow;
+    // WindowsCoreWindow
+    ComPtr<IUnknown> mCoreWindow;
 
-        // WindowsSwapChainPanel
-        ComPtr<IUnknown> mSwapChainPanel;
+    // WindowsSwapChainPanel
+    ComPtr<IUnknown> mSwapChainPanel;
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
-        // Xlib
-        void* mXDisplay = nullptr;
-        uint32_t mXWindow = 0;
-    };
+    // Xlib
+    void* mXDisplay = nullptr;
+    uint32_t mXWindow = 0;
+};
 
-    // Not defined in webgpu_absl_format.h/cpp because you can't forward-declare a nested type.
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        Surface::Type value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+// Not defined in webgpu_absl_format.h/cpp because you can't forward-declare a nested type.
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(Surface::Type value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Surface_metal.mm b/src/dawn/native/Surface_metal.mm
index ecb5d88..c4f9792 100644
--- a/src/dawn/native/Surface_metal.mm
+++ b/src/dawn/native/Surface_metal.mm
@@ -15,16 +15,16 @@
 // Contains a helper function for Surface.cpp that needs to be written in ObjectiveC.
 
 #if !defined(DAWN_ENABLE_BACKEND_METAL)
-#    error "Surface_metal.mm requires the Metal backend to be enabled."
+#error "Surface_metal.mm requires the Metal backend to be enabled."
 #endif  // !defined(DAWN_ENABLE_BACKEND_METAL)
 
 #import <QuartzCore/CAMetalLayer.h>
 
 namespace dawn::native {
 
-    bool InheritsFromCAMetalLayer(void* obj) {
-        id<NSObject> object = static_cast<id>(obj);
-        return [object isKindOfClass:[CAMetalLayer class]];
-    }
+bool InheritsFromCAMetalLayer(void* obj) {
+    id<NSObject> object = static_cast<id>(obj);
+    return [object isKindOfClass:[CAMetalLayer class]];
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/SwapChain.cpp b/src/dawn/native/SwapChain.cpp
index a2e735a..16fedc4 100644
--- a/src/dawn/native/SwapChain.cpp
+++ b/src/dawn/native/SwapChain.cpp
@@ -24,400 +24,386 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        class ErrorSwapChain final : public SwapChainBase {
-          public:
-            explicit ErrorSwapChain(DeviceBase* device)
-                : SwapChainBase(device, ObjectBase::kError) {
-            }
+class ErrorSwapChain final : public SwapChainBase {
+  public:
+    explicit ErrorSwapChain(DeviceBase* device) : SwapChainBase(device, ObjectBase::kError) {}
 
-          private:
-            void APIConfigure(wgpu::TextureFormat format,
-                              wgpu::TextureUsage allowedUsage,
-                              uint32_t width,
-                              uint32_t height) override {
-                GetDevice()->ConsumedError(
-                    DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
-            }
+  private:
+    void APIConfigure(wgpu::TextureFormat format,
+                      wgpu::TextureUsage allowedUsage,
+                      uint32_t width,
+                      uint32_t height) override {
+        GetDevice()->ConsumedError(DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+    }
 
-            TextureViewBase* APIGetCurrentTextureView() override {
-                GetDevice()->ConsumedError(
-                    DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
-                return TextureViewBase::MakeError(GetDevice());
-            }
+    TextureViewBase* APIGetCurrentTextureView() override {
+        GetDevice()->ConsumedError(DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+        return TextureViewBase::MakeError(GetDevice());
+    }
 
-            void APIPresent() override {
-                GetDevice()->ConsumedError(
-                    DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
-            }
-        };
+    void APIPresent() override {
+        GetDevice()->ConsumedError(DAWN_FORMAT_VALIDATION_ERROR("%s is an error swapchain.", this));
+    }
+};
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
-                                           const Surface* surface,
-                                           const SwapChainDescriptor* descriptor) {
-        if (descriptor->implementation != 0) {
-            DAWN_INVALID_IF(surface != nullptr,
-                            "Exactly one of surface or implementation must be set");
+MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+                                       const Surface* surface,
+                                       const SwapChainDescriptor* descriptor) {
+    if (descriptor->implementation != 0) {
+        DAWN_INVALID_IF(surface != nullptr, "Exactly one of surface or implementation must be set");
 
-            DawnSwapChainImplementation* impl =
-                reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
+        DawnSwapChainImplementation* impl =
+            reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation);
 
-            DAWN_INVALID_IF(!impl->Init || !impl->Destroy || !impl->Configure ||
-                                !impl->GetNextTexture || !impl->Present,
-                            "Implementation is incomplete");
+        DAWN_INVALID_IF(!impl->Init || !impl->Destroy || !impl->Configure ||
+                            !impl->GetNextTexture || !impl->Present,
+                        "Implementation is incomplete");
 
-        } else {
-            DAWN_INVALID_IF(surface == nullptr,
-                            "At least one of surface or implementation must be set");
+    } else {
+        DAWN_INVALID_IF(surface == nullptr,
+                        "At least one of surface or implementation must be set");
 
-            DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
+        DAWN_TRY(ValidatePresentMode(descriptor->presentMode));
 
 // TODO(crbug.com/dawn/160): Lift this restriction once wgpu::Instance::GetPreferredSurfaceFormat is
 // implemented.
 // TODO(dawn:286):
 #if defined(DAWN_PLATFORM_ANDROID)
-            constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
+        constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::RGBA8Unorm;
 #else
-            constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
+        constexpr wgpu::TextureFormat kRequireSwapChainFormat = wgpu::TextureFormat::BGRA8Unorm;
 #endif  // !defined(DAWN_PLATFORM_ANDROID)
-            DAWN_INVALID_IF(descriptor->format != kRequireSwapChainFormat,
-                            "Format (%s) is not %s, which is (currently) the only accepted format.",
-                            descriptor->format, kRequireSwapChainFormat);
+        DAWN_INVALID_IF(descriptor->format != kRequireSwapChainFormat,
+                        "Format (%s) is not %s, which is (currently) the only accepted format.",
+                        descriptor->format, kRequireSwapChainFormat);
 
-            DAWN_INVALID_IF(descriptor->usage != wgpu::TextureUsage::RenderAttachment,
-                            "Usage (%s) is not %s, which is (currently) the only accepted usage.",
-                            descriptor->usage, wgpu::TextureUsage::RenderAttachment);
+        DAWN_INVALID_IF(descriptor->usage != wgpu::TextureUsage::RenderAttachment,
+                        "Usage (%s) is not %s, which is (currently) the only accepted usage.",
+                        descriptor->usage, wgpu::TextureUsage::RenderAttachment);
 
-            DAWN_INVALID_IF(descriptor->width == 0 || descriptor->height == 0,
-                            "Swap Chain size (width: %u, height: %u) is empty.", descriptor->width,
-                            descriptor->height);
+        DAWN_INVALID_IF(descriptor->width == 0 || descriptor->height == 0,
+                        "Swap Chain size (width: %u, height: %u) is empty.", descriptor->width,
+                        descriptor->height);
 
-            DAWN_INVALID_IF(
-                descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
-                    descriptor->height > device->GetLimits().v1.maxTextureDimension2D,
-                "Swap Chain size (width: %u, height: %u) is greater than the maximum 2D texture "
-                "size (width: %u, height: %u).",
-                descriptor->width, descriptor->height, device->GetLimits().v1.maxTextureDimension2D,
-                device->GetLimits().v1.maxTextureDimension2D);
-        }
-
-        return {};
+        DAWN_INVALID_IF(
+            descriptor->width > device->GetLimits().v1.maxTextureDimension2D ||
+                descriptor->height > device->GetLimits().v1.maxTextureDimension2D,
+            "Swap Chain size (width: %u, height: %u) is greater than the maximum 2D texture "
+            "size (width: %u, height: %u).",
+            descriptor->width, descriptor->height, device->GetLimits().v1.maxTextureDimension2D,
+            device->GetLimits().v1.maxTextureDimension2D);
     }
 
-    TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
-        TextureDescriptor desc;
-        desc.usage = swapChain->GetUsage();
-        desc.dimension = wgpu::TextureDimension::e2D;
-        desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
-        desc.format = swapChain->GetFormat();
-        desc.mipLevelCount = 1;
-        desc.sampleCount = 1;
+    return {};
+}
 
-        return desc;
+TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain) {
+    TextureDescriptor desc;
+    desc.usage = swapChain->GetUsage();
+    desc.dimension = wgpu::TextureDimension::e2D;
+    desc.size = {swapChain->GetWidth(), swapChain->GetHeight(), 1};
+    desc.format = swapChain->GetFormat();
+    desc.mipLevelCount = 1;
+    desc.sampleCount = 1;
+
+    return desc;
+}
+
+// SwapChainBase
+
+SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
+    TrackInDevice();
+}
+
+SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag) {}
+
+SwapChainBase::~SwapChainBase() {}
+
+void SwapChainBase::DestroyImpl() {}
+
+// static
+SwapChainBase* SwapChainBase::MakeError(DeviceBase* device) {
+    return new ErrorSwapChain(device);
+}
+
+ObjectType SwapChainBase::GetType() const {
+    return ObjectType::SwapChain;
+}
+
+// OldSwapChainBase
+
+OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
+    : SwapChainBase(device),
+      mImplementation(*reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
+}
+
+OldSwapChainBase::~OldSwapChainBase() {
+    if (!IsError()) {
+        const auto& im = GetImplementation();
+        im.Destroy(im.userData);
     }
+}
 
-    // SwapChainBase
-
-    SwapChainBase::SwapChainBase(DeviceBase* device) : ApiObjectBase(device, kLabelNotImplemented) {
-        TrackInDevice();
+void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+                                    wgpu::TextureUsage allowedUsage,
+                                    uint32_t width,
+                                    uint32_t height) {
+    if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
+        return;
     }
+    ASSERT(!IsError());
 
-    SwapChainBase::SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag) {
+    allowedUsage |= wgpu::TextureUsage::Present;
+
+    mFormat = format;
+    mAllowedUsage = allowedUsage;
+    mWidth = width;
+    mHeight = height;
+    mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
+                              static_cast<WGPUTextureUsage>(allowedUsage), width, height);
+}
+
+TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
+    if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
+        return TextureViewBase::MakeError(GetDevice());
     }
+    ASSERT(!IsError());
 
-    SwapChainBase::~SwapChainBase() {
-    }
-
-    void SwapChainBase::DestroyImpl() {
-    }
-
-    // static
-    SwapChainBase* SwapChainBase::MakeError(DeviceBase* device) {
-        return new ErrorSwapChain(device);
-    }
-
-    ObjectType SwapChainBase::GetType() const {
-        return ObjectType::SwapChain;
-    }
-
-    // OldSwapChainBase
-
-    OldSwapChainBase::OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor)
-        : SwapChainBase(device),
-          mImplementation(
-              *reinterpret_cast<DawnSwapChainImplementation*>(descriptor->implementation)) {
-    }
-
-    OldSwapChainBase::~OldSwapChainBase() {
-        if (!IsError()) {
-            const auto& im = GetImplementation();
-            im.Destroy(im.userData);
-        }
-    }
-
-    void OldSwapChainBase::APIConfigure(wgpu::TextureFormat format,
-                                        wgpu::TextureUsage allowedUsage,
-                                        uint32_t width,
-                                        uint32_t height) {
-        if (GetDevice()->ConsumedError(ValidateConfigure(format, allowedUsage, width, height))) {
-            return;
-        }
-        ASSERT(!IsError());
-
-        allowedUsage |= wgpu::TextureUsage::Present;
-
-        mFormat = format;
-        mAllowedUsage = allowedUsage;
-        mWidth = width;
-        mHeight = height;
-        mImplementation.Configure(mImplementation.userData, static_cast<WGPUTextureFormat>(format),
-                                  static_cast<WGPUTextureUsage>(allowedUsage), width, height);
-    }
-
-    TextureViewBase* OldSwapChainBase::APIGetCurrentTextureView() {
-        if (GetDevice()->ConsumedError(ValidateGetCurrentTextureView())) {
-            return TextureViewBase::MakeError(GetDevice());
-        }
-        ASSERT(!IsError());
-
-        // Return the same current texture view until Present is called.
-        if (mCurrentTextureView != nullptr) {
-            // Calling GetCurrentTextureView always returns a new reference so add it even when
-            // reuse the existing texture view.
-            mCurrentTextureView->Reference();
-            return mCurrentTextureView.Get();
-        }
-
-        // Create the backing texture and the view.
-        TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = mWidth;
-        descriptor.size.height = mHeight;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = mFormat;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = mAllowedUsage;
-
-        // Get the texture but remove the external refcount because it is never passed outside
-        // of dawn_native
-        mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
-
-        mCurrentTextureView = mCurrentTexture->APICreateView();
+    // Return the same current texture view until Present is called.
+    if (mCurrentTextureView != nullptr) {
+        // Calling GetCurrentTextureView always returns a new reference so add it even when
+        // reuse the existing texture view.
+        mCurrentTextureView->Reference();
         return mCurrentTextureView.Get();
     }
 
-    void OldSwapChainBase::APIPresent() {
-        if (GetDevice()->ConsumedError(ValidatePresent())) {
-            return;
-        }
-        ASSERT(!IsError());
+    // Create the backing texture and the view.
+    TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = mWidth;
+    descriptor.size.height = mHeight;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = mFormat;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = mAllowedUsage;
 
-        if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
-            return;
-        }
+    // Get the texture but remove the external refcount because it is never passed outside
+    // of dawn_native
+    mCurrentTexture = AcquireRef(GetNextTextureImpl(&descriptor));
 
-        mImplementation.Present(mImplementation.userData);
+    mCurrentTextureView = mCurrentTexture->APICreateView();
+    return mCurrentTextureView.Get();
+}
 
-        mCurrentTexture = nullptr;
-        mCurrentTextureView = nullptr;
+void OldSwapChainBase::APIPresent() {
+    if (GetDevice()->ConsumedError(ValidatePresent())) {
+        return;
+    }
+    ASSERT(!IsError());
+
+    if (GetDevice()->ConsumedError(OnBeforePresent(mCurrentTextureView.Get()))) {
+        return;
     }
 
-    const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
-        ASSERT(!IsError());
-        return mImplementation;
+    mImplementation.Present(mImplementation.userData);
+
+    mCurrentTexture = nullptr;
+    mCurrentTextureView = nullptr;
+}
+
+const DawnSwapChainImplementation& OldSwapChainBase::GetImplementation() {
+    ASSERT(!IsError());
+    return mImplementation;
+}
+
+MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
+                                               wgpu::TextureUsage allowedUsage,
+                                               uint32_t width,
+                                               uint32_t height) const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    DAWN_TRY(ValidateTextureUsage(allowedUsage));
+    DAWN_TRY(ValidateTextureFormat(format));
+
+    DAWN_INVALID_IF(width == 0 || height == 0,
+                    "Configuration size (width: %u, height: %u) for %s is empty.", width, height,
+                    this);
+
+    return {};
+}
+
+MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    // If width is 0, it implies swap chain has never been configured
+    DAWN_INVALID_IF(mWidth == 0, "%s was not configured prior to calling GetNextTexture.", this);
+
+    return {};
+}
+
+MaybeError OldSwapChainBase::ValidatePresent() const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    DAWN_INVALID_IF(
+        mCurrentTextureView == nullptr,
+        "GetCurrentTextureView was not called on %s this frame prior to calling Present.", this);
+
+    return {};
+}
+
+// Implementation of NewSwapChainBase
+
+NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
+                                   Surface* surface,
+                                   const SwapChainDescriptor* descriptor)
+    : SwapChainBase(device),
+      mAttached(false),
+      mWidth(descriptor->width),
+      mHeight(descriptor->height),
+      mFormat(descriptor->format),
+      mUsage(descriptor->usage),
+      mPresentMode(descriptor->presentMode),
+      mSurface(surface) {}
+
+NewSwapChainBase::~NewSwapChainBase() {
+    if (mCurrentTextureView != nullptr) {
+        ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+               TextureBase::TextureState::Destroyed);
     }
 
-    MaybeError OldSwapChainBase::ValidateConfigure(wgpu::TextureFormat format,
-                                                   wgpu::TextureUsage allowedUsage,
-                                                   uint32_t width,
-                                                   uint32_t height) const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
+    ASSERT(!mAttached);
+}
 
-        DAWN_TRY(ValidateTextureUsage(allowedUsage));
-        DAWN_TRY(ValidateTextureFormat(format));
-
-        DAWN_INVALID_IF(width == 0 || height == 0,
-                        "Configuration size (width: %u, height: %u) for %s is empty.", width,
-                        height, this);
-
-        return {};
+void NewSwapChainBase::DetachFromSurface() {
+    if (mAttached) {
+        DetachFromSurfaceImpl();
+        mSurface = nullptr;
+        mAttached = false;
     }
+}
 
-    MaybeError OldSwapChainBase::ValidateGetCurrentTextureView() const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
+void NewSwapChainBase::SetIsAttached() {
+    mAttached = true;
+}
 
-        // If width is 0, it implies swap chain has never been configured
-        DAWN_INVALID_IF(mWidth == 0, "%s was not configured prior to calling GetNextTexture.",
-                        this);
+void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
+                                    wgpu::TextureUsage allowedUsage,
+                                    uint32_t width,
+                                    uint32_t height) {
+    GetDevice()->ConsumedError(
+        DAWN_FORMAT_VALIDATION_ERROR("Configure is invalid for surface-based swapchains."));
+}
 
-        return {};
+TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
+    Ref<TextureViewBase> result;
+    if (GetDevice()->ConsumedError(GetCurrentTextureView(), &result,
+                                   "calling %s.GetCurrentTextureView()", this)) {
+        return TextureViewBase::MakeError(GetDevice());
     }
+    return result.Detach();
+}
 
-    MaybeError OldSwapChainBase::ValidatePresent() const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
+ResultOrError<Ref<TextureViewBase>> NewSwapChainBase::GetCurrentTextureView() {
+    DAWN_TRY(ValidateGetCurrentTextureView());
 
-        DAWN_INVALID_IF(
-            mCurrentTextureView == nullptr,
-            "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
-            this);
-
-        return {};
-    }
-
-    // Implementation of NewSwapChainBase
-
-    NewSwapChainBase::NewSwapChainBase(DeviceBase* device,
-                                       Surface* surface,
-                                       const SwapChainDescriptor* descriptor)
-        : SwapChainBase(device),
-          mAttached(false),
-          mWidth(descriptor->width),
-          mHeight(descriptor->height),
-          mFormat(descriptor->format),
-          mUsage(descriptor->usage),
-          mPresentMode(descriptor->presentMode),
-          mSurface(surface) {
-    }
-
-    NewSwapChainBase::~NewSwapChainBase() {
-        if (mCurrentTextureView != nullptr) {
-            ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
-                   TextureBase::TextureState::Destroyed);
-        }
-
-        ASSERT(!mAttached);
-    }
-
-    void NewSwapChainBase::DetachFromSurface() {
-        if (mAttached) {
-            DetachFromSurfaceImpl();
-            mSurface = nullptr;
-            mAttached = false;
-        }
-    }
-
-    void NewSwapChainBase::SetIsAttached() {
-        mAttached = true;
-    }
-
-    void NewSwapChainBase::APIConfigure(wgpu::TextureFormat format,
-                                        wgpu::TextureUsage allowedUsage,
-                                        uint32_t width,
-                                        uint32_t height) {
-        GetDevice()->ConsumedError(
-            DAWN_FORMAT_VALIDATION_ERROR("Configure is invalid for surface-based swapchains."));
-    }
-
-    TextureViewBase* NewSwapChainBase::APIGetCurrentTextureView() {
-        Ref<TextureViewBase> result;
-        if (GetDevice()->ConsumedError(GetCurrentTextureView(), &result,
-                                       "calling %s.GetCurrentTextureView()", this)) {
-            return TextureViewBase::MakeError(GetDevice());
-        }
-        return result.Detach();
-    }
-
-    ResultOrError<Ref<TextureViewBase>> NewSwapChainBase::GetCurrentTextureView() {
-        DAWN_TRY(ValidateGetCurrentTextureView());
-
-        if (mCurrentTextureView != nullptr) {
-            // Calling GetCurrentTextureView always returns a new reference.
-            return mCurrentTextureView;
-        }
-
-        DAWN_TRY_ASSIGN(mCurrentTextureView, GetCurrentTextureViewImpl());
-
-        // Check that the return texture view matches exactly what was given for this descriptor.
-        ASSERT(mCurrentTextureView->GetTexture()->GetFormat().format == mFormat);
-        ASSERT(IsSubset(mUsage, mCurrentTextureView->GetTexture()->GetUsage()));
-        ASSERT(mCurrentTextureView->GetLevelCount() == 1);
-        ASSERT(mCurrentTextureView->GetLayerCount() == 1);
-        ASSERT(mCurrentTextureView->GetDimension() == wgpu::TextureViewDimension::e2D);
-        ASSERT(mCurrentTextureView->GetTexture()
-                   ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
-                   .width == mWidth);
-        ASSERT(mCurrentTextureView->GetTexture()
-                   ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
-                   .height == mHeight);
-
+    if (mCurrentTextureView != nullptr) {
+        // Calling GetCurrentTextureView always returns a new reference.
         return mCurrentTextureView;
     }
 
-    void NewSwapChainBase::APIPresent() {
-        if (GetDevice()->ConsumedError(ValidatePresent())) {
-            return;
-        }
+    DAWN_TRY_ASSIGN(mCurrentTextureView, GetCurrentTextureViewImpl());
 
-        if (GetDevice()->ConsumedError(PresentImpl())) {
-            return;
-        }
+    // Check that the returned texture view matches exactly what was given for this descriptor.
+    ASSERT(mCurrentTextureView->GetTexture()->GetFormat().format == mFormat);
+    ASSERT(IsSubset(mUsage, mCurrentTextureView->GetTexture()->GetUsage()));
+    ASSERT(mCurrentTextureView->GetLevelCount() == 1);
+    ASSERT(mCurrentTextureView->GetLayerCount() == 1);
+    ASSERT(mCurrentTextureView->GetDimension() == wgpu::TextureViewDimension::e2D);
+    ASSERT(mCurrentTextureView->GetTexture()
+               ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
+               .width == mWidth);
+    ASSERT(mCurrentTextureView->GetTexture()
+               ->GetMipLevelVirtualSize(mCurrentTextureView->GetBaseMipLevel())
+               .height == mHeight);
 
-        ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
-               TextureBase::TextureState::Destroyed);
-        mCurrentTextureView = nullptr;
+    return mCurrentTextureView;
+}
+
+void NewSwapChainBase::APIPresent() {
+    if (GetDevice()->ConsumedError(ValidatePresent())) {
+        return;
     }
 
-    uint32_t NewSwapChainBase::GetWidth() const {
-        return mWidth;
+    if (GetDevice()->ConsumedError(PresentImpl())) {
+        return;
     }
 
-    uint32_t NewSwapChainBase::GetHeight() const {
-        return mHeight;
-    }
+    ASSERT(mCurrentTextureView->GetTexture()->GetTextureState() ==
+           TextureBase::TextureState::Destroyed);
+    mCurrentTextureView = nullptr;
+}
 
-    wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
-        return mFormat;
-    }
+uint32_t NewSwapChainBase::GetWidth() const {
+    return mWidth;
+}
 
-    wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
-        return mUsage;
-    }
+uint32_t NewSwapChainBase::GetHeight() const {
+    return mHeight;
+}
 
-    wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
-        return mPresentMode;
-    }
+wgpu::TextureFormat NewSwapChainBase::GetFormat() const {
+    return mFormat;
+}
 
-    Surface* NewSwapChainBase::GetSurface() const {
-        return mSurface;
-    }
+wgpu::TextureUsage NewSwapChainBase::GetUsage() const {
+    return mUsage;
+}
 
-    bool NewSwapChainBase::IsAttached() const {
-        return mAttached;
-    }
+wgpu::PresentMode NewSwapChainBase::GetPresentMode() const {
+    return mPresentMode;
+}
 
-    wgpu::BackendType NewSwapChainBase::GetBackendType() const {
-        return GetDevice()->GetAdapter()->GetBackendType();
-    }
+Surface* NewSwapChainBase::GetSurface() const {
+    return mSurface;
+}
 
-    MaybeError NewSwapChainBase::ValidatePresent() const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
+bool NewSwapChainBase::IsAttached() const {
+    return mAttached;
+}
 
-        DAWN_INVALID_IF(!mAttached, "Cannot call Present called on detached %s.", this);
+wgpu::BackendType NewSwapChainBase::GetBackendType() const {
+    return GetDevice()->GetAdapter()->GetBackendType();
+}
 
-        DAWN_INVALID_IF(
-            mCurrentTextureView == nullptr,
-            "GetCurrentTextureView was not called on %s this frame prior to calling Present.",
-            this);
+MaybeError NewSwapChainBase::ValidatePresent() const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
 
-        return {};
-    }
+    DAWN_INVALID_IF(!mAttached, "Cannot call Present called on detached %s.", this);
 
-    MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
-        DAWN_TRY(GetDevice()->ValidateIsAlive());
-        DAWN_TRY(GetDevice()->ValidateObject(this));
+    DAWN_INVALID_IF(
+        mCurrentTextureView == nullptr,
+        "GetCurrentTextureView was not called on %s this frame prior to calling Present.", this);
 
-        DAWN_INVALID_IF(!mAttached, "Cannot call GetCurrentTextureView on detached %s.", this);
+    return {};
+}
 
-        return {};
-    }
+MaybeError NewSwapChainBase::ValidateGetCurrentTextureView() const {
+    DAWN_TRY(GetDevice()->ValidateIsAlive());
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+
+    DAWN_INVALID_IF(!mAttached, "Cannot call GetCurrentTextureView on detached %s.", this);
+
+    return {};
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/SwapChain.h b/src/dawn/native/SwapChain.h
index 24f12e4..36ed02a 100644
--- a/src/dawn/native/SwapChain.h
+++ b/src/dawn/native/SwapChain.h
@@ -24,146 +24,144 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
-                                           const Surface* surface,
-                                           const SwapChainDescriptor* descriptor);
+MaybeError ValidateSwapChainDescriptor(const DeviceBase* device,
+                                       const Surface* surface,
+                                       const SwapChainDescriptor* descriptor);
 
-    TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
+TextureDescriptor GetSwapChainBaseTextureDescriptor(NewSwapChainBase* swapChain);
 
-    class SwapChainBase : public ApiObjectBase {
-      public:
-        explicit SwapChainBase(DeviceBase* device);
+class SwapChainBase : public ApiObjectBase {
+  public:
+    explicit SwapChainBase(DeviceBase* device);
 
-        static SwapChainBase* MakeError(DeviceBase* device);
+    static SwapChainBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        // Dawn API
-        virtual void APIConfigure(wgpu::TextureFormat format,
-                                  wgpu::TextureUsage allowedUsage,
-                                  uint32_t width,
-                                  uint32_t height) = 0;
-        virtual TextureViewBase* APIGetCurrentTextureView() = 0;
-        virtual void APIPresent() = 0;
+    // Dawn API
+    virtual void APIConfigure(wgpu::TextureFormat format,
+                              wgpu::TextureUsage allowedUsage,
+                              uint32_t width,
+                              uint32_t height) = 0;
+    virtual TextureViewBase* APIGetCurrentTextureView() = 0;
+    virtual void APIPresent() = 0;
 
-      protected:
-        SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
-        ~SwapChainBase() override;
-        void DestroyImpl() override;
-    };
+  protected:
+    SwapChainBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+    ~SwapChainBase() override;
+    void DestroyImpl() override;
+};
 
-    // The base class for implementation-based SwapChains that are deprecated.
-    class OldSwapChainBase : public SwapChainBase {
-      public:
-        OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
+// The base class for implementation-based SwapChains that are deprecated.
+class OldSwapChainBase : public SwapChainBase {
+  public:
+    OldSwapChainBase(DeviceBase* device, const SwapChainDescriptor* descriptor);
 
-        // Dawn API
-        void APIConfigure(wgpu::TextureFormat format,
-                          wgpu::TextureUsage allowedUsage,
-                          uint32_t width,
-                          uint32_t height) override;
-        TextureViewBase* APIGetCurrentTextureView() override;
-        void APIPresent() override;
+    // Dawn API
+    void APIConfigure(wgpu::TextureFormat format,
+                      wgpu::TextureUsage allowedUsage,
+                      uint32_t width,
+                      uint32_t height) override;
+    TextureViewBase* APIGetCurrentTextureView() override;
+    void APIPresent() override;
 
-      protected:
-        ~OldSwapChainBase() override;
-        const DawnSwapChainImplementation& GetImplementation();
-        virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
-        virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
+  protected:
+    ~OldSwapChainBase() override;
+    const DawnSwapChainImplementation& GetImplementation();
+    virtual TextureBase* GetNextTextureImpl(const TextureDescriptor*) = 0;
+    virtual MaybeError OnBeforePresent(TextureViewBase* view) = 0;
 
-      private:
-        MaybeError ValidateConfigure(wgpu::TextureFormat format,
-                                     wgpu::TextureUsage allowedUsage,
-                                     uint32_t width,
-                                     uint32_t height) const;
-        MaybeError ValidateGetCurrentTextureView() const;
-        MaybeError ValidatePresent() const;
+  private:
+    MaybeError ValidateConfigure(wgpu::TextureFormat format,
+                                 wgpu::TextureUsage allowedUsage,
+                                 uint32_t width,
+                                 uint32_t height) const;
+    MaybeError ValidateGetCurrentTextureView() const;
+    MaybeError ValidatePresent() const;
 
-        DawnSwapChainImplementation mImplementation = {};
-        wgpu::TextureFormat mFormat = {};
-        wgpu::TextureUsage mAllowedUsage;
-        uint32_t mWidth = 0;
-        uint32_t mHeight = 0;
-        Ref<TextureBase> mCurrentTexture;
-        Ref<TextureViewBase> mCurrentTextureView;
-    };
+    DawnSwapChainImplementation mImplementation = {};
+    wgpu::TextureFormat mFormat = {};
+    wgpu::TextureUsage mAllowedUsage;
+    uint32_t mWidth = 0;
+    uint32_t mHeight = 0;
+    Ref<TextureBase> mCurrentTexture;
+    Ref<TextureViewBase> mCurrentTextureView;
+};
 
-    // The base class for surface-based SwapChains that aren't ready yet.
-    class NewSwapChainBase : public SwapChainBase {
-      public:
-        NewSwapChainBase(DeviceBase* device,
-                         Surface* surface,
-                         const SwapChainDescriptor* descriptor);
+// The base class for surface-based SwapChains that aren't ready yet.
+class NewSwapChainBase : public SwapChainBase {
+  public:
+    NewSwapChainBase(DeviceBase* device, Surface* surface, const SwapChainDescriptor* descriptor);
 
-        // This is called when the swapchain is detached when one of the following happens:
-        //
-        //  - The surface it is attached to is being destroyed.
-        //  - The swapchain is being replaced by another one on the surface.
-        //
-        // Note that the surface has a Ref on the last swapchain that was used on it so the
-        // SwapChain destructor will only be called after one of the things above happens.
-        //
-        // The call for the detaching previous swapchain should be called inside the backend
-        // implementation of SwapChains. This is to allow them to acquire any resources before
-        // calling detach to make a seamless transition from the previous swapchain.
-        //
-        // Likewise the call for the swapchain being destroyed must be done in the backend's
-        // swapchain's destructor since C++ says it is UB to call virtual methods in the base class
-        // destructor.
-        void DetachFromSurface();
+    // This is called when the swapchain is detached when one of the following happens:
+    //
+    //  - The surface it is attached to is being destroyed.
+    //  - The swapchain is being replaced by another one on the surface.
+    //
+    // Note that the surface has a Ref on the last swapchain that was used on it so the
+    // SwapChain destructor will only be called after one of the things above happens.
+    //
+    // The call for the detaching previous swapchain should be called inside the backend
+    // implementation of SwapChains. This is to allow them to acquire any resources before
+    // calling detach to make a seamless transition from the previous swapchain.
+    //
+    // Likewise the call for the swapchain being destroyed must be done in the backend's
+    // swapchain's destructor since C++ says it is UB to call virtual methods in the base class
+    // destructor.
+    void DetachFromSurface();
 
-        void SetIsAttached();
+    void SetIsAttached();
 
-        // Dawn API
-        void APIConfigure(wgpu::TextureFormat format,
-                          wgpu::TextureUsage allowedUsage,
-                          uint32_t width,
-                          uint32_t height) override;
-        TextureViewBase* APIGetCurrentTextureView() override;
-        void APIPresent() override;
+    // Dawn API
+    void APIConfigure(wgpu::TextureFormat format,
+                      wgpu::TextureUsage allowedUsage,
+                      uint32_t width,
+                      uint32_t height) override;
+    TextureViewBase* APIGetCurrentTextureView() override;
+    void APIPresent() override;
 
-        uint32_t GetWidth() const;
-        uint32_t GetHeight() const;
-        wgpu::TextureFormat GetFormat() const;
-        wgpu::TextureUsage GetUsage() const;
-        wgpu::PresentMode GetPresentMode() const;
-        Surface* GetSurface() const;
-        bool IsAttached() const;
-        wgpu::BackendType GetBackendType() const;
+    uint32_t GetWidth() const;
+    uint32_t GetHeight() const;
+    wgpu::TextureFormat GetFormat() const;
+    wgpu::TextureUsage GetUsage() const;
+    wgpu::PresentMode GetPresentMode() const;
+    Surface* GetSurface() const;
+    bool IsAttached() const;
+    wgpu::BackendType GetBackendType() const;
 
-      protected:
-        ~NewSwapChainBase() override;
+  protected:
+    ~NewSwapChainBase() override;
 
-      private:
-        bool mAttached;
-        uint32_t mWidth;
-        uint32_t mHeight;
-        wgpu::TextureFormat mFormat;
-        wgpu::TextureUsage mUsage;
-        wgpu::PresentMode mPresentMode;
+  private:
+    bool mAttached;
+    uint32_t mWidth;
+    uint32_t mHeight;
+    wgpu::TextureFormat mFormat;
+    wgpu::TextureUsage mUsage;
+    wgpu::PresentMode mPresentMode;
 
-        // This is a weak reference to the surface. If the surface is destroyed it will call
-        // DetachFromSurface and mSurface will be updated to nullptr.
-        Surface* mSurface = nullptr;
-        Ref<TextureViewBase> mCurrentTextureView;
+    // This is a weak reference to the surface. If the surface is destroyed it will call
+    // DetachFromSurface and mSurface will be updated to nullptr.
+    Surface* mSurface = nullptr;
+    Ref<TextureViewBase> mCurrentTextureView;
 
-        MaybeError ValidatePresent() const;
-        MaybeError ValidateGetCurrentTextureView() const;
+    MaybeError ValidatePresent() const;
+    MaybeError ValidateGetCurrentTextureView() const;
 
-        // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
-        // manner, starting with GetCurrentTextureViewImpl.
+    // GetCurrentTextureViewImpl and PresentImpl are guaranteed to be called in an interleaved
+    // manner, starting with GetCurrentTextureViewImpl.
 
-        // The returned texture view must match the swapchain descriptor exactly.
-        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureView();
-        virtual ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() = 0;
-        // The call to present must destroy the current view's texture so further access to it are
-        // invalid.
-        virtual MaybeError PresentImpl() = 0;
+    // The returned texture view must match the swapchain descriptor exactly.
+    ResultOrError<Ref<TextureViewBase>> GetCurrentTextureView();
+    virtual ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() = 0;
+    // The call to present must destroy the current view's texture so further accesses to it are
+    // invalid.
+    virtual MaybeError PresentImpl() = 0;
 
-        // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
-        // called no other virtual method can be called.
-        virtual void DetachFromSurfaceImpl() = 0;
-    };
+    // Guaranteed to be called exactly once during the lifetime of the SwapChain. After it is
+    // called no other virtual method can be called.
+    virtual void DetachFromSurfaceImpl() = 0;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Texture.cpp b/src/dawn/native/Texture.cpp
index dcae323..8784cf9 100644
--- a/src/dawn/native/Texture.cpp
+++ b/src/dawn/native/Texture.cpp
@@ -28,846 +28,826 @@
 #include "dawn/native/ValidationUtils_autogen.h"
 
 namespace dawn::native {
-    namespace {
+namespace {
 
-        MaybeError ValidateTextureViewFormatCompatibility(const DeviceBase* device,
-                                                          const Format& format,
-                                                          wgpu::TextureFormat viewFormatEnum) {
-            const Format* viewFormat;
-            DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(viewFormatEnum));
+MaybeError ValidateTextureViewFormatCompatibility(const DeviceBase* device,
+                                                  const Format& format,
+                                                  wgpu::TextureFormat viewFormatEnum) {
+    const Format* viewFormat;
+    DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(viewFormatEnum));
 
-            DAWN_INVALID_IF(!format.ViewCompatibleWith(*viewFormat),
-                            "The texture view format (%s) is not texture view format compatible "
-                            "with the texture format (%s).",
-                            viewFormatEnum, format.format);
+    DAWN_INVALID_IF(!format.ViewCompatibleWith(*viewFormat),
+                    "The texture view format (%s) is not texture view format compatible "
+                    "with the texture format (%s).",
+                    viewFormatEnum, format.format);
+    return {};
+}
+
+MaybeError ValidateCanViewTextureAs(const DeviceBase* device,
+                                    const TextureBase* texture,
+                                    const Format& viewFormat,
+                                    wgpu::TextureAspect aspect) {
+    const Format& format = texture->GetFormat();
+
+    if (aspect != wgpu::TextureAspect::All) {
+        wgpu::TextureFormat aspectFormat = format.GetAspectInfo(aspect).format;
+        if (viewFormat.format == aspectFormat) {
             return {};
+        } else {
+            return DAWN_FORMAT_VALIDATION_ERROR(
+                "The view format (%s) is not compatible with %s of %s (%s).", viewFormat.format,
+                aspect, format.format, aspectFormat);
         }
+    }
 
-        MaybeError ValidateCanViewTextureAs(const DeviceBase* device,
-                                            const TextureBase* texture,
-                                            const Format& viewFormat,
-                                            wgpu::TextureAspect aspect) {
-            const Format& format = texture->GetFormat();
+    if (format.format == viewFormat.format) {
+        return {};
+    }
 
-            if (aspect != wgpu::TextureAspect::All) {
-                wgpu::TextureFormat aspectFormat = format.GetAspectInfo(aspect).format;
-                if (viewFormat.format == aspectFormat) {
-                    return {};
-                } else {
-                    return DAWN_FORMAT_VALIDATION_ERROR(
-                        "The view format (%s) is not compatible with %s of %s (%s).",
-                        viewFormat.format, aspect, format.format, aspectFormat);
-                }
-            }
+    const FormatSet& compatibleViewFormats = texture->GetViewFormats();
+    if (compatibleViewFormats[viewFormat]) {
+        // Validation of this list is done on texture creation, so we don't need to
+        // handle the case where a format is in the list, but not compatible.
+        return {};
+    }
 
-            if (format.format == viewFormat.format) {
-                return {};
-            }
+    // |viewFormat| is not in the list. Check compatibility to generate an error message
+    // depending on whether it could be compatible, but needs to be explicitly listed,
+    // or it could never be compatible.
+    if (!format.ViewCompatibleWith(viewFormat)) {
+        // The view format isn't compatible with the format at all. Return an error
+        // that indicates this, in addition to reporting that it's missing from the
+        // list.
+        return DAWN_FORMAT_VALIDATION_ERROR(
+            "The texture view format (%s) is not compatible with the "
+            "texture format (%s)."
+            "The formats must be compatible, and the view format "
+            "must be passed in the list of view formats on texture creation.",
+            viewFormat.format, format.format);
+    } else {
+        // The view format is compatible, but not in the list.
+        return DAWN_FORMAT_VALIDATION_ERROR(
+            "%s was not created with the texture view format (%s) "
+            "in the list of compatible view formats.",
+            texture, viewFormat.format);
+    }
+    return {};
+}
 
-            const FormatSet& compatibleViewFormats = texture->GetViewFormats();
-            if (compatibleViewFormats[viewFormat]) {
-                // Validation of this list is done on texture creation, so we don't need to
-                // handle the case where a format is in the list, but not compatible.
-                return {};
-            }
+bool IsTextureViewDimensionCompatibleWithTextureDimension(
+    wgpu::TextureViewDimension textureViewDimension,
+    wgpu::TextureDimension textureDimension) {
+    switch (textureViewDimension) {
+        case wgpu::TextureViewDimension::e2D:
+        case wgpu::TextureViewDimension::e2DArray:
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+            return textureDimension == wgpu::TextureDimension::e2D;
 
-            // |viewFormat| is not in the list. Check compatibility to generate an error message
-            // depending on whether it could be compatible, but needs to be explicitly listed,
-            // or it could never be compatible.
-            if (!format.ViewCompatibleWith(viewFormat)) {
-                // The view format isn't compatible with the format at all. Return an error
-                // that indicates this, in addition to reporting that it's missing from the
-                // list.
-                return DAWN_FORMAT_VALIDATION_ERROR(
-                    "The texture view format (%s) is not compatible with the "
-                    "texture format (%s)."
-                    "The formats must be compatible, and the view format "
-                    "must be passed in the list of view formats on texture creation.",
-                    viewFormat.format, format.format);
-            } else {
-                // The view format is compatible, but not in the list.
-                return DAWN_FORMAT_VALIDATION_ERROR(
-                    "%s was not created with the texture view format (%s) "
-                    "in the list of compatible view formats.",
-                    texture, viewFormat.format);
-            }
-            return {};
-        }
+        case wgpu::TextureViewDimension::e3D:
+            return textureDimension == wgpu::TextureDimension::e3D;
 
-        bool IsTextureViewDimensionCompatibleWithTextureDimension(
-            wgpu::TextureViewDimension textureViewDimension,
-            wgpu::TextureDimension textureDimension) {
-            switch (textureViewDimension) {
-                case wgpu::TextureViewDimension::e2D:
-                case wgpu::TextureViewDimension::e2DArray:
-                case wgpu::TextureViewDimension::Cube:
-                case wgpu::TextureViewDimension::CubeArray:
-                    return textureDimension == wgpu::TextureDimension::e2D;
+        case wgpu::TextureViewDimension::e1D:
+            return textureDimension == wgpu::TextureDimension::e1D;
 
-                case wgpu::TextureViewDimension::e3D:
-                    return textureDimension == wgpu::TextureDimension::e3D;
+        case wgpu::TextureViewDimension::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
 
-                case wgpu::TextureViewDimension::e1D:
-                    return textureDimension == wgpu::TextureDimension::e1D;
+bool IsArrayLayerValidForTextureViewDimension(wgpu::TextureViewDimension textureViewDimension,
+                                              uint32_t textureViewArrayLayer) {
+    switch (textureViewDimension) {
+        case wgpu::TextureViewDimension::e2D:
+        case wgpu::TextureViewDimension::e3D:
+            return textureViewArrayLayer == 1u;
+        case wgpu::TextureViewDimension::e2DArray:
+            return true;
+        case wgpu::TextureViewDimension::Cube:
+            return textureViewArrayLayer == 6u;
+        case wgpu::TextureViewDimension::CubeArray:
+            return textureViewArrayLayer % 6 == 0;
+        case wgpu::TextureViewDimension::e1D:
+            return textureViewArrayLayer == 1u;
 
-                case wgpu::TextureViewDimension::Undefined:
-                    break;
-            }
-            UNREACHABLE();
-        }
+        case wgpu::TextureViewDimension::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
 
-        bool IsArrayLayerValidForTextureViewDimension(
-            wgpu::TextureViewDimension textureViewDimension,
-            uint32_t textureViewArrayLayer) {
-            switch (textureViewDimension) {
-                case wgpu::TextureViewDimension::e2D:
-                case wgpu::TextureViewDimension::e3D:
-                    return textureViewArrayLayer == 1u;
-                case wgpu::TextureViewDimension::e2DArray:
-                    return true;
-                case wgpu::TextureViewDimension::Cube:
-                    return textureViewArrayLayer == 6u;
-                case wgpu::TextureViewDimension::CubeArray:
-                    return textureViewArrayLayer % 6 == 0;
-                case wgpu::TextureViewDimension::e1D:
-                    return textureViewArrayLayer == 1u;
+MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
+                               wgpu::TextureUsage usage,
+                               const Format* format) {
+    DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
+                    "The sample count (%u) of the texture is not supported.",
+                    descriptor->sampleCount);
 
-                case wgpu::TextureViewDimension::Undefined:
-                    break;
-            }
-            UNREACHABLE();
-        }
-
-        MaybeError ValidateSampleCount(const TextureDescriptor* descriptor,
-                                       wgpu::TextureUsage usage,
-                                       const Format* format) {
-            DAWN_INVALID_IF(!IsValidSampleCount(descriptor->sampleCount),
-                            "The sample count (%u) of the texture is not supported.",
-                            descriptor->sampleCount);
-
-            if (descriptor->sampleCount > 1) {
-                DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
-                                "The mip level count (%u) of a multisampled texture is not 1.",
-                                descriptor->mipLevelCount);
-
-                // Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
-                // Multisampled 2D array texture is not supported because on Metal it requires the
-                // version of macOS be greater than 10.14.
-                DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
-                                "The dimension (%s) of a multisampled texture is not 2D.",
-                                descriptor->dimension);
-
-                DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
-                                "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
-                                descriptor->size.depthOrArrayLayers);
-
-                DAWN_INVALID_IF(!format->supportsMultisample,
-                                "The texture format (%s) does not support multisampling.",
-                                format->format);
-
-                // Compressed formats are not renderable. They cannot support multisample.
-                ASSERT(!format->isCompressed);
-
-                DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
-                                "The sample count (%u) of a storage textures is not 1.",
-                                descriptor->sampleCount);
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateTextureViewDimensionCompatibility(
-            const TextureBase* texture,
-            const TextureViewDescriptor* descriptor) {
-            DAWN_INVALID_IF(
-                !IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
-                                                          descriptor->arrayLayerCount),
-                "The dimension (%s) of the texture view is not compatible with the layer count "
-                "(%u) of %s.",
-                descriptor->dimension, descriptor->arrayLayerCount, texture);
-
-            DAWN_INVALID_IF(
-                !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
-                                                                      texture->GetDimension()),
-                "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
-                "of %s.",
-                descriptor->dimension, texture->GetDimension(), texture);
-
-            DAWN_INVALID_IF(texture->GetSampleCount() > 1 &&
-                                descriptor->dimension != wgpu::TextureViewDimension::e2D,
-                            "The dimension (%s) of the multisampled texture view is not %s.",
-                            descriptor->dimension, wgpu::TextureViewDimension::e2D);
-
-            switch (descriptor->dimension) {
-                case wgpu::TextureViewDimension::Cube:
-                case wgpu::TextureViewDimension::CubeArray:
-                    DAWN_INVALID_IF(
-                        texture->GetSize().width != texture->GetSize().height,
-                        "A %s texture view is not compatible with %s because the texture's width "
-                        "(%u) and height (%u) are not equal.",
-                        descriptor->dimension, texture, texture->GetSize().width,
-                        texture->GetSize().height);
-                    break;
-
-                case wgpu::TextureViewDimension::e1D:
-                case wgpu::TextureViewDimension::e2D:
-                case wgpu::TextureViewDimension::e2DArray:
-                case wgpu::TextureViewDimension::e3D:
-                    break;
-
-                case wgpu::TextureViewDimension::Undefined:
-                    UNREACHABLE();
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateTextureSize(const DeviceBase* device,
-                                       const TextureDescriptor* descriptor,
-                                       const Format* format) {
-            ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
-                   descriptor->size.depthOrArrayLayers != 0);
-            const CombinedLimits& limits = device->GetLimits();
-            Extent3D maxExtent;
-            switch (descriptor->dimension) {
-                case wgpu::TextureDimension::e1D:
-                    maxExtent = {limits.v1.maxTextureDimension1D, 1, 1};
-                    break;
-                case wgpu::TextureDimension::e2D:
-                    maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
-                                 limits.v1.maxTextureArrayLayers};
-                    break;
-                case wgpu::TextureDimension::e3D:
-                    maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
-                                 limits.v1.maxTextureDimension3D};
-                    break;
-            }
-            DAWN_INVALID_IF(descriptor->size.width > maxExtent.width ||
-                                descriptor->size.height > maxExtent.height ||
-                                descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
-                            "Texture size (%s) exceeded maximum texture size (%s).",
-                            &descriptor->size, &maxExtent);
-
-            switch (descriptor->dimension) {
-                case wgpu::TextureDimension::e1D:
-                    DAWN_INVALID_IF(
-                        descriptor->mipLevelCount != 1,
-                        "Texture mip level count (%u) is more than 1 when its dimension is %s.",
-                        descriptor->mipLevelCount, wgpu::TextureDimension::e1D);
-                    break;
-                case wgpu::TextureDimension::e2D: {
-                    uint32_t maxMippedDimension =
-                        std::max(descriptor->size.width, descriptor->size.height);
-                    DAWN_INVALID_IF(
-                        Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
-                        "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
-                        descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
-                    break;
-                }
-                case wgpu::TextureDimension::e3D: {
-                    uint32_t maxMippedDimension = std::max(
-                        descriptor->size.width,
-                        std::max(descriptor->size.height, descriptor->size.depthOrArrayLayers));
-                    DAWN_INVALID_IF(
-                        Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
-                        "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
-                        descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
-                    break;
-                }
-            }
-
-            if (format->isCompressed) {
-                const TexelBlockInfo& blockInfo =
-                    format->GetAspectInfo(wgpu::TextureAspect::All).block;
-                DAWN_INVALID_IF(
-                    descriptor->size.width % blockInfo.width != 0 ||
-                        descriptor->size.height % blockInfo.height != 0,
-                    "The size (%s) of the texture is not a multiple of the block width (%u) and "
-                    "height (%u) of the texture format (%s).",
-                    &descriptor->size, blockInfo.width, blockInfo.height, format->format);
-            }
-
-            return {};
-        }
-
-        MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor,
-                                        wgpu::TextureUsage usage,
-                                        const Format* format) {
-            DAWN_TRY(dawn::native::ValidateTextureUsage(usage));
-
-            DAWN_INVALID_IF(usage == wgpu::TextureUsage::None, "The texture usage must not be 0.");
-
-            constexpr wgpu::TextureUsage kValidCompressedUsages =
-                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
-                wgpu::TextureUsage::CopyDst;
-            DAWN_INVALID_IF(
-                format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
-                "The texture usage (%s) is incompatible with the compressed texture format (%s).",
-                usage, format->format);
-
-            DAWN_INVALID_IF(
-                !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
-                "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
-                "format (%s).",
-                usage, wgpu::TextureUsage::RenderAttachment, format->format);
-
-            DAWN_INVALID_IF(
-                descriptor->dimension != wgpu::TextureDimension::e2D &&
-                    (usage & wgpu::TextureUsage::RenderAttachment),
-                "The texture usage (%s) includes %s, which is incompatible with the texture "
-                "dimension (%s).",
-                usage, wgpu::TextureUsage::RenderAttachment, descriptor->dimension);
-
-            DAWN_INVALID_IF(
-                !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
-                "The texture usage (%s) includes %s, which is incompatible with the format (%s).",
-                usage, wgpu::TextureUsage::StorageBinding, format->format);
-
-            // Only allows simple readonly texture usages.
-            constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
-                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
-            DAWN_INVALID_IF(
-                format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
-                "The texture usage (%s) is incompatible with the multi-planar format (%s).", usage,
-                format->format);
-
-            return {};
-        }
-
-    }  // anonymous namespace
-
-    MaybeError ValidateTextureDescriptor(const DeviceBase* device,
-                                         const TextureDescriptor* descriptor) {
-        DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
-                                     wgpu::SType::DawnTextureInternalUsageDescriptor));
-
-        const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &internalUsageDesc);
-
-        DAWN_INVALID_IF(
-            internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
-            "The dawn-internal-usages feature is not enabled");
-
-        const Format* format;
-        DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
-
-        for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
-            DAWN_TRY_CONTEXT(
-                ValidateTextureViewFormatCompatibility(device, *format, descriptor->viewFormats[i]),
-                "validating viewFormats[%u]", i);
-        }
-
-        wgpu::TextureUsage usage = descriptor->usage;
-        if (internalUsageDesc != nullptr) {
-            usage |= internalUsageDesc->internalUsage;
-        }
-
-        DAWN_TRY(ValidateTextureUsage(descriptor, usage, format));
-        DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
-        DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
-
-        DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
-                            descriptor->size.depthOrArrayLayers == 0 ||
-                            descriptor->mipLevelCount == 0,
-                        "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
+    if (descriptor->sampleCount > 1) {
+        DAWN_INVALID_IF(descriptor->mipLevelCount > 1,
+                        "The mip level count (%u) of a multisampled texture is not 1.",
                         descriptor->mipLevelCount);
 
-        DAWN_INVALID_IF(
-            descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
-            "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
-            descriptor->dimension, format->format);
+        // Multisampled 1D and 3D textures are not supported in D3D12/Metal/Vulkan.
+        // Multisampled 2D array texture is not supported because on Metal it requires the
+        // version of macOS be greater than 10.14.
+        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                        "The dimension (%s) of a multisampled texture is not 2D.",
+                        descriptor->dimension);
 
-        // Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
-        // doesn't support depth/stencil formats on 3D textures.
-        DAWN_INVALID_IF(
-            descriptor->dimension != wgpu::TextureDimension::e2D &&
-                (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
-            "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
-            descriptor->dimension, format->format);
+        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers > 1,
+                        "The depthOrArrayLayers (%u) of a multisampled texture is not 1.",
+                        descriptor->size.depthOrArrayLayers);
 
-        DAWN_TRY(ValidateTextureSize(device, descriptor, format));
+        DAWN_INVALID_IF(!format->supportsMultisample,
+                        "The texture format (%s) does not support multisampling.", format->format);
 
-        // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
-        // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
-        DAWN_INVALID_IF(
-            device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
-                descriptor->mipLevelCount > 1 &&
-                device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
-            "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
-            "disabled on Metal.");
+        // Compressed formats are not renderable. They cannot support multisample.
+        ASSERT(!format->isCompressed);
 
-        return {};
+        DAWN_INVALID_IF(usage & wgpu::TextureUsage::StorageBinding,
+                        "The sample count (%u) of a storage textures is not 1.",
+                        descriptor->sampleCount);
     }
 
-    MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
-                                             const TextureBase* texture,
-                                             const TextureViewDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+    return {};
+}
 
-        // Parent texture should have been already validated.
-        ASSERT(texture);
-        ASSERT(!texture->IsError());
+MaybeError ValidateTextureViewDimensionCompatibility(const TextureBase* texture,
+                                                     const TextureViewDescriptor* descriptor) {
+    DAWN_INVALID_IF(!IsArrayLayerValidForTextureViewDimension(descriptor->dimension,
+                                                              descriptor->arrayLayerCount),
+                    "The dimension (%s) of the texture view is not compatible with the layer count "
+                    "(%u) of %s.",
+                    descriptor->dimension, descriptor->arrayLayerCount, texture);
 
-        DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
-        DAWN_TRY(ValidateTextureFormat(descriptor->format));
-        DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
+    DAWN_INVALID_IF(
+        !IsTextureViewDimensionCompatibleWithTextureDimension(descriptor->dimension,
+                                                              texture->GetDimension()),
+        "The dimension (%s) of the texture view is not compatible with the dimension (%s) "
+        "of %s.",
+        descriptor->dimension, texture->GetDimension(), texture);
 
+    DAWN_INVALID_IF(
+        texture->GetSampleCount() > 1 && descriptor->dimension != wgpu::TextureViewDimension::e2D,
+        "The dimension (%s) of the multisampled texture view is not %s.", descriptor->dimension,
+        wgpu::TextureViewDimension::e2D);
+
+    switch (descriptor->dimension) {
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+            DAWN_INVALID_IF(
+                texture->GetSize().width != texture->GetSize().height,
+                "A %s texture view is not compatible with %s because the texture's width "
+                "(%u) and height (%u) are not equal.",
+                descriptor->dimension, texture, texture->GetSize().width,
+                texture->GetSize().height);
+            break;
+
+        case wgpu::TextureViewDimension::e1D:
+        case wgpu::TextureViewDimension::e2D:
+        case wgpu::TextureViewDimension::e2DArray:
+        case wgpu::TextureViewDimension::e3D:
+            break;
+
+        case wgpu::TextureViewDimension::Undefined:
+            UNREACHABLE();
+    }
+
+    return {};
+}
+
+MaybeError ValidateTextureSize(const DeviceBase* device,
+                               const TextureDescriptor* descriptor,
+                               const Format* format) {
+    ASSERT(descriptor->size.width != 0 && descriptor->size.height != 0 &&
+           descriptor->size.depthOrArrayLayers != 0);
+    const CombinedLimits& limits = device->GetLimits();
+    Extent3D maxExtent;
+    switch (descriptor->dimension) {
+        case wgpu::TextureDimension::e1D:
+            maxExtent = {limits.v1.maxTextureDimension1D, 1, 1};
+            break;
+        case wgpu::TextureDimension::e2D:
+            maxExtent = {limits.v1.maxTextureDimension2D, limits.v1.maxTextureDimension2D,
+                         limits.v1.maxTextureArrayLayers};
+            break;
+        case wgpu::TextureDimension::e3D:
+            maxExtent = {limits.v1.maxTextureDimension3D, limits.v1.maxTextureDimension3D,
+                         limits.v1.maxTextureDimension3D};
+            break;
+    }
+    DAWN_INVALID_IF(
+        descriptor->size.width > maxExtent.width || descriptor->size.height > maxExtent.height ||
+            descriptor->size.depthOrArrayLayers > maxExtent.depthOrArrayLayers,
+        "Texture size (%s) exceeded maximum texture size (%s).", &descriptor->size, &maxExtent);
+
+    switch (descriptor->dimension) {
+        case wgpu::TextureDimension::e1D:
+            DAWN_INVALID_IF(descriptor->mipLevelCount != 1,
+                            "Texture mip level count (%u) is more than 1 when its dimension is %s.",
+                            descriptor->mipLevelCount, wgpu::TextureDimension::e1D);
+            break;
+        case wgpu::TextureDimension::e2D: {
+            uint32_t maxMippedDimension = std::max(descriptor->size.width, descriptor->size.height);
+            DAWN_INVALID_IF(
+                Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+                "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+                descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+            break;
+        }
+        case wgpu::TextureDimension::e3D: {
+            uint32_t maxMippedDimension =
+                std::max(descriptor->size.width,
+                         std::max(descriptor->size.height, descriptor->size.depthOrArrayLayers));
+            DAWN_INVALID_IF(
+                Log2(maxMippedDimension) + 1 < descriptor->mipLevelCount,
+                "Texture mip level count (%u) exceeds the maximum (%u) for its size (%s).",
+                descriptor->mipLevelCount, Log2(maxMippedDimension) + 1, &descriptor->size);
+            break;
+        }
+    }
+
+    if (format->isCompressed) {
+        const TexelBlockInfo& blockInfo = format->GetAspectInfo(wgpu::TextureAspect::All).block;
+        DAWN_INVALID_IF(
+            descriptor->size.width % blockInfo.width != 0 ||
+                descriptor->size.height % blockInfo.height != 0,
+            "The size (%s) of the texture is not a multiple of the block width (%u) and "
+            "height (%u) of the texture format (%s).",
+            &descriptor->size, blockInfo.width, blockInfo.height, format->format);
+    }
+
+    return {};
+}
+
+MaybeError ValidateTextureUsage(const TextureDescriptor* descriptor,
+                                wgpu::TextureUsage usage,
+                                const Format* format) {
+    DAWN_TRY(dawn::native::ValidateTextureUsage(usage));
+
+    DAWN_INVALID_IF(usage == wgpu::TextureUsage::None, "The texture usage must not be 0.");
+
+    constexpr wgpu::TextureUsage kValidCompressedUsages = wgpu::TextureUsage::TextureBinding |
+                                                          wgpu::TextureUsage::CopySrc |
+                                                          wgpu::TextureUsage::CopyDst;
+    DAWN_INVALID_IF(
+        format->isCompressed && !IsSubset(usage, kValidCompressedUsages),
+        "The texture usage (%s) is incompatible with the compressed texture format (%s).", usage,
+        format->format);
+
+    DAWN_INVALID_IF(
+        !format->isRenderable && (usage & wgpu::TextureUsage::RenderAttachment),
+        "The texture usage (%s) includes %s, which is incompatible with the non-renderable "
+        "format (%s).",
+        usage, wgpu::TextureUsage::RenderAttachment, format->format);
+
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D &&
+                        (usage & wgpu::TextureUsage::RenderAttachment),
+                    "The texture usage (%s) includes %s, which is incompatible with the texture "
+                    "dimension (%s).",
+                    usage, wgpu::TextureUsage::RenderAttachment, descriptor->dimension);
+
+    DAWN_INVALID_IF(
+        !format->supportsStorageUsage && (usage & wgpu::TextureUsage::StorageBinding),
+        "The texture usage (%s) includes %s, which is incompatible with the format (%s).", usage,
+        wgpu::TextureUsage::StorageBinding, format->format);
+
+    // Only allows simple readonly texture usages.
+    constexpr wgpu::TextureUsage kValidMultiPlanarUsages =
+        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc;
+    DAWN_INVALID_IF(format->IsMultiPlanar() && !IsSubset(usage, kValidMultiPlanarUsages),
+                    "The texture usage (%s) is incompatible with the multi-planar format (%s).",
+                    usage, format->format);
+
+    return {};
+}
+
+}  // anonymous namespace
+
+MaybeError ValidateTextureDescriptor(const DeviceBase* device,
+                                     const TextureDescriptor* descriptor) {
+    DAWN_TRY(ValidateSingleSType(descriptor->nextInChain,
+                                 wgpu::SType::DawnTextureInternalUsageDescriptor));
+
+    const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &internalUsageDesc);
+
+    DAWN_INVALID_IF(
+        internalUsageDesc != nullptr && !device->IsFeatureEnabled(Feature::DawnInternalUsages),
+        "The dawn-internal-usages feature is not enabled");
+
+    const Format* format;
+    DAWN_TRY_ASSIGN(format, device->GetInternalFormat(descriptor->format));
+
+    for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
+        DAWN_TRY_CONTEXT(
+            ValidateTextureViewFormatCompatibility(device, *format, descriptor->viewFormats[i]),
+            "validating viewFormats[%u]", i);
+    }
+
+    wgpu::TextureUsage usage = descriptor->usage;
+    if (internalUsageDesc != nullptr) {
+        usage |= internalUsageDesc->internalUsage;
+    }
+
+    DAWN_TRY(ValidateTextureUsage(descriptor, usage, format));
+    DAWN_TRY(ValidateTextureDimension(descriptor->dimension));
+    DAWN_TRY(ValidateSampleCount(descriptor, usage, format));
+
+    DAWN_INVALID_IF(descriptor->size.width == 0 || descriptor->size.height == 0 ||
+                        descriptor->size.depthOrArrayLayers == 0 || descriptor->mipLevelCount == 0,
+                    "The texture size (%s) or mipLevelCount (%u) is empty.", &descriptor->size,
+                    descriptor->mipLevelCount);
+
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D && format->isCompressed,
+                    "The dimension (%s) of a texture with a compressed format (%s) is not 2D.",
+                    descriptor->dimension, format->format);
+
+    // Depth/stencil formats are valid for 2D textures only. Metal has this limit. And D3D12
+    // doesn't support depth/stencil formats on 3D textures.
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D &&
+                        (format->aspects & (Aspect::Depth | Aspect::Stencil)) != 0,
+                    "The dimension (%s) of a texture with a depth/stencil format (%s) is not 2D.",
+                    descriptor->dimension, format->format);
+
+    DAWN_TRY(ValidateTextureSize(device, descriptor, format));
+
+    // TODO(crbug.com/dawn/838): Implement a workaround for this issue.
+    // Readbacks from the non-zero mip of a stencil texture may contain garbage data.
+    DAWN_INVALID_IF(device->IsToggleEnabled(Toggle::DisallowUnsafeAPIs) && format->HasStencil() &&
+                        descriptor->mipLevelCount > 1 &&
+                        device->GetAdapter()->GetBackendType() == wgpu::BackendType::Metal,
+                    "https://crbug.com/dawn/838: Stencil textures with more than one mip level are "
+                    "disabled on Metal.");
+
+    return {};
+}
+
+MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+                                         const TextureBase* texture,
+                                         const TextureViewDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->nextInChain != nullptr, "nextInChain must be nullptr.");
+
+    // Parent texture should have been already validated.
+    ASSERT(texture);
+    ASSERT(!texture->IsError());
+
+    DAWN_TRY(ValidateTextureViewDimension(descriptor->dimension));
+    DAWN_TRY(ValidateTextureFormat(descriptor->format));
+    DAWN_TRY(ValidateTextureAspect(descriptor->aspect));
+
+    const Format& format = texture->GetFormat();
+    const Format* viewFormat;
+    DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(descriptor->format));
+
+    DAWN_INVALID_IF(SelectFormatAspects(format, descriptor->aspect) == Aspect::None,
+                    "Texture format (%s) does not have the texture view's selected aspect (%s).",
+                    format.format, descriptor->aspect);
+
+    DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
+                    "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
+                    descriptor->arrayLayerCount, descriptor->mipLevelCount);
+
+    DAWN_INVALID_IF(
+        uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
+            uint64_t(texture->GetArrayLayers()),
+        "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
+        "texture's array layer count (%u).",
+        descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
+
+    DAWN_INVALID_IF(
+        uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
+            uint64_t(texture->GetNumMipLevels()),
+        "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
+        "texture's mip level count (%u).",
+        descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
+
+    DAWN_TRY(ValidateCanViewTextureAs(device, texture, *viewFormat, descriptor->aspect));
+    DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
+
+    return {};
+}
+
+ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
+    const TextureBase* texture,
+    const TextureViewDescriptor* descriptor) {
+    ASSERT(texture);
+
+    TextureViewDescriptor desc = {};
+    if (descriptor) {
+        desc = *descriptor;
+    }
+
+    // The default value for the view dimension depends on the texture's dimension with a
+    // special case for 2DArray being chosen automatically if arrayLayerCount is unspecified.
+    if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
+        switch (texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D:
+                desc.dimension = wgpu::TextureViewDimension::e1D;
+                break;
+
+            case wgpu::TextureDimension::e2D:
+                desc.dimension = wgpu::TextureViewDimension::e2D;
+                break;
+
+            case wgpu::TextureDimension::e3D:
+                desc.dimension = wgpu::TextureViewDimension::e3D;
+                break;
+        }
+    }
+
+    if (desc.format == wgpu::TextureFormat::Undefined) {
         const Format& format = texture->GetFormat();
-        const Format* viewFormat;
-        DAWN_TRY_ASSIGN(viewFormat, device->GetInternalFormat(descriptor->format));
 
-        DAWN_INVALID_IF(
-            SelectFormatAspects(format, descriptor->aspect) == Aspect::None,
-            "Texture format (%s) does not have the texture view's selected aspect (%s).",
-            format.format, descriptor->aspect);
+        // Check the aspect since |SelectFormatAspects| assumes a valid aspect.
+        // Creation would have failed validation later since the aspect is invalid.
+        DAWN_TRY(ValidateTextureAspect(desc.aspect));
 
-        DAWN_INVALID_IF(descriptor->arrayLayerCount == 0 || descriptor->mipLevelCount == 0,
-                        "The texture view's arrayLayerCount (%u) or mipLevelCount (%u) is zero.",
-                        descriptor->arrayLayerCount, descriptor->mipLevelCount);
-
-        DAWN_INVALID_IF(
-            uint64_t(descriptor->baseArrayLayer) + uint64_t(descriptor->arrayLayerCount) >
-                uint64_t(texture->GetArrayLayers()),
-            "Texture view array layer range (baseArrayLayer: %u, arrayLayerCount: %u) exceeds the "
-            "texture's array layer count (%u).",
-            descriptor->baseArrayLayer, descriptor->arrayLayerCount, texture->GetArrayLayers());
-
-        DAWN_INVALID_IF(
-            uint64_t(descriptor->baseMipLevel) + uint64_t(descriptor->mipLevelCount) >
-                uint64_t(texture->GetNumMipLevels()),
-            "Texture view mip level range (baseMipLevel: %u, mipLevelCount: %u) exceeds the "
-            "texture's mip level count (%u).",
-            descriptor->baseMipLevel, descriptor->mipLevelCount, texture->GetNumMipLevels());
-
-        DAWN_TRY(ValidateCanViewTextureAs(device, texture, *viewFormat, descriptor->aspect));
-        DAWN_TRY(ValidateTextureViewDimensionCompatibility(texture, descriptor));
-
-        return {};
+        Aspect aspects = SelectFormatAspects(format, desc.aspect);
+        if (HasOneBit(aspects)) {
+            desc.format = format.GetAspectInfo(aspects).format;
+        } else {
+            desc.format = format.format;
+        }
     }
-
-    ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
-        const TextureBase* texture,
-        const TextureViewDescriptor* descriptor) {
-        ASSERT(texture);
-
-        TextureViewDescriptor desc = {};
-        if (descriptor) {
-            desc = *descriptor;
-        }
-
-        // The default value for the view dimension depends on the texture's dimension with a
-        // special case for 2DArray being chosen automatically if arrayLayerCount is unspecified.
-        if (desc.dimension == wgpu::TextureViewDimension::Undefined) {
-            switch (texture->GetDimension()) {
-                case wgpu::TextureDimension::e1D:
-                    desc.dimension = wgpu::TextureViewDimension::e1D;
-                    break;
-
-                case wgpu::TextureDimension::e2D:
-                    desc.dimension = wgpu::TextureViewDimension::e2D;
-                    break;
-
-                case wgpu::TextureDimension::e3D:
-                    desc.dimension = wgpu::TextureViewDimension::e3D;
-                    break;
-            }
-        }
-
-        if (desc.format == wgpu::TextureFormat::Undefined) {
-            const Format& format = texture->GetFormat();
-
-            // Check the aspect since |SelectFormatAspects| assumes a valid aspect.
-            // Creation would have failed validation later since the aspect is invalid.
-            DAWN_TRY(ValidateTextureAspect(desc.aspect));
-
-            Aspect aspects = SelectFormatAspects(format, desc.aspect);
-            if (HasOneBit(aspects)) {
-                desc.format = format.GetAspectInfo(aspects).format;
-            } else {
-                desc.format = format.format;
-            }
-        }
-        if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
-            switch (desc.dimension) {
-                case wgpu::TextureViewDimension::e1D:
-                case wgpu::TextureViewDimension::e2D:
-                case wgpu::TextureViewDimension::e3D:
-                    desc.arrayLayerCount = 1;
-                    break;
-                case wgpu::TextureViewDimension::Cube:
-                    desc.arrayLayerCount = 6;
-                    break;
-                case wgpu::TextureViewDimension::e2DArray:
-                case wgpu::TextureViewDimension::CubeArray:
-                    desc.arrayLayerCount = texture->GetArrayLayers() - desc.baseArrayLayer;
-                    break;
-                default:
-                    // We don't put UNREACHABLE() here because we validate enums only after this
-                    // function sets default values. Otherwise, the UNREACHABLE() will be hit.
-                    break;
-            }
-        }
-
-        if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
-            desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
-        }
-        return desc;
-    }
-
-    // WebGPU only supports sample counts of 1 and 4. We could expand to more based on
-    // platform support, but it would probably be a feature.
-    bool IsValidSampleCount(uint32_t sampleCount) {
-        switch (sampleCount) {
-            case 1:
-            case 4:
-                return true;
-
+    if (desc.arrayLayerCount == wgpu::kArrayLayerCountUndefined) {
+        switch (desc.dimension) {
+            case wgpu::TextureViewDimension::e1D:
+            case wgpu::TextureViewDimension::e2D:
+            case wgpu::TextureViewDimension::e3D:
+                desc.arrayLayerCount = 1;
+                break;
+            case wgpu::TextureViewDimension::Cube:
+                desc.arrayLayerCount = 6;
+                break;
+            case wgpu::TextureViewDimension::e2DArray:
+            case wgpu::TextureViewDimension::CubeArray:
+                desc.arrayLayerCount = texture->GetArrayLayers() - desc.baseArrayLayer;
+                break;
             default:
-                return false;
+                // We don't put UNREACHABLE() here because we validate enums only after this
+                // function sets default values. Otherwise, the UNREACHABLE() will be hit.
+                break;
         }
     }
 
-    // TextureBase
+    if (desc.mipLevelCount == wgpu::kMipLevelCountUndefined) {
+        desc.mipLevelCount = texture->GetNumMipLevels() - desc.baseMipLevel;
+    }
+    return desc;
+}
 
-    TextureBase::TextureBase(DeviceBase* device,
-                             const TextureDescriptor* descriptor,
-                             TextureState state)
-        : ApiObjectBase(device, descriptor->label),
-          mDimension(descriptor->dimension),
-          mFormat(device->GetValidInternalFormat(descriptor->format)),
-          mSize(descriptor->size),
-          mMipLevelCount(descriptor->mipLevelCount),
-          mSampleCount(descriptor->sampleCount),
-          mUsage(descriptor->usage),
-          mInternalUsage(mUsage),
-          mState(state) {
-        uint32_t subresourceCount =
-            mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
-        mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
+// WebGPU only supports sample counts of 1 and 4. We could expand to more based on
+// platform support, but it would probably be a feature.
+bool IsValidSampleCount(uint32_t sampleCount) {
+    switch (sampleCount) {
+        case 1:
+        case 4:
+            return true;
 
-        for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
-            if (descriptor->viewFormats[i] == descriptor->format) {
-                // Skip our own format, so the backends don't allocate the texture for
-                // reinterpretation if it's not needed.
-                continue;
-            }
-            mViewFormats[device->GetValidInternalFormat(descriptor->viewFormats[i])] = true;
+        default:
+            return false;
+    }
+}
+
+// TextureBase
+
+TextureBase::TextureBase(DeviceBase* device,
+                         const TextureDescriptor* descriptor,
+                         TextureState state)
+    : ApiObjectBase(device, descriptor->label),
+      mDimension(descriptor->dimension),
+      mFormat(device->GetValidInternalFormat(descriptor->format)),
+      mSize(descriptor->size),
+      mMipLevelCount(descriptor->mipLevelCount),
+      mSampleCount(descriptor->sampleCount),
+      mUsage(descriptor->usage),
+      mInternalUsage(mUsage),
+      mState(state) {
+    uint32_t subresourceCount = mMipLevelCount * GetArrayLayers() * GetAspectCount(mFormat.aspects);
+    mIsSubresourceContentInitializedAtIndex = std::vector<bool>(subresourceCount, false);
+
+    for (uint32_t i = 0; i < descriptor->viewFormatCount; ++i) {
+        if (descriptor->viewFormats[i] == descriptor->format) {
+            // Skip our own format, so the backends don't allocate the texture for
+            // reinterpretation if it's not needed.
+            continue;
         }
-
-        const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
-        FindInChain(descriptor->nextInChain, &internalUsageDesc);
-        if (internalUsageDesc != nullptr) {
-            mInternalUsage |= internalUsageDesc->internalUsage;
-        }
-        TrackInDevice();
+        mViewFormats[device->GetValidInternalFormat(descriptor->viewFormats[i])] = true;
     }
 
-    static Format kUnusedFormat;
+    const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+    FindInChain(descriptor->nextInChain, &internalUsageDesc);
+    if (internalUsageDesc != nullptr) {
+        mInternalUsage |= internalUsageDesc->internalUsage;
+    }
+    TrackInDevice();
+}
 
-    TextureBase::TextureBase(DeviceBase* device, TextureState state)
-        : ApiObjectBase(device, kLabelNotImplemented), mFormat(kUnusedFormat), mState(state) {
-        TrackInDevice();
-    }
+static Format kUnusedFormat;
 
-    TextureBase::TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
-    }
+TextureBase::TextureBase(DeviceBase* device, TextureState state)
+    : ApiObjectBase(device, kLabelNotImplemented), mFormat(kUnusedFormat), mState(state) {
+    TrackInDevice();
+}
 
-    void TextureBase::DestroyImpl() {
-        mState = TextureState::Destroyed;
-    }
+TextureBase::TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {}
 
-    // static
-    TextureBase* TextureBase::MakeError(DeviceBase* device) {
-        return new TextureBase(device, ObjectBase::kError);
-    }
+void TextureBase::DestroyImpl() {
+    mState = TextureState::Destroyed;
+}
 
-    ObjectType TextureBase::GetType() const {
-        return ObjectType::Texture;
-    }
+// static
+TextureBase* TextureBase::MakeError(DeviceBase* device) {
+    return new TextureBase(device, ObjectBase::kError);
+}
 
-    wgpu::TextureDimension TextureBase::GetDimension() const {
-        ASSERT(!IsError());
-        return mDimension;
-    }
+ObjectType TextureBase::GetType() const {
+    return ObjectType::Texture;
+}
 
-    const Format& TextureBase::GetFormat() const {
-        ASSERT(!IsError());
-        return mFormat;
-    }
-    const FormatSet& TextureBase::GetViewFormats() const {
-        ASSERT(!IsError());
-        return mViewFormats;
-    }
-    const Extent3D& TextureBase::GetSize() const {
-        ASSERT(!IsError());
-        return mSize;
-    }
-    uint32_t TextureBase::GetWidth() const {
-        ASSERT(!IsError());
-        return mSize.width;
-    }
-    uint32_t TextureBase::GetHeight() const {
-        ASSERT(!IsError());
-        return mSize.height;
-    }
-    uint32_t TextureBase::GetDepth() const {
-        ASSERT(!IsError());
-        ASSERT(mDimension == wgpu::TextureDimension::e3D);
-        return mSize.depthOrArrayLayers;
-    }
-    uint32_t TextureBase::GetArrayLayers() const {
-        ASSERT(!IsError());
-        if (mDimension == wgpu::TextureDimension::e3D) {
-            return 1;
-        }
-        return mSize.depthOrArrayLayers;
-    }
-    uint32_t TextureBase::GetNumMipLevels() const {
-        ASSERT(!IsError());
-        return mMipLevelCount;
-    }
-    SubresourceRange TextureBase::GetAllSubresources() const {
-        ASSERT(!IsError());
-        return {mFormat.aspects, {0, GetArrayLayers()}, {0, mMipLevelCount}};
-    }
-    uint32_t TextureBase::GetSampleCount() const {
-        ASSERT(!IsError());
-        return mSampleCount;
-    }
-    uint32_t TextureBase::GetSubresourceCount() const {
-        ASSERT(!IsError());
-        return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
-    }
-    wgpu::TextureUsage TextureBase::GetUsage() const {
-        ASSERT(!IsError());
-        return mUsage;
-    }
-    wgpu::TextureUsage TextureBase::GetInternalUsage() const {
-        ASSERT(!IsError());
-        return mInternalUsage;
-    }
+wgpu::TextureDimension TextureBase::GetDimension() const {
+    ASSERT(!IsError());
+    return mDimension;
+}
 
-    TextureBase::TextureState TextureBase::GetTextureState() const {
-        ASSERT(!IsError());
-        return mState;
+const Format& TextureBase::GetFormat() const {
+    ASSERT(!IsError());
+    return mFormat;
+}
+const FormatSet& TextureBase::GetViewFormats() const {
+    ASSERT(!IsError());
+    return mViewFormats;
+}
+const Extent3D& TextureBase::GetSize() const {
+    ASSERT(!IsError());
+    return mSize;
+}
+uint32_t TextureBase::GetWidth() const {
+    ASSERT(!IsError());
+    return mSize.width;
+}
+uint32_t TextureBase::GetHeight() const {
+    ASSERT(!IsError());
+    return mSize.height;
+}
+uint32_t TextureBase::GetDepth() const {
+    ASSERT(!IsError());
+    ASSERT(mDimension == wgpu::TextureDimension::e3D);
+    return mSize.depthOrArrayLayers;
+}
+uint32_t TextureBase::GetArrayLayers() const {
+    ASSERT(!IsError());
+    if (mDimension == wgpu::TextureDimension::e3D) {
+        return 1;
     }
+    return mSize.depthOrArrayLayers;
+}
+uint32_t TextureBase::GetNumMipLevels() const {
+    ASSERT(!IsError());
+    return mMipLevelCount;
+}
+SubresourceRange TextureBase::GetAllSubresources() const {
+    ASSERT(!IsError());
+    return {mFormat.aspects, {0, GetArrayLayers()}, {0, mMipLevelCount}};
+}
+uint32_t TextureBase::GetSampleCount() const {
+    ASSERT(!IsError());
+    return mSampleCount;
+}
+uint32_t TextureBase::GetSubresourceCount() const {
+    ASSERT(!IsError());
+    return static_cast<uint32_t>(mIsSubresourceContentInitializedAtIndex.size());
+}
+wgpu::TextureUsage TextureBase::GetUsage() const {
+    ASSERT(!IsError());
+    return mUsage;
+}
+wgpu::TextureUsage TextureBase::GetInternalUsage() const {
+    ASSERT(!IsError());
+    return mInternalUsage;
+}
 
-    uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
-                                              uint32_t arraySlice,
-                                              Aspect aspect) const {
-        ASSERT(HasOneBit(aspect));
-        return mipLevel +
-               GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
-    }
+TextureBase::TextureState TextureBase::GetTextureState() const {
+    ASSERT(!IsError());
+    return mState;
+}
 
-    bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
-        ASSERT(!IsError());
-        for (Aspect aspect : IterateEnumMask(range.aspects)) {
-            for (uint32_t arrayLayer = range.baseArrayLayer;
-                 arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
-                for (uint32_t mipLevel = range.baseMipLevel;
-                     mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
-                    uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
-                    ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
-                    if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
-                        return false;
-                    }
-                }
-            }
-        }
-        return true;
-    }
+uint32_t TextureBase::GetSubresourceIndex(uint32_t mipLevel,
+                                          uint32_t arraySlice,
+                                          Aspect aspect) const {
+    ASSERT(HasOneBit(aspect));
+    return mipLevel + GetNumMipLevels() * (arraySlice + GetArrayLayers() * GetAspectIndex(aspect));
+}
 
-    void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
-                                                         const SubresourceRange& range) {
-        ASSERT(!IsError());
-        for (Aspect aspect : IterateEnumMask(range.aspects)) {
-            for (uint32_t arrayLayer = range.baseArrayLayer;
-                 arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
-                for (uint32_t mipLevel = range.baseMipLevel;
-                     mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
-                    uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
-                    ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
-                    mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
+bool TextureBase::IsSubresourceContentInitialized(const SubresourceRange& range) const {
+    ASSERT(!IsError());
+    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+        for (uint32_t arrayLayer = range.baseArrayLayer;
+             arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+            for (uint32_t mipLevel = range.baseMipLevel;
+                 mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+                uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+                ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+                if (!mIsSubresourceContentInitializedAtIndex[subresourceIndex]) {
+                    return false;
                 }
             }
         }
     }
+    return true;
+}
 
-    MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
-        ASSERT(!IsError());
-        DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
-                        this);
-        return {};
-    }
-
-    bool TextureBase::IsMultisampledTexture() const {
-        ASSERT(!IsError());
-        return mSampleCount > 1;
-    }
-
-    Extent3D TextureBase::GetMipLevelVirtualSize(uint32_t level) const {
-        Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
-        if (mDimension == wgpu::TextureDimension::e1D) {
-            return extent;
+void TextureBase::SetIsSubresourceContentInitialized(bool isInitialized,
+                                                     const SubresourceRange& range) {
+    ASSERT(!IsError());
+    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+        for (uint32_t arrayLayer = range.baseArrayLayer;
+             arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+            for (uint32_t mipLevel = range.baseMipLevel;
+                 mipLevel < range.baseMipLevel + range.levelCount; ++mipLevel) {
+                uint32_t subresourceIndex = GetSubresourceIndex(mipLevel, arrayLayer, aspect);
+                ASSERT(subresourceIndex < mIsSubresourceContentInitializedAtIndex.size());
+                mIsSubresourceContentInitializedAtIndex[subresourceIndex] = isInitialized;
+            }
         }
+    }
+}
 
-        extent.height = std::max(mSize.height >> level, 1u);
-        if (mDimension == wgpu::TextureDimension::e2D) {
-            return extent;
-        }
+MaybeError TextureBase::ValidateCanUseInSubmitNow() const {
+    ASSERT(!IsError());
+    DAWN_INVALID_IF(mState == TextureState::Destroyed, "Destroyed texture %s used in a submit.",
+                    this);
+    return {};
+}
 
-        extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
+bool TextureBase::IsMultisampledTexture() const {
+    ASSERT(!IsError());
+    return mSampleCount > 1;
+}
+
+Extent3D TextureBase::GetMipLevelVirtualSize(uint32_t level) const {
+    Extent3D extent = {std::max(mSize.width >> level, 1u), 1u, 1u};
+    if (mDimension == wgpu::TextureDimension::e1D) {
         return extent;
     }
 
-    Extent3D TextureBase::GetMipLevelPhysicalSize(uint32_t level) const {
-        Extent3D extent = GetMipLevelVirtualSize(level);
-
-        // Compressed Textures will have paddings if their width or height is not a multiple of
-        // 4 at non-zero mipmap levels.
-        if (mFormat.isCompressed && level != 0) {
-            // If |level| is non-zero, then each dimension of |extent| is at most half of
-            // the max texture dimension. Computations here which add the block width/height
-            // to the extent cannot overflow.
-            const TexelBlockInfo& blockInfo = mFormat.GetAspectInfo(wgpu::TextureAspect::All).block;
-            extent.width = (extent.width + blockInfo.width - 1) / blockInfo.width * blockInfo.width;
-            extent.height =
-                (extent.height + blockInfo.height - 1) / blockInfo.height * blockInfo.height;
-        }
-
+    extent.height = std::max(mSize.height >> level, 1u);
+    if (mDimension == wgpu::TextureDimension::e2D) {
         return extent;
     }
 
-    Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
-                                                     const Origin3D& origin,
-                                                     const Extent3D& extent) const {
-        const Extent3D virtualSizeAtLevel = GetMipLevelVirtualSize(level);
-        ASSERT(origin.x <= virtualSizeAtLevel.width);
-        ASSERT(origin.y <= virtualSizeAtLevel.height);
-        uint32_t clampedCopyExtentWidth = (extent.width > virtualSizeAtLevel.width - origin.x)
-                                              ? (virtualSizeAtLevel.width - origin.x)
-                                              : extent.width;
-        uint32_t clampedCopyExtentHeight = (extent.height > virtualSizeAtLevel.height - origin.y)
-                                               ? (virtualSizeAtLevel.height - origin.y)
-                                               : extent.height;
-        return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
+    extent.depthOrArrayLayers = std::max(mSize.depthOrArrayLayers >> level, 1u);
+    return extent;
+}
+
+Extent3D TextureBase::GetMipLevelPhysicalSize(uint32_t level) const {
+    Extent3D extent = GetMipLevelVirtualSize(level);
+
+    // Compressed Textures will have paddings if their width or height is not a multiple of
+    // 4 at non-zero mipmap levels.
+    if (mFormat.isCompressed && level != 0) {
+        // If |level| is non-zero, then each dimension of |extent| is at most half of
+        // the max texture dimension. Computations here which add the block width/height
+        // to the extent cannot overflow.
+        const TexelBlockInfo& blockInfo = mFormat.GetAspectInfo(wgpu::TextureAspect::All).block;
+        extent.width = (extent.width + blockInfo.width - 1) / blockInfo.width * blockInfo.width;
+        extent.height =
+            (extent.height + blockInfo.height - 1) / blockInfo.height * blockInfo.height;
     }
 
-    ResultOrError<Ref<TextureViewBase>> TextureBase::CreateView(
-        const TextureViewDescriptor* descriptor) {
-        return GetDevice()->CreateTextureView(this, descriptor);
+    return extent;
+}
+
+Extent3D TextureBase::ClampToMipLevelVirtualSize(uint32_t level,
+                                                 const Origin3D& origin,
+                                                 const Extent3D& extent) const {
+    const Extent3D virtualSizeAtLevel = GetMipLevelVirtualSize(level);
+    ASSERT(origin.x <= virtualSizeAtLevel.width);
+    ASSERT(origin.y <= virtualSizeAtLevel.height);
+    uint32_t clampedCopyExtentWidth = (extent.width > virtualSizeAtLevel.width - origin.x)
+                                          ? (virtualSizeAtLevel.width - origin.x)
+                                          : extent.width;
+    uint32_t clampedCopyExtentHeight = (extent.height > virtualSizeAtLevel.height - origin.y)
+                                           ? (virtualSizeAtLevel.height - origin.y)
+                                           : extent.height;
+    return {clampedCopyExtentWidth, clampedCopyExtentHeight, extent.depthOrArrayLayers};
+}
+
+ResultOrError<Ref<TextureViewBase>> TextureBase::CreateView(
+    const TextureViewDescriptor* descriptor) {
+    return GetDevice()->CreateTextureView(this, descriptor);
+}
+
+TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
+    DeviceBase* device = GetDevice();
+
+    Ref<TextureViewBase> result;
+    if (device->ConsumedError(CreateView(descriptor), &result, "calling %s.CreateView(%s).", this,
+                              descriptor)) {
+        return TextureViewBase::MakeError(device);
     }
+    return result.Detach();
+}
 
-    TextureViewBase* TextureBase::APICreateView(const TextureViewDescriptor* descriptor) {
-        DeviceBase* device = GetDevice();
-
-        Ref<TextureViewBase> result;
-        if (device->ConsumedError(CreateView(descriptor), &result, "calling %s.CreateView(%s).",
-                                  this, descriptor)) {
-            return TextureViewBase::MakeError(device);
-        }
-        return result.Detach();
+void TextureBase::APIDestroy() {
+    if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
+        return;
     }
+    ASSERT(!IsError());
+    Destroy();
+}
 
-    void TextureBase::APIDestroy() {
-        if (GetDevice()->ConsumedError(ValidateDestroy(), "calling %s.Destroy().", this)) {
-            return;
-        }
-        ASSERT(!IsError());
-        Destroy();
-    }
+MaybeError TextureBase::ValidateDestroy() const {
+    DAWN_TRY(GetDevice()->ValidateObject(this));
+    return {};
+}
 
-    MaybeError TextureBase::ValidateDestroy() const {
-        DAWN_TRY(GetDevice()->ValidateObject(this));
-        return {};
-    }
+// TextureViewBase
 
-    // TextureViewBase
+TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
+    : ApiObjectBase(texture->GetDevice(), descriptor->label),
+      mTexture(texture),
+      mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
+      mDimension(descriptor->dimension),
+      mRange({ConvertViewAspect(mFormat, descriptor->aspect),
+              {descriptor->baseArrayLayer, descriptor->arrayLayerCount},
+              {descriptor->baseMipLevel, descriptor->mipLevelCount}}) {
+    TrackInDevice();
+}
 
-    TextureViewBase::TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor)
-        : ApiObjectBase(texture->GetDevice(), descriptor->label),
-          mTexture(texture),
-          mFormat(GetDevice()->GetValidInternalFormat(descriptor->format)),
-          mDimension(descriptor->dimension),
-          mRange({ConvertViewAspect(mFormat, descriptor->aspect),
-                  {descriptor->baseArrayLayer, descriptor->arrayLayerCount},
-                  {descriptor->baseMipLevel, descriptor->mipLevelCount}}) {
-        TrackInDevice();
-    }
+TextureViewBase::TextureViewBase(TextureBase* texture)
+    : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
+      mTexture(texture),
+      mFormat(kUnusedFormat) {
+    TrackInDevice();
+}
 
-    TextureViewBase::TextureViewBase(TextureBase* texture)
-        : ApiObjectBase(texture->GetDevice(), kLabelNotImplemented),
-          mTexture(texture),
-          mFormat(kUnusedFormat) {
-        TrackInDevice();
-    }
+TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
+    : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {}
 
-    TextureViewBase::TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag)
-        : ApiObjectBase(device, tag), mFormat(kUnusedFormat) {
-    }
+void TextureViewBase::DestroyImpl() {}
 
-    void TextureViewBase::DestroyImpl() {
-    }
+// static
+TextureViewBase* TextureViewBase::MakeError(DeviceBase* device) {
+    return new TextureViewBase(device, ObjectBase::kError);
+}
 
-    // static
-    TextureViewBase* TextureViewBase::MakeError(DeviceBase* device) {
-        return new TextureViewBase(device, ObjectBase::kError);
-    }
+ObjectType TextureViewBase::GetType() const {
+    return ObjectType::TextureView;
+}
 
-    ObjectType TextureViewBase::GetType() const {
-        return ObjectType::TextureView;
-    }
+const TextureBase* TextureViewBase::GetTexture() const {
+    ASSERT(!IsError());
+    return mTexture.Get();
+}
 
-    const TextureBase* TextureViewBase::GetTexture() const {
-        ASSERT(!IsError());
-        return mTexture.Get();
-    }
+TextureBase* TextureViewBase::GetTexture() {
+    ASSERT(!IsError());
+    return mTexture.Get();
+}
 
-    TextureBase* TextureViewBase::GetTexture() {
-        ASSERT(!IsError());
-        return mTexture.Get();
-    }
+Aspect TextureViewBase::GetAspects() const {
+    ASSERT(!IsError());
+    return mRange.aspects;
+}
 
-    Aspect TextureViewBase::GetAspects() const {
-        ASSERT(!IsError());
-        return mRange.aspects;
-    }
+const Format& TextureViewBase::GetFormat() const {
+    ASSERT(!IsError());
+    return mFormat;
+}
 
-    const Format& TextureViewBase::GetFormat() const {
-        ASSERT(!IsError());
-        return mFormat;
-    }
+wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
+    ASSERT(!IsError());
+    return mDimension;
+}
 
-    wgpu::TextureViewDimension TextureViewBase::GetDimension() const {
-        ASSERT(!IsError());
-        return mDimension;
-    }
+uint32_t TextureViewBase::GetBaseMipLevel() const {
+    ASSERT(!IsError());
+    return mRange.baseMipLevel;
+}
 
-    uint32_t TextureViewBase::GetBaseMipLevel() const {
-        ASSERT(!IsError());
-        return mRange.baseMipLevel;
-    }
+uint32_t TextureViewBase::GetLevelCount() const {
+    ASSERT(!IsError());
+    return mRange.levelCount;
+}
 
-    uint32_t TextureViewBase::GetLevelCount() const {
-        ASSERT(!IsError());
-        return mRange.levelCount;
-    }
+uint32_t TextureViewBase::GetBaseArrayLayer() const {
+    ASSERT(!IsError());
+    return mRange.baseArrayLayer;
+}
 
-    uint32_t TextureViewBase::GetBaseArrayLayer() const {
-        ASSERT(!IsError());
-        return mRange.baseArrayLayer;
-    }
+uint32_t TextureViewBase::GetLayerCount() const {
+    ASSERT(!IsError());
+    return mRange.layerCount;
+}
 
-    uint32_t TextureViewBase::GetLayerCount() const {
-        ASSERT(!IsError());
-        return mRange.layerCount;
-    }
-
-    const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
-        ASSERT(!IsError());
-        return mRange;
-    }
+const SubresourceRange& TextureViewBase::GetSubresourceRange() const {
+    ASSERT(!IsError());
+    return mRange;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Texture.h b/src/dawn/native/Texture.h
index 5b6bba4..9cf5f53 100644
--- a/src/dawn/native/Texture.h
+++ b/src/dawn/native/Texture.h
@@ -29,134 +29,132 @@
 
 namespace dawn::native {
 
-    MaybeError ValidateTextureDescriptor(const DeviceBase* device,
-                                         const TextureDescriptor* descriptor);
-    MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
-                                             const TextureBase* texture,
-                                             const TextureViewDescriptor* descriptor);
-    ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
-        const TextureBase* texture,
-        const TextureViewDescriptor* descriptor);
+MaybeError ValidateTextureDescriptor(const DeviceBase* device, const TextureDescriptor* descriptor);
+MaybeError ValidateTextureViewDescriptor(const DeviceBase* device,
+                                         const TextureBase* texture,
+                                         const TextureViewDescriptor* descriptor);
+ResultOrError<TextureViewDescriptor> GetTextureViewDescriptorWithDefaults(
+    const TextureBase* texture,
+    const TextureViewDescriptor* descriptor);
 
-    bool IsValidSampleCount(uint32_t sampleCount);
+bool IsValidSampleCount(uint32_t sampleCount);
 
-    static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
-        wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding |
-        kReadOnlyRenderAttachment;
+static constexpr wgpu::TextureUsage kReadOnlyTextureUsages =
+    wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment;
 
-    class TextureBase : public ApiObjectBase {
-      public:
-        enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
-        enum class ClearValue { Zero, NonZero };
-        TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
+class TextureBase : public ApiObjectBase {
+  public:
+    enum class TextureState { OwnedInternal, OwnedExternal, Destroyed };
+    enum class ClearValue { Zero, NonZero };
+    TextureBase(DeviceBase* device, const TextureDescriptor* descriptor, TextureState state);
 
-        static TextureBase* MakeError(DeviceBase* device);
+    static TextureBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        wgpu::TextureDimension GetDimension() const;
-        const Format& GetFormat() const;
-        const FormatSet& GetViewFormats() const;
-        const Extent3D& GetSize() const;
-        uint32_t GetWidth() const;
-        uint32_t GetHeight() const;
-        uint32_t GetDepth() const;
-        uint32_t GetArrayLayers() const;
-        uint32_t GetNumMipLevels() const;
-        SubresourceRange GetAllSubresources() const;
-        uint32_t GetSampleCount() const;
-        uint32_t GetSubresourceCount() const;
+    wgpu::TextureDimension GetDimension() const;
+    const Format& GetFormat() const;
+    const FormatSet& GetViewFormats() const;
+    const Extent3D& GetSize() const;
+    uint32_t GetWidth() const;
+    uint32_t GetHeight() const;
+    uint32_t GetDepth() const;
+    uint32_t GetArrayLayers() const;
+    uint32_t GetNumMipLevels() const;
+    SubresourceRange GetAllSubresources() const;
+    uint32_t GetSampleCount() const;
+    uint32_t GetSubresourceCount() const;
 
-        // |GetUsage| returns the usage with which the texture was created using the base WebGPU
-        // API. The dawn-internal-usages extension may add additional usages. |GetInternalUsage|
-        // returns the union of base usage and the usages added by the extension.
-        wgpu::TextureUsage GetUsage() const;
-        wgpu::TextureUsage GetInternalUsage() const;
+    // |GetUsage| returns the usage with which the texture was created using the base WebGPU
+    // API. The dawn-internal-usages extension may add additional usages. |GetInternalUsage|
+    // returns the union of base usage and the usages added by the extension.
+    wgpu::TextureUsage GetUsage() const;
+    wgpu::TextureUsage GetInternalUsage() const;
 
-        TextureState GetTextureState() const;
-        uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
-        bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
-        void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
+    TextureState GetTextureState() const;
+    uint32_t GetSubresourceIndex(uint32_t mipLevel, uint32_t arraySlice, Aspect aspect) const;
+    bool IsSubresourceContentInitialized(const SubresourceRange& range) const;
+    void SetIsSubresourceContentInitialized(bool isInitialized, const SubresourceRange& range);
 
-        MaybeError ValidateCanUseInSubmitNow() const;
+    MaybeError ValidateCanUseInSubmitNow() const;
 
-        bool IsMultisampledTexture() const;
+    bool IsMultisampledTexture() const;
 
-        // For a texture with non-block-compressed texture format, its physical size is always equal
-        // to its virtual size. For a texture with block compressed texture format, the physical
-        // size is the one with paddings if necessary, which is always a multiple of the block size
-        // and used in texture copying. The virtual size is the one without paddings, which is not
-        // required to be a multiple of the block size and used in texture sampling.
-        Extent3D GetMipLevelPhysicalSize(uint32_t level) const;
-        Extent3D GetMipLevelVirtualSize(uint32_t level) const;
-        Extent3D ClampToMipLevelVirtualSize(uint32_t level,
-                                            const Origin3D& origin,
-                                            const Extent3D& extent) const;
+    // For a texture with non-block-compressed texture format, its physical size is always equal
+    // to its virtual size. For a texture with block compressed texture format, the physical
+    // size is the one with paddings if necessary, which is always a multiple of the block size
+    // and used in texture copying. The virtual size is the one without paddings, which is not
+    // required to be a multiple of the block size and used in texture sampling.
+    Extent3D GetMipLevelPhysicalSize(uint32_t level) const;
+    Extent3D GetMipLevelVirtualSize(uint32_t level) const;
+    Extent3D ClampToMipLevelVirtualSize(uint32_t level,
+                                        const Origin3D& origin,
+                                        const Extent3D& extent) const;
 
-        ResultOrError<Ref<TextureViewBase>> CreateView(
-            const TextureViewDescriptor* descriptor = nullptr);
+    ResultOrError<Ref<TextureViewBase>> CreateView(
+        const TextureViewDescriptor* descriptor = nullptr);
 
-        // Dawn API
-        TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
-        void APIDestroy();
+    // Dawn API
+    TextureViewBase* APICreateView(const TextureViewDescriptor* descriptor = nullptr);
+    void APIDestroy();
 
-      protected:
-        // Constructor used only for mocking and testing.
-        TextureBase(DeviceBase* device, TextureState state);
-        void DestroyImpl() override;
+  protected:
+    // Constructor used only for mocking and testing.
+    TextureBase(DeviceBase* device, TextureState state);
+    void DestroyImpl() override;
 
-      private:
-        TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+  private:
+    TextureBase(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        MaybeError ValidateDestroy() const;
-        wgpu::TextureDimension mDimension;
-        const Format& mFormat;
-        FormatSet mViewFormats;
-        Extent3D mSize;
-        uint32_t mMipLevelCount;
-        uint32_t mSampleCount;
-        wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
-        wgpu::TextureUsage mInternalUsage = wgpu::TextureUsage::None;
-        TextureState mState;
+    MaybeError ValidateDestroy() const;
+    wgpu::TextureDimension mDimension;
+    const Format& mFormat;
+    FormatSet mViewFormats;
+    Extent3D mSize;
+    uint32_t mMipLevelCount;
+    uint32_t mSampleCount;
+    wgpu::TextureUsage mUsage = wgpu::TextureUsage::None;
+    wgpu::TextureUsage mInternalUsage = wgpu::TextureUsage::None;
+    TextureState mState;
 
-        // TODO(crbug.com/dawn/845): Use a more optimized data structure to save space
-        std::vector<bool> mIsSubresourceContentInitializedAtIndex;
-    };
+    // TODO(crbug.com/dawn/845): Use a more optimized data structure to save space
+    std::vector<bool> mIsSubresourceContentInitializedAtIndex;
+};
 
-    class TextureViewBase : public ApiObjectBase {
-      public:
-        TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
+class TextureViewBase : public ApiObjectBase {
+  public:
+    TextureViewBase(TextureBase* texture, const TextureViewDescriptor* descriptor);
 
-        static TextureViewBase* MakeError(DeviceBase* device);
+    static TextureViewBase* MakeError(DeviceBase* device);
 
-        ObjectType GetType() const override;
+    ObjectType GetType() const override;
 
-        const TextureBase* GetTexture() const;
-        TextureBase* GetTexture();
+    const TextureBase* GetTexture() const;
+    TextureBase* GetTexture();
 
-        Aspect GetAspects() const;
-        const Format& GetFormat() const;
-        wgpu::TextureViewDimension GetDimension() const;
-        uint32_t GetBaseMipLevel() const;
-        uint32_t GetLevelCount() const;
-        uint32_t GetBaseArrayLayer() const;
-        uint32_t GetLayerCount() const;
-        const SubresourceRange& GetSubresourceRange() const;
+    Aspect GetAspects() const;
+    const Format& GetFormat() const;
+    wgpu::TextureViewDimension GetDimension() const;
+    uint32_t GetBaseMipLevel() const;
+    uint32_t GetLevelCount() const;
+    uint32_t GetBaseArrayLayer() const;
+    uint32_t GetLayerCount() const;
+    const SubresourceRange& GetSubresourceRange() const;
 
-      protected:
-        // Constructor used only for mocking and testing.
-        explicit TextureViewBase(TextureBase* texture);
-        void DestroyImpl() override;
+  protected:
+    // Constructor used only for mocking and testing.
+    explicit TextureViewBase(TextureBase* texture);
+    void DestroyImpl() override;
 
-      private:
-        TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
+  private:
+    TextureViewBase(DeviceBase* device, ObjectBase::ErrorTag tag);
 
-        Ref<TextureBase> mTexture;
+    Ref<TextureBase> mTexture;
 
-        const Format& mFormat;
-        wgpu::TextureViewDimension mDimension;
-        SubresourceRange mRange;
-    };
+    const Format& mFormat;
+    wgpu::TextureViewDimension mDimension;
+    SubresourceRange mRange;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/TintUtils.cpp b/src/dawn/native/TintUtils.cpp
index 12062de..ca4aea4 100644
--- a/src/dawn/native/TintUtils.cpp
+++ b/src/dawn/native/TintUtils.cpp
@@ -20,37 +20,37 @@
 
 namespace dawn::native {
 
-    namespace {
+namespace {
 
-        thread_local DeviceBase* tlDevice = nullptr;
+thread_local DeviceBase* tlDevice = nullptr;
 
-        void TintICEReporter(const tint::diag::List& diagnostics) {
-            if (tlDevice) {
-                tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
-            }
-        }
-
-        bool InitializeTintErrorReporter() {
-            tint::SetInternalCompilerErrorReporter(&TintICEReporter);
-            return true;
-        }
-
-    }  // namespace
-
-    ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
-        // Call tint::SetInternalCompilerErrorReporter() the first time
-        // this constructor is called. Static initialization is
-        // guaranteed to be thread-safe, and only occur once.
-        static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
-        (void)init_once_tint_error_reporter;
-
-        // Shouldn't have overlapping instances of this handler.
-        ASSERT(tlDevice == nullptr);
-        tlDevice = device;
+void TintICEReporter(const tint::diag::List& diagnostics) {
+    if (tlDevice) {
+        tlDevice->HandleError(InternalErrorType::Validation, diagnostics.str().c_str());
     }
+}
 
-    ScopedTintICEHandler::~ScopedTintICEHandler() {
-        tlDevice = nullptr;
-    }
+bool InitializeTintErrorReporter() {
+    tint::SetInternalCompilerErrorReporter(&TintICEReporter);
+    return true;
+}
+
+}  // namespace
+
+ScopedTintICEHandler::ScopedTintICEHandler(DeviceBase* device) {
+    // Call tint::SetInternalCompilerErrorReporter() the first time
+    // this constructor is called. Static initialization is
+    // guaranteed to be thread-safe, and only occur once.
+    static bool init_once_tint_error_reporter = InitializeTintErrorReporter();
+    (void)init_once_tint_error_reporter;
+
+    // Shouldn't have overlapping instances of this handler.
+    ASSERT(tlDevice == nullptr);
+    tlDevice = device;
+}
+
+ScopedTintICEHandler::~ScopedTintICEHandler() {
+    tlDevice = nullptr;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/TintUtils.h b/src/dawn/native/TintUtils.h
index e7a8fa7..4a2df60 100644
--- a/src/dawn/native/TintUtils.h
+++ b/src/dawn/native/TintUtils.h
@@ -19,18 +19,18 @@
 
 namespace dawn::native {
 
-    class DeviceBase;
+class DeviceBase;
 
-    // Indicates that for the lifetime of this object tint internal compiler errors should be
-    // reported to the given device.
-    class ScopedTintICEHandler : public NonCopyable {
-      public:
-        explicit ScopedTintICEHandler(DeviceBase* device);
-        ~ScopedTintICEHandler();
+// Indicates that for the lifetime of this object tint internal compiler errors should be
+// reported to the given device.
+class ScopedTintICEHandler : public NonCopyable {
+  public:
+    explicit ScopedTintICEHandler(DeviceBase* device);
+    ~ScopedTintICEHandler();
 
-      private:
-        ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
-    };
+  private:
+    ScopedTintICEHandler(ScopedTintICEHandler&&) = delete;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/ToBackend.h b/src/dawn/native/ToBackend.h
index ef8f237..567305f 100644
--- a/src/dawn/native/ToBackend.h
+++ b/src/dawn/native/ToBackend.h
@@ -19,136 +19,133 @@
 
 namespace dawn::native {
 
-    // ToBackendTraits implements the mapping from base type to member type of BackendTraits
-    template <typename T, typename BackendTraits>
-    struct ToBackendTraits;
+// ToBackendTraits implements the mapping from base type to member type of BackendTraits
+template <typename T, typename BackendTraits>
+struct ToBackendTraits;
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<AdapterBase, BackendTraits> {
-        using BackendType = typename BackendTraits::AdapterType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<AdapterBase, BackendTraits> {
+    using BackendType = typename BackendTraits::AdapterType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<BindGroupBase, BackendTraits> {
-        using BackendType = typename BackendTraits::BindGroupType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<BindGroupBase, BackendTraits> {
+    using BackendType = typename BackendTraits::BindGroupType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
-        using BackendType = typename BackendTraits::BindGroupLayoutType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<BindGroupLayoutBase, BackendTraits> {
+    using BackendType = typename BackendTraits::BindGroupLayoutType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<BufferBase, BackendTraits> {
-        using BackendType = typename BackendTraits::BufferType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<BufferBase, BackendTraits> {
+    using BackendType = typename BackendTraits::BufferType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<CommandBufferBase, BackendTraits> {
-        using BackendType = typename BackendTraits::CommandBufferType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<CommandBufferBase, BackendTraits> {
+    using BackendType = typename BackendTraits::CommandBufferType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<ComputePipelineBase, BackendTraits> {
-        using BackendType = typename BackendTraits::ComputePipelineType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<ComputePipelineBase, BackendTraits> {
+    using BackendType = typename BackendTraits::ComputePipelineType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<DeviceBase, BackendTraits> {
-        using BackendType = typename BackendTraits::DeviceType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<DeviceBase, BackendTraits> {
+    using BackendType = typename BackendTraits::DeviceType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
-        using BackendType = typename BackendTraits::PipelineLayoutType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<PipelineLayoutBase, BackendTraits> {
+    using BackendType = typename BackendTraits::PipelineLayoutType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<QuerySetBase, BackendTraits> {
-        using BackendType = typename BackendTraits::QuerySetType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<QuerySetBase, BackendTraits> {
+    using BackendType = typename BackendTraits::QuerySetType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<QueueBase, BackendTraits> {
-        using BackendType = typename BackendTraits::QueueType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<QueueBase, BackendTraits> {
+    using BackendType = typename BackendTraits::QueueType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<RenderPipelineBase, BackendTraits> {
-        using BackendType = typename BackendTraits::RenderPipelineType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<RenderPipelineBase, BackendTraits> {
+    using BackendType = typename BackendTraits::RenderPipelineType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<ResourceHeapBase, BackendTraits> {
-        using BackendType = typename BackendTraits::ResourceHeapType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<ResourceHeapBase, BackendTraits> {
+    using BackendType = typename BackendTraits::ResourceHeapType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<SamplerBase, BackendTraits> {
-        using BackendType = typename BackendTraits::SamplerType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<SamplerBase, BackendTraits> {
+    using BackendType = typename BackendTraits::SamplerType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
-        using BackendType = typename BackendTraits::ShaderModuleType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<ShaderModuleBase, BackendTraits> {
+    using BackendType = typename BackendTraits::ShaderModuleType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<StagingBufferBase, BackendTraits> {
-        using BackendType = typename BackendTraits::StagingBufferType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<StagingBufferBase, BackendTraits> {
+    using BackendType = typename BackendTraits::StagingBufferType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<TextureBase, BackendTraits> {
-        using BackendType = typename BackendTraits::TextureType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<TextureBase, BackendTraits> {
+    using BackendType = typename BackendTraits::TextureType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<SwapChainBase, BackendTraits> {
-        using BackendType = typename BackendTraits::SwapChainType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<SwapChainBase, BackendTraits> {
+    using BackendType = typename BackendTraits::SwapChainType;
+};
 
-    template <typename BackendTraits>
-    struct ToBackendTraits<TextureViewBase, BackendTraits> {
-        using BackendType = typename BackendTraits::TextureViewType;
-    };
+template <typename BackendTraits>
+struct ToBackendTraits<TextureViewBase, BackendTraits> {
+    using BackendType = typename BackendTraits::TextureViewType;
+};
 
-    // ToBackendBase implements conversion to the given BackendTraits
-    // To use it in a backend, use the following:
-    //   template<typename T>
-    //   auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
-    //       return ToBackendBase<MyBackendTraits>(common);
-    //   }
+// ToBackendBase implements conversion to the given BackendTraits
+// To use it in a backend, use the following:
+//   template<typename T>
+//   auto ToBackend(T&& common) -> decltype(ToBackendBase<MyBackendTraits>(common)) {
+//       return ToBackendBase<MyBackendTraits>(common);
+//   }
 
-    template <typename BackendTraits, typename T>
-    Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
-        return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(
-            common);
-    }
+template <typename BackendTraits, typename T>
+Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(Ref<T>& common) {
+    return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
+}
 
-    template <typename BackendTraits, typename T>
-    Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
-        return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(
-            common);
-    }
+template <typename BackendTraits, typename T>
+Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&& ToBackendBase(Ref<T>&& common) {
+    return reinterpret_cast<Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&&>(common);
+}
 
-    template <typename BackendTraits, typename T>
-    const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
-        const Ref<T>& common) {
-        return reinterpret_cast<
-            const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(common);
-    }
+template <typename BackendTraits, typename T>
+const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>& ToBackendBase(
+    const Ref<T>& common) {
+    return reinterpret_cast<const Ref<typename ToBackendTraits<T, BackendTraits>::BackendType>&>(
+        common);
+}
 
-    template <typename BackendTraits, typename T>
-    typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
-        return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
-    }
+template <typename BackendTraits, typename T>
+typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(T* common) {
+    return reinterpret_cast<typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
+}
 
-    template <typename BackendTraits, typename T>
-    const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
-        return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(
-            common);
-    }
+template <typename BackendTraits, typename T>
+const typename ToBackendTraits<T, BackendTraits>::BackendType* ToBackendBase(const T* common) {
+    return reinterpret_cast<const typename ToBackendTraits<T, BackendTraits>::BackendType*>(common);
+}
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/Toggles.cpp b/src/dawn/native/Toggles.cpp
index f9f39a7..b668a01 100644
--- a/src/dawn/native/Toggles.cpp
+++ b/src/dawn/native/Toggles.cpp
@@ -19,351 +19,347 @@
 #include "dawn/native/Toggles.h"
 
 namespace dawn::native {
-    namespace {
+namespace {
 
-        struct ToggleEnumAndInfo {
-            Toggle toggle;
-            ToggleInfo info;
-        };
+struct ToggleEnumAndInfo {
+    Toggle toggle;
+    ToggleInfo info;
+};
 
-        using ToggleEnumAndInfoList =
-            std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
+using ToggleEnumAndInfoList = std::array<ToggleEnumAndInfo, static_cast<size_t>(Toggle::EnumCount)>;
 
-        static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
-            {Toggle::EmulateStoreAndMSAAResolve,
-             {"emulate_store_and_msaa_resolve",
-              "Emulate storing into multisampled color attachments and doing MSAA resolve "
-              "simultaneously. This workaround is enabled by default on the Metal drivers that do "
-              "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
-              "those platforms, we should do MSAA resolve in another render pass after ending the "
-              "previous one.",
-              "https://crbug.com/dawn/56"}},
-            {Toggle::NonzeroClearResourcesOnCreationForTesting,
-             {"nonzero_clear_resources_on_creation_for_testing",
-              "Clears texture to full 1 bits as soon as they are created, but doesn't update "
-              "the tracking state of the texture. This way we can test the logic of clearing "
-              "textures that use recycled memory.",
-              "https://crbug.com/dawn/145"}},
-            {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
-             {"always_resolve_into_zero_level_and_layer",
-              "When the resolve target is a texture view that is created on the non-zero level or "
-              "layer of a texture, we first resolve into a temporarily 2D texture with only one "
-              "mipmap level and one array layer, and copy the result of MSAA resolve into the "
-              "true resolve target. This workaround is enabled by default on the Metal drivers "
-              "that have bugs when setting non-zero resolveLevel or resolveSlice.",
-              "https://crbug.com/dawn/56"}},
-            {Toggle::LazyClearResourceOnFirstUse,
-             {"lazy_clear_resource_on_first_use",
-              "Clears resource to zero on first usage. This initializes the resource "
-              "so that no dirty bits from recycled memory is present in the new resource.",
-              "https://crbug.com/dawn/145"}},
-            {Toggle::TurnOffVsync,
-             {"turn_off_vsync",
-              "Turn off vsync when rendering. In order to do performance test or run perf tests, "
-              "turn off vsync so that the fps can exeed 60.",
-              "https://crbug.com/dawn/237"}},
-            {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
-             {"use_temporary_buffer_in_texture_to_texture_copy",
-              "Split texture-to-texture copy into two copies: copy from source texture into a "
-              "temporary buffer, and copy from the temporary buffer into the destination texture "
-              "when copying between compressed textures that don't have block-aligned sizes. This "
-              "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
-              "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
-              "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
-              "https://crbug.com/dawn/42"}},
-            {Toggle::UseD3D12ResourceHeapTier2,
-             {"use_d3d12_resource_heap_tier2",
-              "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
-              "texture and buffers in the same heap. This allows better heap re-use and reduces "
-              "fragmentation.",
-              "https://crbug.com/dawn/27"}},
-            {Toggle::UseD3D12RenderPass,
-             {"use_d3d12_render_pass",
-              "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
-              "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
-              "will emulate a render pass.",
-              "https://crbug.com/dawn/36"}},
-            {Toggle::UseD3D12ResidencyManagement,
-             {"use_d3d12_residency_management",
-              "Enable residency management. This allows page-in and page-out of resource heaps in "
-              "GPU memory. This component improves overcommitted performance by keeping the most "
-              "recently used resources local to the GPU. Turning this component off can cause "
-              "allocation failures when application memory exceeds physical device memory.",
-              "https://crbug.com/dawn/193"}},
-            {Toggle::DisableResourceSuballocation,
-             {"disable_resource_suballocation",
-              "Force the backends to not perform resource suballocation. This may expose "
-              "allocation "
-              "patterns which would otherwise only occur with large or specific types of "
-              "resources.",
-              "https://crbug.com/1313172"}},
-            {Toggle::SkipValidation,
-             {"skip_validation", "Skip expensive validation of Dawn commands.",
-              "https://crbug.com/dawn/271"}},
-            {Toggle::VulkanUseD32S8,
-             {"vulkan_use_d32s8",
-              "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
-              "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
-              "use the D24S8 format when possible.",
-              "https://crbug.com/dawn/286"}},
-            {Toggle::VulkanUseS8,
-             {"vulkan_use_s8",
-              "Vulkan has a pure stencil8 format but it is not universally available. When this "
-              "toggle is on, the backend will use S8 for the stencil8 format, otherwise it will "
-              "fallback to D32S8 or D24S8.",
-              "https://crbug.com/dawn/666"}},
-            {Toggle::MetalDisableSamplerCompare,
-             {"metal_disable_sampler_compare",
-              "Disables the use of sampler compare on Metal. This is unsupported before A9 "
-              "processors.",
-              "https://crbug.com/dawn/342"}},
-            {Toggle::MetalUseSharedModeForCounterSampleBuffer,
-             {"metal_use_shared_mode_for_counter_sample_buffer",
-              "The query set on Metal need to create MTLCounterSampleBuffer which storage mode "
-              "must be either MTLStorageModeShared or MTLStorageModePrivate. But the private mode "
-              "does not work properly on Intel platforms. The workaround is use shared mode "
-              "instead.",
-              "https://crbug.com/dawn/434"}},
-            {Toggle::DisableBaseVertex,
-             {"disable_base_vertex",
-              "Disables the use of non-zero base vertex which is unsupported on some platforms.",
-              "https://crbug.com/dawn/343"}},
-            {Toggle::DisableBaseInstance,
-             {"disable_base_instance",
-              "Disables the use of non-zero base instance which is unsupported on some "
-              "platforms.",
-              "https://crbug.com/dawn/343"}},
-            {Toggle::DisableIndexedDrawBuffers,
-             {"disable_indexed_draw_buffers",
-              "Disables the use of indexed draw buffer state which is unsupported on some "
-              "platforms.",
-              "https://crbug.com/dawn/582"}},
-            {Toggle::DisableSnormRead,
-             {"disable_snorm_read",
-              "Disables reading from Snorm textures which is unsupported on some platforms.",
-              "https://crbug.com/dawn/667"}},
-            {Toggle::DisableDepthRead,
-             {"disable_depth_read",
-              "Disables reading from depth textures which is unsupported on some platforms.",
-              "https://crbug.com/dawn/667"}},
-            {Toggle::DisableStencilRead,
-             {"disable_stencil_read",
-              "Disables reading from stencil textures which is unsupported on some platforms.",
-              "https://crbug.com/dawn/667"}},
-            {Toggle::DisableDepthStencilRead,
-             {"disable_depth_stencil_read",
-              "Disables reading from depth/stencil textures which is unsupported on some "
-              "platforms.",
-              "https://crbug.com/dawn/667"}},
-            {Toggle::DisableBGRARead,
-             {"disable_bgra_read",
-              "Disables reading from BGRA textures which is unsupported on some platforms.",
-              "https://crbug.com/dawn/1393"}},
-            {Toggle::DisableSampleVariables,
-             {"disable_sample_variables",
-              "Disables gl_SampleMask and related functionality which is unsupported on some "
-              "platforms.",
-              "https://crbug.com/dawn/673"}},
-            {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
-             {"use_d3d12_small_shader_visible_heap",
-              "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
-              "default. This setting is used to test bindgroup encoding.",
-              "https://crbug.com/dawn/155"}},
-            {Toggle::UseDXC,
-             {"use_dxc",
-              "Use DXC instead of FXC for compiling HLSL when both dxcompiler.dll and dxil.dll "
-              "is available.",
-              "https://crbug.com/dawn/402"}},
-            {Toggle::DisableRobustness,
-             {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
-            {Toggle::MetalEnableVertexPulling,
-             {"metal_enable_vertex_pulling",
-              "Uses vertex pulling to protect out-of-bounds reads on Metal",
-              "https://crbug.com/dawn/480"}},
-            {Toggle::DisallowUnsafeAPIs,
-             {"disallow_unsafe_apis",
-              "Produces validation errors on API entry points or parameter combinations that "
-              "aren't considered secure yet.",
-              "http://crbug.com/1138528"}},
-            {Toggle::FlushBeforeClientWaitSync,
-             {"flush_before_client_wait_sync",
-              "Call glFlush before glClientWaitSync to work around bugs in the latter",
-              "https://crbug.com/dawn/633"}},
-            {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
-             {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_"
-              "level",
-              "Split texture-to-texture copy into two copies: copy from source texture into a "
-              "temporary buffer, and copy from the temporary buffer into the destination texture "
-              "under specific situations. This workaround is by default enabled on some Intel "
-              "GPUs which have a driver bug in the execution of CopyTextureRegion() when we copy "
-              "with the formats whose texel block sizes are less than 4 bytes from a greater mip "
-              "level to a smaller mip level on D3D12 backends.",
-              "https://crbug.com/1161355"}},
-            {Toggle::EmitHLSLDebugSymbols,
-             {"emit_hlsl_debug_symbols",
-              "Sets the D3DCOMPILE_SKIP_OPTIMIZATION and D3DCOMPILE_DEBUG compilation flags when "
-              "compiling HLSL code. Enables better shader debugging with external graphics "
-              "debugging tools.",
-              "https://crbug.com/dawn/776"}},
-            {Toggle::DisallowSpirv,
-             {"disallow_spirv",
-              "Disallow usage of SPIR-V completely so that only WGSL is used for shader modules. "
-              "This is useful to prevent a Chromium renderer process from successfully sending "
-              "SPIR-V code to be compiled in the GPU process.",
-              "https://crbug.com/1214923"}},
-            {Toggle::DumpShaders,
-             {"dump_shaders",
-              "Dump shaders for debugging purposes. Dumped shaders will be log via "
-              "EmitLog, thus printed in Chrome console or consumed by user-defined callback "
-              "function.",
-              "https://crbug.com/dawn/792"}},
-            {Toggle::DEPRECATED_DumpTranslatedShaders,
-             {"dump_translated_shaders", "Deprecated. Use dump_shaders",
-              "https://crbug.com/dawn/792"}},
-            {Toggle::ForceWGSLStep,
-             {"force_wgsl_step",
-              "When ingesting SPIR-V shaders, force a first conversion to WGSL. This allows "
-              "testing Tint's SPIRV->WGSL translation on real content to be sure that it will "
-              "work when the same translation runs in a WASM module in the page.",
-              "https://crbug.com/dawn/960"}},
-            {Toggle::DisableWorkgroupInit,
-             {"disable_workgroup_init",
-              "Disables the workgroup memory zero-initialization for compute shaders.",
-              "https://crbug.com/tint/1003"}},
-            {Toggle::DisableSymbolRenaming,
-             {"disable_symbol_renaming",
-              "Disables the WGSL symbol renaming so that names are preserved.",
-              "https://crbug.com/dawn/1016"}},
-            {Toggle::UseUserDefinedLabelsInBackend,
-             {"use_user_defined_labels_in_backend",
-              "Enables calls to SetLabel to be forwarded to backend-specific APIs that label "
-              "objects.",
-              "https://crbug.com/dawn/840"}},
-            {Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
-             {"use_placeholder_fragment_in_vertex_only_pipeline",
-              "Use a placeholder empty fragment shader in vertex only render pipeline. This toggle "
-              "must be enabled for OpenGL ES backend, and serves as a workaround by default "
-              "enabled on some Metal devices with Intel GPU to ensure the depth result is correct.",
-              "https://crbug.com/dawn/136"}},
-            {Toggle::FxcOptimizations,
-             {"fxc_optimizations",
-              "Enable optimizations when compiling with FXC. Disabled by default because FXC "
-              "miscompiles in many cases when optimizations are enabled.",
-              "https://crbug.com/dawn/1203"}},
-            {Toggle::RecordDetailedTimingInTraceEvents,
-             {"record_detailed_timing_in_trace_events",
-              "Record detailed timing information in trace events at certain point. Currently the "
-              "timing information is recorded right before calling ExecuteCommandLists on a D3D12 "
-              "command queue, and the information includes system time, CPU timestamp, GPU "
-              "timestamp, and their frequency.",
-              "https://crbug.com/dawn/1264"}},
-            {Toggle::DisableTimestampQueryConversion,
-             {"disable_timestamp_query_conversion",
-              "Resolve timestamp queries into ticks instead of nanoseconds.",
-              "https://crbug.com/dawn/1305"}},
-            {Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension,
-             {"use_vulkan_zero_initialize_workgroup_memory_extension",
-              "Initialize workgroup memory with OpConstantNull on Vulkan when the Vulkan extension "
-              "VK_KHR_zero_initialize_workgroup_memory is supported.",
-              "https://crbug.com/dawn/1302"}},
-            {Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings,
-             {"d3d12_split_buffer_texture_copy_for_rows_per_image_paddings",
-              "D3D12 requires more buffer storage than it should when rowsPerImage is greater than "
-              "copyHeight, which means there are pure padding row(s) on each image. In this "
-              "situation, the buffer used for B2T/T2B copy might be big enough according to "
-              "WebGPU's spec but it doesn't meet D3D12's requirement, then we need to workaround "
-              "it via split the copy operation into two copies, in order to make B2T/T2B copy "
-              "being done correctly on D3D12.",
-              "https://crbug.com/dawn/1289"}},
-            {Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture,
-             {"metal_render_r8_rg8_unorm_small_mip_to_temp_texture",
-              "Metal Intel devices have issues with r8unorm and rg8unorm textures where rendering "
-              "to small mips (level >= 2) doesn't work correctly. Workaround this issue by "
-              "detecting this case and rendering to a temporary texture instead (with copies "
-              "before "
-              "and after if needed).",
-              "https://crbug.com/dawn/1071"}},
-            // Comment to separate the }} so it is clearer what to copy-paste to add a toggle.
-        }};
-    }  // anonymous namespace
+static constexpr ToggleEnumAndInfoList kToggleNameAndInfoList = {{
+    {Toggle::EmulateStoreAndMSAAResolve,
+     {"emulate_store_and_msaa_resolve",
+      "Emulate storing into multisampled color attachments and doing MSAA resolve "
+      "simultaneously. This workaround is enabled by default on the Metal drivers that do "
+      "not support MTLStoreActionStoreAndMultisampleResolve. To support StoreOp::Store on "
+      "those platforms, we should do MSAA resolve in another render pass after ending the "
+      "previous one.",
+      "https://crbug.com/dawn/56"}},
+    {Toggle::NonzeroClearResourcesOnCreationForTesting,
+     {"nonzero_clear_resources_on_creation_for_testing",
+      "Clears texture to full 1 bits as soon as they are created, but doesn't update "
+      "the tracking state of the texture. This way we can test the logic of clearing "
+      "textures that use recycled memory.",
+      "https://crbug.com/dawn/145"}},
+    {Toggle::AlwaysResolveIntoZeroLevelAndLayer,
+     {"always_resolve_into_zero_level_and_layer",
+      "When the resolve target is a texture view that is created on the non-zero level or "
+      "layer of a texture, we first resolve into a temporarily 2D texture with only one "
+      "mipmap level and one array layer, and copy the result of MSAA resolve into the "
+      "true resolve target. This workaround is enabled by default on the Metal drivers "
+      "that have bugs when setting non-zero resolveLevel or resolveSlice.",
+      "https://crbug.com/dawn/56"}},
+    {Toggle::LazyClearResourceOnFirstUse,
+     {"lazy_clear_resource_on_first_use",
+      "Clears resource to zero on first usage. This initializes the resource "
+      "so that no dirty bits from recycled memory is present in the new resource.",
+      "https://crbug.com/dawn/145"}},
+    {Toggle::TurnOffVsync,
+     {"turn_off_vsync",
+      "Turn off vsync when rendering. In order to do performance test or run perf tests, "
+      "turn off vsync so that the fps can exeed 60.",
+      "https://crbug.com/dawn/237"}},
+    {Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy,
+     {"use_temporary_buffer_in_texture_to_texture_copy",
+      "Split texture-to-texture copy into two copies: copy from source texture into a "
+      "temporary buffer, and copy from the temporary buffer into the destination texture "
+      "when copying between compressed textures that don't have block-aligned sizes. This "
+      "workaround is enabled by default on all Vulkan drivers to solve an issue in the "
+      "Vulkan SPEC about the texture-to-texture copies with compressed formats. See #1005 "
+      "(https://github.com/KhronosGroup/Vulkan-Docs/issues/1005) for more details.",
+      "https://crbug.com/dawn/42"}},
+    {Toggle::UseD3D12ResourceHeapTier2,
+     {"use_d3d12_resource_heap_tier2",
+      "Enable support for resource heap tier 2. Resource heap tier 2 allows mixing of "
+      "texture and buffers in the same heap. This allows better heap re-use and reduces "
+      "fragmentation.",
+      "https://crbug.com/dawn/27"}},
+    {Toggle::UseD3D12RenderPass,
+     {"use_d3d12_render_pass",
+      "Use the D3D12 render pass API introduced in Windows build 1809 by default. On "
+      "versions of Windows prior to build 1809, or when this toggle is turned off, Dawn "
+      "will emulate a render pass.",
+      "https://crbug.com/dawn/36"}},
+    {Toggle::UseD3D12ResidencyManagement,
+     {"use_d3d12_residency_management",
+      "Enable residency management. This allows page-in and page-out of resource heaps in "
+      "GPU memory. This component improves overcommitted performance by keeping the most "
+      "recently used resources local to the GPU. Turning this component off can cause "
+      "allocation failures when application memory exceeds physical device memory.",
+      "https://crbug.com/dawn/193"}},
+    {Toggle::DisableResourceSuballocation,
+     {"disable_resource_suballocation",
+      "Force the backends to not perform resource suballocation. This may expose "
+      "allocation "
+      "patterns which would otherwise only occur with large or specific types of "
+      "resources.",
+      "https://crbug.com/1313172"}},
+    {Toggle::SkipValidation,
+     {"skip_validation", "Skip expensive validation of Dawn commands.",
+      "https://crbug.com/dawn/271"}},
+    {Toggle::VulkanUseD32S8,
+     {"vulkan_use_d32s8",
+      "Vulkan mandates support of either D32_FLOAT_S8 or D24_UNORM_S8. When available the "
+      "backend will use D32S8 (toggle to on) but setting the toggle to off will make it "
+      "use the D24S8 format when possible.",
+      "https://crbug.com/dawn/286"}},
+    {Toggle::VulkanUseS8,
+     {"vulkan_use_s8",
+      "Vulkan has a pure stencil8 format but it is not universally available. When this "
+      "toggle is on, the backend will use S8 for the stencil8 format, otherwise it will "
+      "fallback to D32S8 or D24S8.",
+      "https://crbug.com/dawn/666"}},
+    {Toggle::MetalDisableSamplerCompare,
+     {"metal_disable_sampler_compare",
+      "Disables the use of sampler compare on Metal. This is unsupported before A9 "
+      "processors.",
+      "https://crbug.com/dawn/342"}},
+    {Toggle::MetalUseSharedModeForCounterSampleBuffer,
+     {"metal_use_shared_mode_for_counter_sample_buffer",
+      "The query set on Metal need to create MTLCounterSampleBuffer which storage mode "
+      "must be either MTLStorageModeShared or MTLStorageModePrivate. But the private mode "
+      "does not work properly on Intel platforms. The workaround is use shared mode "
+      "instead.",
+      "https://crbug.com/dawn/434"}},
+    {Toggle::DisableBaseVertex,
+     {"disable_base_vertex",
+      "Disables the use of non-zero base vertex which is unsupported on some platforms.",
+      "https://crbug.com/dawn/343"}},
+    {Toggle::DisableBaseInstance,
+     {"disable_base_instance",
+      "Disables the use of non-zero base instance which is unsupported on some "
+      "platforms.",
+      "https://crbug.com/dawn/343"}},
+    {Toggle::DisableIndexedDrawBuffers,
+     {"disable_indexed_draw_buffers",
+      "Disables the use of indexed draw buffer state which is unsupported on some "
+      "platforms.",
+      "https://crbug.com/dawn/582"}},
+    {Toggle::DisableSnormRead,
+     {"disable_snorm_read",
+      "Disables reading from Snorm textures which is unsupported on some platforms.",
+      "https://crbug.com/dawn/667"}},
+    {Toggle::DisableDepthRead,
+     {"disable_depth_read",
+      "Disables reading from depth textures which is unsupported on some platforms.",
+      "https://crbug.com/dawn/667"}},
+    {Toggle::DisableStencilRead,
+     {"disable_stencil_read",
+      "Disables reading from stencil textures which is unsupported on some platforms.",
+      "https://crbug.com/dawn/667"}},
+    {Toggle::DisableDepthStencilRead,
+     {"disable_depth_stencil_read",
+      "Disables reading from depth/stencil textures which is unsupported on some "
+      "platforms.",
+      "https://crbug.com/dawn/667"}},
+    {Toggle::DisableBGRARead,
+     {"disable_bgra_read",
+      "Disables reading from BGRA textures which is unsupported on some platforms.",
+      "https://crbug.com/dawn/1393"}},
+    {Toggle::DisableSampleVariables,
+     {"disable_sample_variables",
+      "Disables gl_SampleMask and related functionality which is unsupported on some "
+      "platforms.",
+      "https://crbug.com/dawn/673"}},
+    {Toggle::UseD3D12SmallShaderVisibleHeapForTesting,
+     {"use_d3d12_small_shader_visible_heap",
+      "Enable use of a small D3D12 shader visible heap, instead of using a large one by "
+      "default. This setting is used to test bindgroup encoding.",
+      "https://crbug.com/dawn/155"}},
+    {Toggle::UseDXC,
+     {"use_dxc",
+      "Use DXC instead of FXC for compiling HLSL when both dxcompiler.dll and dxil.dll "
+      "is available.",
+      "https://crbug.com/dawn/402"}},
+    {Toggle::DisableRobustness,
+     {"disable_robustness", "Disable robust buffer access", "https://crbug.com/dawn/480"}},
+    {Toggle::MetalEnableVertexPulling,
+     {"metal_enable_vertex_pulling", "Uses vertex pulling to protect out-of-bounds reads on Metal",
+      "https://crbug.com/dawn/480"}},
+    {Toggle::DisallowUnsafeAPIs,
+     {"disallow_unsafe_apis",
+      "Produces validation errors on API entry points or parameter combinations that "
+      "aren't considered secure yet.",
+      "http://crbug.com/1138528"}},
+    {Toggle::FlushBeforeClientWaitSync,
+     {"flush_before_client_wait_sync",
+      "Call glFlush before glClientWaitSync to work around bugs in the latter",
+      "https://crbug.com/dawn/633"}},
+    {Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+     {"use_temp_buffer_in_small_format_texture_to_texture_copy_from_greater_to_less_mip_"
+      "level",
+      "Split texture-to-texture copy into two copies: copy from source texture into a "
+      "temporary buffer, and copy from the temporary buffer into the destination texture "
+      "under specific situations. This workaround is by default enabled on some Intel "
+      "GPUs which have a driver bug in the execution of CopyTextureRegion() when we copy "
+      "with the formats whose texel block sizes are less than 4 bytes from a greater mip "
+      "level to a smaller mip level on D3D12 backends.",
+      "https://crbug.com/1161355"}},
+    {Toggle::EmitHLSLDebugSymbols,
+     {"emit_hlsl_debug_symbols",
+      "Sets the D3DCOMPILE_SKIP_OPTIMIZATION and D3DCOMPILE_DEBUG compilation flags when "
+      "compiling HLSL code. Enables better shader debugging with external graphics "
+      "debugging tools.",
+      "https://crbug.com/dawn/776"}},
+    {Toggle::DisallowSpirv,
+     {"disallow_spirv",
+      "Disallow usage of SPIR-V completely so that only WGSL is used for shader modules. "
+      "This is useful to prevent a Chromium renderer process from successfully sending "
+      "SPIR-V code to be compiled in the GPU process.",
+      "https://crbug.com/1214923"}},
+    {Toggle::DumpShaders,
+     {"dump_shaders",
+      "Dump shaders for debugging purposes. Dumped shaders will be log via "
+      "EmitLog, thus printed in Chrome console or consumed by user-defined callback "
+      "function.",
+      "https://crbug.com/dawn/792"}},
+    {Toggle::DEPRECATED_DumpTranslatedShaders,
+     {"dump_translated_shaders", "Deprecated. Use dump_shaders", "https://crbug.com/dawn/792"}},
+    {Toggle::ForceWGSLStep,
+     {"force_wgsl_step",
+      "When ingesting SPIR-V shaders, force a first conversion to WGSL. This allows "
+      "testing Tint's SPIRV->WGSL translation on real content to be sure that it will "
+      "work when the same translation runs in a WASM module in the page.",
+      "https://crbug.com/dawn/960"}},
+    {Toggle::DisableWorkgroupInit,
+     {"disable_workgroup_init",
+      "Disables the workgroup memory zero-initialization for compute shaders.",
+      "https://crbug.com/tint/1003"}},
+    {Toggle::DisableSymbolRenaming,
+     {"disable_symbol_renaming", "Disables the WGSL symbol renaming so that names are preserved.",
+      "https://crbug.com/dawn/1016"}},
+    {Toggle::UseUserDefinedLabelsInBackend,
+     {"use_user_defined_labels_in_backend",
+      "Enables calls to SetLabel to be forwarded to backend-specific APIs that label "
+      "objects.",
+      "https://crbug.com/dawn/840"}},
+    {Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
+     {"use_placeholder_fragment_in_vertex_only_pipeline",
+      "Use a placeholder empty fragment shader in vertex only render pipeline. This toggle "
+      "must be enabled for OpenGL ES backend, and serves as a workaround by default "
+      "enabled on some Metal devices with Intel GPU to ensure the depth result is correct.",
+      "https://crbug.com/dawn/136"}},
+    {Toggle::FxcOptimizations,
+     {"fxc_optimizations",
+      "Enable optimizations when compiling with FXC. Disabled by default because FXC "
+      "miscompiles in many cases when optimizations are enabled.",
+      "https://crbug.com/dawn/1203"}},
+    {Toggle::RecordDetailedTimingInTraceEvents,
+     {"record_detailed_timing_in_trace_events",
+      "Record detailed timing information in trace events at certain point. Currently the "
+      "timing information is recorded right before calling ExecuteCommandLists on a D3D12 "
+      "command queue, and the information includes system time, CPU timestamp, GPU "
+      "timestamp, and their frequency.",
+      "https://crbug.com/dawn/1264"}},
+    {Toggle::DisableTimestampQueryConversion,
+     {"disable_timestamp_query_conversion",
+      "Resolve timestamp queries into ticks instead of nanoseconds.",
+      "https://crbug.com/dawn/1305"}},
+    {Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension,
+     {"use_vulkan_zero_initialize_workgroup_memory_extension",
+      "Initialize workgroup memory with OpConstantNull on Vulkan when the Vulkan extension "
+      "VK_KHR_zero_initialize_workgroup_memory is supported.",
+      "https://crbug.com/dawn/1302"}},
+    {Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings,
+     {"d3d12_split_buffer_texture_copy_for_rows_per_image_paddings",
+      "D3D12 requires more buffer storage than it should when rowsPerImage is greater than "
+      "copyHeight, which means there are pure padding row(s) on each image. In this "
+      "situation, the buffer used for B2T/T2B copy might be big enough according to "
+      "WebGPU's spec but it doesn't meet D3D12's requirement, then we need to workaround "
+      "it via split the copy operation into two copies, in order to make B2T/T2B copy "
+      "being done correctly on D3D12.",
+      "https://crbug.com/dawn/1289"}},
+    {Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture,
+     {"metal_render_r8_rg8_unorm_small_mip_to_temp_texture",
+      "Metal Intel devices have issues with r8unorm and rg8unorm textures where rendering "
+      "to small mips (level >= 2) doesn't work correctly. Workaround this issue by "
+      "detecting this case and rendering to a temporary texture instead (with copies "
+      "before "
+      "and after if needed).",
+      "https://crbug.com/dawn/1071"}},
+    // Comment to separate the }} so it is clearer what to copy-paste to add a toggle.
+}};
+}  // anonymous namespace
 
-    void TogglesSet::Set(Toggle toggle, bool enabled) {
-        if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
-            Set(Toggle::DumpShaders, enabled);
-            return;
-        }
-        ASSERT(toggle != Toggle::InvalidEnum);
-        const size_t toggleIndex = static_cast<size_t>(toggle);
-        toggleBitset.set(toggleIndex, enabled);
+void TogglesSet::Set(Toggle toggle, bool enabled) {
+    if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
+        Set(Toggle::DumpShaders, enabled);
+        return;
+    }
+    ASSERT(toggle != Toggle::InvalidEnum);
+    const size_t toggleIndex = static_cast<size_t>(toggle);
+    toggleBitset.set(toggleIndex, enabled);
+}
+
+bool TogglesSet::Has(Toggle toggle) const {
+    if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
+        return Has(Toggle::DumpShaders);
+    }
+    ASSERT(toggle != Toggle::InvalidEnum);
+    const size_t toggleIndex = static_cast<size_t>(toggle);
+    return toggleBitset.test(toggleIndex);
+}
+
+std::vector<const char*> TogglesSet::GetContainedToggleNames() const {
+    std::vector<const char*> togglesNameInUse(toggleBitset.count());
+
+    uint32_t index = 0;
+    for (uint32_t i : IterateBitSet(toggleBitset)) {
+        const char* toggleName = ToggleEnumToName(static_cast<Toggle>(i));
+        togglesNameInUse[index] = toggleName;
+        ++index;
     }
 
-    bool TogglesSet::Has(Toggle toggle) const {
-        if (toggle == Toggle::DEPRECATED_DumpTranslatedShaders) {
-            return Has(Toggle::DumpShaders);
-        }
-        ASSERT(toggle != Toggle::InvalidEnum);
-        const size_t toggleIndex = static_cast<size_t>(toggle);
-        return toggleBitset.test(toggleIndex);
+    return togglesNameInUse;
+}
+
+const char* ToggleEnumToName(Toggle toggle) {
+    ASSERT(toggle != Toggle::InvalidEnum);
+
+    const ToggleEnumAndInfo& toggleNameAndInfo =
+        kToggleNameAndInfoList[static_cast<size_t>(toggle)];
+    ASSERT(toggleNameAndInfo.toggle == toggle);
+    return toggleNameAndInfo.info.name;
+}
+
+const ToggleInfo* TogglesInfo::GetToggleInfo(const char* toggleName) {
+    ASSERT(toggleName);
+
+    EnsureToggleNameToEnumMapInitialized();
+
+    const auto& iter = mToggleNameToEnumMap.find(toggleName);
+    if (iter != mToggleNameToEnumMap.cend()) {
+        return &kToggleNameAndInfoList[static_cast<size_t>(iter->second)].info;
+    }
+    return nullptr;
+}
+
+Toggle TogglesInfo::ToggleNameToEnum(const char* toggleName) {
+    ASSERT(toggleName);
+
+    EnsureToggleNameToEnumMapInitialized();
+
+    const auto& iter = mToggleNameToEnumMap.find(toggleName);
+    if (iter != mToggleNameToEnumMap.cend()) {
+        return kToggleNameAndInfoList[static_cast<size_t>(iter->second)].toggle;
+    }
+    return Toggle::InvalidEnum;
+}
+
+void TogglesInfo::EnsureToggleNameToEnumMapInitialized() {
+    if (mToggleNameToEnumMapInitialized) {
+        return;
     }
 
-    std::vector<const char*> TogglesSet::GetContainedToggleNames() const {
-        std::vector<const char*> togglesNameInUse(toggleBitset.count());
-
-        uint32_t index = 0;
-        for (uint32_t i : IterateBitSet(toggleBitset)) {
-            const char* toggleName = ToggleEnumToName(static_cast<Toggle>(i));
-            togglesNameInUse[index] = toggleName;
-            ++index;
-        }
-
-        return togglesNameInUse;
+    for (size_t index = 0; index < kToggleNameAndInfoList.size(); ++index) {
+        const ToggleEnumAndInfo& toggleNameAndInfo = kToggleNameAndInfoList[index];
+        ASSERT(index == static_cast<size_t>(toggleNameAndInfo.toggle));
+        mToggleNameToEnumMap[toggleNameAndInfo.info.name] = toggleNameAndInfo.toggle;
     }
 
-    const char* ToggleEnumToName(Toggle toggle) {
-        ASSERT(toggle != Toggle::InvalidEnum);
-
-        const ToggleEnumAndInfo& toggleNameAndInfo =
-            kToggleNameAndInfoList[static_cast<size_t>(toggle)];
-        ASSERT(toggleNameAndInfo.toggle == toggle);
-        return toggleNameAndInfo.info.name;
-    }
-
-    const ToggleInfo* TogglesInfo::GetToggleInfo(const char* toggleName) {
-        ASSERT(toggleName);
-
-        EnsureToggleNameToEnumMapInitialized();
-
-        const auto& iter = mToggleNameToEnumMap.find(toggleName);
-        if (iter != mToggleNameToEnumMap.cend()) {
-            return &kToggleNameAndInfoList[static_cast<size_t>(iter->second)].info;
-        }
-        return nullptr;
-    }
-
-    Toggle TogglesInfo::ToggleNameToEnum(const char* toggleName) {
-        ASSERT(toggleName);
-
-        EnsureToggleNameToEnumMapInitialized();
-
-        const auto& iter = mToggleNameToEnumMap.find(toggleName);
-        if (iter != mToggleNameToEnumMap.cend()) {
-            return kToggleNameAndInfoList[static_cast<size_t>(iter->second)].toggle;
-        }
-        return Toggle::InvalidEnum;
-    }
-
-    void TogglesInfo::EnsureToggleNameToEnumMapInitialized() {
-        if (mToggleNameToEnumMapInitialized) {
-            return;
-        }
-
-        for (size_t index = 0; index < kToggleNameAndInfoList.size(); ++index) {
-            const ToggleEnumAndInfo& toggleNameAndInfo = kToggleNameAndInfoList[index];
-            ASSERT(index == static_cast<size_t>(toggleNameAndInfo.toggle));
-            mToggleNameToEnumMap[toggleNameAndInfo.info.name] = toggleNameAndInfo.toggle;
-        }
-
-        mToggleNameToEnumMapInitialized = true;
-    }
+    mToggleNameToEnumMapInitialized = true;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/Toggles.h b/src/dawn/native/Toggles.h
index d9748fb..3d3523e 100644
--- a/src/dawn/native/Toggles.h
+++ b/src/dawn/native/Toggles.h
@@ -24,83 +24,83 @@
 
 namespace dawn::native {
 
-    enum class Toggle {
-        EmulateStoreAndMSAAResolve,
-        NonzeroClearResourcesOnCreationForTesting,
-        AlwaysResolveIntoZeroLevelAndLayer,
-        LazyClearResourceOnFirstUse,
-        TurnOffVsync,
-        UseTemporaryBufferInCompressedTextureToTextureCopy,
-        UseD3D12ResourceHeapTier2,
-        UseD3D12RenderPass,
-        UseD3D12ResidencyManagement,
-        DisableResourceSuballocation,
-        SkipValidation,
-        VulkanUseD32S8,
-        VulkanUseS8,
-        MetalDisableSamplerCompare,
-        MetalUseSharedModeForCounterSampleBuffer,
-        DisableBaseVertex,
-        DisableBaseInstance,
-        DisableIndexedDrawBuffers,
-        DisableSnormRead,
-        DisableDepthRead,
-        DisableStencilRead,
-        DisableDepthStencilRead,
-        DisableBGRARead,
-        DisableSampleVariables,
-        UseD3D12SmallShaderVisibleHeapForTesting,
-        UseDXC,
-        DisableRobustness,
-        MetalEnableVertexPulling,
-        DisallowUnsafeAPIs,
-        FlushBeforeClientWaitSync,
-        UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
-        EmitHLSLDebugSymbols,
-        DisallowSpirv,
-        DumpShaders,
-        DEPRECATED_DumpTranslatedShaders,  // Use DumpShaders
-        ForceWGSLStep,
-        DisableWorkgroupInit,
-        DisableSymbolRenaming,
-        UseUserDefinedLabelsInBackend,
-        UsePlaceholderFragmentInVertexOnlyPipeline,
-        FxcOptimizations,
-        RecordDetailedTimingInTraceEvents,
-        DisableTimestampQueryConversion,
-        VulkanUseZeroInitializeWorkgroupMemoryExtension,
-        D3D12SplitBufferTextureCopyForRowsPerImagePaddings,
-        MetalRenderR8RG8UnormSmallMipToTempTexture,
+enum class Toggle {
+    EmulateStoreAndMSAAResolve,
+    NonzeroClearResourcesOnCreationForTesting,
+    AlwaysResolveIntoZeroLevelAndLayer,
+    LazyClearResourceOnFirstUse,
+    TurnOffVsync,
+    UseTemporaryBufferInCompressedTextureToTextureCopy,
+    UseD3D12ResourceHeapTier2,
+    UseD3D12RenderPass,
+    UseD3D12ResidencyManagement,
+    DisableResourceSuballocation,
+    SkipValidation,
+    VulkanUseD32S8,
+    VulkanUseS8,
+    MetalDisableSamplerCompare,
+    MetalUseSharedModeForCounterSampleBuffer,
+    DisableBaseVertex,
+    DisableBaseInstance,
+    DisableIndexedDrawBuffers,
+    DisableSnormRead,
+    DisableDepthRead,
+    DisableStencilRead,
+    DisableDepthStencilRead,
+    DisableBGRARead,
+    DisableSampleVariables,
+    UseD3D12SmallShaderVisibleHeapForTesting,
+    UseDXC,
+    DisableRobustness,
+    MetalEnableVertexPulling,
+    DisallowUnsafeAPIs,
+    FlushBeforeClientWaitSync,
+    UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+    EmitHLSLDebugSymbols,
+    DisallowSpirv,
+    DumpShaders,
+    DEPRECATED_DumpTranslatedShaders,  // Use DumpShaders
+    ForceWGSLStep,
+    DisableWorkgroupInit,
+    DisableSymbolRenaming,
+    UseUserDefinedLabelsInBackend,
+    UsePlaceholderFragmentInVertexOnlyPipeline,
+    FxcOptimizations,
+    RecordDetailedTimingInTraceEvents,
+    DisableTimestampQueryConversion,
+    VulkanUseZeroInitializeWorkgroupMemoryExtension,
+    D3D12SplitBufferTextureCopyForRowsPerImagePaddings,
+    MetalRenderR8RG8UnormSmallMipToTempTexture,
 
-        EnumCount,
-        InvalidEnum = EnumCount,
-    };
+    EnumCount,
+    InvalidEnum = EnumCount,
+};
 
-    // A wrapper of the bitset to store if a toggle is present or not. This wrapper provides the
-    // convenience to convert the enums of enum class Toggle to the indices of a bitset.
-    struct TogglesSet {
-        std::bitset<static_cast<size_t>(Toggle::EnumCount)> toggleBitset;
+// A wrapper of the bitset to store if a toggle is present or not. This wrapper provides the
+// convenience to convert the enums of enum class Toggle to the indices of a bitset.
+struct TogglesSet {
+    std::bitset<static_cast<size_t>(Toggle::EnumCount)> toggleBitset;
 
-        void Set(Toggle toggle, bool enabled);
-        bool Has(Toggle toggle) const;
-        std::vector<const char*> GetContainedToggleNames() const;
-    };
+    void Set(Toggle toggle, bool enabled);
+    bool Has(Toggle toggle) const;
+    std::vector<const char*> GetContainedToggleNames() const;
+};
 
-    const char* ToggleEnumToName(Toggle toggle);
+const char* ToggleEnumToName(Toggle toggle);
 
-    class TogglesInfo {
-      public:
-        // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
-        // of a toggle supported in Dawn.
-        const ToggleInfo* GetToggleInfo(const char* toggleName);
-        Toggle ToggleNameToEnum(const char* toggleName);
+class TogglesInfo {
+  public:
+    // Used to query the details of a toggle. Return nullptr if toggleName is not a valid name
+    // of a toggle supported in Dawn.
+    const ToggleInfo* GetToggleInfo(const char* toggleName);
+    Toggle ToggleNameToEnum(const char* toggleName);
 
-      private:
-        void EnsureToggleNameToEnumMapInitialized();
+  private:
+    void EnsureToggleNameToEnumMapInitialized();
 
-        bool mToggleNameToEnumMapInitialized = false;
-        std::unordered_map<std::string, Toggle> mToggleNameToEnumMap;
-    };
+    bool mToggleNameToEnumMapInitialized = false;
+    std::unordered_map<std::string, Toggle> mToggleNameToEnumMap;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/VertexFormat.cpp b/src/dawn/native/VertexFormat.cpp
index 61c7f23..c859887 100644
--- a/src/dawn/native/VertexFormat.cpp
+++ b/src/dawn/native/VertexFormat.cpp
@@ -20,50 +20,50 @@
 
 namespace dawn::native {
 
-    static constexpr std::array<VertexFormatInfo, 31> sVertexFormatTable = {{
-        //
-        {wgpu::VertexFormat::Undefined, 0, 0, 0, VertexFormatBaseType::Float},
+static constexpr std::array<VertexFormatInfo, 31> sVertexFormatTable = {{
+    //
+    {wgpu::VertexFormat::Undefined, 0, 0, 0, VertexFormatBaseType::Float},
 
-        {wgpu::VertexFormat::Uint8x2, 2, 2, 1, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Uint8x4, 4, 4, 1, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Sint8x2, 2, 2, 1, VertexFormatBaseType::Sint},
-        {wgpu::VertexFormat::Sint8x4, 4, 4, 1, VertexFormatBaseType::Sint},
-        {wgpu::VertexFormat::Unorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Unorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Snorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Snorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Uint8x2, 2, 2, 1, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Uint8x4, 4, 4, 1, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Sint8x2, 2, 2, 1, VertexFormatBaseType::Sint},
+    {wgpu::VertexFormat::Sint8x4, 4, 4, 1, VertexFormatBaseType::Sint},
+    {wgpu::VertexFormat::Unorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Unorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Snorm8x2, 2, 2, 1, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Snorm8x4, 4, 4, 1, VertexFormatBaseType::Float},
 
-        {wgpu::VertexFormat::Uint16x2, 4, 2, 2, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Uint16x4, 8, 4, 2, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Sint16x2, 4, 2, 2, VertexFormatBaseType::Sint},
-        {wgpu::VertexFormat::Sint16x4, 8, 4, 2, VertexFormatBaseType::Sint},
-        {wgpu::VertexFormat::Unorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Unorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Snorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Snorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Float16x2, 4, 2, 2, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Float16x4, 8, 4, 2, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Uint16x2, 4, 2, 2, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Uint16x4, 8, 4, 2, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Sint16x2, 4, 2, 2, VertexFormatBaseType::Sint},
+    {wgpu::VertexFormat::Sint16x4, 8, 4, 2, VertexFormatBaseType::Sint},
+    {wgpu::VertexFormat::Unorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Unorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Snorm16x2, 4, 2, 2, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Snorm16x4, 8, 4, 2, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Float16x2, 4, 2, 2, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Float16x4, 8, 4, 2, VertexFormatBaseType::Float},
 
-        {wgpu::VertexFormat::Float32, 4, 1, 4, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Float32x2, 8, 2, 4, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Float32x3, 12, 3, 4, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Float32x4, 16, 4, 4, VertexFormatBaseType::Float},
-        {wgpu::VertexFormat::Uint32, 4, 1, 4, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Uint32x2, 8, 2, 4, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Uint32x3, 12, 3, 4, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Uint32x4, 16, 4, 4, VertexFormatBaseType::Uint},
-        {wgpu::VertexFormat::Sint32, 4, 1, 4, VertexFormatBaseType::Sint},
-        {wgpu::VertexFormat::Sint32x2, 8, 2, 4, VertexFormatBaseType::Sint},
-        {wgpu::VertexFormat::Sint32x3, 12, 3, 4, VertexFormatBaseType::Sint},
-        {wgpu::VertexFormat::Sint32x4, 16, 4, 4, VertexFormatBaseType::Sint},
-        //
-    }};
+    {wgpu::VertexFormat::Float32, 4, 1, 4, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Float32x2, 8, 2, 4, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Float32x3, 12, 3, 4, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Float32x4, 16, 4, 4, VertexFormatBaseType::Float},
+    {wgpu::VertexFormat::Uint32, 4, 1, 4, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Uint32x2, 8, 2, 4, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Uint32x3, 12, 3, 4, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Uint32x4, 16, 4, 4, VertexFormatBaseType::Uint},
+    {wgpu::VertexFormat::Sint32, 4, 1, 4, VertexFormatBaseType::Sint},
+    {wgpu::VertexFormat::Sint32x2, 8, 2, 4, VertexFormatBaseType::Sint},
+    {wgpu::VertexFormat::Sint32x3, 12, 3, 4, VertexFormatBaseType::Sint},
+    {wgpu::VertexFormat::Sint32x4, 16, 4, 4, VertexFormatBaseType::Sint},
+    //
+}};
 
-    const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format) {
-        ASSERT(format != wgpu::VertexFormat::Undefined);
-        ASSERT(static_cast<uint32_t>(format) < sVertexFormatTable.size());
-        ASSERT(sVertexFormatTable[static_cast<uint32_t>(format)].format == format);
-        return sVertexFormatTable[static_cast<uint32_t>(format)];
-    }
+const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format) {
+    ASSERT(format != wgpu::VertexFormat::Undefined);
+    ASSERT(static_cast<uint32_t>(format) < sVertexFormatTable.size());
+    ASSERT(sVertexFormatTable[static_cast<uint32_t>(format)].format == format);
+    return sVertexFormatTable[static_cast<uint32_t>(format)];
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/VertexFormat.h b/src/dawn/native/VertexFormat.h
index d321232..33e615c 100644
--- a/src/dawn/native/VertexFormat.h
+++ b/src/dawn/native/VertexFormat.h
@@ -19,21 +19,21 @@
 
 namespace dawn::native {
 
-    enum class VertexFormatBaseType {
-        Float,
-        Uint,
-        Sint,
-    };
+enum class VertexFormatBaseType {
+    Float,
+    Uint,
+    Sint,
+};
 
-    struct VertexFormatInfo {
-        wgpu::VertexFormat format;
-        uint32_t byteSize;
-        uint32_t componentCount;
-        uint32_t componentByteSize;
-        VertexFormatBaseType baseType;
-    };
+struct VertexFormatInfo {
+    wgpu::VertexFormat format;
+    uint32_t byteSize;
+    uint32_t componentCount;
+    uint32_t componentByteSize;
+    VertexFormatBaseType baseType;
+};
 
-    const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format);
+const VertexFormatInfo& GetVertexFormatInfo(wgpu::VertexFormat format);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/XlibXcbFunctions.cpp b/src/dawn/native/XlibXcbFunctions.cpp
index 1b0f6e8..5996676 100644
--- a/src/dawn/native/XlibXcbFunctions.cpp
+++ b/src/dawn/native/XlibXcbFunctions.cpp
@@ -16,16 +16,15 @@
 
 namespace dawn::native {
 
-    XlibXcbFunctions::XlibXcbFunctions() {
-        if (!mLib.Open("libX11-xcb.so.1") ||
-            !mLib.GetProc(&xGetXCBConnection, "XGetXCBConnection")) {
-            mLib.Close();
-        }
+XlibXcbFunctions::XlibXcbFunctions() {
+    if (!mLib.Open("libX11-xcb.so.1") || !mLib.GetProc(&xGetXCBConnection, "XGetXCBConnection")) {
+        mLib.Close();
     }
-    XlibXcbFunctions::~XlibXcbFunctions() = default;
+}
+XlibXcbFunctions::~XlibXcbFunctions() = default;
 
-    bool XlibXcbFunctions::IsLoaded() const {
-        return xGetXCBConnection != nullptr;
-    }
+bool XlibXcbFunctions::IsLoaded() const {
+    return xGetXCBConnection != nullptr;
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/XlibXcbFunctions.h b/src/dawn/native/XlibXcbFunctions.h
index 5f0659e..a8b967a 100644
--- a/src/dawn/native/XlibXcbFunctions.h
+++ b/src/dawn/native/XlibXcbFunctions.h
@@ -24,22 +24,22 @@
 
 namespace dawn::native {
 
-    // A helper class that dynamically loads the x11-xcb library that contains XGetXCBConnection
-    // (and nothing else). This has to be dynamic because this libraries isn't present on all Linux
-    // deployment platforms that Chromium targets.
-    class XlibXcbFunctions {
-      public:
-        XlibXcbFunctions();
-        ~XlibXcbFunctions();
+// A helper class that dynamically loads the x11-xcb library that contains XGetXCBConnection
+// (and nothing else). This has to be dynamic because this libraries isn't present on all Linux
+// deployment platforms that Chromium targets.
+class XlibXcbFunctions {
+  public:
+    XlibXcbFunctions();
+    ~XlibXcbFunctions();
 
-        bool IsLoaded() const;
+    bool IsLoaded() const;
 
-        // Functions from x11-xcb
-        decltype(&::XGetXCBConnection) xGetXCBConnection = nullptr;
+    // Functions from x11-xcb
+    decltype(&::XGetXCBConnection) xGetXCBConnection = nullptr;
 
-      private:
-        DynamicLib mLib;
-    };
+  private:
+    DynamicLib mLib;
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/native/d3d12/AdapterD3D12.cpp b/src/dawn/native/d3d12/AdapterD3D12.cpp
index e4ba00f..5f48014 100644
--- a/src/dawn/native/d3d12/AdapterD3D12.cpp
+++ b/src/dawn/native/d3d12/AdapterD3D12.cpp
@@ -26,399 +26,394 @@
 
 namespace dawn::native::d3d12 {
 
-    Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
-        : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
-          mHardwareAdapter(hardwareAdapter),
-          mBackend(backend) {
+Adapter::Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter)
+    : AdapterBase(backend->GetInstance(), wgpu::BackendType::D3D12),
+      mHardwareAdapter(hardwareAdapter),
+      mBackend(backend) {}
+
+Adapter::~Adapter() {
+    CleanUpDebugLayerFilters();
+}
+
+bool Adapter::SupportsExternalImages() const {
+    // Via dawn::native::d3d12::ExternalImageDXGI::Create
+    return true;
+}
+
+const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
+    return mDeviceInfo;
+}
+
+IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
+    return mHardwareAdapter.Get();
+}
+
+Backend* Adapter::GetBackend() const {
+    return mBackend;
+}
+
+ComPtr<ID3D12Device> Adapter::GetDevice() const {
+    return mD3d12Device;
+}
+
+const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
+    return mDriverVersion;
+}
+
+MaybeError Adapter::InitializeImpl() {
+    // D3D12 cannot check for feature support without a device.
+    // Create the device to populate the adapter properties then reuse it when needed for actual
+    // rendering.
+    const PlatformFunctions* functions = GetBackend()->GetFunctions();
+    if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
+                                            _uuidof(ID3D12Device), &mD3d12Device))) {
+        return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
     }
 
-    Adapter::~Adapter() {
-        CleanUpDebugLayerFilters();
+    DAWN_TRY(InitializeDebugLayerFilters());
+
+    DXGI_ADAPTER_DESC1 adapterDesc;
+    mHardwareAdapter->GetDesc1(&adapterDesc);
+
+    mDeviceId = adapterDesc.DeviceId;
+    mVendorId = adapterDesc.VendorId;
+    mName = WCharToUTF8(adapterDesc.Description);
+
+    DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+
+    if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
+        mAdapterType = wgpu::AdapterType::CPU;
+    } else {
+        mAdapterType =
+            (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU : wgpu::AdapterType::DiscreteGPU;
     }
 
-    bool Adapter::SupportsExternalImages() const {
-        // Via dawn::native::d3d12::ExternalImageDXGI::Create
-        return true;
-    }
+    // Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
+    LARGE_INTEGER umdVersion;
+    if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
+        DXGI_ERROR_UNSUPPORTED) {
+        uint64_t encodedVersion = umdVersion.QuadPart;
 
-    const D3D12DeviceInfo& Adapter::GetDeviceInfo() const {
-        return mDeviceInfo;
-    }
-
-    IDXGIAdapter3* Adapter::GetHardwareAdapter() const {
-        return mHardwareAdapter.Get();
-    }
-
-    Backend* Adapter::GetBackend() const {
-        return mBackend;
-    }
-
-    ComPtr<ID3D12Device> Adapter::GetDevice() const {
-        return mD3d12Device;
-    }
-
-    const gpu_info::D3DDriverVersion& Adapter::GetDriverVersion() const {
-        return mDriverVersion;
-    }
-
-    MaybeError Adapter::InitializeImpl() {
-        // D3D12 cannot check for feature support without a device.
-        // Create the device to populate the adapter properties then reuse it when needed for actual
-        // rendering.
-        const PlatformFunctions* functions = GetBackend()->GetFunctions();
-        if (FAILED(functions->d3d12CreateDevice(GetHardwareAdapter(), D3D_FEATURE_LEVEL_11_0,
-                                                _uuidof(ID3D12Device), &mD3d12Device))) {
-            return DAWN_INTERNAL_ERROR("D3D12CreateDevice failed");
+        std::ostringstream o;
+        o << "D3D12 driver version ";
+        for (size_t i = 0; i < mDriverVersion.size(); ++i) {
+            mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
+            o << mDriverVersion[i] << ".";
         }
+        mDriverDescription = o.str();
+    }
 
-        DAWN_TRY(InitializeDebugLayerFilters());
+    return {};
+}
 
-        DXGI_ADAPTER_DESC1 adapterDesc;
-        mHardwareAdapter->GetDesc1(&adapterDesc);
+bool Adapter::AreTimestampQueriesSupported() const {
+    D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+    queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+    queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+    ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
+    HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
+    if (FAILED(hr)) {
+        return false;
+    }
 
-        mDeviceId = adapterDesc.DeviceId;
-        mVendorId = adapterDesc.VendorId;
-        mName = WCharToUTF8(adapterDesc.Description);
+    // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
+    // and vGPU implementations.
+    uint64_t timeStampFrequency;
+    hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
+    if (FAILED(hr)) {
+        return false;
+    }
 
-        DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+    return true;
+}
 
-        if (adapterDesc.Flags & DXGI_ADAPTER_FLAG_SOFTWARE) {
-            mAdapterType = wgpu::AdapterType::CPU;
-        } else {
-            mAdapterType = (mDeviceInfo.isUMA) ? wgpu::AdapterType::IntegratedGPU
-                                               : wgpu::AdapterType::DiscreteGPU;
-        }
+MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+    if (AreTimestampQueriesSupported()) {
+        mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+    }
+    mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+    mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+    mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+    mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+    mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
 
-        // Convert the adapter's D3D12 driver version to a readable string like "24.21.13.9793".
-        LARGE_INTEGER umdVersion;
-        if (mHardwareAdapter->CheckInterfaceSupport(__uuidof(IDXGIDevice), &umdVersion) !=
-            DXGI_ERROR_UNSUPPORTED) {
-            uint64_t encodedVersion = umdVersion.QuadPart;
+    return {};
+}
 
-            std::ostringstream o;
-            o << "D3D12 driver version ";
-            for (size_t i = 0; i < mDriverVersion.size(); ++i) {
-                mDriverVersion[i] = (encodedVersion >> (48 - 16 * i)) & 0xFFFF;
-                o << mDriverVersion[i] << ".";
-            }
-            mDriverDescription = o.str();
-        }
+MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+    D3D12_FEATURE_DATA_D3D12_OPTIONS featureData = {};
 
+    DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+                                                            &featureData, sizeof(featureData)),
+                          "CheckFeatureSupport D3D12_FEATURE_D3D12_OPTIONS"));
+
+    // Check if the device is at least D3D_FEATURE_LEVEL_11_1 or D3D_FEATURE_LEVEL_11_0
+    const D3D_FEATURE_LEVEL levelsToQuery[]{D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0};
+
+    D3D12_FEATURE_DATA_FEATURE_LEVELS featureLevels;
+    featureLevels.NumFeatureLevels = sizeof(levelsToQuery) / sizeof(D3D_FEATURE_LEVEL);
+    featureLevels.pFeatureLevelsRequested = levelsToQuery;
+    DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_FEATURE_LEVELS,
+                                                            &featureLevels, sizeof(featureLevels)),
+                          "CheckFeatureSupport D3D12_FEATURE_FEATURE_LEVELS"));
+
+    if (featureLevels.MaxSupportedFeatureLevel == D3D_FEATURE_LEVEL_11_0 &&
+        featureData.ResourceBindingTier < D3D12_RESOURCE_BINDING_TIER_2) {
+        return DAWN_VALIDATION_ERROR(
+            "At least Resource Binding Tier 2 is required for D3D12 Feature Level 11.0 "
+            "devices.");
+    }
+
+    GetDefaultLimits(&limits->v1);
+
+    // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels
+
+    // Limits that are the same across D3D feature levels
+    limits->v1.maxTextureDimension1D = D3D12_REQ_TEXTURE1D_U_DIMENSION;
+    limits->v1.maxTextureDimension2D = D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION;
+    limits->v1.maxTextureDimension3D = D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION;
+    limits->v1.maxTextureArrayLayers = D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION;
+    // Slot values can be 0-15, inclusive:
+    // https://docs.microsoft.com/en-ca/windows/win32/api/d3d12/ns-d3d12-d3d12_input_element_desc
+    limits->v1.maxVertexBuffers = 16;
+    limits->v1.maxVertexAttributes = D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT;
+
+    // Note: WebGPU requires FL11.1+
+    // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-support
+    // Resource Binding Tier:   1      2      3
+
+    // Max(CBV+UAV+SRV)         1M    1M    1M+
+    // Max CBV per stage        14    14   full
+    // Max SRV per stage       128  full   full
+    // Max UAV in all stages    64    64   full
+    // Max Samplers per stage   16  2048   2048
+
+    // https://docs.microsoft.com/en-us/windows-hardware/test/hlk/testref/efad06e8-51d1-40ce-ad5c-573a134b4bb6
+    // "full" means the full heap can be used. This is tested
+    // to work for 1 million descriptors, and 1.1M for tier 3.
+    uint32_t maxCBVsPerStage;
+    uint32_t maxSRVsPerStage;
+    uint32_t maxUAVsAllStages;
+    uint32_t maxSamplersPerStage;
+    switch (featureData.ResourceBindingTier) {
+        case D3D12_RESOURCE_BINDING_TIER_1:
+            maxCBVsPerStage = 14;
+            maxSRVsPerStage = 128;
+            maxUAVsAllStages = 64;
+            maxSamplersPerStage = 16;
+            break;
+        case D3D12_RESOURCE_BINDING_TIER_2:
+            maxCBVsPerStage = 14;
+            maxSRVsPerStage = 1'000'000;
+            maxUAVsAllStages = 64;
+            maxSamplersPerStage = 2048;
+            break;
+        case D3D12_RESOURCE_BINDING_TIER_3:
+        default:
+            maxCBVsPerStage = 1'100'000;
+            maxSRVsPerStage = 1'100'000;
+            maxUAVsAllStages = 1'100'000;
+            maxSamplersPerStage = 2048;
+            break;
+    }
+
+    ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageTexturesPerShaderStage);
+    ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageBuffersPerShaderStage);
+    uint32_t maxUAVsPerStage = maxUAVsAllStages / 2;
+
+    limits->v1.maxUniformBuffersPerShaderStage = maxCBVsPerStage;
+    // Allocate half of the UAVs to storage buffers, and half to storage textures.
+    limits->v1.maxStorageTexturesPerShaderStage = maxUAVsPerStage / 2;
+    limits->v1.maxStorageBuffersPerShaderStage = maxUAVsPerStage - maxUAVsPerStage / 2;
+    limits->v1.maxSampledTexturesPerShaderStage = maxSRVsPerStage;
+    limits->v1.maxSamplersPerShaderStage = maxSamplersPerStage;
+
+    // https://docs.microsoft.com/en-us/windows/win32/direct3d12/root-signature-limits
+    // In DWORDS. Descriptor tables cost 1, Root constants cost 1, Root descriptors cost 2.
+    static constexpr uint32_t kMaxRootSignatureSize = 64u;
+    // Dawn maps WebGPU's binding model by:
+    //  - (maxBindGroups)
+    //    CBVs/UAVs/SRVs for bind group are a root descriptor table
+    //  - (maxBindGroups)
+    //    Samplers for each bind group are a root descriptor table
+    //  - (2 * maxDynamicBuffers)
+    //    Each dynamic buffer is a root descriptor
+    //  RESERVED:
+    //  - 3 = max of:
+    //    - 2 root constants for the baseVertex/baseInstance constants.
+    //    - 3 root constants for num workgroups X, Y, Z
+    //  - 4 root constants (kMaxDynamicStorageBuffersPerPipelineLayout) for dynamic storage
+    //  buffer lengths.
+    static constexpr uint32_t kReservedSlots = 7;
+
+    // Available slots after base limits considered.
+    uint32_t availableRootSignatureSlots =
+        kMaxRootSignatureSize - kReservedSlots -
+        2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+             limits->v1.maxDynamicStorageBuffersPerPipelineLayout);
+
+    // Because we need either:
+    //  - 1 cbv/uav/srv table + 1 sampler table
+    //  - 2 slots for a root descriptor
+    uint32_t availableDynamicBufferOrBindGroup = availableRootSignatureSlots / 2;
+
+    // We can either have a bind group, a dyn uniform buffer or a dyn storage buffer.
+    // Distribute evenly.
+    limits->v1.maxBindGroups += availableDynamicBufferOrBindGroup / 3;
+    limits->v1.maxDynamicUniformBuffersPerPipelineLayout += availableDynamicBufferOrBindGroup / 3;
+    limits->v1.maxDynamicStorageBuffersPerPipelineLayout +=
+        (availableDynamicBufferOrBindGroup - 2 * (availableDynamicBufferOrBindGroup / 3));
+
+    ASSERT(2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
+                limits->v1.maxDynamicStorageBuffersPerPipelineLayout) <=
+           kMaxRootSignatureSize - kReservedSlots);
+
+    // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
+    limits->v1.maxComputeWorkgroupSizeX = D3D12_CS_THREAD_GROUP_MAX_X;
+    limits->v1.maxComputeWorkgroupSizeY = D3D12_CS_THREAD_GROUP_MAX_Y;
+    limits->v1.maxComputeWorkgroupSizeZ = D3D12_CS_THREAD_GROUP_MAX_Z;
+    limits->v1.maxComputeInvocationsPerWorkgroup = D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP;
+
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_dispatch_arguments
+    limits->v1.maxComputeWorkgroupsPerDimension = D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
+
+    // https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-devices-downlevel-compute-shaders
+    // Thread Group Shared Memory is limited to 16Kb on downlevel hardware. This is less than
+    // the 32Kb that is available to Direct3D 11 hardware. D3D12 is also 32kb.
+    limits->v1.maxComputeWorkgroupStorageSize = 32768;
+
+    // Max number of "constants" where each constant is a 16-byte float4
+    limits->v1.maxUniformBufferBindingSize = D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16;
+    // D3D12 has no documented limit on the size of a storage buffer binding.
+    limits->v1.maxStorageBufferBindingSize = 4294967295;
+
+    // TODO(crbug.com/dawn/685):
+    // LIMITS NOT SET:
+    // - maxInterStageShaderComponents
+    // - maxVertexBufferArrayStride
+
+    return {};
+}
+
+MaybeError Adapter::InitializeDebugLayerFilters() {
+    if (!GetInstance()->IsBackendValidationEnabled()) {
         return {};
     }
 
-    bool Adapter::AreTimestampQueriesSupported() const {
-        D3D12_COMMAND_QUEUE_DESC queueDesc = {};
-        queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
-        queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
-        ComPtr<ID3D12CommandQueue> d3d12CommandQueue;
-        HRESULT hr = mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&d3d12CommandQueue));
-        if (FAILED(hr)) {
-            return false;
-        }
+    D3D12_MESSAGE_ID denyIds[] = {
+        //
+        // Permanent IDs: list of warnings that are not applicable
+        //
 
-        // GetTimestampFrequency returns an error HRESULT when there are bugs in Windows container
-        // and vGPU implementations.
-        uint64_t timeStampFrequency;
-        hr = d3d12CommandQueue->GetTimestampFrequency(&timeStampFrequency);
-        if (FAILED(hr)) {
-            return false;
-        }
+        // Resource sub-allocation partially maps pre-allocated heaps. This means the
+        // entire physical addresses space may have no resources or have many resources
+        // assigned the same heap.
+        D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
+        D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
 
-        return true;
+        // The debug layer validates pipeline objects when they are created. Dawn validates
+        // them when they are set. Therefore, since the issue is caught at a later
+        // time, we can silence these warnings.
+        D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
+
+        // Adding a clear color during resource creation would require heuristics or delayed
+        // creation.
+        // https://crbug.com/dawn/418
+        D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
+        D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
+
+        // Dawn enforces proper Unmaps at a later time.
+        // https://crbug.com/dawn/422
+        D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
+
+        // WebGPU allows empty scissors without empty viewports.
+        D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
+
+        //
+        // Temporary IDs: list of warnings that should be fixed or promoted
+        //
+
+        // Remove after warnings have been addressed
+        // https://crbug.com/dawn/421
+        D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
+
+        // For small placed resource alignment, we first request the small alignment, which may
+        // get rejected and generate a debug error. Then, we request 0 to get the
+        // allowed alignment.
+        D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
+
+        // WebGPU allows OOB vertex buffer access and relies on D3D12's robust buffer access
+        // behavior.
+        D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL,
+
+        // WebGPU allows setVertexBuffer with an offset that equals the whole vertex buffer
+        // size, even though this means that no vertex buffer view has been set in the D3D12
+        // backend.
+        // https://crbug.com/dawn/1255
+        D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_NOT_SET,
+    };
+
+    // Create a retrieval filter with a deny list to suppress messages.
+    // Any messages remaining will be converted to Dawn errors.
+    D3D12_INFO_QUEUE_FILTER filter{};
+    // Filter out info/message and only create errors from warnings or worse.
+    D3D12_MESSAGE_SEVERITY severities[] = {
+        D3D12_MESSAGE_SEVERITY_INFO,
+        D3D12_MESSAGE_SEVERITY_MESSAGE,
+    };
+    filter.DenyList.NumSeverities = ARRAYSIZE(severities);
+    filter.DenyList.pSeverityList = severities;
+    filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
+    filter.DenyList.pIDList = denyIds;
+
+    ComPtr<ID3D12InfoQueue> infoQueue;
+    DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+                          "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+
+    // To avoid flooding the console, a storage-filter is also used to
+    // prevent messages from getting logged.
+    DAWN_TRY(
+        CheckHRESULT(infoQueue->PushStorageFilter(&filter), "ID3D12InfoQueue::PushStorageFilter"));
+
+    DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
+                          "ID3D12InfoQueue::PushRetrievalFilter"));
+
+    return {};
+}
+
+void Adapter::CleanUpDebugLayerFilters() {
+    if (!GetInstance()->IsBackendValidationEnabled()) {
+        return;
     }
 
-    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
-        if (AreTimestampQueriesSupported()) {
-            mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
-        }
-        mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
-        mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
-        mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
-        mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
-        mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
-
-        return {};
+    // The device may not exist if this adapter failed to initialize.
+    if (mD3d12Device == nullptr) {
+        return;
     }
 
-    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
-        D3D12_FEATURE_DATA_D3D12_OPTIONS featureData = {};
-
-        DAWN_TRY(CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
-                                                                &featureData, sizeof(featureData)),
-                              "CheckFeatureSupport D3D12_FEATURE_D3D12_OPTIONS"));
-
-        // Check if the device is at least D3D_FEATURE_LEVEL_11_1 or D3D_FEATURE_LEVEL_11_0
-        const D3D_FEATURE_LEVEL levelsToQuery[]{D3D_FEATURE_LEVEL_11_1, D3D_FEATURE_LEVEL_11_0};
-
-        D3D12_FEATURE_DATA_FEATURE_LEVELS featureLevels;
-        featureLevels.NumFeatureLevels = sizeof(levelsToQuery) / sizeof(D3D_FEATURE_LEVEL);
-        featureLevels.pFeatureLevelsRequested = levelsToQuery;
-        DAWN_TRY(
-            CheckHRESULT(mD3d12Device->CheckFeatureSupport(D3D12_FEATURE_FEATURE_LEVELS,
-                                                           &featureLevels, sizeof(featureLevels)),
-                         "CheckFeatureSupport D3D12_FEATURE_FEATURE_LEVELS"));
-
-        if (featureLevels.MaxSupportedFeatureLevel == D3D_FEATURE_LEVEL_11_0 &&
-            featureData.ResourceBindingTier < D3D12_RESOURCE_BINDING_TIER_2) {
-            return DAWN_VALIDATION_ERROR(
-                "At least Resource Binding Tier 2 is required for D3D12 Feature Level 11.0 "
-                "devices.");
-        }
-
-        GetDefaultLimits(&limits->v1);
-
-        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels
-
-        // Limits that are the same across D3D feature levels
-        limits->v1.maxTextureDimension1D = D3D12_REQ_TEXTURE1D_U_DIMENSION;
-        limits->v1.maxTextureDimension2D = D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION;
-        limits->v1.maxTextureDimension3D = D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION;
-        limits->v1.maxTextureArrayLayers = D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION;
-        // Slot values can be 0-15, inclusive:
-        // https://docs.microsoft.com/en-ca/windows/win32/api/d3d12/ns-d3d12-d3d12_input_element_desc
-        limits->v1.maxVertexBuffers = 16;
-        limits->v1.maxVertexAttributes = D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT;
-
-        // Note: WebGPU requires FL11.1+
-        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/hardware-support
-        // Resource Binding Tier:   1      2      3
-
-        // Max(CBV+UAV+SRV)         1M    1M    1M+
-        // Max CBV per stage        14    14   full
-        // Max SRV per stage       128  full   full
-        // Max UAV in all stages    64    64   full
-        // Max Samplers per stage   16  2048   2048
-
-        // https://docs.microsoft.com/en-us/windows-hardware/test/hlk/testref/efad06e8-51d1-40ce-ad5c-573a134b4bb6
-        // "full" means the full heap can be used. This is tested
-        // to work for 1 million descriptors, and 1.1M for tier 3.
-        uint32_t maxCBVsPerStage;
-        uint32_t maxSRVsPerStage;
-        uint32_t maxUAVsAllStages;
-        uint32_t maxSamplersPerStage;
-        switch (featureData.ResourceBindingTier) {
-            case D3D12_RESOURCE_BINDING_TIER_1:
-                maxCBVsPerStage = 14;
-                maxSRVsPerStage = 128;
-                maxUAVsAllStages = 64;
-                maxSamplersPerStage = 16;
-                break;
-            case D3D12_RESOURCE_BINDING_TIER_2:
-                maxCBVsPerStage = 14;
-                maxSRVsPerStage = 1'000'000;
-                maxUAVsAllStages = 64;
-                maxSamplersPerStage = 2048;
-                break;
-            case D3D12_RESOURCE_BINDING_TIER_3:
-            default:
-                maxCBVsPerStage = 1'100'000;
-                maxSRVsPerStage = 1'100'000;
-                maxUAVsAllStages = 1'100'000;
-                maxSamplersPerStage = 2048;
-                break;
-        }
-
-        ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageTexturesPerShaderStage);
-        ASSERT(maxUAVsAllStages / 4 > limits->v1.maxStorageBuffersPerShaderStage);
-        uint32_t maxUAVsPerStage = maxUAVsAllStages / 2;
-
-        limits->v1.maxUniformBuffersPerShaderStage = maxCBVsPerStage;
-        // Allocate half of the UAVs to storage buffers, and half to storage textures.
-        limits->v1.maxStorageTexturesPerShaderStage = maxUAVsPerStage / 2;
-        limits->v1.maxStorageBuffersPerShaderStage = maxUAVsPerStage - maxUAVsPerStage / 2;
-        limits->v1.maxSampledTexturesPerShaderStage = maxSRVsPerStage;
-        limits->v1.maxSamplersPerShaderStage = maxSamplersPerStage;
-
-        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/root-signature-limits
-        // In DWORDS. Descriptor tables cost 1, Root constants cost 1, Root descriptors cost 2.
-        static constexpr uint32_t kMaxRootSignatureSize = 64u;
-        // Dawn maps WebGPU's binding model by:
-        //  - (maxBindGroups)
-        //    CBVs/UAVs/SRVs for bind group are a root descriptor table
-        //  - (maxBindGroups)
-        //    Samplers for each bind group are a root descriptor table
-        //  - (2 * maxDynamicBuffers)
-        //    Each dynamic buffer is a root descriptor
-        //  RESERVED:
-        //  - 3 = max of:
-        //    - 2 root constants for the baseVertex/baseInstance constants.
-        //    - 3 root constants for num workgroups X, Y, Z
-        //  - 4 root constants (kMaxDynamicStorageBuffersPerPipelineLayout) for dynamic storage
-        //  buffer lengths.
-        static constexpr uint32_t kReservedSlots = 7;
-
-        // Available slots after base limits considered.
-        uint32_t availableRootSignatureSlots =
-            kMaxRootSignatureSize - kReservedSlots -
-            2 * (limits->v1.maxBindGroups + limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
-                 limits->v1.maxDynamicStorageBuffersPerPipelineLayout);
-
-        // Because we need either:
-        //  - 1 cbv/uav/srv table + 1 sampler table
-        //  - 2 slots for a root descriptor
-        uint32_t availableDynamicBufferOrBindGroup = availableRootSignatureSlots / 2;
-
-        // We can either have a bind group, a dyn uniform buffer or a dyn storage buffer.
-        // Distribute evenly.
-        limits->v1.maxBindGroups += availableDynamicBufferOrBindGroup / 3;
-        limits->v1.maxDynamicUniformBuffersPerPipelineLayout +=
-            availableDynamicBufferOrBindGroup / 3;
-        limits->v1.maxDynamicStorageBuffersPerPipelineLayout +=
-            (availableDynamicBufferOrBindGroup - 2 * (availableDynamicBufferOrBindGroup / 3));
-
-        ASSERT(2 * (limits->v1.maxBindGroups +
-                    limits->v1.maxDynamicUniformBuffersPerPipelineLayout +
-                    limits->v1.maxDynamicStorageBuffersPerPipelineLayout) <=
-               kMaxRootSignatureSize - kReservedSlots);
-
-        // https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
-        limits->v1.maxComputeWorkgroupSizeX = D3D12_CS_THREAD_GROUP_MAX_X;
-        limits->v1.maxComputeWorkgroupSizeY = D3D12_CS_THREAD_GROUP_MAX_Y;
-        limits->v1.maxComputeWorkgroupSizeZ = D3D12_CS_THREAD_GROUP_MAX_Z;
-        limits->v1.maxComputeInvocationsPerWorkgroup = D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP;
-
-        // https://docs.maxComputeWorkgroupSizeXmicrosoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_dispatch_arguments
-        limits->v1.maxComputeWorkgroupsPerDimension =
-            D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION;
-
-        // https://docs.microsoft.com/en-us/windows/win32/direct3d11/overviews-direct3d-11-devices-downlevel-compute-shaders
-        // Thread Group Shared Memory is limited to 16Kb on downlevel hardware. This is less than
-        // the 32Kb that is available to Direct3D 11 hardware. D3D12 is also 32kb.
-        limits->v1.maxComputeWorkgroupStorageSize = 32768;
-
-        // Max number of "constants" where each constant is a 16-byte float4
-        limits->v1.maxUniformBufferBindingSize = D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT * 16;
-        // D3D12 has no documented limit on the size of a storage buffer binding.
-        limits->v1.maxStorageBufferBindingSize = 4294967295;
-
-        // TODO(crbug.com/dawn/685):
-        // LIMITS NOT SET:
-        // - maxInterStageShaderComponents
-        // - maxVertexBufferArrayStride
-
-        return {};
+    // If the debug layer is not installed, return immediately to avoid crashing the process.
+    ComPtr<ID3D12InfoQueue> infoQueue;
+    if (FAILED(mD3d12Device.As(&infoQueue))) {
+        return;
     }
 
-    MaybeError Adapter::InitializeDebugLayerFilters() {
-        if (!GetInstance()->IsBackendValidationEnabled()) {
-            return {};
-        }
+    infoQueue->PopRetrievalFilter();
+    infoQueue->PopStorageFilter();
+}
 
-        D3D12_MESSAGE_ID denyIds[] = {
-            //
-            // Permanent IDs: list of warnings that are not applicable
-            //
+ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+    return Device::Create(this, descriptor);
+}
 
-            // Resource sub-allocation partially maps pre-allocated heaps. This means the
-            // entire physical addresses space may have no resources or have many resources
-            // assigned the same heap.
-            D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_HAS_NO_RESOURCE,
-            D3D12_MESSAGE_ID_HEAP_ADDRESS_RANGE_INTERSECTS_MULTIPLE_BUFFERS,
+// Resets the backend device and creates a new one. If any D3D12 objects belonging to the
+// current ID3D12Device have not been destroyed, a non-zero value will be returned upon Reset()
+// and the subsequent call to CreateDevice will return a handle to the existing device instead of
+// creating a new one.
+MaybeError Adapter::ResetInternalDeviceForTestingImpl() {
+    ASSERT(mD3d12Device.Reset() == 0);
+    DAWN_TRY(Initialize());
 
-            // The debug layer validates pipeline objects when they are created. Dawn validates
-            // them when them when they are set. Therefore, since the issue is caught at a later
-            // time, we can silence this warnings.
-            D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
-
-            // Adding a clear color during resource creation would require heuristics or delayed
-            // creation.
-            // https://crbug.com/dawn/418
-            D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
-            D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
-
-            // Dawn enforces proper Unmaps at a later time.
-            // https://crbug.com/dawn/422
-            D3D12_MESSAGE_ID_EXECUTECOMMANDLISTS_GPU_WRITTEN_READBACK_RESOURCE_MAPPED,
-
-            // WebGPU allows empty scissors without empty viewports.
-            D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
-
-            //
-            // Temporary IDs: list of warnings that should be fixed or promoted
-            //
-
-            // Remove after warning have been addressed
-            // https://crbug.com/dawn/421
-            D3D12_MESSAGE_ID_GPU_BASED_VALIDATION_INCOMPATIBLE_RESOURCE_STATE,
-
-            // For small placed resource alignment, we first request the small alignment, which may
-            // get rejected and generate a debug error. Then, we request 0 to get the allowed
-            // allowed alignment.
-            D3D12_MESSAGE_ID_CREATERESOURCE_INVALIDALIGNMENT,
-
-            // WebGPU allows OOB vertex buffer access and relies on D3D12's robust buffer access
-            // behavior.
-            D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL,
-
-            // WebGPU allows setVertexBuffer with offset that equals to the whole vertex buffer
-            // size.
-            // Even this means that no vertex buffer view has been set in D3D12 backend.
-            // https://crbug.com/dawn/1255
-            D3D12_MESSAGE_ID_COMMAND_LIST_DRAW_VERTEX_BUFFER_NOT_SET,
-        };
-
-        // Create a retrieval filter with a deny list to suppress messages.
-        // Any messages remaining will be converted to Dawn errors.
-        D3D12_INFO_QUEUE_FILTER filter{};
-        // Filter out info/message and only create errors from warnings or worse.
-        D3D12_MESSAGE_SEVERITY severities[] = {
-            D3D12_MESSAGE_SEVERITY_INFO,
-            D3D12_MESSAGE_SEVERITY_MESSAGE,
-        };
-        filter.DenyList.NumSeverities = ARRAYSIZE(severities);
-        filter.DenyList.pSeverityList = severities;
-        filter.DenyList.NumIDs = ARRAYSIZE(denyIds);
-        filter.DenyList.pIDList = denyIds;
-
-        ComPtr<ID3D12InfoQueue> infoQueue;
-        DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
-                              "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
-
-        // To avoid flooding the console, a storage-filter is also used to
-        // prevent messages from getting logged.
-        DAWN_TRY(CheckHRESULT(infoQueue->PushStorageFilter(&filter),
-                              "ID3D12InfoQueue::PushStorageFilter"));
-
-        DAWN_TRY(CheckHRESULT(infoQueue->PushRetrievalFilter(&filter),
-                              "ID3D12InfoQueue::PushRetrievalFilter"));
-
-        return {};
-    }
-
-    void Adapter::CleanUpDebugLayerFilters() {
-        if (!GetInstance()->IsBackendValidationEnabled()) {
-            return;
-        }
-
-        // The device may not exist if this adapter failed to initialize.
-        if (mD3d12Device == nullptr) {
-            return;
-        }
-
-        // If the debug layer is not installed, return immediately to avoid crashing the process.
-        ComPtr<ID3D12InfoQueue> infoQueue;
-        if (FAILED(mD3d12Device.As(&infoQueue))) {
-            return;
-        }
-
-        infoQueue->PopRetrievalFilter();
-        infoQueue->PopStorageFilter();
-    }
-
-    ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
-        return Device::Create(this, descriptor);
-    }
-
-    // Resets the backend device and creates a new one. If any D3D12 objects belonging to the
-    // current ID3D12Device have not been destroyed, a non-zero value will be returned upon Reset()
-    // and the subequent call to CreateDevice will return a handle the existing device instead of
-    // creating a new one.
-    MaybeError Adapter::ResetInternalDeviceForTestingImpl() {
-        ASSERT(mD3d12Device.Reset() == 0);
-        DAWN_TRY(Initialize());
-
-        return {};
-    }
+    return {};
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/AdapterD3D12.h b/src/dawn/native/d3d12/AdapterD3D12.h
index b1f6760..035e291 100644
--- a/src/dawn/native/d3d12/AdapterD3D12.h
+++ b/src/dawn/native/d3d12/AdapterD3D12.h
@@ -23,43 +23,42 @@
 
 namespace dawn::native::d3d12 {
 
-    class Backend;
+class Backend;
 
-    class Adapter : public AdapterBase {
-      public:
-        Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
-        ~Adapter() override;
+class Adapter : public AdapterBase {
+  public:
+    Adapter(Backend* backend, ComPtr<IDXGIAdapter3> hardwareAdapter);
+    ~Adapter() override;
 
-        // AdapterBase Implementation
-        bool SupportsExternalImages() const override;
+    // AdapterBase Implementation
+    bool SupportsExternalImages() const override;
 
-        const D3D12DeviceInfo& GetDeviceInfo() const;
-        IDXGIAdapter3* GetHardwareAdapter() const;
-        Backend* GetBackend() const;
-        ComPtr<ID3D12Device> GetDevice() const;
-        const gpu_info::D3DDriverVersion& GetDriverVersion() const;
+    const D3D12DeviceInfo& GetDeviceInfo() const;
+    IDXGIAdapter3* GetHardwareAdapter() const;
+    Backend* GetBackend() const;
+    ComPtr<ID3D12Device> GetDevice() const;
+    const gpu_info::D3DDriverVersion& GetDriverVersion() const;
 
-      private:
-        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-            const DeviceDescriptor* descriptor) override;
-        MaybeError ResetInternalDeviceForTestingImpl() override;
+  private:
+    ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
+    MaybeError ResetInternalDeviceForTestingImpl() override;
 
-        bool AreTimestampQueriesSupported() const;
+    bool AreTimestampQueriesSupported() const;
 
-        MaybeError InitializeImpl() override;
-        MaybeError InitializeSupportedFeaturesImpl() override;
-        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+    MaybeError InitializeImpl() override;
+    MaybeError InitializeSupportedFeaturesImpl() override;
+    MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
 
-        MaybeError InitializeDebugLayerFilters();
-        void CleanUpDebugLayerFilters();
+    MaybeError InitializeDebugLayerFilters();
+    void CleanUpDebugLayerFilters();
 
-        ComPtr<IDXGIAdapter3> mHardwareAdapter;
-        ComPtr<ID3D12Device> mD3d12Device;
-        gpu_info::D3DDriverVersion mDriverVersion;
+    ComPtr<IDXGIAdapter3> mHardwareAdapter;
+    ComPtr<ID3D12Device> mD3d12Device;
+    gpu_info::D3DDriverVersion mDriverVersion;
 
-        Backend* mBackend;
-        D3D12DeviceInfo mDeviceInfo = {};
-    };
+    Backend* mBackend;
+    D3D12DeviceInfo mDeviceInfo = {};
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/BackendD3D12.cpp b/src/dawn/native/d3d12/BackendD3D12.cpp
index b09e9ce..1232dbf 100644
--- a/src/dawn/native/d3d12/BackendD3D12.cpp
+++ b/src/dawn/native/d3d12/BackendD3D12.cpp
@@ -24,188 +24,181 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
+namespace {
 
-        ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
-                                                           BackendValidationLevel validationLevel,
-                                                           bool beginCaptureOnStartup) {
-            ComPtr<IDXGIFactory4> factory;
+ResultOrError<ComPtr<IDXGIFactory4>> CreateFactory(const PlatformFunctions* functions,
+                                                   BackendValidationLevel validationLevel,
+                                                   bool beginCaptureOnStartup) {
+    ComPtr<IDXGIFactory4> factory;
 
-            uint32_t dxgiFactoryFlags = 0;
+    uint32_t dxgiFactoryFlags = 0;
 
-            // Enable the debug layer (requires the Graphics Tools "optional feature").
-            {
-                if (validationLevel != BackendValidationLevel::Disabled) {
-                    ComPtr<ID3D12Debug3> debugController;
-                    if (SUCCEEDED(
-                            functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
-                        ASSERT(debugController != nullptr);
-                        debugController->EnableDebugLayer();
-                        if (validationLevel == BackendValidationLevel::Full) {
-                            debugController->SetEnableGPUBasedValidation(true);
-                        }
-
-                        // Enable additional debug layers.
-                        dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
-                    }
+    // Enable the debug layer (requires the Graphics Tools "optional feature").
+    {
+        if (validationLevel != BackendValidationLevel::Disabled) {
+            ComPtr<ID3D12Debug3> debugController;
+            if (SUCCEEDED(functions->d3d12GetDebugInterface(IID_PPV_ARGS(&debugController)))) {
+                ASSERT(debugController != nullptr);
+                debugController->EnableDebugLayer();
+                if (validationLevel == BackendValidationLevel::Full) {
+                    debugController->SetEnableGPUBasedValidation(true);
                 }
 
-                if (beginCaptureOnStartup) {
-                    ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
-                    if (functions->dxgiGetDebugInterface1 != nullptr &&
-                        SUCCEEDED(functions->dxgiGetDebugInterface1(
-                            0, IID_PPV_ARGS(&graphicsAnalysis)))) {
-                        graphicsAnalysis->BeginCapture();
-                    }
-                }
+                // Enable additional debug layers.
+                dxgiFactoryFlags |= DXGI_CREATE_FACTORY_DEBUG;
             }
+        }
 
-            if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
-                return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
+        if (beginCaptureOnStartup) {
+            ComPtr<IDXGraphicsAnalysis> graphicsAnalysis;
+            if (functions->dxgiGetDebugInterface1 != nullptr &&
+                SUCCEEDED(functions->dxgiGetDebugInterface1(0, IID_PPV_ARGS(&graphicsAnalysis)))) {
+                graphicsAnalysis->BeginCapture();
             }
-
-            ASSERT(factory != nullptr);
-            return std::move(factory);
         }
-
-        ResultOrError<Ref<AdapterBase>> CreateAdapterFromIDXGIAdapter(
-            Backend* backend,
-            ComPtr<IDXGIAdapter> dxgiAdapter) {
-            ComPtr<IDXGIAdapter3> dxgiAdapter3;
-            DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
-            Ref<Adapter> adapter = AcquireRef(new Adapter(backend, std::move(dxgiAdapter3)));
-            DAWN_TRY(adapter->Initialize());
-
-            return {std::move(adapter)};
-        }
-
-    }  // anonymous namespace
-
-    Backend::Backend(InstanceBase* instance)
-        : BackendConnection(instance, wgpu::BackendType::D3D12) {
     }
 
-    MaybeError Backend::Initialize() {
-        mFunctions = std::make_unique<PlatformFunctions>();
-        DAWN_TRY(mFunctions->LoadFunctions());
-
-        const auto instance = GetInstance();
-
-        DAWN_TRY_ASSIGN(mFactory,
-                        CreateFactory(mFunctions.get(), instance->GetBackendValidationLevel(),
-                                      instance->IsBeginCaptureOnStartupEnabled()));
-
-        return {};
+    if (FAILED(functions->createDxgiFactory2(dxgiFactoryFlags, IID_PPV_ARGS(&factory)))) {
+        return DAWN_INTERNAL_ERROR("Failed to create a DXGI factory");
     }
 
-    ComPtr<IDXGIFactory4> Backend::GetFactory() const {
-        return mFactory;
-    }
+    ASSERT(factory != nullptr);
+    return std::move(factory);
+}
 
-    MaybeError Backend::EnsureDxcLibrary() {
-        if (mDxcLibrary == nullptr) {
-            DAWN_TRY(CheckHRESULT(
-                mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
-                "DXC create library"));
-            ASSERT(mDxcLibrary != nullptr);
-        }
-        return {};
-    }
+ResultOrError<Ref<AdapterBase>> CreateAdapterFromIDXGIAdapter(Backend* backend,
+                                                              ComPtr<IDXGIAdapter> dxgiAdapter) {
+    ComPtr<IDXGIAdapter3> dxgiAdapter3;
+    DAWN_TRY(CheckHRESULT(dxgiAdapter.As(&dxgiAdapter3), "DXGIAdapter retrieval"));
+    Ref<Adapter> adapter = AcquireRef(new Adapter(backend, std::move(dxgiAdapter3)));
+    DAWN_TRY(adapter->Initialize());
 
-    MaybeError Backend::EnsureDxcCompiler() {
-        if (mDxcCompiler == nullptr) {
-            DAWN_TRY(CheckHRESULT(
-                mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
-                "DXC create compiler"));
-            ASSERT(mDxcCompiler != nullptr);
-        }
-        return {};
-    }
+    return {std::move(adapter)};
+}
 
-    MaybeError Backend::EnsureDxcValidator() {
-        if (mDxcValidator == nullptr) {
-            DAWN_TRY(CheckHRESULT(
-                mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
-                "DXC create validator"));
-            ASSERT(mDxcValidator != nullptr);
-        }
-        return {};
-    }
+}  // anonymous namespace
 
-    ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
+Backend::Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::D3D12) {}
+
+MaybeError Backend::Initialize() {
+    mFunctions = std::make_unique<PlatformFunctions>();
+    DAWN_TRY(mFunctions->LoadFunctions());
+
+    const auto instance = GetInstance();
+
+    DAWN_TRY_ASSIGN(mFactory, CreateFactory(mFunctions.get(), instance->GetBackendValidationLevel(),
+                                            instance->IsBeginCaptureOnStartupEnabled()));
+
+    return {};
+}
+
+ComPtr<IDXGIFactory4> Backend::GetFactory() const {
+    return mFactory;
+}
+
+MaybeError Backend::EnsureDxcLibrary() {
+    if (mDxcLibrary == nullptr) {
+        DAWN_TRY(CheckHRESULT(
+            mFunctions->dxcCreateInstance(CLSID_DxcLibrary, IID_PPV_ARGS(&mDxcLibrary)),
+            "DXC create library"));
         ASSERT(mDxcLibrary != nullptr);
-        return mDxcLibrary;
     }
+    return {};
+}
 
-    ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
+MaybeError Backend::EnsureDxcCompiler() {
+    if (mDxcCompiler == nullptr) {
+        DAWN_TRY(CheckHRESULT(
+            mFunctions->dxcCreateInstance(CLSID_DxcCompiler, IID_PPV_ARGS(&mDxcCompiler)),
+            "DXC create compiler"));
         ASSERT(mDxcCompiler != nullptr);
-        return mDxcCompiler;
     }
+    return {};
+}
 
-    ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
+MaybeError Backend::EnsureDxcValidator() {
+    if (mDxcValidator == nullptr) {
+        DAWN_TRY(CheckHRESULT(
+            mFunctions->dxcCreateInstance(CLSID_DxcValidator, IID_PPV_ARGS(&mDxcValidator)),
+            "DXC create validator"));
         ASSERT(mDxcValidator != nullptr);
-        return mDxcValidator;
+    }
+    return {};
+}
+
+ComPtr<IDxcLibrary> Backend::GetDxcLibrary() const {
+    ASSERT(mDxcLibrary != nullptr);
+    return mDxcLibrary;
+}
+
+ComPtr<IDxcCompiler> Backend::GetDxcCompiler() const {
+    ASSERT(mDxcCompiler != nullptr);
+    return mDxcCompiler;
+}
+
+ComPtr<IDxcValidator> Backend::GetDxcValidator() const {
+    ASSERT(mDxcValidator != nullptr);
+    return mDxcValidator;
+}
+
+const PlatformFunctions* Backend::GetFunctions() const {
+    return mFunctions.get();
+}
+
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+    AdapterDiscoveryOptions options;
+    auto result = DiscoverAdapters(&options);
+    if (result.IsError()) {
+        GetInstance()->ConsumedError(result.AcquireError());
+        return {};
+    }
+    return result.AcquireSuccess();
+}
+
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+    const AdapterDiscoveryOptionsBase* optionsBase) {
+    ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
+    const AdapterDiscoveryOptions* options =
+        static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+    std::vector<Ref<AdapterBase>> adapters;
+    if (options->dxgiAdapter != nullptr) {
+        // |dxgiAdapter| was provided. Discover just that adapter.
+        Ref<AdapterBase> adapter;
+        DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
+        adapters.push_back(std::move(adapter));
+        return std::move(adapters);
     }
 
-    const PlatformFunctions* Backend::GetFunctions() const {
-        return mFunctions.get();
-    }
-
-    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
-        AdapterDiscoveryOptions options;
-        auto result = DiscoverAdapters(&options);
-        if (result.IsError()) {
-            GetInstance()->ConsumedError(result.AcquireError());
-            return {};
-        }
-        return result.AcquireSuccess();
-    }
-
-    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
-        const AdapterDiscoveryOptionsBase* optionsBase) {
-        ASSERT(optionsBase->backendType == WGPUBackendType_D3D12);
-        const AdapterDiscoveryOptions* options =
-            static_cast<const AdapterDiscoveryOptions*>(optionsBase);
-
-        std::vector<Ref<AdapterBase>> adapters;
-        if (options->dxgiAdapter != nullptr) {
-            // |dxgiAdapter| was provided. Discover just that adapter.
-            Ref<AdapterBase> adapter;
-            DAWN_TRY_ASSIGN(adapter, CreateAdapterFromIDXGIAdapter(this, options->dxgiAdapter));
-            adapters.push_back(std::move(adapter));
-            return std::move(adapters);
+    // Enumerate and discover all available adapters.
+    for (uint32_t adapterIndex = 0;; ++adapterIndex) {
+        ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
+        if (mFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
+            break;  // No more adapters to enumerate.
         }
 
-        // Enumerate and discover all available adapters.
-        for (uint32_t adapterIndex = 0;; ++adapterIndex) {
-            ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
-            if (mFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
-                break;  // No more adapters to enumerate.
-            }
-
-            ASSERT(dxgiAdapter != nullptr);
-            ResultOrError<Ref<AdapterBase>> adapter =
-                CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
-            if (adapter.IsError()) {
-                GetInstance()->ConsumedError(adapter.AcquireError());
-                continue;
-            }
-
-            adapters.push_back(adapter.AcquireSuccess());
+        ASSERT(dxgiAdapter != nullptr);
+        ResultOrError<Ref<AdapterBase>> adapter = CreateAdapterFromIDXGIAdapter(this, dxgiAdapter);
+        if (adapter.IsError()) {
+            GetInstance()->ConsumedError(adapter.AcquireError());
+            continue;
         }
 
-        return adapters;
+        adapters.push_back(adapter.AcquireSuccess());
     }
 
-    BackendConnection* Connect(InstanceBase* instance) {
-        Backend* backend = new Backend(instance);
+    return adapters;
+}
 
-        if (instance->ConsumedError(backend->Initialize())) {
-            delete backend;
-            return nullptr;
-        }
+BackendConnection* Connect(InstanceBase* instance) {
+    Backend* backend = new Backend(instance);
 
-        return backend;
+    if (instance->ConsumedError(backend->Initialize())) {
+        delete backend;
+        return nullptr;
     }
 
+    return backend;
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BackendD3D12.h b/src/dawn/native/d3d12/BackendD3D12.h
index e403bce..9a983f2 100644
--- a/src/dawn/native/d3d12/BackendD3D12.h
+++ b/src/dawn/native/d3d12/BackendD3D12.h
@@ -24,38 +24,38 @@
 
 namespace dawn::native::d3d12 {
 
-    class PlatformFunctions;
+class PlatformFunctions;
 
-    class Backend : public BackendConnection {
-      public:
-        explicit Backend(InstanceBase* instance);
+class Backend : public BackendConnection {
+  public:
+    explicit Backend(InstanceBase* instance);
 
-        MaybeError Initialize();
+    MaybeError Initialize();
 
-        ComPtr<IDXGIFactory4> GetFactory() const;
+    ComPtr<IDXGIFactory4> GetFactory() const;
 
-        MaybeError EnsureDxcLibrary();
-        MaybeError EnsureDxcCompiler();
-        MaybeError EnsureDxcValidator();
-        ComPtr<IDxcLibrary> GetDxcLibrary() const;
-        ComPtr<IDxcCompiler> GetDxcCompiler() const;
-        ComPtr<IDxcValidator> GetDxcValidator() const;
+    MaybeError EnsureDxcLibrary();
+    MaybeError EnsureDxcCompiler();
+    MaybeError EnsureDxcValidator();
+    ComPtr<IDxcLibrary> GetDxcLibrary() const;
+    ComPtr<IDxcCompiler> GetDxcCompiler() const;
+    ComPtr<IDxcValidator> GetDxcValidator() const;
 
-        const PlatformFunctions* GetFunctions() const;
+    const PlatformFunctions* GetFunctions() const;
 
-        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
-        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
-            const AdapterDiscoveryOptionsBase* optionsBase) override;
+    std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+    ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* optionsBase) override;
 
-      private:
-        // Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
-        // the D3D12 DLLs are unloaded before we are done using them.
-        std::unique_ptr<PlatformFunctions> mFunctions;
-        ComPtr<IDXGIFactory4> mFactory;
-        ComPtr<IDxcLibrary> mDxcLibrary;
-        ComPtr<IDxcCompiler> mDxcCompiler;
-        ComPtr<IDxcValidator> mDxcValidator;
-    };
+  private:
+    // Keep mFunctions as the first member so that in the destructor it is freed last. Otherwise
+    // the D3D12 DLLs are unloaded before we are done using them.
+    std::unique_ptr<PlatformFunctions> mFunctions;
+    ComPtr<IDXGIFactory4> mFactory;
+    ComPtr<IDxcLibrary> mDxcLibrary;
+    ComPtr<IDxcCompiler> mDxcCompiler;
+    ComPtr<IDxcValidator> mDxcValidator;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/BindGroupD3D12.cpp b/src/dawn/native/d3d12/BindGroupD3D12.cpp
index 2a4da01..f0f8471 100644
--- a/src/dawn/native/d3d12/BindGroupD3D12.cpp
+++ b/src/dawn/native/d3d12/BindGroupD3D12.cpp
@@ -27,244 +27,241 @@
 
 namespace dawn::native::d3d12 {
 
-    // static
-    ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
-                                                    const BindGroupDescriptor* descriptor) {
-        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
-    }
+// static
+ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+                                                const BindGroupDescriptor* descriptor) {
+    return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
 
-    BindGroup::BindGroup(Device* device,
-                         const BindGroupDescriptor* descriptor,
-                         uint32_t viewSizeIncrement,
-                         const CPUDescriptorHeapAllocation& viewAllocation)
-        : BindGroupBase(this, device, descriptor) {
-        BindGroupLayout* bgl = ToBackend(GetLayout());
+BindGroup::BindGroup(Device* device,
+                     const BindGroupDescriptor* descriptor,
+                     uint32_t viewSizeIncrement,
+                     const CPUDescriptorHeapAllocation& viewAllocation)
+    : BindGroupBase(this, device, descriptor) {
+    BindGroupLayout* bgl = ToBackend(GetLayout());
 
-        mCPUViewAllocation = viewAllocation;
+    mCPUViewAllocation = viewAllocation;
 
-        const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
+    const auto& descriptorHeapOffsets = bgl->GetDescriptorHeapOffsets();
 
-        ID3D12Device* d3d12Device = device->GetD3D12Device();
+    ID3D12Device* d3d12Device = device->GetD3D12Device();
 
-        // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
-        // This is because they are created as root descriptors which are never heap allocated.
-        // Since dynamic buffers are packed in the front, we can skip over these bindings by
-        // starting from the dynamic buffer count.
-        for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
-             bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
-            const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+    // It's not necessary to create descriptors in the descriptor heap for dynamic resources.
+    // This is because they are created as root descriptors which are never heap allocated.
+    // Since dynamic buffers are packed in the front, we can skip over these bindings by
+    // starting from the dynamic buffer count.
+    for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+         bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+        const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
 
-            // Increment size does not need to be stored and is only used to get a handle
-            // local to the allocation with OffsetFrom().
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Buffer: {
-                    BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+        // Increment size does not need to be stored and is only used to get a handle
+        // local to the allocation with OffsetFrom().
+        switch (bindingInfo.bindingType) {
+            case BindingInfoType::Buffer: {
+                BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
 
-                    ID3D12Resource* resource = ToBackend(binding.buffer)->GetD3D12Resource();
-                    if (resource == nullptr) {
-                        // The Buffer was destroyed. Skip creating buffer views since there is no
-                        // resource. This bind group won't be used as it is an error to submit a
-                        // command buffer that references destroyed resources.
-                        continue;
-                    }
-
-                    switch (bindingInfo.buffer.type) {
-                        case wgpu::BufferBindingType::Uniform: {
-                            D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
-                            desc.SizeInBytes =
-                                Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
-                            desc.BufferLocation =
-                                ToBackend(binding.buffer)->GetVA() + binding.offset;
-
-                            d3d12Device->CreateConstantBufferView(
-                                &desc, viewAllocation.OffsetFrom(
-                                           viewSizeIncrement, descriptorHeapOffsets[bindingIndex]));
-                            break;
-                        }
-                        case wgpu::BufferBindingType::Storage:
-                        case kInternalStorageBufferBinding: {
-                            // Since Tint outputs HLSL shaders with RWByteAddressBuffer,
-                            // we must use D3D12_BUFFER_UAV_FLAG_RAW when making the
-                            // UNORDERED_ACCESS_VIEW_DESC. Using D3D12_BUFFER_UAV_FLAG_RAW requires
-                            // that we use DXGI_FORMAT_R32_TYPELESS as the format of the view.
-                            // DXGI_FORMAT_R32_TYPELESS requires that the element size be 4
-                            // byte aligned. Since binding.size and binding.offset are in bytes,
-                            // we need to divide by 4 to obtain the element size.
-                            D3D12_UNORDERED_ACCESS_VIEW_DESC desc;
-                            desc.Buffer.NumElements = binding.size / 4;
-                            desc.Format = DXGI_FORMAT_R32_TYPELESS;
-                            desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
-                            desc.Buffer.FirstElement = binding.offset / 4;
-                            desc.Buffer.StructureByteStride = 0;
-                            desc.Buffer.CounterOffsetInBytes = 0;
-                            desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
-
-                            d3d12Device->CreateUnorderedAccessView(
-                                resource, nullptr, &desc,
-                                viewAllocation.OffsetFrom(viewSizeIncrement,
-                                                          descriptorHeapOffsets[bindingIndex]));
-                            break;
-                        }
-                        case wgpu::BufferBindingType::ReadOnlyStorage: {
-                            // Like StorageBuffer, Tint outputs HLSL shaders for readonly
-                            // storage buffer with ByteAddressBuffer. So we must use
-                            // D3D12_BUFFER_SRV_FLAG_RAW when making the SRV descriptor. And it has
-                            // similar requirement for format, element size, etc.
-                            D3D12_SHADER_RESOURCE_VIEW_DESC desc;
-                            desc.Format = DXGI_FORMAT_R32_TYPELESS;
-                            desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
-                            desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
-                            desc.Buffer.FirstElement = binding.offset / 4;
-                            desc.Buffer.NumElements = binding.size / 4;
-                            desc.Buffer.StructureByteStride = 0;
-                            desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
-                            d3d12Device->CreateShaderResourceView(
-                                resource, &desc,
-                                viewAllocation.OffsetFrom(viewSizeIncrement,
-                                                          descriptorHeapOffsets[bindingIndex]));
-                            break;
-                        }
-                        case wgpu::BufferBindingType::Undefined:
-                            UNREACHABLE();
-                    }
-
-                    break;
+                ID3D12Resource* resource = ToBackend(binding.buffer)->GetD3D12Resource();
+                if (resource == nullptr) {
+                    // The Buffer was destroyed. Skip creating buffer views since there is no
+                    // resource. This bind group won't be used as it is an error to submit a
+                    // command buffer that references destroyed resources.
+                    continue;
                 }
 
-                case BindingInfoType::Texture: {
-                    auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-                    auto& srv = view->GetSRVDescriptor();
+                switch (bindingInfo.buffer.type) {
+                    case wgpu::BufferBindingType::Uniform: {
+                        D3D12_CONSTANT_BUFFER_VIEW_DESC desc;
+                        desc.SizeInBytes =
+                            Align(binding.size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
+                        desc.BufferLocation = ToBackend(binding.buffer)->GetVA() + binding.offset;
 
-                    ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
-                    if (resource == nullptr) {
-                        // The Texture was destroyed. Skip creating the SRV since there is no
-                        // resource. This bind group won't be used as it is an error to submit a
-                        // command buffer that references destroyed resources.
-                        continue;
+                        d3d12Device->CreateConstantBufferView(
+                            &desc, viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                             descriptorHeapOffsets[bindingIndex]));
+                        break;
                     }
+                    case wgpu::BufferBindingType::Storage:
+                    case kInternalStorageBufferBinding: {
+                        // Since Tint outputs HLSL shaders with RWByteAddressBuffer,
+                        // we must use D3D12_BUFFER_UAV_FLAG_RAW when making the
+                        // UNORDERED_ACCESS_VIEW_DESC. Using D3D12_BUFFER_UAV_FLAG_RAW requires
+                        // that we use DXGI_FORMAT_R32_TYPELESS as the format of the view.
+                        // DXGI_FORMAT_R32_TYPELESS requires that the element size be 4
+                        // byte aligned. Since binding.size and binding.offset are in bytes,
+                        // we need to divide by 4 to obtain the element size.
+                        D3D12_UNORDERED_ACCESS_VIEW_DESC desc;
+                        desc.Buffer.NumElements = binding.size / 4;
+                        desc.Format = DXGI_FORMAT_R32_TYPELESS;
+                        desc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
+                        desc.Buffer.FirstElement = binding.offset / 4;
+                        desc.Buffer.StructureByteStride = 0;
+                        desc.Buffer.CounterOffsetInBytes = 0;
+                        desc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_RAW;
 
-                    d3d12Device->CreateShaderResourceView(
-                        resource, &srv,
-                        viewAllocation.OffsetFrom(viewSizeIncrement,
-                                                  descriptorHeapOffsets[bindingIndex]));
-                    break;
-                }
-
-                case BindingInfoType::StorageTexture: {
-                    TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
-                    ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
-                    if (resource == nullptr) {
-                        // The Texture was destroyed. Skip creating the SRV/UAV since there is no
-                        // resource. This bind group won't be used as it is an error to submit a
-                        // command buffer that references destroyed resources.
-                        continue;
+                        d3d12Device->CreateUnorderedAccessView(
+                            resource, nullptr, &desc,
+                            viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                      descriptorHeapOffsets[bindingIndex]));
+                        break;
                     }
-
-                    switch (bindingInfo.storageTexture.access) {
-                        case wgpu::StorageTextureAccess::WriteOnly: {
-                            D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
-                            d3d12Device->CreateUnorderedAccessView(
-                                resource, nullptr, &uav,
-                                viewAllocation.OffsetFrom(viewSizeIncrement,
-                                                          descriptorHeapOffsets[bindingIndex]));
-                            break;
-                        }
-
-                        case wgpu::StorageTextureAccess::Undefined:
-                            UNREACHABLE();
+                    case wgpu::BufferBindingType::ReadOnlyStorage: {
+                        // Like StorageBuffer, Tint outputs HLSL shaders for readonly
+                        // storage buffer with ByteAddressBuffer. So we must use
+                        // D3D12_BUFFER_SRV_FLAG_RAW when making the SRV descriptor. And it has
+                        // similar requirement for format, element size, etc.
+                        D3D12_SHADER_RESOURCE_VIEW_DESC desc;
+                        desc.Format = DXGI_FORMAT_R32_TYPELESS;
+                        desc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
+                        desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+                        desc.Buffer.FirstElement = binding.offset / 4;
+                        desc.Buffer.NumElements = binding.size / 4;
+                        desc.Buffer.StructureByteStride = 0;
+                        desc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_RAW;
+                        d3d12Device->CreateShaderResourceView(
+                            resource, &desc,
+                            viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                      descriptorHeapOffsets[bindingIndex]));
+                        break;
                     }
-
-                    break;
+                    case wgpu::BufferBindingType::Undefined:
+                        UNREACHABLE();
                 }
 
-                case BindingInfoType::ExternalTexture: {
-                    UNREACHABLE();
-                }
-
-                case BindingInfoType::Sampler: {
-                    // No-op as samplers will be later initialized by CreateSamplers().
-                    break;
-                }
+                break;
             }
-        }
 
-        // Loop through the dynamic storage buffers and build a flat map from the index of the
-        // dynamic storage buffer to its binding size. The index |dynamicStorageBufferIndex|
-        // means that it is the i'th buffer that is both dynamic and storage, in increasing order
-        // of BindingNumber.
-        mDynamicStorageBufferLengths.resize(bgl->GetBindingCountInfo().dynamicStorageBufferCount);
-        uint32_t dynamicStorageBufferIndex = 0;
-        for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
-             ++bindingIndex) {
-            if (bgl->IsStorageBufferBinding(bindingIndex)) {
-                mDynamicStorageBufferLengths[dynamicStorageBufferIndex++] =
-                    GetBindingAsBufferBinding(bindingIndex).size;
+            case BindingInfoType::Texture: {
+                auto* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+                auto& srv = view->GetSRVDescriptor();
+
+                ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+                if (resource == nullptr) {
+                    // The Texture was destroyed. Skip creating the SRV since there is no
+                    // resource. This bind group won't be used as it is an error to submit a
+                    // command buffer that references destroyed resources.
+                    continue;
+                }
+
+                d3d12Device->CreateShaderResourceView(
+                    resource, &srv,
+                    viewAllocation.OffsetFrom(viewSizeIncrement,
+                                              descriptorHeapOffsets[bindingIndex]));
+                break;
+            }
+
+            case BindingInfoType::StorageTexture: {
+                TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+                ID3D12Resource* resource = ToBackend(view->GetTexture())->GetD3D12Resource();
+                if (resource == nullptr) {
+                    // The Texture was destroyed. Skip creating the SRV/UAV since there is no
+                    // resource. This bind group won't be used as it is an error to submit a
+                    // command buffer that references destroyed resources.
+                    continue;
+                }
+
+                switch (bindingInfo.storageTexture.access) {
+                    case wgpu::StorageTextureAccess::WriteOnly: {
+                        D3D12_UNORDERED_ACCESS_VIEW_DESC uav = view->GetUAVDescriptor();
+                        d3d12Device->CreateUnorderedAccessView(
+                            resource, nullptr, &uav,
+                            viewAllocation.OffsetFrom(viewSizeIncrement,
+                                                      descriptorHeapOffsets[bindingIndex]));
+                        break;
+                    }
+
+                    case wgpu::StorageTextureAccess::Undefined:
+                        UNREACHABLE();
+                }
+
+                break;
+            }
+
+            case BindingInfoType::ExternalTexture: {
+                UNREACHABLE();
+            }
+
+            case BindingInfoType::Sampler: {
+                // No-op as samplers will be later initialized by CreateSamplers().
+                break;
             }
         }
     }
 
-    BindGroup::~BindGroup() = default;
-
-    void BindGroup::DestroyImpl() {
-        BindGroupBase::DestroyImpl();
-        ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
-        ASSERT(!mCPUViewAllocation.IsValid());
+    // Loop through the dynamic storage buffers and build a flat map from the index of the
+    // dynamic storage buffer to its binding size. The index |dynamicStorageBufferIndex|
+    // means that it is the i'th buffer that is both dynamic and storage, in increasing order
+    // of BindingNumber.
+    mDynamicStorageBufferLengths.resize(bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+    uint32_t dynamicStorageBufferIndex = 0;
+    for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+         ++bindingIndex) {
+        if (bgl->IsStorageBufferBinding(bindingIndex)) {
+            mDynamicStorageBufferLengths[dynamicStorageBufferIndex++] =
+                GetBindingAsBufferBinding(bindingIndex).size;
+        }
     }
+}
 
-    bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
-        const BindGroupLayout* bgl = ToBackend(GetLayout());
+BindGroup::~BindGroup() = default;
 
-        const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
-        if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
-            return true;
-        }
+void BindGroup::DestroyImpl() {
+    BindGroupBase::DestroyImpl();
+    ToBackend(GetLayout())->DeallocateBindGroup(this, &mCPUViewAllocation);
+    ASSERT(!mCPUViewAllocation.IsValid());
+}
 
-        // Attempt to allocate descriptors for the currently bound shader-visible heaps.
-        // If either failed, return early to re-allocate and switch the heaps.
-        Device* device = ToBackend(GetDevice());
+bool BindGroup::PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator) {
+    const BindGroupLayout* bgl = ToBackend(GetLayout());
 
-        D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
-        if (!viewAllocator->AllocateGPUDescriptors(descriptorCount,
-                                                   device->GetPendingCommandSerial(),
-                                                   &baseCPUDescriptor, &mGPUViewAllocation)) {
-            return false;
-        }
-
-        // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
-        // simple copies per bindgroup, a single non-simple copy could be issued.
-        // TODO(dawn:155): Consider doing this optimization.
-        device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
-                                                        mCPUViewAllocation.GetBaseDescriptor(),
-                                                        D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
-
+    const uint32_t descriptorCount = bgl->GetCbvUavSrvDescriptorCount();
+    if (descriptorCount == 0 || viewAllocator->IsAllocationStillValid(mGPUViewAllocation)) {
         return true;
     }
 
-    D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseViewDescriptor() const {
-        return mGPUViewAllocation.GetBaseDescriptor();
+    // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+    // If either failed, return early to re-allocate and switch the heaps.
+    Device* device = ToBackend(GetDevice());
+
+    D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+    if (!viewAllocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
+                                               &baseCPUDescriptor, &mGPUViewAllocation)) {
+        return false;
     }
 
-    D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
-        ASSERT(mSamplerAllocationEntry != nullptr);
-        return mSamplerAllocationEntry->GetBaseDescriptor();
-    }
+    // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+    // simple copies per bindgroup, a single non-simple copy could be issued.
+    // TODO(dawn:155): Consider doing this optimization.
+    device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+                                                    mCPUViewAllocation.GetBaseDescriptor(),
+                                                    D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
 
-    bool BindGroup::PopulateSamplers(Device* device,
-                                     ShaderVisibleDescriptorAllocator* samplerAllocator) {
-        if (mSamplerAllocationEntry == nullptr) {
-            return true;
-        }
-        return mSamplerAllocationEntry->Populate(device, samplerAllocator);
-    }
+    return true;
+}
 
-    void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
-        mSamplerAllocationEntry = std::move(entry);
-    }
+D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseViewDescriptor() const {
+    return mGPUViewAllocation.GetBaseDescriptor();
+}
 
-    const BindGroup::DynamicStorageBufferLengths& BindGroup::GetDynamicStorageBufferLengths()
-        const {
-        return mDynamicStorageBufferLengths;
+D3D12_GPU_DESCRIPTOR_HANDLE BindGroup::GetBaseSamplerDescriptor() const {
+    ASSERT(mSamplerAllocationEntry != nullptr);
+    return mSamplerAllocationEntry->GetBaseDescriptor();
+}
+
+bool BindGroup::PopulateSamplers(Device* device,
+                                 ShaderVisibleDescriptorAllocator* samplerAllocator) {
+    if (mSamplerAllocationEntry == nullptr) {
+        return true;
     }
+    return mSamplerAllocationEntry->Populate(device, samplerAllocator);
+}
+
+void BindGroup::SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry) {
+    mSamplerAllocationEntry = std::move(entry);
+}
+
+const BindGroup::DynamicStorageBufferLengths& BindGroup::GetDynamicStorageBufferLengths() const {
+    return mDynamicStorageBufferLengths;
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BindGroupD3D12.h b/src/dawn/native/d3d12/BindGroupD3D12.h
index 58498fc..243374a 100644
--- a/src/dawn/native/d3d12/BindGroupD3D12.h
+++ b/src/dawn/native/d3d12/BindGroupD3D12.h
@@ -24,45 +24,45 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
-    class SamplerHeapCacheEntry;
-    class ShaderVisibleDescriptorAllocator;
+class Device;
+class SamplerHeapCacheEntry;
+class ShaderVisibleDescriptorAllocator;
 
-    class BindGroup final : public BindGroupBase, public PlacementAllocated {
-      public:
-        static ResultOrError<Ref<BindGroup>> Create(Device* device,
-                                                    const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+  public:
+    static ResultOrError<Ref<BindGroup>> Create(Device* device,
+                                                const BindGroupDescriptor* descriptor);
 
-        BindGroup(Device* device,
-                  const BindGroupDescriptor* descriptor,
-                  uint32_t viewSizeIncrement,
-                  const CPUDescriptorHeapAllocation& viewAllocation);
+    BindGroup(Device* device,
+              const BindGroupDescriptor* descriptor,
+              uint32_t viewSizeIncrement,
+              const CPUDescriptorHeapAllocation& viewAllocation);
 
-        // Returns true if the BindGroup was successfully populated.
-        bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
-        bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
+    // Returns true if the BindGroup was successfully populated.
+    bool PopulateViews(ShaderVisibleDescriptorAllocator* viewAllocator);
+    bool PopulateSamplers(Device* device, ShaderVisibleDescriptorAllocator* samplerAllocator);
 
-        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
-        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
+    D3D12_GPU_DESCRIPTOR_HANDLE GetBaseViewDescriptor() const;
+    D3D12_GPU_DESCRIPTOR_HANDLE GetBaseSamplerDescriptor() const;
 
-        void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
+    void SetSamplerAllocationEntry(Ref<SamplerHeapCacheEntry> entry);
 
-        using DynamicStorageBufferLengths =
-            ityp::stack_vec<uint32_t, uint32_t, kMaxDynamicStorageBuffersPerPipelineLayout>;
-        const DynamicStorageBufferLengths& GetDynamicStorageBufferLengths() const;
+    using DynamicStorageBufferLengths =
+        ityp::stack_vec<uint32_t, uint32_t, kMaxDynamicStorageBuffersPerPipelineLayout>;
+    const DynamicStorageBufferLengths& GetDynamicStorageBufferLengths() const;
 
-      private:
-        ~BindGroup() override;
+  private:
+    ~BindGroup() override;
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
+    Ref<SamplerHeapCacheEntry> mSamplerAllocationEntry;
 
-        GPUDescriptorHeapAllocation mGPUViewAllocation;
-        CPUDescriptorHeapAllocation mCPUViewAllocation;
+    GPUDescriptorHeapAllocation mGPUViewAllocation;
+    CPUDescriptorHeapAllocation mCPUViewAllocation;
 
-        DynamicStorageBufferLengths mDynamicStorageBufferLengths;
-    };
+    DynamicStorageBufferLengths mDynamicStorageBufferLengths;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_BINDGROUPD3D12_H_
diff --git a/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp b/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
index 7ef3745..9e5abad 100644
--- a/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
+++ b/src/dawn/native/d3d12/BindGroupLayoutD3D12.cpp
@@ -22,165 +22,160 @@
 #include "dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
-        D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(
-            const BindingInfo& bindingInfo) {
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Buffer:
-                    switch (bindingInfo.buffer.type) {
-                        case wgpu::BufferBindingType::Uniform:
-                            return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
-                        case wgpu::BufferBindingType::Storage:
-                        case kInternalStorageBufferBinding:
-                            return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
-                        case wgpu::BufferBindingType::ReadOnlyStorage:
-                            return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
-                        case wgpu::BufferBindingType::Undefined:
-                            UNREACHABLE();
-                    }
-
-                case BindingInfoType::Sampler:
-                    return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
-
-                case BindingInfoType::Texture:
-                case BindingInfoType::ExternalTexture:
+namespace {
+D3D12_DESCRIPTOR_RANGE_TYPE WGPUBindingInfoToDescriptorRangeType(const BindingInfo& bindingInfo) {
+    switch (bindingInfo.bindingType) {
+        case BindingInfoType::Buffer:
+            switch (bindingInfo.buffer.type) {
+                case wgpu::BufferBindingType::Uniform:
+                    return D3D12_DESCRIPTOR_RANGE_TYPE_CBV;
+                case wgpu::BufferBindingType::Storage:
+                case kInternalStorageBufferBinding:
+                    return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+                case wgpu::BufferBindingType::ReadOnlyStorage:
                     return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
-
-                case BindingInfoType::StorageTexture:
-                    switch (bindingInfo.storageTexture.access) {
-                        case wgpu::StorageTextureAccess::WriteOnly:
-                            return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
-                        case wgpu::StorageTextureAccess::Undefined:
-                            UNREACHABLE();
-                    }
+                case wgpu::BufferBindingType::Undefined:
+                    UNREACHABLE();
             }
-        }
-    }  // anonymous namespace
 
-    // static
-    Ref<BindGroupLayout> BindGroupLayout::Create(
-        Device* device,
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+        case BindingInfoType::Sampler:
+            return D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER;
+
+        case BindingInfoType::Texture:
+        case BindingInfoType::ExternalTexture:
+            return D3D12_DESCRIPTOR_RANGE_TYPE_SRV;
+
+        case BindingInfoType::StorageTexture:
+            switch (bindingInfo.storageTexture.access) {
+                case wgpu::StorageTextureAccess::WriteOnly:
+                    return D3D12_DESCRIPTOR_RANGE_TYPE_UAV;
+                case wgpu::StorageTextureAccess::Undefined:
+                    UNREACHABLE();
+            }
     }
+}
+}  // anonymous namespace
 
-    BindGroupLayout::BindGroupLayout(Device* device,
-                                     const BindGroupLayoutDescriptor* descriptor,
-                                     PipelineCompatibilityToken pipelineCompatibilityToken)
-        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
-          mDescriptorHeapOffsets(GetBindingCount()),
-          mShaderRegisters(GetBindingCount()),
-          mCbvUavSrvDescriptorCount(0),
-          mSamplerDescriptorCount(0),
-          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
-        for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
-            const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+// static
+Ref<BindGroupLayout> BindGroupLayout::Create(
+    Device* device,
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+}
 
-            D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
-                WGPUBindingInfoToDescriptorRangeType(bindingInfo);
-            mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
+BindGroupLayout::BindGroupLayout(Device* device,
+                                 const BindGroupLayoutDescriptor* descriptor,
+                                 PipelineCompatibilityToken pipelineCompatibilityToken)
+    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+      mDescriptorHeapOffsets(GetBindingCount()),
+      mShaderRegisters(GetBindingCount()),
+      mCbvUavSrvDescriptorCount(0),
+      mSamplerDescriptorCount(0),
+      mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
+    for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+        const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
 
-            // For dynamic resources, Dawn uses root descriptor in D3D12 backend. So there is no
-            // need to allocate the descriptor from descriptor heap or create descriptor ranges.
-            if (bindingIndex < GetDynamicBufferCount()) {
+        D3D12_DESCRIPTOR_RANGE_TYPE descriptorRangeType =
+            WGPUBindingInfoToDescriptorRangeType(bindingInfo);
+        mShaderRegisters[bindingIndex] = uint32_t(bindingInfo.binding);
+
+        // For dynamic resources, Dawn uses root descriptor in D3D12 backend. So there is no
+        // need to allocate the descriptor from descriptor heap or create descriptor ranges.
+        if (bindingIndex < GetDynamicBufferCount()) {
+            continue;
+        }
+        ASSERT(!bindingInfo.buffer.hasDynamicOffset);
+
+        mDescriptorHeapOffsets[bindingIndex] =
+            descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
+                ? mSamplerDescriptorCount++
+                : mCbvUavSrvDescriptorCount++;
+
+        D3D12_DESCRIPTOR_RANGE range;
+        range.RangeType = descriptorRangeType;
+        range.NumDescriptors = 1;
+        range.BaseShaderRegister = GetShaderRegister(bindingIndex);
+        range.RegisterSpace = kRegisterSpacePlaceholder;
+        range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
+
+        std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
+            descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER ? mSamplerDescriptorRanges
+                                                                       : mCbvUavSrvDescriptorRanges;
+
+        // Try to join this range with the previous one, if the current range is a continuation
+        // of the previous. This is possible because the binding infos in the base type are
+        // sorted.
+        if (descriptorRanges.size() >= 2) {
+            D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
+            if (previous.RangeType == range.RangeType &&
+                previous.BaseShaderRegister + previous.NumDescriptors == range.BaseShaderRegister) {
+                previous.NumDescriptors += range.NumDescriptors;
                 continue;
             }
-            ASSERT(!bindingInfo.buffer.hasDynamicOffset);
-
-            mDescriptorHeapOffsets[bindingIndex] =
-                descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
-                    ? mSamplerDescriptorCount++
-                    : mCbvUavSrvDescriptorCount++;
-
-            D3D12_DESCRIPTOR_RANGE range;
-            range.RangeType = descriptorRangeType;
-            range.NumDescriptors = 1;
-            range.BaseShaderRegister = GetShaderRegister(bindingIndex);
-            range.RegisterSpace = kRegisterSpacePlaceholder;
-            range.OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
-
-            std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges =
-                descriptorRangeType == D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER
-                    ? mSamplerDescriptorRanges
-                    : mCbvUavSrvDescriptorRanges;
-
-            // Try to join this range with the previous one, if the current range is a continuation
-            // of the previous. This is possible because the binding infos in the base type are
-            // sorted.
-            if (descriptorRanges.size() >= 2) {
-                D3D12_DESCRIPTOR_RANGE& previous = descriptorRanges.back();
-                if (previous.RangeType == range.RangeType &&
-                    previous.BaseShaderRegister + previous.NumDescriptors ==
-                        range.BaseShaderRegister) {
-                    previous.NumDescriptors += range.NumDescriptors;
-                    continue;
-                }
-            }
-
-            descriptorRanges.push_back(range);
         }
 
-        mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
-        mSamplerAllocator =
-            device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
+        descriptorRanges.push_back(range);
     }
 
-    ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
-        Device* device,
-        const BindGroupDescriptor* descriptor) {
-        uint32_t viewSizeIncrement = 0;
-        CPUDescriptorHeapAllocation viewAllocation;
-        if (GetCbvUavSrvDescriptorCount() > 0) {
-            DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
-            viewSizeIncrement = mViewAllocator->GetSizeIncrement();
-        }
+    mViewAllocator = device->GetViewStagingDescriptorAllocator(GetCbvUavSrvDescriptorCount());
+    mSamplerAllocator = device->GetSamplerStagingDescriptorAllocator(GetSamplerDescriptorCount());
+}
 
-        Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
-            mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
-
-        if (GetSamplerDescriptorCount() > 0) {
-            Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
-            DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
-                                                       bindGroup.Get(), mSamplerAllocator));
-            bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
-        }
-
-        return bindGroup;
+ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+    Device* device,
+    const BindGroupDescriptor* descriptor) {
+    uint32_t viewSizeIncrement = 0;
+    CPUDescriptorHeapAllocation viewAllocation;
+    if (GetCbvUavSrvDescriptorCount() > 0) {
+        DAWN_TRY_ASSIGN(viewAllocation, mViewAllocator->AllocateCPUDescriptors());
+        viewSizeIncrement = mViewAllocator->GetSizeIncrement();
     }
 
-    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
-                                              CPUDescriptorHeapAllocation* viewAllocation) {
-        if (viewAllocation->IsValid()) {
-            mViewAllocator->Deallocate(viewAllocation);
-        }
+    Ref<BindGroup> bindGroup = AcquireRef<BindGroup>(
+        mBindGroupAllocator.Allocate(device, descriptor, viewSizeIncrement, viewAllocation));
 
-        mBindGroupAllocator.Deallocate(bindGroup);
+    if (GetSamplerDescriptorCount() > 0) {
+        Ref<SamplerHeapCacheEntry> samplerHeapCacheEntry;
+        DAWN_TRY_ASSIGN(samplerHeapCacheEntry, device->GetSamplerHeapCache()->GetOrCreate(
+                                                   bindGroup.Get(), mSamplerAllocator));
+        bindGroup->SetSamplerAllocationEntry(std::move(samplerHeapCacheEntry));
     }
 
-    ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
-        return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
+    return bindGroup;
+}
+
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+                                          CPUDescriptorHeapAllocation* viewAllocation) {
+    if (viewAllocation->IsValid()) {
+        mViewAllocator->Deallocate(viewAllocation);
     }
 
-    uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
-        return mShaderRegisters[bindingIndex];
-    }
+    mBindGroupAllocator.Deallocate(bindGroup);
+}
 
-    uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
-        return mCbvUavSrvDescriptorCount;
-    }
+ityp::span<BindingIndex, const uint32_t> BindGroupLayout::GetDescriptorHeapOffsets() const {
+    return {mDescriptorHeapOffsets.data(), mDescriptorHeapOffsets.size()};
+}
 
-    uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
-        return mSamplerDescriptorCount;
-    }
+uint32_t BindGroupLayout::GetShaderRegister(BindingIndex bindingIndex) const {
+    return mShaderRegisters[bindingIndex];
+}
 
-    const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges()
-        const {
-        return mCbvUavSrvDescriptorRanges;
-    }
+uint32_t BindGroupLayout::GetCbvUavSrvDescriptorCount() const {
+    return mCbvUavSrvDescriptorCount;
+}
 
-    const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
-        return mSamplerDescriptorRanges;
-    }
+uint32_t BindGroupLayout::GetSamplerDescriptorCount() const {
+    return mSamplerDescriptorCount;
+}
+
+const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetCbvUavSrvDescriptorRanges() const {
+    return mCbvUavSrvDescriptorRanges;
+}
+
+const std::vector<D3D12_DESCRIPTOR_RANGE>& BindGroupLayout::GetSamplerDescriptorRanges() const {
+    return mSamplerDescriptorRanges;
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BindGroupLayoutD3D12.h b/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
index 1e051a4..f045492 100644
--- a/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
+++ b/src/dawn/native/d3d12/BindGroupLayoutD3D12.h
@@ -26,70 +26,70 @@
 
 namespace dawn::native::d3d12 {
 
-    class CPUDescriptorHeapAllocation;
-    class Device;
-    class StagingDescriptorAllocator;
+class CPUDescriptorHeapAllocation;
+class Device;
+class StagingDescriptorAllocator;
 
-    // A purposefully invalid register space.
+// A purposefully invalid register space.
+//
+// We use the bind group index as the register space, but don't know the bind group index until
+// pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
+static constexpr uint32_t kRegisterSpacePlaceholder =
+    D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
+
+class BindGroupLayout final : public BindGroupLayoutBase {
+  public:
+    static Ref<BindGroupLayout> Create(Device* device,
+                                       const BindGroupLayoutDescriptor* descriptor,
+                                       PipelineCompatibilityToken pipelineCompatibilityToken);
+
+    ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+                                                    const BindGroupDescriptor* descriptor);
+    void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
+
+    // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
+    // dynamic binding indexes.
+    ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
+
+    // The D3D shader register that the Dawn binding index is mapped to by this bind group
+    // layout.
+    uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
+
+    // Counts of descriptors in the descriptor tables.
+    uint32_t GetCbvUavSrvDescriptorCount() const;
+    uint32_t GetSamplerDescriptorCount() const;
+
+    const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
+    const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
+
+  private:
+    BindGroupLayout(Device* device,
+                    const BindGroupLayoutDescriptor* descriptor,
+                    PipelineCompatibilityToken pipelineCompatibilityToken);
+    ~BindGroupLayout() override = default;
+
+    // Contains the offset into the descriptor heap for the given resource view. Samplers and
+    // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
+    // within each group and tightly packed.
     //
-    // We use the bind group index as the register space, but don't know the bind group index until
-    // pipeline layout creation time. This value should be replaced in PipelineLayoutD3D12.
-    static constexpr uint32_t kRegisterSpacePlaceholder =
-        D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START;
+    // Dynamic resources are not used here since their descriptors are placed directly in root
+    // parameters.
+    ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
 
-    class BindGroupLayout final : public BindGroupLayoutBase {
-      public:
-        static Ref<BindGroupLayout> Create(Device* device,
-                                           const BindGroupLayoutDescriptor* descriptor,
-                                           PipelineCompatibilityToken pipelineCompatibilityToken);
+    // Contains the shader register this binding is mapped to.
+    ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
 
-        ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
-                                                        const BindGroupDescriptor* descriptor);
-        void DeallocateBindGroup(BindGroup* bindGroup, CPUDescriptorHeapAllocation* viewAllocation);
+    uint32_t mCbvUavSrvDescriptorCount;
+    uint32_t mSamplerDescriptorCount;
 
-        // The offset (in descriptor count) into the corresponding descriptor heap. Not valid for
-        // dynamic binding indexes.
-        ityp::span<BindingIndex, const uint32_t> GetDescriptorHeapOffsets() const;
+    std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
+    std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
 
-        // The D3D shader register that the Dawn binding index is mapped to by this bind group
-        // layout.
-        uint32_t GetShaderRegister(BindingIndex bindingIndex) const;
+    SlabAllocator<BindGroup> mBindGroupAllocator;
 
-        // Counts of descriptors in the descriptor tables.
-        uint32_t GetCbvUavSrvDescriptorCount() const;
-        uint32_t GetSamplerDescriptorCount() const;
-
-        const std::vector<D3D12_DESCRIPTOR_RANGE>& GetCbvUavSrvDescriptorRanges() const;
-        const std::vector<D3D12_DESCRIPTOR_RANGE>& GetSamplerDescriptorRanges() const;
-
-      private:
-        BindGroupLayout(Device* device,
-                        const BindGroupLayoutDescriptor* descriptor,
-                        PipelineCompatibilityToken pipelineCompatibilityToken);
-        ~BindGroupLayout() override = default;
-
-        // Contains the offset into the descriptor heap for the given resource view. Samplers and
-        // non-samplers are stored in separate descriptor heaps, so the offsets should be unique
-        // within each group and tightly packed.
-        //
-        // Dynamic resources are not used here since their descriptors are placed directly in root
-        // parameters.
-        ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mDescriptorHeapOffsets;
-
-        // Contains the shader register this binding is mapped to.
-        ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup> mShaderRegisters;
-
-        uint32_t mCbvUavSrvDescriptorCount;
-        uint32_t mSamplerDescriptorCount;
-
-        std::vector<D3D12_DESCRIPTOR_RANGE> mCbvUavSrvDescriptorRanges;
-        std::vector<D3D12_DESCRIPTOR_RANGE> mSamplerDescriptorRanges;
-
-        SlabAllocator<BindGroup> mBindGroupAllocator;
-
-        StagingDescriptorAllocator* mSamplerAllocator = nullptr;
-        StagingDescriptorAllocator* mViewAllocator = nullptr;
-    };
+    StagingDescriptorAllocator* mSamplerAllocator = nullptr;
+    StagingDescriptorAllocator* mViewAllocator = nullptr;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/BufferD3D12.cpp b/src/dawn/native/d3d12/BufferD3D12.cpp
index ebb2339..0488fce 100644
--- a/src/dawn/native/d3d12/BufferD3D12.cpp
+++ b/src/dawn/native/d3d12/BufferD3D12.cpp
@@ -32,473 +32,466 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
-            D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
+namespace {
+D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::BufferUsage usage) {
+    D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
 
-            if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
-                flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
-            }
-
-            return flags;
-        }
-
-        D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
-            D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
-
-            if (usage & wgpu::BufferUsage::CopySrc) {
-                resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
-            }
-            if (usage & wgpu::BufferUsage::CopyDst) {
-                resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
-            }
-            if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
-                resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
-            }
-            if (usage & wgpu::BufferUsage::Index) {
-                resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
-            }
-            if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
-                resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
-            }
-            if (usage & kReadOnlyStorageBuffer) {
-                resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
-                                  D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
-            }
-            if (usage & wgpu::BufferUsage::Indirect) {
-                resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
-            }
-            if (usage & wgpu::BufferUsage::QueryResolve) {
-                resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
-            }
-
-            return resourceState;
-        }
-
-        D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
-            if (allowedUsage & wgpu::BufferUsage::MapRead) {
-                return D3D12_HEAP_TYPE_READBACK;
-            } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
-                return D3D12_HEAP_TYPE_UPLOAD;
-            } else {
-                return D3D12_HEAP_TYPE_DEFAULT;
-            }
-        }
-
-        size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
-            if ((usage & wgpu::BufferUsage::Uniform) != 0) {
-                // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
-                // forbids binding a CBV to an unaligned size. To prevent, one can always safely
-                // align the buffer size to the CBV data alignment as other buffer usages
-                // ignore it (no size check). The validation will still enforce bound checks with
-                // the unaligned size returned by GetSize().
-                // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
-                return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
-            }
-            return 1;
-        }
-    }  // namespace
-
-    // static
-    ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
-        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
-        DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
-        return buffer;
+    if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+        flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
     }
 
-    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
-        : BufferBase(device, descriptor) {
+    return flags;
+}
+
+D3D12_RESOURCE_STATES D3D12BufferUsage(wgpu::BufferUsage usage) {
+    D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
+
+    if (usage & wgpu::BufferUsage::CopySrc) {
+        resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
+    }
+    if (usage & wgpu::BufferUsage::CopyDst) {
+        resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+    }
+    if (usage & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Uniform)) {
+        resourceState |= D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
+    }
+    if (usage & wgpu::BufferUsage::Index) {
+        resourceState |= D3D12_RESOURCE_STATE_INDEX_BUFFER;
+    }
+    if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+        resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+    }
+    if (usage & kReadOnlyStorageBuffer) {
+        resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+                          D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+    }
+    if (usage & wgpu::BufferUsage::Indirect) {
+        resourceState |= D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT;
+    }
+    if (usage & wgpu::BufferUsage::QueryResolve) {
+        resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
     }
 
-    MaybeError Buffer::Initialize(bool mappedAtCreation) {
-        // Allocate at least 4 bytes so clamped accesses are always in bounds.
-        uint64_t size = std::max(GetSize(), uint64_t(4u));
-        size_t alignment = D3D12BufferSizeAlignment(GetUsage());
-        if (size > std::numeric_limits<uint64_t>::max() - alignment) {
-            // Alignment would overlow.
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
-        }
-        mAllocatedSize = Align(size, alignment);
+    return resourceState;
+}
 
-        D3D12_RESOURCE_DESC resourceDescriptor;
-        resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
-        resourceDescriptor.Alignment = 0;
-        resourceDescriptor.Width = mAllocatedSize;
-        resourceDescriptor.Height = 1;
-        resourceDescriptor.DepthOrArraySize = 1;
-        resourceDescriptor.MipLevels = 1;
-        resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
-        resourceDescriptor.SampleDesc.Count = 1;
-        resourceDescriptor.SampleDesc.Quality = 0;
-        resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
-        // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
-        // and robust resource initialization.
-        resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
+D3D12_HEAP_TYPE D3D12HeapType(wgpu::BufferUsage allowedUsage) {
+    if (allowedUsage & wgpu::BufferUsage::MapRead) {
+        return D3D12_HEAP_TYPE_READBACK;
+    } else if (allowedUsage & wgpu::BufferUsage::MapWrite) {
+        return D3D12_HEAP_TYPE_UPLOAD;
+    } else {
+        return D3D12_HEAP_TYPE_DEFAULT;
+    }
+}
 
-        auto heapType = D3D12HeapType(GetUsage());
-        auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
+size_t D3D12BufferSizeAlignment(wgpu::BufferUsage usage) {
+    if ((usage & wgpu::BufferUsage::Uniform) != 0) {
+        // D3D buffers are always resource size aligned to 64KB. However, D3D12's validation
+        // forbids binding a CBV to an unaligned size. To prevent this, one can always safely
+        // align the buffer size to the CBV data alignment as other buffer usages
+        // ignore it (no size check). The validation will still enforce bound checks with
+        // the unaligned size returned by GetSize().
+        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/uploading-resources#buffer-alignment
+        return D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT;
+    }
+    return 1;
+}
+}  // namespace
 
-        // D3D12 requires buffers on the READBACK heap to have the D3D12_RESOURCE_STATE_COPY_DEST
-        // state
-        if (heapType == D3D12_HEAP_TYPE_READBACK) {
-            bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
-            mFixedResourceState = true;
-            mLastUsage = wgpu::BufferUsage::CopyDst;
-        }
+// static
+ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+    Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+    DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+    return buffer;
+}
 
-        // D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
-        // state
-        if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
-            bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
-            mFixedResourceState = true;
-            mLastUsage = wgpu::BufferUsage::CopySrc;
-        }
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+    : BufferBase(device, descriptor) {}
 
-        DAWN_TRY_ASSIGN(
-            mResourceAllocation,
-            ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
+MaybeError Buffer::Initialize(bool mappedAtCreation) {
+    // Allocate at least 4 bytes so clamped accesses are always in bounds.
+    uint64_t size = std::max(GetSize(), uint64_t(4u));
+    size_t alignment = D3D12BufferSizeAlignment(GetUsage());
+    if (size > std::numeric_limits<uint64_t>::max() - alignment) {
+        // Alignment would overlow.
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+    }
+    mAllocatedSize = Align(size, alignment);
 
-        SetLabelImpl();
+    D3D12_RESOURCE_DESC resourceDescriptor;
+    resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+    resourceDescriptor.Alignment = 0;
+    resourceDescriptor.Width = mAllocatedSize;
+    resourceDescriptor.Height = 1;
+    resourceDescriptor.DepthOrArraySize = 1;
+    resourceDescriptor.MipLevels = 1;
+    resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+    resourceDescriptor.SampleDesc.Count = 1;
+    resourceDescriptor.SampleDesc.Quality = 0;
+    resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+    // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+    // and robust resource initialization.
+    resourceDescriptor.Flags = D3D12ResourceFlags(GetUsage() | wgpu::BufferUsage::CopyDst);
 
-        // The buffers with mappedAtCreation == true will be initialized in
-        // BufferBase::MapAtCreation().
-        if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
-            !mappedAtCreation) {
+    auto heapType = D3D12HeapType(GetUsage());
+    auto bufferUsage = D3D12_RESOURCE_STATE_COMMON;
+
+    // D3D12 requires buffers on the READBACK heap to have the D3D12_RESOURCE_STATE_COPY_DEST
+    // state
+    if (heapType == D3D12_HEAP_TYPE_READBACK) {
+        bufferUsage |= D3D12_RESOURCE_STATE_COPY_DEST;
+        mFixedResourceState = true;
+        mLastUsage = wgpu::BufferUsage::CopyDst;
+    }
+
+    // D3D12 requires buffers on the UPLOAD heap to have the D3D12_RESOURCE_STATE_GENERIC_READ
+    // state
+    if (heapType == D3D12_HEAP_TYPE_UPLOAD) {
+        bufferUsage |= D3D12_RESOURCE_STATE_GENERIC_READ;
+        mFixedResourceState = true;
+        mLastUsage = wgpu::BufferUsage::CopySrc;
+    }
+
+    DAWN_TRY_ASSIGN(
+        mResourceAllocation,
+        ToBackend(GetDevice())->AllocateMemory(heapType, resourceDescriptor, bufferUsage));
+
+    SetLabelImpl();
+
+    // The buffers with mappedAtCreation == true will be initialized in
+    // BufferBase::MapAtCreation().
+    if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+        !mappedAtCreation) {
+        CommandRecordingContext* commandRecordingContext;
+        DAWN_TRY_ASSIGN(commandRecordingContext,
+                        ToBackend(GetDevice())->GetPendingCommandContext());
+
+        DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
+    }
+
+    // Initialize the padding bytes to zero.
+    if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+        uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+        if (paddingBytes > 0) {
             CommandRecordingContext* commandRecordingContext;
             DAWN_TRY_ASSIGN(commandRecordingContext,
                             ToBackend(GetDevice())->GetPendingCommandContext());
 
-            DAWN_TRY(ClearBuffer(commandRecordingContext, uint8_t(1u)));
-        }
-
-        // Initialize the padding bytes to zero.
-        if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
-            !mappedAtCreation) {
-            uint32_t paddingBytes = GetAllocatedSize() - GetSize();
-            if (paddingBytes > 0) {
-                CommandRecordingContext* commandRecordingContext;
-                DAWN_TRY_ASSIGN(commandRecordingContext,
-                                ToBackend(GetDevice())->GetPendingCommandContext());
-
-                uint32_t clearSize = paddingBytes;
-                uint64_t clearOffset = GetSize();
-                DAWN_TRY(ClearBuffer(commandRecordingContext, 0, clearOffset, clearSize));
-            }
-        }
-
-        return {};
-    }
-
-    Buffer::~Buffer() = default;
-
-    ID3D12Resource* Buffer::GetD3D12Resource() const {
-        return mResourceAllocation.GetD3D12Resource();
-    }
-
-    // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
-    // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
-    // cause subsequent errors.
-    bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
-                                                 D3D12_RESOURCE_BARRIER* barrier,
-                                                 wgpu::BufferUsage newUsage) {
-        // Track the underlying heap to ensure residency.
-        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
-        commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
-
-        // Return the resource barrier.
-        return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
-    }
-
-    void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                            wgpu::BufferUsage newUsage) {
-        D3D12_RESOURCE_BARRIER barrier;
-
-        if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
-            commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+            uint32_t clearSize = paddingBytes;
+            uint64_t clearOffset = GetSize();
+            DAWN_TRY(ClearBuffer(commandRecordingContext, 0, clearOffset, clearSize));
         }
     }
 
-    // When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
-    // ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
-    // cause subsequent errors.
-    bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
-                                                      D3D12_RESOURCE_BARRIER* barrier,
-                                                      wgpu::BufferUsage newUsage) {
-        // Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
-        if (mFixedResourceState) {
-            ASSERT(mLastUsage == newUsage);
-            return false;
-        }
+    return {};
+}
 
-        D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
-        D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
+Buffer::~Buffer() = default;
 
-        // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
-        // If one of the usages isn't UAV, then other barriers are used.
-        bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
-                               newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+ID3D12Resource* Buffer::GetD3D12Resource() const {
+    return mResourceAllocation.GetD3D12Resource();
+}
 
-        if (needsUAVBarrier) {
-            barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
-            barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
-            barrier->UAV.pResource = GetD3D12Resource();
+// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+// cause subsequent errors.
+bool Buffer::TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                             D3D12_RESOURCE_BARRIER* barrier,
+                                             wgpu::BufferUsage newUsage) {
+    // Track the underlying heap to ensure residency.
+    Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+    commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
 
-            mLastUsage = newUsage;
-            return true;
-        }
+    // Return the resource barrier.
+    return TransitionUsageAndGetResourceBarrier(commandContext, barrier, newUsage);
+}
 
-        // We can skip transitions to already current usages.
-        if (IsSubset(newUsage, mLastUsage)) {
-            return false;
-        }
+void Buffer::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                        wgpu::BufferUsage newUsage) {
+    D3D12_RESOURCE_BARRIER barrier;
+
+    if (TrackUsageAndGetResourceBarrier(commandContext, &barrier, newUsage)) {
+        commandContext->GetCommandList()->ResourceBarrier(1, &barrier);
+    }
+}
+
+// When true is returned, a D3D12_RESOURCE_BARRIER has been created and must be used in a
+// ResourceBarrier call. Failing to do so will cause the tracked state to become invalid and can
+// cause subsequent errors.
+bool Buffer::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                  D3D12_RESOURCE_BARRIER* barrier,
+                                                  wgpu::BufferUsage newUsage) {
+    // Resources in upload and readback heaps must be kept in the COPY_SOURCE/DEST state
+    if (mFixedResourceState) {
+        ASSERT(mLastUsage == newUsage);
+        return false;
+    }
+
+    D3D12_RESOURCE_STATES lastState = D3D12BufferUsage(mLastUsage);
+    D3D12_RESOURCE_STATES newState = D3D12BufferUsage(newUsage);
+
+    // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
+    // If one of the usages isn't UAV, then other barriers are used.
+    bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
+                           newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+
+    if (needsUAVBarrier) {
+        barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
+        barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+        barrier->UAV.pResource = GetD3D12Resource();
 
         mLastUsage = newUsage;
-
-        // The COMMON state represents a state where no write operations can be pending, which makes
-        // it possible to transition to and from some states without synchronizaton (i.e. without an
-        // explicit ResourceBarrier call). A buffer can be implicitly promoted to 1) a single write
-        // state, or 2) multiple read states. A buffer that is accessed within a command list will
-        // always implicitly decay to the COMMON state after the call to ExecuteCommandLists
-        // completes - this is because all buffer writes are guaranteed to be completed before the
-        // next ExecuteCommandLists call executes.
-        // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
-
-        // To track implicit decays, we must record the pending serial on which a transition will
-        // occur. When that buffer is used again, the previously recorded serial must be compared to
-        // the last completed serial to determine if the buffer has implicity decayed to the common
-        // state.
-        const ExecutionSerial pendingCommandSerial =
-            ToBackend(GetDevice())->GetPendingCommandSerial();
-        if (pendingCommandSerial > mLastUsedSerial) {
-            lastState = D3D12_RESOURCE_STATE_COMMON;
-            mLastUsedSerial = pendingCommandSerial;
-        }
-
-        // All possible buffer states used by Dawn are eligible for implicit promotion from COMMON.
-        // These are: COPY_SOURCE, VERTEX_AND_COPY_BUFFER, INDEX_BUFFER, COPY_DEST,
-        // UNORDERED_ACCESS, and INDIRECT_ARGUMENT. Note that for implicit promotion, the
-        // destination state cannot be 1) more than one write state, or 2) both a read and write
-        // state. This goes unchecked here because it should not be allowed through render/compute
-        // pass validation.
-        if (lastState == D3D12_RESOURCE_STATE_COMMON) {
-            return false;
-        }
-
-        // TODO(crbug.com/dawn/1024): The before and after states must be different. Remove this
-        // workaround and use D3D12 states instead of WebGPU usages to manage the tracking of
-        // barrier state.
-        if (lastState == newState) {
-            return false;
-        }
-
-        barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
-        barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
-        barrier->Transition.pResource = GetD3D12Resource();
-        barrier->Transition.StateBefore = lastState;
-        barrier->Transition.StateAfter = newState;
-        barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
-
         return true;
     }
 
-    D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
-        return mResourceAllocation.GetGPUPointer();
+    // We can skip transitions to already current usages.
+    if (IsSubset(newUsage, mLastUsage)) {
+        return false;
     }
 
-    bool Buffer::IsCPUWritableAtCreation() const {
-        // We use a staging buffer for the buffers with mappedAtCreation == true and created on the
-        // READBACK heap because for the buffers on the READBACK heap, the data written on the CPU
-        // side won't be uploaded to GPU. When we enable zero-initialization, the CPU side memory
-        // of the buffer is all written to 0 but not the GPU side memory, so on the next mapping
-        // operation the zeroes get overwritten by whatever was in the GPU memory when the buffer
-        // was created. With a staging buffer, the data on the CPU side will first upload to the
-        // staging buffer, and copied from the staging buffer to the GPU memory of the current
-        // buffer in the unmap() call.
-        // TODO(enga): Handle CPU-visible memory on UMA
-        return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
+    mLastUsage = newUsage;
+
+    // The COMMON state represents a state where no write operations can be pending, which makes
+    // it possible to transition to and from some states without synchronizaton (i.e. without an
+    // explicit ResourceBarrier call). A buffer can be implicitly promoted to 1) a single write
+    // state, or 2) multiple read states. A buffer that is accessed within a command list will
+    // always implicitly decay to the COMMON state after the call to ExecuteCommandLists
+    // completes - this is because all buffer writes are guaranteed to be completed before the
+    // next ExecuteCommandLists call executes.
+    // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+    // To track implicit decays, we must record the pending serial on which a transition will
+    // occur. When that buffer is used again, the previously recorded serial must be compared to
+    // the last completed serial to determine if the buffer has implicity decayed to the common
+    // state.
+    const ExecutionSerial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
+    if (pendingCommandSerial > mLastUsedSerial) {
+        lastState = D3D12_RESOURCE_STATE_COMMON;
+        mLastUsedSerial = pendingCommandSerial;
     }
 
-    MaybeError Buffer::MapInternal(bool isWrite,
-                                   size_t offset,
-                                   size_t size,
-                                   const char* contextInfo) {
-        // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
-        // evicted. This buffer should already have been made resident when it was created.
-        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "BufferD3D12::MapInternal");
-
-        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
-        DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
-
-        D3D12_RANGE range = {offset, offset + size};
-        // mMappedData is the pointer to the start of the resource, irrespective of offset.
-        // MSDN says (note the weird use of "never"):
-        //
-        //   When ppData is not NULL, the pointer returned is never offset by any values in
-        //   pReadRange.
-        //
-        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
-        DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
-
-        if (isWrite) {
-            mWrittenMappedRange = range;
-        }
-
-        return {};
+    // All possible buffer states used by Dawn are eligible for implicit promotion from COMMON.
+    // These are: COPY_SOURCE, VERTEX_AND_COPY_BUFFER, INDEX_BUFFER, COPY_DEST,
+    // UNORDERED_ACCESS, and INDIRECT_ARGUMENT. Note that for implicit promotion, the
+    // destination state cannot be 1) more than one write state, or 2) both a read and write
+    // state. This goes unchecked here because it should not be allowed through render/compute
+    // pass validation.
+    if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+        return false;
     }
 
-    MaybeError Buffer::MapAtCreationImpl() {
-        // We will use a staging buffer for MapRead buffers instead so we just clear the staging
-        // buffer and initialize the original buffer by copying the staging buffer to the original
-        // buffer one the first time Unmap() is called.
-        ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
-
-        // The buffers with mappedAtCreation == true will be initialized in
-        // BufferBase::MapAtCreation().
-        DAWN_TRY(MapInternal(true, 0, size_t(GetAllocatedSize()), "D3D12 map at creation"));
-
-        return {};
+    // TODO(crbug.com/dawn/1024): The before and after states must be different. Remove this
+    // workaround and use D3D12 states instead of WebGPU usages to manage the tracking of
+    // barrier state.
+    if (lastState == newState) {
+        return false;
     }
 
-    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
-        // GetPendingCommandContext() call might create a new commandList. Dawn will handle
-        // it in Tick() by execute the commandList and signal a fence for it even it is empty.
-        // Skip the unnecessary GetPendingCommandContext() call saves an extra fence.
-        if (NeedsInitialization()) {
-            CommandRecordingContext* commandContext;
-            DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
-            DAWN_TRY(EnsureDataInitialized(commandContext));
-        }
+    barrier->Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+    barrier->Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+    barrier->Transition.pResource = GetD3D12Resource();
+    barrier->Transition.StateBefore = lastState;
+    barrier->Transition.StateAfter = newState;
+    barrier->Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
 
-        return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
+    return true;
+}
+
+D3D12_GPU_VIRTUAL_ADDRESS Buffer::GetVA() const {
+    return mResourceAllocation.GetGPUPointer();
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+    // We use a staging buffer for the buffers with mappedAtCreation == true and created on the
+    // READBACK heap because for the buffers on the READBACK heap, the data written on the CPU
+    // side won't be uploaded to GPU. When we enable zero-initialization, the CPU side memory
+    // of the buffer is all written to 0 but not the GPU side memory, so on the next mapping
+    // operation the zeroes get overwritten by whatever was in the GPU memory when the buffer
+    // was created. With a staging buffer, the data on the CPU side will first upload to the
+    // staging buffer, and copied from the staging buffer to the GPU memory of the current
+    // buffer in the unmap() call.
+    // TODO(enga): Handle CPU-visible memory on UMA
+    return (GetUsage() & wgpu::BufferUsage::MapWrite) != 0;
+}
+
+MaybeError Buffer::MapInternal(bool isWrite, size_t offset, size_t size, const char* contextInfo) {
+    // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+    // evicted. This buffer should already have been made resident when it was created.
+    TRACE_EVENT0(GetDevice()->GetPlatform(), General, "BufferD3D12::MapInternal");
+
+    Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+    DAWN_TRY(ToBackend(GetDevice())->GetResidencyManager()->LockAllocation(heap));
+
+    D3D12_RANGE range = {offset, offset + size};
+    // mMappedData is the pointer to the start of the resource, irrespective of offset.
+    // MSDN says (note the weird use of "never"):
+    //
+    //   When ppData is not NULL, the pointer returned is never offset by any values in
+    //   pReadRange.
+    //
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map
+    DAWN_TRY(CheckHRESULT(GetD3D12Resource()->Map(0, &range, &mMappedData), contextInfo));
+
+    if (isWrite) {
+        mWrittenMappedRange = range;
     }
 
-    void Buffer::UnmapImpl() {
-        GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
-        mMappedData = nullptr;
+    return {};
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+    // We will use a staging buffer for MapRead buffers instead so we just clear the staging
+    // buffer and initialize the original buffer by copying the staging buffer to the original
+    // buffer one the first time Unmap() is called.
+    ASSERT((GetUsage() & wgpu::BufferUsage::MapWrite) != 0);
+
+    // The buffers with mappedAtCreation == true will be initialized in
+    // BufferBase::MapAtCreation().
+    DAWN_TRY(MapInternal(true, 0, size_t(GetAllocatedSize()), "D3D12 map at creation"));
+
+    return {};
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+    // GetPendingCommandContext() call might create a new commandList. Dawn will handle
+    // it in Tick() by execute the commandList and signal a fence for it even it is empty.
+    // Skip the unnecessary GetPendingCommandContext() call saves an extra fence.
+    if (NeedsInitialization()) {
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, ToBackend(GetDevice())->GetPendingCommandContext());
+        DAWN_TRY(EnsureDataInitialized(commandContext));
+    }
+
+    return MapInternal(mode & wgpu::MapMode::Write, offset, size, "D3D12 map async");
+}
+
+void Buffer::UnmapImpl() {
+    GetD3D12Resource()->Unmap(0, &mWrittenMappedRange);
+    mMappedData = nullptr;
+    mWrittenMappedRange = {0, 0};
+
+    // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
+    // them when they are unmapped.
+    Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+    ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
+}
+
+void* Buffer::GetMappedPointerImpl() {
+    // The frontend asks that the pointer returned is from the start of the resource
+    // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
+    return mMappedData;
+}
+
+void Buffer::DestroyImpl() {
+    if (mMappedData != nullptr) {
+        // If the buffer is currently mapped, unmap without flushing the writes to the GPU
+        // since the buffer cannot be used anymore. UnmapImpl checks mWrittenRange to know
+        // which parts to flush, so we set it to an empty range to prevent flushes.
         mWrittenMappedRange = {0, 0};
-
-        // When buffers are mapped, they are locked to keep them in resident memory. We must unlock
-        // them when they are unmapped.
-        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
-        ToBackend(GetDevice())->GetResidencyManager()->UnlockAllocation(heap);
     }
+    BufferBase::DestroyImpl();
 
-    void* Buffer::GetMappedPointerImpl() {
-        // The frontend asks that the pointer returned is from the start of the resource
-        // irrespective of the offset passed in MapAsyncImpl, which is what mMappedData is.
-        return mMappedData;
-    }
+    ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
+}
 
-    void Buffer::DestroyImpl() {
-        if (mMappedData != nullptr) {
-            // If the buffer is currently mapped, unmap without flushing the writes to the GPU
-            // since the buffer cannot be used anymore. UnmapImpl checks mWrittenRange to know
-            // which parts to flush, so we set it to an empty range to prevent flushes.
-            mWrittenMappedRange = {0, 0};
-        }
-        BufferBase::DestroyImpl();
+bool Buffer::CheckIsResidentForTesting() const {
+    Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+    return heap->IsInList() || heap->IsResidencyLocked();
+}
 
-        ToBackend(GetDevice())->DeallocateMemory(mResourceAllocation);
-    }
+bool Buffer::CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const {
+    return mResourceAllocation.GetInfo().mMethod == allocationMethod;
+}
 
-    bool Buffer::CheckIsResidentForTesting() const {
-        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
-        return heap->IsInList() || heap->IsResidencyLocked();
-    }
-
-    bool Buffer::CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const {
-        return mResourceAllocation.GetInfo().mMethod == allocationMethod;
-    }
-
-    MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
-        if (!NeedsInitialization()) {
-            return {};
-        }
-
-        DAWN_TRY(InitializeToZero(commandContext));
+MaybeError Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+    if (!NeedsInitialization()) {
         return {};
     }
 
-    ResultOrError<bool> Buffer::EnsureDataInitializedAsDestination(
-        CommandRecordingContext* commandContext,
-        uint64_t offset,
-        uint64_t size) {
-        if (!NeedsInitialization()) {
-            return {false};
-        }
+    DAWN_TRY(InitializeToZero(commandContext));
+    return {};
+}
 
-        if (IsFullBufferRange(offset, size)) {
-            SetIsDataInitialized();
-            return {false};
-        }
-
-        DAWN_TRY(InitializeToZero(commandContext));
-        return {true};
+ResultOrError<bool> Buffer::EnsureDataInitializedAsDestination(
+    CommandRecordingContext* commandContext,
+    uint64_t offset,
+    uint64_t size) {
+    if (!NeedsInitialization()) {
+        return {false};
     }
 
-    MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
-                                                          const CopyTextureToBufferCmd* copy) {
-        if (!NeedsInitialization()) {
-            return {};
-        }
-
-        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
-            SetIsDataInitialized();
-        } else {
-            DAWN_TRY(InitializeToZero(commandContext));
-        }
-
-        return {};
-    }
-
-    void Buffer::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
-                     GetLabel());
-    }
-
-    MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
-        ASSERT(NeedsInitialization());
-
-        // TODO(crbug.com/dawn/484): skip initializing the buffer when it is created on a heap
-        // that has already been zero initialized.
-        DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
+    if (IsFullBufferRange(offset, size)) {
         SetIsDataInitialized();
-        GetDevice()->IncrementLazyClearCountForTesting();
+        return {false};
+    }
 
+    DAWN_TRY(InitializeToZero(commandContext));
+    return {true};
+}
+
+MaybeError Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                      const CopyTextureToBufferCmd* copy) {
+    if (!NeedsInitialization()) {
         return {};
     }
 
-    MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext,
-                                   uint8_t clearValue,
-                                   uint64_t offset,
-                                   uint64_t size) {
-        Device* device = ToBackend(GetDevice());
-        size = size > 0 ? size : GetAllocatedSize();
-
-        // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
-        // changed away, so we can only clear such buffer with buffer mapping.
-        if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
-            DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
-                                 "D3D12 map at clear buffer"));
-            memset(mMappedData, clearValue, size);
-            UnmapImpl();
-        } else if (clearValue == 0u) {
-            DAWN_TRY(device->ClearBufferToZero(commandContext, this, offset, size));
-        } else {
-            // TODO(crbug.com/dawn/852): use ClearUnorderedAccessView*() when the buffer usage
-            // includes STORAGE.
-            DynamicUploader* uploader = device->GetDynamicUploader();
-            UploadHandle uploadHandle;
-            DAWN_TRY_ASSIGN(uploadHandle,
-                            uploader->Allocate(size, device->GetPendingCommandSerial(),
-                                               kCopyBufferToBufferOffsetAlignment));
-
-            memset(uploadHandle.mappedBuffer, clearValue, size);
-
-            device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
-                                                uploadHandle.startOffset, this, offset, size);
-        }
-
-        return {};
+    if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+        SetIsDataInitialized();
+    } else {
+        DAWN_TRY(InitializeToZero(commandContext));
     }
+
+    return {};
+}
+
+void Buffer::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), "Dawn_Buffer",
+                 GetLabel());
+}
+
+MaybeError Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+    ASSERT(NeedsInitialization());
+
+    // TODO(crbug.com/dawn/484): skip initializing the buffer when it is created on a heap
+    // that has already been zero initialized.
+    DAWN_TRY(ClearBuffer(commandContext, uint8_t(0u)));
+    SetIsDataInitialized();
+    GetDevice()->IncrementLazyClearCountForTesting();
+
+    return {};
+}
+
+MaybeError Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+                               uint8_t clearValue,
+                               uint64_t offset,
+                               uint64_t size) {
+    Device* device = ToBackend(GetDevice());
+    size = size > 0 ? size : GetAllocatedSize();
+
+    // The state of the buffers on UPLOAD heap must always be GENERIC_READ and cannot be
+    // changed away, so we can only clear such buffer with buffer mapping.
+    if (D3D12HeapType(GetUsage()) == D3D12_HEAP_TYPE_UPLOAD) {
+        DAWN_TRY(MapInternal(true, static_cast<size_t>(offset), static_cast<size_t>(size),
+                             "D3D12 map at clear buffer"));
+        memset(mMappedData, clearValue, size);
+        UnmapImpl();
+    } else if (clearValue == 0u) {
+        DAWN_TRY(device->ClearBufferToZero(commandContext, this, offset, size));
+    } else {
+        // TODO(crbug.com/dawn/852): use ClearUnorderedAccessView*() when the buffer usage
+        // includes STORAGE.
+        DynamicUploader* uploader = device->GetDynamicUploader();
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle, uploader->Allocate(size, device->GetPendingCommandSerial(),
+                                                         kCopyBufferToBufferOffsetAlignment));
+
+        memset(uploadHandle.mappedBuffer, clearValue, size);
+
+        device->CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+                                            uploadHandle.startOffset, this, offset, size);
+    }
+
+    return {};
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/BufferD3D12.h b/src/dawn/native/d3d12/BufferD3D12.h
index 4545f36..cb36a85 100644
--- a/src/dawn/native/d3d12/BufferD3D12.h
+++ b/src/dawn/native/d3d12/BufferD3D12.h
@@ -24,69 +24,67 @@
 
 namespace dawn::native::d3d12 {
 
-    class CommandRecordingContext;
-    class Device;
+class CommandRecordingContext;
+class Device;
 
-    class Buffer final : public BufferBase {
-      public:
-        static ResultOrError<Ref<Buffer>> Create(Device* device,
-                                                 const BufferDescriptor* descriptor);
+class Buffer final : public BufferBase {
+  public:
+    static ResultOrError<Ref<Buffer>> Create(Device* device, const BufferDescriptor* descriptor);
 
-        ID3D12Resource* GetD3D12Resource() const;
-        D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
+    ID3D12Resource* GetD3D12Resource() const;
+    D3D12_GPU_VIRTUAL_ADDRESS GetVA() const;
 
-        bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
-                                             D3D12_RESOURCE_BARRIER* barrier,
-                                             wgpu::BufferUsage newUsage);
-        void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                        wgpu::BufferUsage newUsage);
+    bool TrackUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                         D3D12_RESOURCE_BARRIER* barrier,
+                                         wgpu::BufferUsage newUsage);
+    void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                    wgpu::BufferUsage newUsage);
 
-        bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
-        bool CheckIsResidentForTesting() const;
+    bool CheckAllocationMethodForTesting(AllocationMethod allocationMethod) const;
+    bool CheckIsResidentForTesting() const;
 
-        MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
-        ResultOrError<bool> EnsureDataInitializedAsDestination(
-            CommandRecordingContext* commandContext,
-            uint64_t offset,
-            uint64_t size);
-        MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
-                                                      const CopyTextureToBufferCmd* copy);
+    MaybeError EnsureDataInitialized(CommandRecordingContext* commandContext);
+    ResultOrError<bool> EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                           uint64_t offset,
+                                                           uint64_t size);
+    MaybeError EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                  const CopyTextureToBufferCmd* copy);
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-      private:
-        Buffer(Device* device, const BufferDescriptor* descriptor);
-        ~Buffer() override;
+  private:
+    Buffer(Device* device, const BufferDescriptor* descriptor);
+    ~Buffer() override;
 
-        MaybeError Initialize(bool mappedAtCreation);
-        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
-        void UnmapImpl() override;
-        void DestroyImpl() override;
-        bool IsCPUWritableAtCreation() const override;
-        MaybeError MapAtCreationImpl() override;
-        void* GetMappedPointerImpl() override;
+    MaybeError Initialize(bool mappedAtCreation);
+    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+    void UnmapImpl() override;
+    void DestroyImpl() override;
+    bool IsCPUWritableAtCreation() const override;
+    MaybeError MapAtCreationImpl() override;
+    void* GetMappedPointerImpl() override;
 
-        MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
+    MaybeError MapInternal(bool isWrite, size_t start, size_t end, const char* contextInfo);
 
-        bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
-                                                  D3D12_RESOURCE_BARRIER* barrier,
-                                                  wgpu::BufferUsage newUsage);
+    bool TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                              D3D12_RESOURCE_BARRIER* barrier,
+                                              wgpu::BufferUsage newUsage);
 
-        MaybeError InitializeToZero(CommandRecordingContext* commandContext);
-        MaybeError ClearBuffer(CommandRecordingContext* commandContext,
-                               uint8_t clearValue,
-                               uint64_t offset = 0,
-                               uint64_t size = 0);
+    MaybeError InitializeToZero(CommandRecordingContext* commandContext);
+    MaybeError ClearBuffer(CommandRecordingContext* commandContext,
+                           uint8_t clearValue,
+                           uint64_t offset = 0,
+                           uint64_t size = 0);
 
-        ResourceHeapAllocation mResourceAllocation;
-        bool mFixedResourceState = false;
-        wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
-        ExecutionSerial mLastUsedSerial = std::numeric_limits<ExecutionSerial>::max();
+    ResourceHeapAllocation mResourceAllocation;
+    bool mFixedResourceState = false;
+    wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+    ExecutionSerial mLastUsedSerial = std::numeric_limits<ExecutionSerial>::max();
 
-        D3D12_RANGE mWrittenMappedRange = {0, 0};
-        void* mMappedData = nullptr;
-    };
+    D3D12_RANGE mWrittenMappedRange = {0, 0};
+    void* mMappedData = nullptr;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
index 617c196..88e1d6d 100644
--- a/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
+++ b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.cpp
@@ -17,37 +17,35 @@
 
 namespace dawn::native::d3d12 {
 
-    CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(
-        D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
-        uint32_t heapIndex)
-        : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {
-    }
+CPUDescriptorHeapAllocation::CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+                                                         uint32_t heapIndex)
+    : mBaseDescriptor(baseDescriptor), mHeapIndex(heapIndex) {}
 
-    D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::GetBaseDescriptor() const {
-        ASSERT(IsValid());
-        return mBaseDescriptor;
-    }
+D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+    ASSERT(IsValid());
+    return mBaseDescriptor;
+}
 
-    D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
-        uint32_t sizeIncrementInBytes,
-        uint32_t offsetInDescriptorCount) const {
-        ASSERT(IsValid());
-        D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseDescriptor;
-        cpuHandle.ptr += sizeIncrementInBytes * offsetInDescriptorCount;
-        return cpuHandle;
-    }
+D3D12_CPU_DESCRIPTOR_HANDLE CPUDescriptorHeapAllocation::OffsetFrom(
+    uint32_t sizeIncrementInBytes,
+    uint32_t offsetInDescriptorCount) const {
+    ASSERT(IsValid());
+    D3D12_CPU_DESCRIPTOR_HANDLE cpuHandle = mBaseDescriptor;
+    cpuHandle.ptr += sizeIncrementInBytes * offsetInDescriptorCount;
+    return cpuHandle;
+}
 
-    uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const {
-        ASSERT(mHeapIndex >= 0);
-        return mHeapIndex;
-    }
+uint32_t CPUDescriptorHeapAllocation::GetHeapIndex() const {
+    ASSERT(mHeapIndex >= 0);
+    return mHeapIndex;
+}
 
-    bool CPUDescriptorHeapAllocation::IsValid() const {
-        return mBaseDescriptor.ptr != 0;
-    }
+bool CPUDescriptorHeapAllocation::IsValid() const {
+    return mBaseDescriptor.ptr != 0;
+}
 
-    void CPUDescriptorHeapAllocation::Invalidate() {
-        mBaseDescriptor = {0};
-    }
+void CPUDescriptorHeapAllocation::Invalidate() {
+    mBaseDescriptor = {0};
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
index 5c85268..58c4eb5 100644
--- a/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
+++ b/src/dawn/native/d3d12/CPUDescriptorHeapAllocationD3D12.h
@@ -21,26 +21,26 @@
 
 namespace dawn::native::d3d12 {
 
-    // Wrapper for a handle into a CPU-only descriptor heap.
-    class CPUDescriptorHeapAllocation {
-      public:
-        CPUDescriptorHeapAllocation() = default;
-        CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
+// Wrapper for a handle into a CPU-only descriptor heap.
+class CPUDescriptorHeapAllocation {
+  public:
+    CPUDescriptorHeapAllocation() = default;
+    CPUDescriptorHeapAllocation(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor, uint32_t heapIndex);
 
-        D3D12_CPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+    D3D12_CPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
 
-        D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
-                                               uint32_t offsetInDescriptorCount) const;
-        uint32_t GetHeapIndex() const;
+    D3D12_CPU_DESCRIPTOR_HANDLE OffsetFrom(uint32_t sizeIncrementInBytes,
+                                           uint32_t offsetInDescriptorCount) const;
+    uint32_t GetHeapIndex() const;
 
-        bool IsValid() const;
+    bool IsValid() const;
 
-        void Invalidate();
+    void Invalidate();
 
-      private:
-        D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
-        uint32_t mHeapIndex = -1;
-    };
+  private:
+    D3D12_CPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+    uint32_t mHeapIndex = -1;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/CommandAllocatorManager.cpp b/src/dawn/native/d3d12/CommandAllocatorManager.cpp
index 88ac0b8..892cc32 100644
--- a/src/dawn/native/d3d12/CommandAllocatorManager.cpp
+++ b/src/dawn/native/d3d12/CommandAllocatorManager.cpp
@@ -22,51 +22,51 @@
 
 namespace dawn::native::d3d12 {
 
-    CommandAllocatorManager::CommandAllocatorManager(Device* device)
-        : device(device), mAllocatorCount(0) {
-        mFreeAllocators.set();
+CommandAllocatorManager::CommandAllocatorManager(Device* device)
+    : device(device), mAllocatorCount(0) {
+    mFreeAllocators.set();
+}
+
+ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
+    // If there are no free allocators, get the oldest serial in flight and wait on it
+    if (mFreeAllocators.none()) {
+        const ExecutionSerial firstSerial = mInFlightCommandAllocators.FirstSerial();
+        DAWN_TRY(device->WaitForSerial(firstSerial));
+        DAWN_TRY(Tick(firstSerial));
     }
 
-    ResultOrError<ID3D12CommandAllocator*> CommandAllocatorManager::ReserveCommandAllocator() {
-        // If there are no free allocators, get the oldest serial in flight and wait on it
-        if (mFreeAllocators.none()) {
-            const ExecutionSerial firstSerial = mInFlightCommandAllocators.FirstSerial();
-            DAWN_TRY(device->WaitForSerial(firstSerial));
-            DAWN_TRY(Tick(firstSerial));
-        }
+    ASSERT(mFreeAllocators.any());
 
-        ASSERT(mFreeAllocators.any());
+    // Get the index of the first free allocator from the bitset
+    unsigned int firstFreeIndex = *(IterateBitSet(mFreeAllocators).begin());
 
-        // Get the index of the first free allocator from the bitset
-        unsigned int firstFreeIndex = *(IterateBitSet(mFreeAllocators).begin());
-
-        if (firstFreeIndex >= mAllocatorCount) {
-            ASSERT(firstFreeIndex == mAllocatorCount);
-            mAllocatorCount++;
-            DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateCommandAllocator(
-                                      D3D12_COMMAND_LIST_TYPE_DIRECT,
-                                      IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
-                                  "D3D12 create command allocator"));
-        }
-
-        // Mark the command allocator as used
-        mFreeAllocators.reset(firstFreeIndex);
-
-        // Enqueue the command allocator. It will be scheduled for reset after the next
-        // ExecuteCommandLists
-        mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
-                                           device->GetPendingCommandSerial());
-        return mCommandAllocators[firstFreeIndex].Get();
+    if (firstFreeIndex >= mAllocatorCount) {
+        ASSERT(firstFreeIndex == mAllocatorCount);
+        mAllocatorCount++;
+        DAWN_TRY(CheckHRESULT(
+            device->GetD3D12Device()->CreateCommandAllocator(
+                D3D12_COMMAND_LIST_TYPE_DIRECT, IID_PPV_ARGS(&mCommandAllocators[firstFreeIndex])),
+            "D3D12 create command allocator"));
     }
 
-    MaybeError CommandAllocatorManager::Tick(ExecutionSerial lastCompletedSerial) {
-        // Reset all command allocators that are no longer in flight
-        for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
-            DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
-            mFreeAllocators.set(it.index);
-        }
-        mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
-        return {};
+    // Mark the command allocator as used
+    mFreeAllocators.reset(firstFreeIndex);
+
+    // Enqueue the command allocator. It will be scheduled for reset after the next
+    // ExecuteCommandLists
+    mInFlightCommandAllocators.Enqueue({mCommandAllocators[firstFreeIndex], firstFreeIndex},
+                                       device->GetPendingCommandSerial());
+    return mCommandAllocators[firstFreeIndex].Get();
+}
+
+MaybeError CommandAllocatorManager::Tick(ExecutionSerial lastCompletedSerial) {
+    // Reset all command allocators that are no longer in flight
+    for (auto it : mInFlightCommandAllocators.IterateUpTo(lastCompletedSerial)) {
+        DAWN_TRY(CheckHRESULT(it.commandAllocator->Reset(), "D3D12 reset command allocator"));
+        mFreeAllocators.set(it.index);
     }
+    mInFlightCommandAllocators.ClearUpTo(lastCompletedSerial);
+    return {};
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CommandAllocatorManager.h b/src/dawn/native/d3d12/CommandAllocatorManager.h
index 6c2089a..708ba44 100644
--- a/src/dawn/native/d3d12/CommandAllocatorManager.h
+++ b/src/dawn/native/d3d12/CommandAllocatorManager.h
@@ -25,34 +25,34 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class CommandAllocatorManager {
-      public:
-        explicit CommandAllocatorManager(Device* device);
+class CommandAllocatorManager {
+  public:
+    explicit CommandAllocatorManager(Device* device);
 
-        // A CommandAllocator that is reserved must be used on the next ExecuteCommandLists
-        // otherwise its commands may be reset before execution has completed on the GPU
-        ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
-        MaybeError Tick(ExecutionSerial lastCompletedSerial);
+    // A CommandAllocator that is reserved must be used on the next ExecuteCommandLists,
+    // otherwise its commands may be reset before execution has completed on the GPU
+    ResultOrError<ID3D12CommandAllocator*> ReserveCommandAllocator();
+    MaybeError Tick(ExecutionSerial lastCompletedSerial);
 
-      private:
-        Device* device;
+  private:
+    Device* device;
 
-        // This must be at least 2 because the Device and Queue use separate command allocators
-        static constexpr unsigned int kMaxCommandAllocators = 32;
-        unsigned int mAllocatorCount;
+    // This must be at least 2 because the Device and Queue use separate command allocators
+    static constexpr unsigned int kMaxCommandAllocators = 32;
+    unsigned int mAllocatorCount;
 
-        struct IndexedCommandAllocator {
-            ComPtr<ID3D12CommandAllocator> commandAllocator;
-            unsigned int index;
-        };
-
-        ComPtr<ID3D12CommandAllocator> mCommandAllocators[kMaxCommandAllocators];
-        std::bitset<kMaxCommandAllocators> mFreeAllocators;
-        SerialQueue<ExecutionSerial, IndexedCommandAllocator> mInFlightCommandAllocators;
+    struct IndexedCommandAllocator {
+        ComPtr<ID3D12CommandAllocator> commandAllocator;
+        unsigned int index;
     };
 
+    ComPtr<ID3D12CommandAllocator> mCommandAllocators[kMaxCommandAllocators];
+    std::bitset<kMaxCommandAllocators> mFreeAllocators;
+    SerialQueue<ExecutionSerial, IndexedCommandAllocator> mInFlightCommandAllocators;
+};
+
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_COMMANDALLOCATORMANAGER_H_
diff --git a/src/dawn/native/d3d12/CommandBufferD3D12.cpp b/src/dawn/native/d3d12/CommandBufferD3D12.cpp
index e699032..77c7ba3 100644
--- a/src/dawn/native/d3d12/CommandBufferD3D12.cpp
+++ b/src/dawn/native/d3d12/CommandBufferD3D12.cpp
@@ -39,1625 +39,1573 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
+namespace {
 
-        DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
-            switch (format) {
-                case wgpu::IndexFormat::Undefined:
-                    return DXGI_FORMAT_UNKNOWN;
-                case wgpu::IndexFormat::Uint16:
-                    return DXGI_FORMAT_R16_UINT;
-                case wgpu::IndexFormat::Uint32:
-                    return DXGI_FORMAT_R32_UINT;
-            }
+DXGI_FORMAT DXGIIndexFormat(wgpu::IndexFormat format) {
+    switch (format) {
+        case wgpu::IndexFormat::Undefined:
+            return DXGI_FORMAT_UNKNOWN;
+        case wgpu::IndexFormat::Uint16:
+            return DXGI_FORMAT_R16_UINT;
+        case wgpu::IndexFormat::Uint32:
+            return DXGI_FORMAT_R32_UINT;
+    }
+}
+
+D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
+    switch (type) {
+        case wgpu::QueryType::Occlusion:
+            return D3D12_QUERY_TYPE_BINARY_OCCLUSION;
+        case wgpu::QueryType::PipelineStatistics:
+            return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
+        case wgpu::QueryType::Timestamp:
+            return D3D12_QUERY_TYPE_TIMESTAMP;
+    }
+}
+
+bool CanUseCopyResource(const TextureCopy& src, const TextureCopy& dst, const Extent3D& copySize) {
+    // Checked by validation
+    ASSERT(src.texture->GetSampleCount() == dst.texture->GetSampleCount());
+    ASSERT(src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()));
+    ASSERT(src.aspect == dst.aspect);
+
+    const Extent3D& srcSize = src.texture->GetSize();
+    const Extent3D& dstSize = dst.texture->GetSize();
+
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
+    // In order to use D3D12's copy resource, the textures must be the same dimensions, and
+    // the copy must be of the entire resource.
+    // TODO(dawn:129): Support 1D textures.
+    return src.aspect == src.texture->GetFormat().aspects &&
+           src.texture->GetDimension() == dst.texture->GetDimension() &&  //
+           dst.texture->GetNumMipLevels() == 1 &&                         //
+           src.texture->GetNumMipLevels() == 1 &&  // A copy command is of a single mip, so
+                                                   // if a resource has more than one, we
+                                                   // definitely cannot use CopyResource.
+           copySize.width == dstSize.width &&      //
+           copySize.width == srcSize.width &&      //
+           copySize.height == dstSize.height &&    //
+           copySize.height == srcSize.height &&    //
+           copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers &&  //
+           copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
+}
+
+void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList, WriteTimestampCmd* cmd) {
+    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+    ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
+    commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP, cmd->queryIndex);
+}
+
+void RecordResolveQuerySetCmd(ID3D12GraphicsCommandList* commandList,
+                              Device* device,
+                              QuerySet* querySet,
+                              uint32_t firstQuery,
+                              uint32_t queryCount,
+                              Buffer* destination,
+                              uint64_t destinationOffset) {
+    const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+    auto currentIt = availability.begin() + firstQuery;
+    auto lastIt = availability.begin() + firstQuery + queryCount;
+
+    // Traverse available queries in the range of [firstQuery, firstQuery + queryCount - 1]
+    while (currentIt != lastIt) {
+        auto firstTrueIt = std::find(currentIt, lastIt, true);
+        // No available query found for resolving
+        if (firstTrueIt == lastIt) {
+            break;
         }
+        auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
 
-        D3D12_QUERY_TYPE D3D12QueryType(wgpu::QueryType type) {
-            switch (type) {
-                case wgpu::QueryType::Occlusion:
-                    return D3D12_QUERY_TYPE_BINARY_OCCLUSION;
-                case wgpu::QueryType::PipelineStatistics:
-                    return D3D12_QUERY_TYPE_PIPELINE_STATISTICS;
-                case wgpu::QueryType::Timestamp:
-                    return D3D12_QUERY_TYPE_TIMESTAMP;
-            }
+        // The query index of firstTrueIt where the resolving starts
+        uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+        // The number of queries between firstTrueIt and nextFalseIt that need to be resolved
+        uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+
+        // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
+        uint32_t resolveDestinationOffset =
+            destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+
+        // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+        commandList->ResolveQueryData(
+            querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()), resolveQueryIndex,
+            resolveQueryCount, destination->GetD3D12Resource(), resolveDestinationOffset);
+
+        // Set current iterator to next false
+        currentIt = nextFalseIt;
+    }
+}
+
+void RecordFirstIndexOffset(ID3D12GraphicsCommandList* commandList,
+                            RenderPipeline* pipeline,
+                            uint32_t firstVertex,
+                            uint32_t firstInstance) {
+    if (!pipeline->UsesVertexOrInstanceIndex()) {
+        return;
+    }
+    std::array<uint32_t, 2> offsets{firstVertex, firstInstance};
+    PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+    commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
+                                               offsets.size(), offsets.data(), 0);
+}
+
+bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
+                                    const TextureCopy& srcCopy,
+                                    const TextureCopy& dstCopy) {
+    // Currently we only need the workaround for an Intel D3D12 driver issue.
+    if (device->IsToggleEnabled(
+            Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
+        bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
+        ASSERT(srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
+
+        // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, plus the texel block
+        // sizes of depth stencil formats are always no less than 4 bytes.
+        bool isSmallColorFormat =
+            HasOneBit(srcCopy.aspect) &&
+            srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
+        if (copyToLesserLevel && isSmallColorFormat) {
+            return true;
         }
+    }
 
-        bool CanUseCopyResource(const TextureCopy& src,
-                                const TextureCopy& dst,
-                                const Extent3D& copySize) {
-            // Checked by validation
-            ASSERT(src.texture->GetSampleCount() == dst.texture->GetSampleCount());
-            ASSERT(src.texture->GetFormat().CopyCompatibleWith(dst.texture->GetFormat()));
-            ASSERT(src.aspect == dst.aspect);
+    return false;
+}
 
-            const Extent3D& srcSize = src.texture->GetSize();
-            const Extent3D& dstSize = dst.texture->GetSize();
+MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+                                                const TextureCopy& srcCopy,
+                                                const TextureCopy& dstCopy,
+                                                const Extent3D& copySize) {
+    ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
+    ASSERT(srcCopy.aspect == dstCopy.aspect);
+    dawn::native::Format format = srcCopy.texture->GetFormat();
+    const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+    ASSERT(copySize.width % blockInfo.width == 0);
+    uint32_t widthInBlocks = copySize.width / blockInfo.width;
+    ASSERT(copySize.height % blockInfo.height == 0);
+    uint32_t heightInBlocks = copySize.height / blockInfo.height;
 
-            // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copyresource
-            // In order to use D3D12's copy resource, the textures must be the same dimensions, and
-            // the copy must be of the entire resource.
-            // TODO(dawn:129): Support 1D textures.
-            return src.aspect == src.texture->GetFormat().aspects &&
-                   src.texture->GetDimension() == dst.texture->GetDimension() &&  //
-                   dst.texture->GetNumMipLevels() == 1 &&                         //
-                   src.texture->GetNumMipLevels() == 1 &&  // A copy command is of a single mip, so
-                                                           // if a resource has more than one, we
-                                                           // definitely cannot use CopyResource.
-                   copySize.width == dstSize.width &&      //
-                   copySize.width == srcSize.width &&      //
-                   copySize.height == dstSize.height &&    //
-                   copySize.height == srcSize.height &&    //
-                   copySize.depthOrArrayLayers == dstSize.depthOrArrayLayers &&  //
-                   copySize.depthOrArrayLayers == srcSize.depthOrArrayLayers;
+    // Create tempBuffer
+    uint32_t bytesPerRow = Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
+    uint32_t rowsPerImage = heightInBlocks;
+
+    // The size of the temporary buffer doesn't need to be a multiple of 4 because we don't
+    // need to set mappedAtCreation to true.
+    auto tempBufferSize =
+        ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
+
+    BufferDescriptor tempBufferDescriptor;
+    tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
+    Device* device = ToBackend(srcCopy.texture->GetDevice());
+    Ref<BufferBase> tempBufferBase;
+    DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+    Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
+
+    BufferCopy bufferCopy;
+    bufferCopy.buffer = tempBuffer;
+    bufferCopy.offset = 0;
+    bufferCopy.bytesPerRow = bytesPerRow;
+    bufferCopy.rowsPerImage = rowsPerImage;
+
+    // Copy from source texture into tempBuffer
+    tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
+    RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, recordingContext->GetCommandList(),
+                            bufferCopy, srcCopy, copySize);
+
+    // Copy from tempBuffer into destination texture
+    tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
+    RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, recordingContext->GetCommandList(),
+                            bufferCopy, dstCopy, copySize);
+
+    // Save tempBuffer into recordingContext
+    recordingContext->AddToTempBuffers(std::move(tempBuffer));
+
+    return {};
+}
+
+void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
+                                    ComputePipeline* pipeline,
+                                    DispatchCmd* dispatch) {
+    if (!pipeline->UsesNumWorkgroups()) {
+        return;
+    }
+
+    PipelineLayout* layout = ToBackend(pipeline->GetLayout());
+    commandList->SetComputeRoot32BitConstants(layout->GetNumWorkgroupsParameterIndex(), 3, dispatch,
+                                              0);
+}
+
+// Records the necessary barriers for a synchronization scope using the resource usage
+// data pre-computed in the frontend. Also performs lazy initialization if required.
+// Returns whether any UAV are used in the synchronization scope.
+bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
+                                    const SyncScopeResourceUsage& usages) {
+    std::vector<D3D12_RESOURCE_BARRIER> barriers;
+
+    ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+    wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
+
+    for (size_t i = 0; i < usages.buffers.size(); ++i) {
+        Buffer* buffer = ToBackend(usages.buffers[i]);
+
+        // TODO(crbug.com/dawn/852): clear storage buffers with
+        // ClearUnorderedAccessView*().
+        buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
+
+        D3D12_RESOURCE_BARRIER barrier;
+        if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
+                                                    usages.bufferUsages[i])) {
+            barriers.push_back(barrier);
         }
+        bufferUsages |= usages.bufferUsages[i];
+    }
 
-        void RecordWriteTimestampCmd(ID3D12GraphicsCommandList* commandList,
-                                     WriteTimestampCmd* cmd) {
-            QuerySet* querySet = ToBackend(cmd->querySet.Get());
-            ASSERT(D3D12QueryType(querySet->GetQueryType()) == D3D12_QUERY_TYPE_TIMESTAMP);
-            commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_TIMESTAMP,
-                                  cmd->queryIndex);
-        }
+    wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
 
-        void RecordResolveQuerySetCmd(ID3D12GraphicsCommandList* commandList,
-                                      Device* device,
-                                      QuerySet* querySet,
-                                      uint32_t firstQuery,
-                                      uint32_t queryCount,
-                                      Buffer* destination,
-                                      uint64_t destinationOffset) {
-            const std::vector<bool>& availability = querySet->GetQueryAvailability();
+    for (size_t i = 0; i < usages.textures.size(); ++i) {
+        Texture* texture = ToBackend(usages.textures[i]);
 
-            auto currentIt = availability.begin() + firstQuery;
-            auto lastIt = availability.begin() + firstQuery + queryCount;
-
-            // Traverse available queries in the range of [firstQuery, firstQuery +  queryCount - 1]
-            while (currentIt != lastIt) {
-                auto firstTrueIt = std::find(currentIt, lastIt, true);
-                // No available query found for resolving
-                if (firstTrueIt == lastIt) {
-                    break;
+        // Clear subresources that are not render attachments. Render attachments will be
+        // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+        // subresource has not been initialized before the render pass.
+        usages.textureUsages[i].Iterate(
+            [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                    texture->EnsureSubresourceContentInitialized(commandContext, range);
                 }
-                auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+                textureUsages |= usage;
+            });
 
-                // The query index of firstTrueIt where the resolving starts
-                uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
-                // The queries count between firstTrueIt and nextFalseIt need to be resolved
-                uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+        ToBackend(usages.textures[i])
+            ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
+                                                     usages.textureUsages[i]);
+    }
 
-                // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
-                uint32_t resolveDestinationOffset =
-                    destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+    if (barriers.size()) {
+        commandList->ResourceBarrier(barriers.size(), barriers.data());
+    }
 
-                // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
-                commandList->ResolveQueryData(
-                    querySet->GetQueryHeap(), D3D12QueryType(querySet->GetQueryType()),
-                    resolveQueryIndex, resolveQueryCount, destination->GetD3D12Resource(),
-                    resolveDestinationOffset);
+    return (bufferUsages & wgpu::BufferUsage::Storage ||
+            textureUsages & wgpu::TextureUsage::StorageBinding);
+}
 
-                // Set current iterator to next false
-                currentIt = nextFalseIt;
+}  // anonymous namespace
+
+class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
+    using Base = BindGroupTrackerBase;
+
+  public:
+    explicit BindGroupStateTracker(Device* device)
+        : BindGroupTrackerBase(),
+          mDevice(device),
+          mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
+          mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {}
+
+    void SetInComputePass(bool inCompute_) { mInCompute = inCompute_; }
+
+    MaybeError Apply(CommandRecordingContext* commandContext) {
+        BeforeApply();
+
+        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+        UpdateRootSignatureIfNecessary(commandList);
+
+        // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
+        // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
+        // at any given time. This means that when we switch heaps, all other currently bound
+        // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
+        // the signal to change the currently bound heaps.
+        // Re-populating all bindgroups after the last one fails causes duplicated allocations
+        // to occur on overflow.
+        bool didCreateBindGroupViews = true;
+        bool didCreateBindGroupSamplers = true;
+        for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
+            BindGroup* group = ToBackend(mBindGroups[index]);
+            didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
+            didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
+            if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
+                break;
             }
         }
 
-        void RecordFirstIndexOffset(ID3D12GraphicsCommandList* commandList,
-                                    RenderPipeline* pipeline,
-                                    uint32_t firstVertex,
-                                    uint32_t firstInstance) {
-            if (!pipeline->UsesVertexOrInstanceIndex()) {
-                return;
-            }
-            std::array<uint32_t, 2> offsets{firstVertex, firstInstance};
-            PipelineLayout* layout = ToBackend(pipeline->GetLayout());
-            commandList->SetGraphicsRoot32BitConstants(layout->GetFirstIndexOffsetParameterIndex(),
-                                                       offsets.size(), offsets.data(), 0);
-        }
-
-        bool ShouldCopyUsingTemporaryBuffer(DeviceBase* device,
-                                            const TextureCopy& srcCopy,
-                                            const TextureCopy& dstCopy) {
-            // Currently we only need the workaround for an Intel D3D12 driver issue.
-            if (device->IsToggleEnabled(
-                    Toggle::
-                        UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel)) {
-                bool copyToLesserLevel = srcCopy.mipLevel > dstCopy.mipLevel;
-                ASSERT(
-                    srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
-
-                // GetAspectInfo(aspect) requires HasOneBit(aspect) == true, plus the texel block
-                // sizes of depth stencil formats are always no less than 4 bytes.
-                bool isSmallColorFormat =
-                    HasOneBit(srcCopy.aspect) &&
-                    srcCopy.texture->GetFormat().GetAspectInfo(srcCopy.aspect).block.byteSize < 4u;
-                if (copyToLesserLevel && isSmallColorFormat) {
-                    return true;
-                }
+        if (!didCreateBindGroupViews || !didCreateBindGroupSamplers) {
+            if (!didCreateBindGroupViews) {
+                DAWN_TRY(mViewAllocator->AllocateAndSwitchShaderVisibleHeap());
             }
 
-            return false;
-        }
-
-        MaybeError RecordCopyTextureWithTemporaryBuffer(CommandRecordingContext* recordingContext,
-                                                        const TextureCopy& srcCopy,
-                                                        const TextureCopy& dstCopy,
-                                                        const Extent3D& copySize) {
-            ASSERT(srcCopy.texture->GetFormat().format == dstCopy.texture->GetFormat().format);
-            ASSERT(srcCopy.aspect == dstCopy.aspect);
-            dawn::native::Format format = srcCopy.texture->GetFormat();
-            const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
-            ASSERT(copySize.width % blockInfo.width == 0);
-            uint32_t widthInBlocks = copySize.width / blockInfo.width;
-            ASSERT(copySize.height % blockInfo.height == 0);
-            uint32_t heightInBlocks = copySize.height / blockInfo.height;
-
-            // Create tempBuffer
-            uint32_t bytesPerRow =
-                Align(blockInfo.byteSize * widthInBlocks, kTextureBytesPerRowAlignment);
-            uint32_t rowsPerImage = heightInBlocks;
-
-            // The size of temporary buffer isn't needed to be a multiple of 4 because we don't
-            // need to set mappedAtCreation to be true.
-            auto tempBufferSize =
-                ComputeRequiredBytesInCopy(blockInfo, copySize, bytesPerRow, rowsPerImage);
-
-            BufferDescriptor tempBufferDescriptor;
-            tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
-            tempBufferDescriptor.size = tempBufferSize.AcquireSuccess();
-            Device* device = ToBackend(srcCopy.texture->GetDevice());
-            Ref<BufferBase> tempBufferBase;
-            DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
-            Ref<Buffer> tempBuffer = ToBackend(std::move(tempBufferBase));
-
-            BufferCopy bufferCopy;
-            bufferCopy.buffer = tempBuffer;
-            bufferCopy.offset = 0;
-            bufferCopy.bytesPerRow = bytesPerRow;
-            bufferCopy.rowsPerImage = rowsPerImage;
-
-            // Copy from source texture into tempBuffer
-            tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopyDst);
-            RecordBufferTextureCopy(BufferTextureCopyDirection::T2B,
-                                    recordingContext->GetCommandList(), bufferCopy, srcCopy,
-                                    copySize);
-
-            // Copy from tempBuffer into destination texture
-            tempBuffer->TrackUsageAndTransitionNow(recordingContext, wgpu::BufferUsage::CopySrc);
-            RecordBufferTextureCopy(BufferTextureCopyDirection::B2T,
-                                    recordingContext->GetCommandList(), bufferCopy, dstCopy,
-                                    copySize);
-
-            // Save tempBuffer into recordingContext
-            recordingContext->AddToTempBuffers(std::move(tempBuffer));
-
-            return {};
-        }
-
-        void RecordNumWorkgroupsForDispatch(ID3D12GraphicsCommandList* commandList,
-                                            ComputePipeline* pipeline,
-                                            DispatchCmd* dispatch) {
-            if (!pipeline->UsesNumWorkgroups()) {
-                return;
+            if (!didCreateBindGroupSamplers) {
+                DAWN_TRY(mSamplerAllocator->AllocateAndSwitchShaderVisibleHeap());
             }
 
-            PipelineLayout* layout = ToBackend(pipeline->GetLayout());
-            commandList->SetComputeRoot32BitConstants(layout->GetNumWorkgroupsParameterIndex(), 3,
-                                                      dispatch, 0);
-        }
+            mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
+            mDirtyBindGroups |= mBindGroupLayoutsMask;
 
-        // Records the necessary barriers for a synchronization scope using the resource usage
-        // data pre-computed in the frontend. Also performs lazy initialization if required.
-        // Returns whether any UAV are used in the synchronization scope.
-        bool TransitionAndClearForSyncScope(CommandRecordingContext* commandContext,
-                                            const SyncScopeResourceUsage& usages) {
-            std::vector<D3D12_RESOURCE_BARRIER> barriers;
+            // Must be called before applying the bindgroups.
+            SetID3D12DescriptorHeaps(commandList);
 
-            ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
-            wgpu::BufferUsage bufferUsages = wgpu::BufferUsage::None;
-
-            for (size_t i = 0; i < usages.buffers.size(); ++i) {
-                Buffer* buffer = ToBackend(usages.buffers[i]);
-
-                // TODO(crbug.com/dawn/852): clear storage buffers with
-                // ClearUnorderedAccessView*().
-                buffer->GetDevice()->ConsumedError(buffer->EnsureDataInitialized(commandContext));
-
-                D3D12_RESOURCE_BARRIER barrier;
-                if (buffer->TrackUsageAndGetResourceBarrier(commandContext, &barrier,
-                                                            usages.bufferUsages[i])) {
-                    barriers.push_back(barrier);
-                }
-                bufferUsages |= usages.bufferUsages[i];
-            }
-
-            wgpu::TextureUsage textureUsages = wgpu::TextureUsage::None;
-
-            for (size_t i = 0; i < usages.textures.size(); ++i) {
-                Texture* texture = ToBackend(usages.textures[i]);
-
-                // Clear subresources that are not render attachments. Render attachments will be
-                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
-                // subresource has not been initialized before the render pass.
-                usages.textureUsages[i].Iterate(
-                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
-                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                            texture->EnsureSubresourceContentInitialized(commandContext, range);
-                        }
-                        textureUsages |= usage;
-                    });
-
-                ToBackend(usages.textures[i])
-                    ->TrackUsageAndGetResourceBarrierForPass(commandContext, &barriers,
-                                                             usages.textureUsages[i]);
-            }
-
-            if (barriers.size()) {
-                commandList->ResourceBarrier(barriers.size(), barriers.data());
-            }
-
-            return (bufferUsages & wgpu::BufferUsage::Storage ||
-                    textureUsages & wgpu::TextureUsage::StorageBinding);
-        }
-
-    }  // anonymous namespace
-
-    class BindGroupStateTracker : public BindGroupTrackerBase<false, uint64_t> {
-        using Base = BindGroupTrackerBase;
-
-      public:
-        explicit BindGroupStateTracker(Device* device)
-            : BindGroupTrackerBase(),
-              mDevice(device),
-              mViewAllocator(device->GetViewShaderVisibleDescriptorAllocator()),
-              mSamplerAllocator(device->GetSamplerShaderVisibleDescriptorAllocator()) {
-        }
-
-        void SetInComputePass(bool inCompute_) {
-            mInCompute = inCompute_;
-        }
-
-        MaybeError Apply(CommandRecordingContext* commandContext) {
-            BeforeApply();
-
-            ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-            UpdateRootSignatureIfNecessary(commandList);
-
-            // Bindgroups are allocated in shader-visible descriptor heaps which are managed by a
-            // ringbuffer. There can be a single shader-visible descriptor heap of each type bound
-            // at any given time. This means that when we switch heaps, all other currently bound
-            // bindgroups must be re-populated. Bindgroups can fail allocation gracefully which is
-            // the signal to change the bounded heaps.
-            // Re-populating all bindgroups after the last one fails causes duplicated allocations
-            // to occur on overflow.
-            bool didCreateBindGroupViews = true;
-            bool didCreateBindGroupSamplers = true;
-            for (BindGroupIndex index : IterateBitSet(mDirtyBindGroups)) {
+            for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
                 BindGroup* group = ToBackend(mBindGroups[index]);
                 didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
                 didCreateBindGroupSamplers = group->PopulateSamplers(mDevice, mSamplerAllocator);
-                if (!didCreateBindGroupViews && !didCreateBindGroupSamplers) {
-                    break;
-                }
+                ASSERT(didCreateBindGroupViews);
+                ASSERT(didCreateBindGroupSamplers);
             }
-
-            if (!didCreateBindGroupViews || !didCreateBindGroupSamplers) {
-                if (!didCreateBindGroupViews) {
-                    DAWN_TRY(mViewAllocator->AllocateAndSwitchShaderVisibleHeap());
-                }
-
-                if (!didCreateBindGroupSamplers) {
-                    DAWN_TRY(mSamplerAllocator->AllocateAndSwitchShaderVisibleHeap());
-                }
-
-                mDirtyBindGroupsObjectChangedOrIsDynamic |= mBindGroupLayoutsMask;
-                mDirtyBindGroups |= mBindGroupLayoutsMask;
-
-                // Must be called before applying the bindgroups.
-                SetID3D12DescriptorHeaps(commandList);
-
-                for (BindGroupIndex index : IterateBitSet(mBindGroupLayoutsMask)) {
-                    BindGroup* group = ToBackend(mBindGroups[index]);
-                    didCreateBindGroupViews = group->PopulateViews(mViewAllocator);
-                    didCreateBindGroupSamplers =
-                        group->PopulateSamplers(mDevice, mSamplerAllocator);
-                    ASSERT(didCreateBindGroupViews);
-                    ASSERT(didCreateBindGroupSamplers);
-                }
-            }
-
-            for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
-                BindGroup* group = ToBackend(mBindGroups[index]);
-                ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
-                               mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
-            }
-
-            AfterApply();
-
-            return {};
         }
 
-        void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
-            ASSERT(commandList != nullptr);
-            std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps = {
-                mViewAllocator->GetShaderVisibleHeap(), mSamplerAllocator->GetShaderVisibleHeap()};
-            ASSERT(descriptorHeaps[0] != nullptr);
-            ASSERT(descriptorHeaps[1] != nullptr);
-            commandList->SetDescriptorHeaps(descriptorHeaps.size(), descriptorHeaps.data());
+        for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+            BindGroup* group = ToBackend(mBindGroups[index]);
+            ApplyBindGroup(commandList, ToBackend(mPipelineLayout), index, group,
+                           mDynamicOffsetCounts[index], mDynamicOffsets[index].data());
+        }
 
-            // Descriptor table state is undefined at the beginning of a command list and after
-            // descriptor heaps are changed on a command list. Invalidate the root sampler tables to
-            // reset the root descriptor table for samplers, otherwise the shader cannot access the
-            // descriptor heaps.
+        AfterApply();
+
+        return {};
+    }
+
+    void SetID3D12DescriptorHeaps(ID3D12GraphicsCommandList* commandList) {
+        ASSERT(commandList != nullptr);
+        std::array<ID3D12DescriptorHeap*, 2> descriptorHeaps = {
+            mViewAllocator->GetShaderVisibleHeap(), mSamplerAllocator->GetShaderVisibleHeap()};
+        ASSERT(descriptorHeaps[0] != nullptr);
+        ASSERT(descriptorHeaps[1] != nullptr);
+        commandList->SetDescriptorHeaps(descriptorHeaps.size(), descriptorHeaps.data());
+
+        // Descriptor table state is undefined at the beginning of a command list and after
+        // descriptor heaps are changed on a command list. Invalidate the root sampler tables to
+        // reset the root descriptor table for samplers, otherwise the shader cannot access the
+        // descriptor heaps.
+        mBoundRootSamplerTables = {};
+    }
+
+  private:
+    void UpdateRootSignatureIfNecessary(ID3D12GraphicsCommandList* commandList) {
+        if (mLastAppliedPipelineLayout != mPipelineLayout) {
+            if (mInCompute) {
+                commandList->SetComputeRootSignature(
+                    ToBackend(mPipelineLayout)->GetRootSignature());
+            } else {
+                commandList->SetGraphicsRootSignature(
+                    ToBackend(mPipelineLayout)->GetRootSignature());
+            }
+            // Invalidate the root sampler tables previously set in the root signature.
             mBoundRootSamplerTables = {};
         }
+    }
 
-      private:
-        void UpdateRootSignatureIfNecessary(ID3D12GraphicsCommandList* commandList) {
-            if (mLastAppliedPipelineLayout != mPipelineLayout) {
-                if (mInCompute) {
-                    commandList->SetComputeRootSignature(
-                        ToBackend(mPipelineLayout)->GetRootSignature());
-                } else {
-                    commandList->SetGraphicsRootSignature(
-                        ToBackend(mPipelineLayout)->GetRootSignature());
+    void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
+                        const PipelineLayout* pipelineLayout,
+                        BindGroupIndex index,
+                        BindGroup* group,
+                        uint32_t dynamicOffsetCountIn,
+                        const uint64_t* dynamicOffsetsIn) {
+        ityp::span<BindingIndex, const uint64_t> dynamicOffsets(dynamicOffsetsIn,
+                                                                BindingIndex(dynamicOffsetCountIn));
+        ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
+
+        // Usually, the application won't set the same offsets many times,
+        // so always try to apply dynamic offsets even if the offsets stay the same
+        if (dynamicOffsets.size() != BindingIndex(0)) {
+            // Update dynamic offsets.
+            // Dynamic buffer bindings are packed at the beginning of the layout.
+            for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
+                 ++bindingIndex) {
+                const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+                if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+                    // Skip dynamic buffers that are not visible. D3D12 does not have None
+                    // visibility.
+                    continue;
                 }
-                // Invalidate the root sampler tables previously set in the root signature.
-                mBoundRootSamplerTables = {};
+
+                uint32_t parameterIndex =
+                    pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
+                BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+
+                // Calculate buffer locations that root descriptors link to. The location
+                // is (base buffer location + initial offset + dynamic offset)
+                uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
+                uint64_t offset = binding.offset + dynamicOffset;
+                D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
+                    ToBackend(binding.buffer)->GetVA() + offset;
+
+                ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
+                switch (bindingInfo.buffer.type) {
+                    case wgpu::BufferBindingType::Uniform:
+                        if (mInCompute) {
+                            commandList->SetComputeRootConstantBufferView(parameterIndex,
+                                                                          bufferLocation);
+                        } else {
+                            commandList->SetGraphicsRootConstantBufferView(parameterIndex,
+                                                                           bufferLocation);
+                        }
+                        break;
+                    case wgpu::BufferBindingType::Storage:
+                    case kInternalStorageBufferBinding:
+                        if (mInCompute) {
+                            commandList->SetComputeRootUnorderedAccessView(parameterIndex,
+                                                                           bufferLocation);
+                        } else {
+                            commandList->SetGraphicsRootUnorderedAccessView(parameterIndex,
+                                                                            bufferLocation);
+                        }
+                        break;
+                    case wgpu::BufferBindingType::ReadOnlyStorage:
+                        if (mInCompute) {
+                            commandList->SetComputeRootShaderResourceView(parameterIndex,
+                                                                          bufferLocation);
+                        } else {
+                            commandList->SetGraphicsRootShaderResourceView(parameterIndex,
+                                                                           bufferLocation);
+                        }
+                        break;
+                    case wgpu::BufferBindingType::Undefined:
+                        UNREACHABLE();
+                }
             }
         }
 
-        void ApplyBindGroup(ID3D12GraphicsCommandList* commandList,
-                            const PipelineLayout* pipelineLayout,
-                            BindGroupIndex index,
-                            BindGroup* group,
-                            uint32_t dynamicOffsetCountIn,
-                            const uint64_t* dynamicOffsetsIn) {
-            ityp::span<BindingIndex, const uint64_t> dynamicOffsets(
-                dynamicOffsetsIn, BindingIndex(dynamicOffsetCountIn));
-            ASSERT(dynamicOffsets.size() == group->GetLayout()->GetDynamicBufferCount());
+        // It's not necessary to update descriptor tables if only the dynamic offset changed.
+        if (!mDirtyBindGroups[index]) {
+            return;
+        }
 
-            // Usually, the application won't set the same offsets many times,
-            // so always try to apply dynamic offsets even if the offsets stay the same
-            if (dynamicOffsets.size() != BindingIndex(0)) {
-                // Update dynamic offsets.
-                // Dynamic buffer bindings are packed at the beginning of the layout.
-                for (BindingIndex bindingIndex{0}; bindingIndex < dynamicOffsets.size();
-                     ++bindingIndex) {
-                    const BindingInfo& bindingInfo =
-                        group->GetLayout()->GetBindingInfo(bindingIndex);
-                    if (bindingInfo.visibility == wgpu::ShaderStage::None) {
-                        // Skip dynamic buffers that are not visible. D3D12 does not have None
-                        // visibility.
-                        continue;
-                    }
+        const uint32_t cbvUavSrvCount =
+            ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
+        const uint32_t samplerCount = ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
 
-                    uint32_t parameterIndex =
-                        pipelineLayout->GetDynamicRootParameterIndex(index, bindingIndex);
-                    BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
-
-                    // Calculate buffer locations that root descriptors links to. The location
-                    // is (base buffer location + initial offset + dynamic offset)
-                    uint64_t dynamicOffset = dynamicOffsets[bindingIndex];
-                    uint64_t offset = binding.offset + dynamicOffset;
-                    D3D12_GPU_VIRTUAL_ADDRESS bufferLocation =
-                        ToBackend(binding.buffer)->GetVA() + offset;
-
-                    ASSERT(bindingInfo.bindingType == BindingInfoType::Buffer);
-                    switch (bindingInfo.buffer.type) {
-                        case wgpu::BufferBindingType::Uniform:
-                            if (mInCompute) {
-                                commandList->SetComputeRootConstantBufferView(parameterIndex,
-                                                                              bufferLocation);
-                            } else {
-                                commandList->SetGraphicsRootConstantBufferView(parameterIndex,
-                                                                               bufferLocation);
-                            }
-                            break;
-                        case wgpu::BufferBindingType::Storage:
-                        case kInternalStorageBufferBinding:
-                            if (mInCompute) {
-                                commandList->SetComputeRootUnorderedAccessView(parameterIndex,
-                                                                               bufferLocation);
-                            } else {
-                                commandList->SetGraphicsRootUnorderedAccessView(parameterIndex,
-                                                                                bufferLocation);
-                            }
-                            break;
-                        case wgpu::BufferBindingType::ReadOnlyStorage:
-                            if (mInCompute) {
-                                commandList->SetComputeRootShaderResourceView(parameterIndex,
-                                                                              bufferLocation);
-                            } else {
-                                commandList->SetGraphicsRootShaderResourceView(parameterIndex,
-                                                                               bufferLocation);
-                            }
-                            break;
-                        case wgpu::BufferBindingType::Undefined:
-                            UNREACHABLE();
-                    }
-                }
+        if (cbvUavSrvCount > 0) {
+            uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
+            const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseViewDescriptor();
+            if (mInCompute) {
+                commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
+            } else {
+                commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
             }
+        }
 
-            // It's not necessary to update descriptor tables if only the dynamic offset changed.
-            if (!mDirtyBindGroups[index]) {
-                return;
-            }
-
-            const uint32_t cbvUavSrvCount =
-                ToBackend(group->GetLayout())->GetCbvUavSrvDescriptorCount();
-            const uint32_t samplerCount =
-                ToBackend(group->GetLayout())->GetSamplerDescriptorCount();
-
-            if (cbvUavSrvCount > 0) {
-                uint32_t parameterIndex = pipelineLayout->GetCbvUavSrvRootParameterIndex(index);
-                const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseViewDescriptor();
+        if (samplerCount > 0) {
+            uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
+            const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor = group->GetBaseSamplerDescriptor();
+            // Check if the group requires its sampler table to be set in the pipeline.
+            // This is because sampler heap allocations could be cached and use the same table.
+            if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
                 if (mInCompute) {
                     commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
                 } else {
                     commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
                 }
-            }
 
-            if (samplerCount > 0) {
-                uint32_t parameterIndex = pipelineLayout->GetSamplerRootParameterIndex(index);
-                const D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor =
-                    group->GetBaseSamplerDescriptor();
-                // Check if the group requires its sampler table to be set in the pipeline.
-                // This because sampler heap allocations could be cached and use the same table.
-                if (mBoundRootSamplerTables[index].ptr != baseDescriptor.ptr) {
-                    if (mInCompute) {
-                        commandList->SetComputeRootDescriptorTable(parameterIndex, baseDescriptor);
-                    } else {
-                        commandList->SetGraphicsRootDescriptorTable(parameterIndex, baseDescriptor);
-                    }
-
-                    mBoundRootSamplerTables[index] = baseDescriptor;
-                }
-            }
-
-            const auto& dynamicStorageBufferLengths = group->GetDynamicStorageBufferLengths();
-            if (dynamicStorageBufferLengths.size() != 0) {
-                uint32_t parameterIndex =
-                    pipelineLayout->GetDynamicStorageBufferLengthsParameterIndex();
-                uint32_t firstRegisterOffset =
-                    pipelineLayout->GetDynamicStorageBufferLengthInfo()[index].firstRegisterOffset;
-
-                if (mInCompute) {
-                    commandList->SetComputeRoot32BitConstants(
-                        parameterIndex, dynamicStorageBufferLengths.size(),
-                        dynamicStorageBufferLengths.data(), firstRegisterOffset);
-                } else {
-                    commandList->SetGraphicsRoot32BitConstants(
-                        parameterIndex, dynamicStorageBufferLengths.size(),
-                        dynamicStorageBufferLengths.data(), firstRegisterOffset);
-                }
+                mBoundRootSamplerTables[index] = baseDescriptor;
             }
         }
 
-        Device* mDevice;
+        const auto& dynamicStorageBufferLengths = group->GetDynamicStorageBufferLengths();
+        if (dynamicStorageBufferLengths.size() != 0) {
+            uint32_t parameterIndex =
+                pipelineLayout->GetDynamicStorageBufferLengthsParameterIndex();
+            uint32_t firstRegisterOffset =
+                pipelineLayout->GetDynamicStorageBufferLengthInfo()[index].firstRegisterOffset;
 
-        bool mInCompute = false;
+            if (mInCompute) {
+                commandList->SetComputeRoot32BitConstants(
+                    parameterIndex, dynamicStorageBufferLengths.size(),
+                    dynamicStorageBufferLengths.data(), firstRegisterOffset);
+            } else {
+                commandList->SetGraphicsRoot32BitConstants(
+                    parameterIndex, dynamicStorageBufferLengths.size(),
+                    dynamicStorageBufferLengths.data(), firstRegisterOffset);
+            }
+        }
+    }
 
-        ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
-            mBoundRootSamplerTables = {};
+    Device* mDevice;
 
-        ShaderVisibleDescriptorAllocator* mViewAllocator;
-        ShaderVisibleDescriptorAllocator* mSamplerAllocator;
-    };
+    bool mInCompute = false;
 
-    namespace {
-        class VertexBufferTracker {
-          public:
-            void OnSetVertexBuffer(VertexBufferSlot slot,
-                                   Buffer* buffer,
-                                   uint64_t offset,
-                                   uint64_t size) {
-                mStartSlot = std::min(mStartSlot, slot);
-                mEndSlot = std::max(mEndSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+    ityp::array<BindGroupIndex, D3D12_GPU_DESCRIPTOR_HANDLE, kMaxBindGroups>
+        mBoundRootSamplerTables = {};
 
-                auto* d3d12BufferView = &mD3D12BufferViews[slot];
-                d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
-                d3d12BufferView->SizeInBytes = size;
-                // The bufferView stride is set based on the vertex state before a draw.
+    ShaderVisibleDescriptorAllocator* mViewAllocator;
+    ShaderVisibleDescriptorAllocator* mSamplerAllocator;
+};
+
+namespace {
+class VertexBufferTracker {
+  public:
+    void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset, uint64_t size) {
+        mStartSlot = std::min(mStartSlot, slot);
+        mEndSlot = std::max(mEndSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+
+        auto* d3d12BufferView = &mD3D12BufferViews[slot];
+        d3d12BufferView->BufferLocation = buffer->GetVA() + offset;
+        d3d12BufferView->SizeInBytes = size;
+        // The bufferView stride is set based on the vertex state before a draw.
+    }
+
+    void Apply(ID3D12GraphicsCommandList* commandList, const RenderPipeline* renderPipeline) {
+        ASSERT(renderPipeline != nullptr);
+
+        VertexBufferSlot startSlot = mStartSlot;
+        VertexBufferSlot endSlot = mEndSlot;
+
+        // If the vertex state has changed, we need to update the StrideInBytes
+        // for the D3D12 buffer views. We also need to extend the dirty range to
+        // touch all these slots because the stride may have changed.
+        if (mLastAppliedRenderPipeline != renderPipeline) {
+            mLastAppliedRenderPipeline = renderPipeline;
+
+            for (VertexBufferSlot slot :
+                 IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+                startSlot = std::min(startSlot, slot);
+                endSlot = std::max(endSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
+                mD3D12BufferViews[slot].StrideInBytes =
+                    renderPipeline->GetVertexBuffer(slot).arrayStride;
+            }
+        }
+
+        if (endSlot <= startSlot) {
+            return;
+        }
+
+        // mD3D12BufferViews is kept up to date with the most recent data passed
+        // to SetVertexBuffer. This makes it correct to only track the start
+        // and end of the dirty range. When Apply is called,
+        // we will at worst set non-dirty vertex buffers in duplicate.
+        commandList->IASetVertexBuffers(static_cast<uint8_t>(startSlot),
+                                        static_cast<uint8_t>(ityp::Sub(endSlot, startSlot)),
+                                        &mD3D12BufferViews[startSlot]);
+
+        mStartSlot = VertexBufferSlot(kMaxVertexBuffers);
+        mEndSlot = VertexBufferSlot(uint8_t(0));
+    }
+
+  private:
+    // startSlot and endSlot indicate the range of dirty vertex buffers.
+    // If there are multiple calls to SetVertexBuffer, the start and end
+    // represent the union of the dirty ranges (the union may have non-dirty
+    // data in the middle of the range).
+    const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
+    VertexBufferSlot mStartSlot{kMaxVertexBuffers};
+    VertexBufferSlot mEndSlot{uint8_t(0)};
+    ityp::array<VertexBufferSlot, D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers> mD3D12BufferViews =
+        {};
+};
+
+void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
+                                   BeginRenderPassCmd* renderPass) {
+    ASSERT(renderPass != nullptr);
+
+    for (ColorAttachmentIndex i :
+         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+        TextureViewBase* resolveTarget = renderPass->colorAttachments[i].resolveTarget.Get();
+        if (resolveTarget == nullptr) {
+            continue;
+        }
+
+        TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
+        Texture* colorTexture = ToBackend(colorView->GetTexture());
+        Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
+
+        // Transition the usages of the color attachment and resolve target.
+        colorTexture->TrackUsageAndTransitionNow(
+            commandContext, D3D12_RESOURCE_STATE_RESOLVE_SOURCE, colorView->GetSubresourceRange());
+        resolveTexture->TrackUsageAndTransitionNow(commandContext,
+                                                   D3D12_RESOURCE_STATE_RESOLVE_DEST,
+                                                   resolveTarget->GetSubresourceRange());
+
+        // Do MSAA resolve with ResolveSubResource().
+        ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
+        ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
+        const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
+            resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(), Aspect::Color);
+        constexpr uint32_t kColorTextureSubresourceIndex = 0;
+        commandContext->GetCommandList()->ResolveSubresource(
+            resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
+            kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
+    }
+}
+
+}  // anonymous namespace
+
+// static
+Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor) {
+    return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+    : CommandBufferBase(encoder, descriptor) {}
+
+MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
+    Device* device = ToBackend(GetDevice());
+    BindGroupStateTracker bindingTracker(device);
+
+    ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+    // Make sure we use the correct descriptors for this command list. Could be done once per
+    // actual command list but here is ok because there should be few command buffers.
+    bindingTracker.SetID3D12DescriptorHeaps(commandList);
+
+    size_t nextComputePassNumber = 0;
+    size_t nextRenderPassNumber = 0;
+
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::BeginComputePass: {
+                mCommands.NextCommand<BeginComputePassCmd>();
+
+                bindingTracker.SetInComputePass(true);
+                DAWN_TRY(
+                    RecordComputePass(commandContext, &bindingTracker,
+                                      GetResourceUsages().computePasses[nextComputePassNumber]));
+
+                nextComputePassNumber++;
+                break;
             }
 
-            void Apply(ID3D12GraphicsCommandList* commandList,
-                       const RenderPipeline* renderPipeline) {
-                ASSERT(renderPipeline != nullptr);
+            case Command::BeginRenderPass: {
+                BeginRenderPassCmd* beginRenderPassCmd =
+                    mCommands.NextCommand<BeginRenderPassCmd>();
 
-                VertexBufferSlot startSlot = mStartSlot;
-                VertexBufferSlot endSlot = mEndSlot;
+                const bool passHasUAV = TransitionAndClearForSyncScope(
+                    commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                bindingTracker.SetInComputePass(false);
 
-                // If the vertex state has changed, we need to update the StrideInBytes
-                // for the D3D12 buffer views. We also need to extend the dirty range to
-                // touch all these slots because the stride may have changed.
-                if (mLastAppliedRenderPipeline != renderPipeline) {
-                    mLastAppliedRenderPipeline = renderPipeline;
+                LazyClearRenderPassAttachments(beginRenderPassCmd);
+                DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
+                                          passHasUAV));
 
-                    for (VertexBufferSlot slot :
-                         IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
-                        startSlot = std::min(startSlot, slot);
-                        endSlot = std::max(endSlot, ityp::Add(slot, VertexBufferSlot(uint8_t(1))));
-                        mD3D12BufferViews[slot].StrideInBytes =
-                            renderPipeline->GetVertexBuffer(slot).arrayStride;
+                nextRenderPassNumber++;
+                break;
+            }
+
+            case Command::CopyBufferToBuffer: {
+                CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                if (copy->size == 0) {
+                    // Skip no-op copies.
+                    break;
+                }
+                Buffer* srcBuffer = ToBackend(copy->source.Get());
+                Buffer* dstBuffer = ToBackend(copy->destination.Get());
+
+                DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
+                bool cleared;
+                DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+                                             commandContext, copy->destinationOffset, copy->size));
+                DAWN_UNUSED(cleared);
+
+                srcBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+                dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+                commandList->CopyBufferRegion(
+                    dstBuffer->GetD3D12Resource(), copy->destinationOffset,
+                    srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
+                break;
+            }
+
+            case Command::CopyBufferToTexture: {
+                CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                Buffer* buffer = ToBackend(copy->source.buffer.Get());
+                Texture* texture = ToBackend(copy->destination.texture.Get());
+
+                DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
+
+                SubresourceRange subresources =
+                    GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
+                if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
+                                                  copy->destination.mipLevel)) {
+                    texture->SetIsSubresourceContentInitialized(true, subresources);
+                } else {
+                    texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+                }
+
+                buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+                texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
+                                                    subresources);
+
+                RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, commandList, copy->source,
+                                        copy->destination, copy->copySize);
+
+                break;
+            }
+
+            case Command::CopyTextureToBuffer: {
+                CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                Texture* texture = ToBackend(copy->source.texture.Get());
+                Buffer* buffer = ToBackend(copy->destination.buffer.Get());
+
+                DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
+
+                SubresourceRange subresources =
+                    GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+
+                texture->EnsureSubresourceContentInitialized(commandContext, subresources);
+
+                texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+                                                    subresources);
+                buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+                RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, commandList,
+                                        copy->destination, copy->source, copy->copySize);
+
+                break;
+            }
+
+            case Command::CopyTextureToTexture: {
+                CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                Texture* source = ToBackend(copy->source.texture.Get());
+                Texture* destination = ToBackend(copy->destination.texture.Get());
+
+                SubresourceRange srcRange =
+                    GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+                SubresourceRange dstRange =
+                    GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
+
+                source->EnsureSubresourceContentInitialized(commandContext, srcRange);
+                if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
+                                                  copy->destination.mipLevel)) {
+                    destination->SetIsSubresourceContentInitialized(true, dstRange);
+                } else {
+                    destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
+                }
+
+                if (copy->source.texture.Get() == copy->destination.texture.Get() &&
+                    copy->source.mipLevel == copy->destination.mipLevel) {
+                    // When there are overlapped subresources, the layout of the overlapped
+                    // subresources should all be COMMON instead of what we set now. Currently
+                    // it is not allowed to copy with overlapped subresources, but we still
+                    // add the ASSERT here as a reminder for this possible misuse.
+                    ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
+                                              copy->copySize.depthOrArrayLayers));
+                }
+                source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
+                                                   srcRange);
+                destination->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
+                                                        dstRange);
+
+                ASSERT(srcRange.aspects == dstRange.aspects);
+                if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source, copy->destination)) {
+                    DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
+                        commandContext, copy->source, copy->destination, copy->copySize));
+                    break;
+                }
+
+                if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
+                    commandList->CopyResource(destination->GetD3D12Resource(),
+                                              source->GetD3D12Resource());
+                } else if (source->GetDimension() == wgpu::TextureDimension::e3D &&
+                           destination->GetDimension() == wgpu::TextureDimension::e3D) {
+                    for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+                        D3D12_TEXTURE_COPY_LOCATION srcLocation =
+                            ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel, 0,
+                                                                 aspect);
+                        D3D12_TEXTURE_COPY_LOCATION dstLocation =
+                            ComputeTextureCopyLocationForTexture(
+                                destination, copy->destination.mipLevel, 0, aspect);
+
+                        D3D12_BOX sourceRegion =
+                            ComputeD3D12BoxFromOffsetAndSize(copy->source.origin, copy->copySize);
+
+                        commandList->CopyTextureRegion(
+                            &dstLocation, copy->destination.origin.x, copy->destination.origin.y,
+                            copy->destination.origin.z, &srcLocation, &sourceRegion);
+                    }
+                } else {
+                    const dawn::native::Extent3D copyExtentOneSlice = {copy->copySize.width,
+                                                                       copy->copySize.height, 1u};
+
+                    for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
+                        for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+                            uint32_t sourceLayer = 0;
+                            uint32_t sourceZ = 0;
+                            switch (source->GetDimension()) {
+                                case wgpu::TextureDimension::e1D:
+                                    ASSERT(copy->source.origin.z == 0);
+                                    break;
+                                case wgpu::TextureDimension::e2D:
+                                    sourceLayer = copy->source.origin.z + z;
+                                    break;
+                                case wgpu::TextureDimension::e3D:
+                                    sourceZ = copy->source.origin.z + z;
+                                    break;
+                            }
+
+                            uint32_t destinationLayer = 0;
+                            uint32_t destinationZ = 0;
+                            switch (destination->GetDimension()) {
+                                case wgpu::TextureDimension::e1D:
+                                    ASSERT(copy->destination.origin.z == 0);
+                                    break;
+                                case wgpu::TextureDimension::e2D:
+                                    destinationLayer = copy->destination.origin.z + z;
+                                    break;
+                                case wgpu::TextureDimension::e3D:
+                                    destinationZ = copy->destination.origin.z + z;
+                                    break;
+                            }
+                            D3D12_TEXTURE_COPY_LOCATION srcLocation =
+                                ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel,
+                                                                     sourceLayer, aspect);
+
+                            D3D12_TEXTURE_COPY_LOCATION dstLocation =
+                                ComputeTextureCopyLocationForTexture(destination,
+                                                                     copy->destination.mipLevel,
+                                                                     destinationLayer, aspect);
+
+                            Origin3D sourceOriginInSubresource = copy->source.origin;
+                            sourceOriginInSubresource.z = sourceZ;
+                            D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
+                                sourceOriginInSubresource, copyExtentOneSlice);
+
+                            commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
+                                                           copy->destination.origin.y, destinationZ,
+                                                           &srcLocation, &sourceRegion);
+                        }
                     }
                 }
-
-                if (endSlot <= startSlot) {
-                    return;
-                }
-
-                // mD3D12BufferViews is kept up to date with the most recent data passed
-                // to SetVertexBuffer. This makes it correct to only track the start
-                // and end of the dirty range. When Apply is called,
-                // we will at worst set non-dirty vertex buffers in duplicate.
-                commandList->IASetVertexBuffers(static_cast<uint8_t>(startSlot),
-                                                static_cast<uint8_t>(ityp::Sub(endSlot, startSlot)),
-                                                &mD3D12BufferViews[startSlot]);
-
-                mStartSlot = VertexBufferSlot(kMaxVertexBuffers);
-                mEndSlot = VertexBufferSlot(uint8_t(0));
+                break;
             }
 
-          private:
-            // startSlot and endSlot indicate the range of dirty vertex buffers.
-            // If there are multiple calls to SetVertexBuffer, the start and end
-            // represent the union of the dirty ranges (the union may have non-dirty
-            // data in the middle of the range).
-            const RenderPipeline* mLastAppliedRenderPipeline = nullptr;
-            VertexBufferSlot mStartSlot{kMaxVertexBuffers};
-            VertexBufferSlot mEndSlot{uint8_t(0)};
-            ityp::array<VertexBufferSlot, D3D12_VERTEX_BUFFER_VIEW, kMaxVertexBuffers>
-                mD3D12BufferViews = {};
-        };
+            case Command::ClearBuffer: {
+                ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                if (cmd->size == 0) {
+                    // Skip no-op fills.
+                    break;
+                }
+                Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
 
-        void ResolveMultisampledRenderPass(CommandRecordingContext* commandContext,
-                                           BeginRenderPassCmd* renderPass) {
-            ASSERT(renderPass != nullptr);
+                bool clearedToZero;
+                DAWN_TRY_ASSIGN(clearedToZero, dstBuffer->EnsureDataInitializedAsDestination(
+                                                   commandContext, cmd->offset, cmd->size));
 
-            for (ColorAttachmentIndex i :
-                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                TextureViewBase* resolveTarget =
-                    renderPass->colorAttachments[i].resolveTarget.Get();
-                if (resolveTarget == nullptr) {
+                if (!clearedToZero) {
+                    DAWN_TRY(device->ClearBufferToZero(commandContext, cmd->buffer.Get(),
+                                                       cmd->offset, cmd->size));
+                }
+
+                break;
+            }
+
+            case Command::ResolveQuerySet: {
+                ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                uint32_t firstQuery = cmd->firstQuery;
+                uint32_t queryCount = cmd->queryCount;
+                Buffer* destination = ToBackend(cmd->destination.Get());
+                uint64_t destinationOffset = cmd->destinationOffset;
+
+                bool cleared;
+                DAWN_TRY_ASSIGN(
+                    cleared, destination->EnsureDataInitializedAsDestination(
+                                 commandContext, destinationOffset, queryCount * sizeof(uint64_t)));
+                DAWN_UNUSED(cleared);
+
+                // Resolving unavailable queries is undefined behaviour on D3D12, we only can
+                // resolve the available part of sparse queries. In order to resolve the
+                // unavailables as 0s, we need to clear the resolving region of the destination
+                // buffer to 0s.
+                auto startIt = querySet->GetQueryAvailability().begin() + firstQuery;
+                auto endIt = querySet->GetQueryAvailability().begin() + firstQuery + queryCount;
+                bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+                if (hasUnavailableQueries) {
+                    DAWN_TRY(device->ClearBufferToZero(commandContext, destination,
+                                                       destinationOffset,
+                                                       queryCount * sizeof(uint64_t)));
+                }
+
+                destination->TrackUsageAndTransitionNow(commandContext,
+                                                        wgpu::BufferUsage::QueryResolve);
+
+                RecordResolveQuerySetCmd(commandList, device, querySet, firstQuery, queryCount,
+                                         destination, destinationOffset);
+
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                RecordWriteTimestampCmd(commandList, cmd);
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    // PIX color is 1 byte per channel in ARGB format
+                    constexpr uint64_t kPIXBlackColor = 0xff000000;
+                    ToBackend(GetDevice())
+                        ->GetFunctions()
+                        ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                }
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                mCommands.NextCommand<PopDebugGroupCmd>();
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    ToBackend(GetDevice())->GetFunctions()->pixEndEventOnCommandList(commandList);
+                }
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    // PIX color is 1 byte per channel in ARGB format
+                    constexpr uint64_t kPIXBlackColor = 0xff000000;
+                    ToBackend(GetDevice())
+                        ->GetFunctions()
+                        ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                }
+                break;
+            }
+
+            case Command::WriteBuffer: {
+                WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                const uint64_t offset = write->offset;
+                const uint64_t size = write->size;
+                if (size == 0) {
                     continue;
                 }
 
-                TextureViewBase* colorView = renderPass->colorAttachments[i].view.Get();
-                Texture* colorTexture = ToBackend(colorView->GetTexture());
-                Texture* resolveTexture = ToBackend(resolveTarget->GetTexture());
+                Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                uint8_t* data = mCommands.NextData<uint8_t>(size);
+                Device* device = ToBackend(GetDevice());
 
-                // Transition the usages of the color attachment and resolve target.
-                colorTexture->TrackUsageAndTransitionNow(commandContext,
-                                                         D3D12_RESOURCE_STATE_RESOLVE_SOURCE,
-                                                         colorView->GetSubresourceRange());
-                resolveTexture->TrackUsageAndTransitionNow(commandContext,
-                                                           D3D12_RESOURCE_STATE_RESOLVE_DEST,
-                                                           resolveTarget->GetSubresourceRange());
+                UploadHandle uploadHandle;
+                DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                                  size, device->GetPendingCommandSerial(),
+                                                  kCopyBufferToBufferOffsetAlignment));
+                ASSERT(uploadHandle.mappedBuffer != nullptr);
+                memcpy(uploadHandle.mappedBuffer, data, size);
 
-                // Do MSAA resolve with ResolveSubResource().
-                ID3D12Resource* colorTextureHandle = colorTexture->GetD3D12Resource();
-                ID3D12Resource* resolveTextureHandle = resolveTexture->GetD3D12Resource();
-                const uint32_t resolveTextureSubresourceIndex = resolveTexture->GetSubresourceIndex(
-                    resolveTarget->GetBaseMipLevel(), resolveTarget->GetBaseArrayLayer(),
-                    Aspect::Color);
-                constexpr uint32_t kColorTextureSubresourceIndex = 0;
-                commandContext->GetCommandList()->ResolveSubresource(
-                    resolveTextureHandle, resolveTextureSubresourceIndex, colorTextureHandle,
-                    kColorTextureSubresourceIndex, colorTexture->GetD3D12Format());
+                bool cleared;
+                DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+                                             commandContext, offset, size));
+                DAWN_UNUSED(cleared);
+                dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+                commandList->CopyBufferRegion(dstBuffer->GetD3D12Resource(), offset,
+                                              ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+                                              uploadHandle.startOffset, size);
+                break;
             }
+
+            default:
+                UNREACHABLE();
         }
-
-    }  // anonymous namespace
-
-    // static
-    Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
-                                             const CommandBufferDescriptor* descriptor) {
-        return AcquireRef(new CommandBuffer(encoder, descriptor));
     }
 
-    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
-        : CommandBufferBase(encoder, descriptor) {
+    return {};
+}
+
+MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
+                                            BindGroupStateTracker* bindingTracker,
+                                            const ComputePassResourceUsage& resourceUsages) {
+    uint64_t currentDispatch = 0;
+    ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+
+    Command type;
+    ComputePipeline* lastPipeline = nullptr;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::Dispatch: {
+                DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+                // Skip no-op dispatches; they can trigger D3D12 validation-layer
+                // warnings and may lead to device loss.
+                if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
+                    break;
+                }
+
+                TransitionAndClearForSyncScope(commandContext,
+                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(bindingTracker->Apply(commandContext));
+
+                RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
+                commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
+                currentDispatch++;
+                break;
+            }
+
+            case Command::DispatchIndirect: {
+                DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+
+                TransitionAndClearForSyncScope(commandContext,
+                                               resourceUsages.dispatchUsages[currentDispatch]);
+                DAWN_TRY(bindingTracker->Apply(commandContext));
+
+                ComPtr<ID3D12CommandSignature> signature =
+                    lastPipeline->GetDispatchIndirectCommandSignature();
+                commandList->ExecuteIndirect(
+                    signature.Get(), 1, ToBackend(dispatch->indirectBuffer)->GetD3D12Resource(),
+                    dispatch->indirectOffset, nullptr, 0);
+                currentDispatch++;
+                break;
+            }
+
+            case Command::EndComputePass: {
+                mCommands.NextCommand<EndComputePassCmd>();
+                return {};
+            }
+
+            case Command::SetComputePipeline: {
+                SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                commandList->SetPipelineState(pipeline->GetPipelineState());
+
+                bindingTracker->OnSetPipeline(pipeline);
+                lastPipeline = pipeline;
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                BindGroup* group = ToBackend(cmd->group.Get());
+                uint32_t* dynamicOffsets = nullptr;
+
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                }
+
+                bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+                                               dynamicOffsets);
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    // PIX color is 1 byte per channel in ARGB format
+                    constexpr uint64_t kPIXBlackColor = 0xff000000;
+                    ToBackend(GetDevice())
+                        ->GetFunctions()
+                        ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                }
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                mCommands.NextCommand<PopDebugGroupCmd>();
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    ToBackend(GetDevice())->GetFunctions()->pixEndEventOnCommandList(commandList);
+                }
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                const char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    // PIX color is 1 byte per channel in ARGB format
+                    constexpr uint64_t kPIXBlackColor = 0xff000000;
+                    ToBackend(GetDevice())
+                        ->GetFunctions()
+                        ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                }
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                RecordWriteTimestampCmd(commandList, cmd);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+        }
     }
 
-    MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* commandContext) {
-        Device* device = ToBackend(GetDevice());
-        BindGroupStateTracker bindingTracker(device);
+    return {};
+}
 
-        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+MaybeError CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
+                                          BeginRenderPassCmd* renderPass,
+                                          RenderPassBuilder* renderPassBuilder) {
+    Device* device = ToBackend(GetDevice());
 
-        // Make sure we use the correct descriptors for this command list. Could be done once per
-        // actual command list but here is ok because there should be few command buffers.
-        bindingTracker.SetID3D12DescriptorHeaps(commandList);
+    CPUDescriptorHeapAllocation nullRTVAllocation;
+    D3D12_CPU_DESCRIPTOR_HANDLE nullRTV;
 
-        size_t nextComputePassNumber = 0;
-        size_t nextRenderPassNumber = 0;
+    const auto& colorAttachmentsMaskBitSet = renderPass->attachmentState->GetColorAttachmentsMask();
+    for (ColorAttachmentIndex i(uint8_t(0)); i < ColorAttachmentIndex(kMaxColorAttachments); i++) {
+        if (colorAttachmentsMaskBitSet.test(i)) {
+            RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
+            TextureView* view = ToBackend(attachmentInfo.view.Get());
 
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::BeginComputePass: {
-                    mCommands.NextCommand<BeginComputePassCmd>();
-
-                    bindingTracker.SetInComputePass(true);
-                    DAWN_TRY(RecordComputePass(
-                        commandContext, &bindingTracker,
-                        GetResourceUsages().computePasses[nextComputePassNumber]));
-
-                    nextComputePassNumber++;
-                    break;
-                }
-
-                case Command::BeginRenderPass: {
-                    BeginRenderPassCmd* beginRenderPassCmd =
-                        mCommands.NextCommand<BeginRenderPassCmd>();
-
-                    const bool passHasUAV = TransitionAndClearForSyncScope(
-                        commandContext, GetResourceUsages().renderPasses[nextRenderPassNumber]);
-                    bindingTracker.SetInComputePass(false);
-
-                    LazyClearRenderPassAttachments(beginRenderPassCmd);
-                    DAWN_TRY(RecordRenderPass(commandContext, &bindingTracker, beginRenderPassCmd,
-                                              passHasUAV));
-
-                    nextRenderPassNumber++;
-                    break;
-                }
-
-                case Command::CopyBufferToBuffer: {
-                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
-                    if (copy->size == 0) {
-                        // Skip no-op copies.
-                        break;
-                    }
-                    Buffer* srcBuffer = ToBackend(copy->source.Get());
-                    Buffer* dstBuffer = ToBackend(copy->destination.Get());
-
-                    DAWN_TRY(srcBuffer->EnsureDataInitialized(commandContext));
-                    bool cleared;
-                    DAWN_TRY_ASSIGN(cleared,
-                                    dstBuffer->EnsureDataInitializedAsDestination(
-                                        commandContext, copy->destinationOffset, copy->size));
-                    DAWN_UNUSED(cleared);
-
-                    srcBuffer->TrackUsageAndTransitionNow(commandContext,
-                                                          wgpu::BufferUsage::CopySrc);
-                    dstBuffer->TrackUsageAndTransitionNow(commandContext,
-                                                          wgpu::BufferUsage::CopyDst);
-
-                    commandList->CopyBufferRegion(
-                        dstBuffer->GetD3D12Resource(), copy->destinationOffset,
-                        srcBuffer->GetD3D12Resource(), copy->sourceOffset, copy->size);
-                    break;
-                }
-
-                case Command::CopyBufferToTexture: {
-                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    Buffer* buffer = ToBackend(copy->source.buffer.Get());
-                    Texture* texture = ToBackend(copy->destination.texture.Get());
-
-                    DAWN_TRY(buffer->EnsureDataInitialized(commandContext));
-
-                    SubresourceRange subresources =
-                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
-
-                    if (IsCompleteSubresourceCopiedTo(texture, copy->copySize,
-                                                      copy->destination.mipLevel)) {
-                        texture->SetIsSubresourceContentInitialized(true, subresources);
-                    } else {
-                        texture->EnsureSubresourceContentInitialized(commandContext, subresources);
-                    }
-
-                    buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
-                    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst,
-                                                        subresources);
-
-                    RecordBufferTextureCopy(BufferTextureCopyDirection::B2T, commandList,
-                                            copy->source, copy->destination, copy->copySize);
-
-                    break;
-                }
-
-                case Command::CopyTextureToBuffer: {
-                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    Texture* texture = ToBackend(copy->source.texture.Get());
-                    Buffer* buffer = ToBackend(copy->destination.buffer.Get());
-
-                    DAWN_TRY(buffer->EnsureDataInitializedAsDestination(commandContext, copy));
-
-                    SubresourceRange subresources =
-                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
-
-                    texture->EnsureSubresourceContentInitialized(commandContext, subresources);
-
-                    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
-                                                        subresources);
-                    buffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
-
-                    RecordBufferTextureCopy(BufferTextureCopyDirection::T2B, commandList,
-                                            copy->destination, copy->source, copy->copySize);
-
-                    break;
-                }
-
-                case Command::CopyTextureToTexture: {
-                    CopyTextureToTextureCmd* copy =
-                        mCommands.NextCommand<CopyTextureToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    Texture* source = ToBackend(copy->source.texture.Get());
-                    Texture* destination = ToBackend(copy->destination.texture.Get());
-
-                    SubresourceRange srcRange =
-                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
-                    SubresourceRange dstRange =
-                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
-
-                    source->EnsureSubresourceContentInitialized(commandContext, srcRange);
-                    if (IsCompleteSubresourceCopiedTo(destination, copy->copySize,
-                                                      copy->destination.mipLevel)) {
-                        destination->SetIsSubresourceContentInitialized(true, dstRange);
-                    } else {
-                        destination->EnsureSubresourceContentInitialized(commandContext, dstRange);
-                    }
-
-                    if (copy->source.texture.Get() == copy->destination.texture.Get() &&
-                        copy->source.mipLevel == copy->destination.mipLevel) {
-                        // When there are overlapped subresources, the layout of the overlapped
-                        // subresources should all be COMMON instead of what we set now. Currently
-                        // it is not allowed to copy with overlapped subresources, but we still
-                        // add the ASSERT here as a reminder for this possible misuse.
-                        ASSERT(!IsRangeOverlapped(copy->source.origin.z, copy->destination.origin.z,
-                                                  copy->copySize.depthOrArrayLayers));
-                    }
-                    source->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopySrc,
-                                                       srcRange);
-                    destination->TrackUsageAndTransitionNow(commandContext,
-                                                            wgpu::TextureUsage::CopyDst, dstRange);
-
-                    ASSERT(srcRange.aspects == dstRange.aspects);
-                    if (ShouldCopyUsingTemporaryBuffer(GetDevice(), copy->source,
-                                                       copy->destination)) {
-                        DAWN_TRY(RecordCopyTextureWithTemporaryBuffer(
-                            commandContext, copy->source, copy->destination, copy->copySize));
-                        break;
-                    }
-
-                    if (CanUseCopyResource(copy->source, copy->destination, copy->copySize)) {
-                        commandList->CopyResource(destination->GetD3D12Resource(),
-                                                  source->GetD3D12Resource());
-                    } else if (source->GetDimension() == wgpu::TextureDimension::e3D &&
-                               destination->GetDimension() == wgpu::TextureDimension::e3D) {
-                        for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
-                            D3D12_TEXTURE_COPY_LOCATION srcLocation =
-                                ComputeTextureCopyLocationForTexture(source, copy->source.mipLevel,
-                                                                     0, aspect);
-                            D3D12_TEXTURE_COPY_LOCATION dstLocation =
-                                ComputeTextureCopyLocationForTexture(
-                                    destination, copy->destination.mipLevel, 0, aspect);
-
-                            D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
-                                copy->source.origin, copy->copySize);
-
-                            commandList->CopyTextureRegion(&dstLocation, copy->destination.origin.x,
-                                                           copy->destination.origin.y,
-                                                           copy->destination.origin.z, &srcLocation,
-                                                           &sourceRegion);
-                        }
-                    } else {
-                        const dawn::native::Extent3D copyExtentOneSlice = {
-                            copy->copySize.width, copy->copySize.height, 1u};
-
-                        for (Aspect aspect : IterateEnumMask(srcRange.aspects)) {
-                            for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
-                                uint32_t sourceLayer = 0;
-                                uint32_t sourceZ = 0;
-                                switch (source->GetDimension()) {
-                                    case wgpu::TextureDimension::e1D:
-                                        ASSERT(copy->source.origin.z == 0);
-                                        break;
-                                    case wgpu::TextureDimension::e2D:
-                                        sourceLayer = copy->source.origin.z + z;
-                                        break;
-                                    case wgpu::TextureDimension::e3D:
-                                        sourceZ = copy->source.origin.z + z;
-                                        break;
-                                }
-
-                                uint32_t destinationLayer = 0;
-                                uint32_t destinationZ = 0;
-                                switch (destination->GetDimension()) {
-                                    case wgpu::TextureDimension::e1D:
-                                        ASSERT(copy->destination.origin.z == 0);
-                                        break;
-                                    case wgpu::TextureDimension::e2D:
-                                        destinationLayer = copy->destination.origin.z + z;
-                                        break;
-                                    case wgpu::TextureDimension::e3D:
-                                        destinationZ = copy->destination.origin.z + z;
-                                        break;
-                                }
-                                D3D12_TEXTURE_COPY_LOCATION srcLocation =
-                                    ComputeTextureCopyLocationForTexture(
-                                        source, copy->source.mipLevel, sourceLayer, aspect);
-
-                                D3D12_TEXTURE_COPY_LOCATION dstLocation =
-                                    ComputeTextureCopyLocationForTexture(destination,
-                                                                         copy->destination.mipLevel,
-                                                                         destinationLayer, aspect);
-
-                                Origin3D sourceOriginInSubresource = copy->source.origin;
-                                sourceOriginInSubresource.z = sourceZ;
-                                D3D12_BOX sourceRegion = ComputeD3D12BoxFromOffsetAndSize(
-                                    sourceOriginInSubresource, copyExtentOneSlice);
-
-                                commandList->CopyTextureRegion(
-                                    &dstLocation, copy->destination.origin.x,
-                                    copy->destination.origin.y, destinationZ, &srcLocation,
-                                    &sourceRegion);
-                            }
-                        }
-                    }
-                    break;
-                }
-
-                case Command::ClearBuffer: {
-                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
-                    if (cmd->size == 0) {
-                        // Skip no-op fills.
-                        break;
-                    }
-                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
-
-                    bool clearedToZero;
-                    DAWN_TRY_ASSIGN(clearedToZero, dstBuffer->EnsureDataInitializedAsDestination(
-                                                       commandContext, cmd->offset, cmd->size));
-
-                    if (!clearedToZero) {
-                        DAWN_TRY(device->ClearBufferToZero(commandContext, cmd->buffer.Get(),
-                                                           cmd->offset, cmd->size));
-                    }
-
-                    break;
-                }
-
-                case Command::ResolveQuerySet: {
-                    ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-                    uint32_t firstQuery = cmd->firstQuery;
-                    uint32_t queryCount = cmd->queryCount;
-                    Buffer* destination = ToBackend(cmd->destination.Get());
-                    uint64_t destinationOffset = cmd->destinationOffset;
-
-                    bool cleared;
-                    DAWN_TRY_ASSIGN(cleared, destination->EnsureDataInitializedAsDestination(
-                                                 commandContext, destinationOffset,
-                                                 queryCount * sizeof(uint64_t)));
-                    DAWN_UNUSED(cleared);
-
-                    // Resolving unavailable queries is undefined behaviour on D3D12, we only can
-                    // resolve the available part of sparse queries. In order to resolve the
-                    // unavailables as 0s, we need to clear the resolving region of the destination
-                    // buffer to 0s.
-                    auto startIt = querySet->GetQueryAvailability().begin() + firstQuery;
-                    auto endIt = querySet->GetQueryAvailability().begin() + firstQuery + queryCount;
-                    bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
-                    if (hasUnavailableQueries) {
-                        DAWN_TRY(device->ClearBufferToZero(commandContext, destination,
-                                                           destinationOffset,
-                                                           queryCount * sizeof(uint64_t)));
-                    }
-
-                    destination->TrackUsageAndTransitionNow(commandContext,
-                                                            wgpu::BufferUsage::QueryResolve);
-
-                    RecordResolveQuerySetCmd(commandList, device, querySet, firstQuery, queryCount,
-                                             destination, destinationOffset);
-
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
-                    RecordWriteTimestampCmd(commandList, cmd);
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
-                    const char* label = mCommands.NextData<char>(cmd->length + 1);
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        // PIX color is 1 byte per channel in ARGB format
-                        constexpr uint64_t kPIXBlackColor = 0xff000000;
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
-                    }
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    mCommands.NextCommand<PopDebugGroupCmd>();
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixEndEventOnCommandList(commandList);
-                    }
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
-                    const char* label = mCommands.NextData<char>(cmd->length + 1);
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        // PIX color is 1 byte per channel in ARGB format
-                        constexpr uint64_t kPIXBlackColor = 0xff000000;
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
-                    }
-                    break;
-                }
-
-                case Command::WriteBuffer: {
-                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
-                    const uint64_t offset = write->offset;
-                    const uint64_t size = write->size;
-                    if (size == 0) {
-                        continue;
-                    }
-
-                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
-                    uint8_t* data = mCommands.NextData<uint8_t>(size);
-                    Device* device = ToBackend(GetDevice());
-
-                    UploadHandle uploadHandle;
-                    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                                      size, device->GetPendingCommandSerial(),
-                                                      kCopyBufferToBufferOffsetAlignment));
-                    ASSERT(uploadHandle.mappedBuffer != nullptr);
-                    memcpy(uploadHandle.mappedBuffer, data, size);
-
-                    bool cleared;
-                    DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
-                                                 commandContext, offset, size));
-                    DAWN_UNUSED(cleared);
-                    dstBuffer->TrackUsageAndTransitionNow(commandContext,
-                                                          wgpu::BufferUsage::CopyDst);
-                    commandList->CopyBufferRegion(
-                        dstBuffer->GetD3D12Resource(), offset,
-                        ToBackend(uploadHandle.stagingBuffer)->GetResource(),
-                        uploadHandle.startOffset, size);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-            }
-        }
-
-        return {};
-    }
-
-    MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* commandContext,
-                                                BindGroupStateTracker* bindingTracker,
-                                                const ComputePassResourceUsage& resourceUsages) {
-        uint64_t currentDispatch = 0;
-        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
-
-        Command type;
-        ComputePipeline* lastPipeline = nullptr;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::Dispatch: {
-                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-
-                    // Skip noop dispatches, it can cause D3D12 warning from validation layers and
-                    // leads to device lost.
-                    if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
-                        break;
-                    }
-
-                    TransitionAndClearForSyncScope(commandContext,
-                                                   resourceUsages.dispatchUsages[currentDispatch]);
-                    DAWN_TRY(bindingTracker->Apply(commandContext));
-
-                    RecordNumWorkgroupsForDispatch(commandList, lastPipeline, dispatch);
-                    commandList->Dispatch(dispatch->x, dispatch->y, dispatch->z);
-                    currentDispatch++;
-                    break;
-                }
-
-                case Command::DispatchIndirect: {
-                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
-
-                    TransitionAndClearForSyncScope(commandContext,
-                                                   resourceUsages.dispatchUsages[currentDispatch]);
-                    DAWN_TRY(bindingTracker->Apply(commandContext));
-
-                    ComPtr<ID3D12CommandSignature> signature =
-                        lastPipeline->GetDispatchIndirectCommandSignature();
-                    commandList->ExecuteIndirect(
-                        signature.Get(), 1, ToBackend(dispatch->indirectBuffer)->GetD3D12Resource(),
-                        dispatch->indirectOffset, nullptr, 0);
-                    currentDispatch++;
-                    break;
-                }
-
-                case Command::EndComputePass: {
-                    mCommands.NextCommand<EndComputePassCmd>();
-                    return {};
-                }
-
-                case Command::SetComputePipeline: {
-                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
-                    ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
-                    commandList->SetPipelineState(pipeline->GetPipelineState());
-
-                    bindingTracker->OnSetPipeline(pipeline);
-                    lastPipeline = pipeline;
-                    break;
-                }
-
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
-                    BindGroup* group = ToBackend(cmd->group.Get());
-                    uint32_t* dynamicOffsets = nullptr;
-
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-
-                    bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
-                                                   dynamicOffsets);
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
-                    const char* label = mCommands.NextData<char>(cmd->length + 1);
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        // PIX color is 1 byte per channel in ARGB format
-                        constexpr uint64_t kPIXBlackColor = 0xff000000;
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
-                    }
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    mCommands.NextCommand<PopDebugGroupCmd>();
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixEndEventOnCommandList(commandList);
-                    }
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
-                    const char* label = mCommands.NextData<char>(cmd->length + 1);
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        // PIX color is 1 byte per channel in ARGB format
-                        constexpr uint64_t kPIXBlackColor = 0xff000000;
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
-                    }
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
-                    RecordWriteTimestampCmd(commandList, cmd);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-            }
-        }
-
-        return {};
-    }
-
-    MaybeError CommandBuffer::SetupRenderPass(CommandRecordingContext* commandContext,
-                                              BeginRenderPassCmd* renderPass,
-                                              RenderPassBuilder* renderPassBuilder) {
-        Device* device = ToBackend(GetDevice());
-
-        CPUDescriptorHeapAllocation nullRTVAllocation;
-        D3D12_CPU_DESCRIPTOR_HANDLE nullRTV;
-
-        const auto& colorAttachmentsMaskBitSet =
-            renderPass->attachmentState->GetColorAttachmentsMask();
-        for (ColorAttachmentIndex i(uint8_t(0)); i < ColorAttachmentIndex(kMaxColorAttachments);
-             i++) {
-            if (colorAttachmentsMaskBitSet.test(i)) {
-                RenderPassColorAttachmentInfo& attachmentInfo = renderPass->colorAttachments[i];
-                TextureView* view = ToBackend(attachmentInfo.view.Get());
-
-                // Set view attachment.
-                CPUDescriptorHeapAllocation rtvAllocation;
-                DAWN_TRY_ASSIGN(
-                    rtvAllocation,
-                    device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
-
-                const D3D12_RENDER_TARGET_VIEW_DESC viewDesc = view->GetRTVDescriptor();
-                const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
-                    rtvAllocation.GetBaseDescriptor();
-
-                device->GetD3D12Device()->CreateRenderTargetView(
-                    ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
-
-                renderPassBuilder->SetRenderTargetView(i, baseDescriptor, false);
-
-                // Set color load operation.
-                renderPassBuilder->SetRenderTargetBeginningAccess(
-                    i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
-
-                // Set color store operation.
-                if (attachmentInfo.resolveTarget != nullptr) {
-                    TextureView* resolveDestinationView =
-                        ToBackend(attachmentInfo.resolveTarget.Get());
-                    Texture* resolveDestinationTexture =
-                        ToBackend(resolveDestinationView->GetTexture());
-
-                    resolveDestinationTexture->TrackUsageAndTransitionNow(
-                        commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
-                        resolveDestinationView->GetSubresourceRange());
-
-                    renderPassBuilder->SetRenderTargetEndingAccessResolve(
-                        i, attachmentInfo.storeOp, view, resolveDestinationView);
-                } else {
-                    renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
-                }
-            } else {
-                if (!nullRTVAllocation.IsValid()) {
-                    DAWN_TRY_ASSIGN(
-                        nullRTVAllocation,
-                        device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
-                    nullRTV = nullRTVAllocation.GetBaseDescriptor();
-                    D3D12_RENDER_TARGET_VIEW_DESC nullRTVDesc;
-                    nullRTVDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
-                    nullRTVDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
-                    nullRTVDesc.Texture2D.MipSlice = 0;
-                    nullRTVDesc.Texture2D.PlaneSlice = 0;
-                    device->GetD3D12Device()->CreateRenderTargetView(nullptr, &nullRTVDesc,
-                                                                     nullRTV);
-                }
-
-                renderPassBuilder->SetRenderTargetView(i, nullRTV, true);
-            }
-        }
-
-        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-            RenderPassDepthStencilAttachmentInfo& attachmentInfo =
-                renderPass->depthStencilAttachment;
-            TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
-
-            // Set depth attachment.
-            CPUDescriptorHeapAllocation dsvAllocation;
+            // Set view attachment.
+            CPUDescriptorHeapAllocation rtvAllocation;
             DAWN_TRY_ASSIGN(
-                dsvAllocation,
-                device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
+                rtvAllocation,
+                device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
 
-            const D3D12_DEPTH_STENCIL_VIEW_DESC viewDesc = view->GetDSVDescriptor(
-                attachmentInfo.depthReadOnly, attachmentInfo.stencilReadOnly);
-            const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvAllocation.GetBaseDescriptor();
+            const D3D12_RENDER_TARGET_VIEW_DESC viewDesc = view->GetRTVDescriptor();
+            const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = rtvAllocation.GetBaseDescriptor();
 
-            device->GetD3D12Device()->CreateDepthStencilView(
+            device->GetD3D12Device()->CreateRenderTargetView(
                 ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
 
-            renderPassBuilder->SetDepthStencilView(baseDescriptor);
+            renderPassBuilder->SetRenderTargetView(i, baseDescriptor, false);
 
-            const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
-            const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
+            // Set color load operation.
+            renderPassBuilder->SetRenderTargetBeginningAccess(
+                i, attachmentInfo.loadOp, attachmentInfo.clearColor, view->GetD3D12Format());
 
-            // Set depth/stencil load operations.
-            if (hasDepth) {
-                renderPassBuilder->SetDepthAccess(
-                    attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
-                    attachmentInfo.clearDepth, view->GetD3D12Format());
+            // Set color store operation.
+            if (attachmentInfo.resolveTarget != nullptr) {
+                TextureView* resolveDestinationView = ToBackend(attachmentInfo.resolveTarget.Get());
+                Texture* resolveDestinationTexture =
+                    ToBackend(resolveDestinationView->GetTexture());
+
+                resolveDestinationTexture->TrackUsageAndTransitionNow(
+                    commandContext, D3D12_RESOURCE_STATE_RESOLVE_DEST,
+                    resolveDestinationView->GetSubresourceRange());
+
+                renderPassBuilder->SetRenderTargetEndingAccessResolve(i, attachmentInfo.storeOp,
+                                                                      view, resolveDestinationView);
             } else {
-                renderPassBuilder->SetDepthNoAccess();
+                renderPassBuilder->SetRenderTargetEndingAccess(i, attachmentInfo.storeOp);
             }
-
-            if (hasStencil) {
-                renderPassBuilder->SetStencilAccess(
-                    attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
-                    attachmentInfo.clearStencil, view->GetD3D12Format());
-            } else {
-                renderPassBuilder->SetStencilNoAccess();
-            }
-
         } else {
-            renderPassBuilder->SetDepthStencilNoAccess();
-        }
+            if (!nullRTVAllocation.IsValid()) {
+                DAWN_TRY_ASSIGN(
+                    nullRTVAllocation,
+                    device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+                nullRTV = nullRTVAllocation.GetBaseDescriptor();
+                D3D12_RENDER_TARGET_VIEW_DESC nullRTVDesc;
+                nullRTVDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+                nullRTVDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2D;
+                nullRTVDesc.Texture2D.MipSlice = 0;
+                nullRTVDesc.Texture2D.PlaneSlice = 0;
+                device->GetD3D12Device()->CreateRenderTargetView(nullptr, &nullRTVDesc, nullRTV);
+            }
 
-        return {};
+            renderPassBuilder->SetRenderTargetView(i, nullRTV, true);
+        }
     }
 
-    void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
-                                               const RenderPassBuilder* renderPassBuilder) const {
-        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+    if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+        RenderPassDepthStencilAttachmentInfo& attachmentInfo = renderPass->depthStencilAttachment;
+        TextureView* view = ToBackend(renderPass->depthStencilAttachment.view.Get());
 
-        // Clear framebuffer attachments as needed.
-        {
-            for (const auto& attachment :
-                 renderPassBuilder->GetRenderPassRenderTargetDescriptors()) {
-                // Load op - color
-                if (attachment.cpuDescriptor.ptr != 0 &&
-                    attachment.BeginningAccess.Type ==
-                        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
-                    commandList->ClearRenderTargetView(
-                        attachment.cpuDescriptor, attachment.BeginningAccess.Clear.ClearValue.Color,
-                        0, nullptr);
-                }
-            }
+        // Set depth attachment.
+        CPUDescriptorHeapAllocation dsvAllocation;
+        DAWN_TRY_ASSIGN(dsvAllocation,
+                        device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
 
-            if (renderPassBuilder->HasDepthOrStencil()) {
-                D3D12_CLEAR_FLAGS clearFlags = {};
-                float depthClear = 0.0f;
-                uint8_t stencilClear = 0u;
+        const D3D12_DEPTH_STENCIL_VIEW_DESC viewDesc =
+            view->GetDSVDescriptor(attachmentInfo.depthReadOnly, attachmentInfo.stencilReadOnly);
+        const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvAllocation.GetBaseDescriptor();
 
-                if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
-                        ->DepthBeginningAccess.Type ==
-                    D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
-                    clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
-                    depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
-                                     ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
-                }
-                if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
-                        ->StencilBeginningAccess.Type ==
-                    D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
-                    clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
-                    stencilClear =
-                        renderPassBuilder->GetRenderPassDepthStencilDescriptor()
-                            ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
-                }
+        device->GetD3D12Device()->CreateDepthStencilView(
+            ToBackend(view->GetTexture())->GetD3D12Resource(), &viewDesc, baseDescriptor);
 
-                if (clearFlags) {
-                    commandList->ClearDepthStencilView(
-                        renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
-                        clearFlags, depthClear, stencilClear, 0, nullptr);
-                }
-            }
+        renderPassBuilder->SetDepthStencilView(baseDescriptor);
+
+        const bool hasDepth = view->GetTexture()->GetFormat().HasDepth();
+        const bool hasStencil = view->GetTexture()->GetFormat().HasStencil();
+
+        // Set depth/stencil load operations.
+        if (hasDepth) {
+            renderPassBuilder->SetDepthAccess(attachmentInfo.depthLoadOp,
+                                              attachmentInfo.depthStoreOp,
+                                              attachmentInfo.clearDepth, view->GetD3D12Format());
+        } else {
+            renderPassBuilder->SetDepthNoAccess();
         }
 
-        commandList->OMSetRenderTargets(
-            static_cast<uint8_t>(renderPassBuilder->GetHighestColorAttachmentIndexPlusOne()),
-            renderPassBuilder->GetRenderTargetViews(), FALSE,
-            renderPassBuilder->HasDepthOrStencil()
-                ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
-                : nullptr);
+        if (hasStencil) {
+            renderPassBuilder->SetStencilAccess(
+                attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+                attachmentInfo.clearStencil, view->GetD3D12Format());
+        } else {
+            renderPassBuilder->SetStencilNoAccess();
+        }
+
+    } else {
+        renderPassBuilder->SetDepthStencilNoAccess();
     }
 
-    MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
-                                               BindGroupStateTracker* bindingTracker,
-                                               BeginRenderPassCmd* renderPass,
-                                               const bool passHasUAV) {
-        Device* device = ToBackend(GetDevice());
-        const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
+    return {};
+}
 
-        // renderPassBuilder must be scoped to RecordRenderPass because any underlying
-        // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
-        // valid until after EndRenderPass() has been called.
-        RenderPassBuilder renderPassBuilder(passHasUAV);
+void CommandBuffer::EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+                                           const RenderPassBuilder* renderPassBuilder) const {
+    ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
 
-        DAWN_TRY(SetupRenderPass(commandContext, renderPass, &renderPassBuilder));
-
-        // Use D3D12's native render pass API if it's available, otherwise emulate the
-        // beginning and ending access operations.
-        if (useRenderPass) {
-            commandContext->GetCommandList4()->BeginRenderPass(
-                static_cast<uint8_t>(renderPassBuilder.GetHighestColorAttachmentIndexPlusOne()),
-                renderPassBuilder.GetRenderPassRenderTargetDescriptors().data(),
-                renderPassBuilder.HasDepthOrStencil()
-                    ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
-                    : nullptr,
-                renderPassBuilder.GetRenderPassFlags());
-        } else {
-            EmulateBeginRenderPass(commandContext, &renderPassBuilder);
+    // Clear framebuffer attachments as needed.
+    {
+        for (const auto& attachment : renderPassBuilder->GetRenderPassRenderTargetDescriptors()) {
+            // Load op - color
+            if (attachment.cpuDescriptor.ptr != 0 &&
+                attachment.BeginningAccess.Type == D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                commandList->ClearRenderTargetView(
+                    attachment.cpuDescriptor, attachment.BeginningAccess.Clear.ClearValue.Color, 0,
+                    nullptr);
+            }
         }
 
-        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+        if (renderPassBuilder->HasDepthOrStencil()) {
+            D3D12_CLEAR_FLAGS clearFlags = {};
+            float depthClear = 0.0f;
+            uint8_t stencilClear = 0u;
 
-        // Set up default dynamic state
-        {
-            uint32_t width = renderPass->width;
-            uint32_t height = renderPass->height;
-            D3D12_VIEWPORT viewport = {
-                0.f, 0.f, static_cast<float>(width), static_cast<float>(height), 0.f, 1.f};
-            D3D12_RECT scissorRect = {0, 0, static_cast<int32_t>(width),
-                                      static_cast<int32_t>(height)};
-            commandList->RSSetViewports(1, &viewport);
-            commandList->RSSetScissorRects(1, &scissorRect);
+            if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                    ->DepthBeginningAccess.Type == D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
+                depthClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                                 ->DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth;
+            }
+            if (renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                    ->StencilBeginningAccess.Type ==
+                D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR) {
+                clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
+                stencilClear = renderPassBuilder->GetRenderPassDepthStencilDescriptor()
+                                   ->StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil;
+            }
 
-            static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
-            commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
-
-            commandList->OMSetStencilRef(0);
+            if (clearFlags) {
+                commandList->ClearDepthStencilView(
+                    renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor,
+                    clearFlags, depthClear, stencilClear, 0, nullptr);
+            }
         }
+    }
 
-        RenderPipeline* lastPipeline = nullptr;
-        VertexBufferTracker vertexBufferTracker = {};
+    commandList->OMSetRenderTargets(
+        static_cast<uint8_t>(renderPassBuilder->GetHighestColorAttachmentIndexPlusOne()),
+        renderPassBuilder->GetRenderTargetViews(), FALSE,
+        renderPassBuilder->HasDepthOrStencil()
+            ? &renderPassBuilder->GetRenderPassDepthStencilDescriptor()->cpuDescriptor
+            : nullptr);
+}
 
-        auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
-            switch (type) {
-                case Command::Draw: {
-                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* commandContext,
+                                           BindGroupStateTracker* bindingTracker,
+                                           BeginRenderPassCmd* renderPass,
+                                           const bool passHasUAV) {
+    Device* device = ToBackend(GetDevice());
+    const bool useRenderPass = device->IsToggleEnabled(Toggle::UseD3D12RenderPass);
 
-                    DAWN_TRY(bindingTracker->Apply(commandContext));
-                    vertexBufferTracker.Apply(commandList, lastPipeline);
-                    RecordFirstIndexOffset(commandList, lastPipeline, draw->firstVertex,
-                                           draw->firstInstance);
-                    commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
-                                               draw->firstVertex, draw->firstInstance);
-                    break;
-                }
+    // renderPassBuilder must be scoped to RecordRenderPass because any underlying
+    // D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS structs must remain
+    // valid until after EndRenderPass() has been called.
+    RenderPassBuilder renderPassBuilder(passHasUAV);
 
-                case Command::DrawIndexed: {
-                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+    DAWN_TRY(SetupRenderPass(commandContext, renderPass, &renderPassBuilder));
 
-                    DAWN_TRY(bindingTracker->Apply(commandContext));
-                    vertexBufferTracker.Apply(commandList, lastPipeline);
-                    RecordFirstIndexOffset(commandList, lastPipeline, draw->baseVertex,
-                                           draw->firstInstance);
-                    commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
-                                                      draw->firstIndex, draw->baseVertex,
-                                                      draw->firstInstance);
-                    break;
-                }
+    // Use D3D12's native render pass API if it's available, otherwise emulate the
+    // beginning and ending access operations.
+    if (useRenderPass) {
+        commandContext->GetCommandList4()->BeginRenderPass(
+            static_cast<uint8_t>(renderPassBuilder.GetHighestColorAttachmentIndexPlusOne()),
+            renderPassBuilder.GetRenderPassRenderTargetDescriptors().data(),
+            renderPassBuilder.HasDepthOrStencil()
+                ? renderPassBuilder.GetRenderPassDepthStencilDescriptor()
+                : nullptr,
+            renderPassBuilder.GetRenderPassFlags());
+    } else {
+        EmulateBeginRenderPass(commandContext, &renderPassBuilder);
+    }
 
-                case Command::DrawIndirect: {
-                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+    ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
 
-                    DAWN_TRY(bindingTracker->Apply(commandContext));
-                    vertexBufferTracker.Apply(commandList, lastPipeline);
+    // Set up default dynamic state
+    {
+        uint32_t width = renderPass->width;
+        uint32_t height = renderPass->height;
+        D3D12_VIEWPORT viewport = {0.f, 0.f, static_cast<float>(width), static_cast<float>(height),
+                                   0.f, 1.f};
+        D3D12_RECT scissorRect = {0, 0, static_cast<int32_t>(width), static_cast<int32_t>(height)};
+        commandList->RSSetViewports(1, &viewport);
+        commandList->RSSetScissorRects(1, &scissorRect);
 
-                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
-                    ComPtr<ID3D12CommandSignature> signature =
-                        lastPipeline->GetDrawIndirectCommandSignature();
-                    commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
-                                                 draw->indirectOffset, nullptr, 0);
-                    break;
-                }
+        static constexpr std::array<float, 4> defaultBlendFactor = {0, 0, 0, 0};
+        commandList->OMSetBlendFactor(&defaultBlendFactor[0]);
 
-                case Command::DrawIndexedIndirect: {
-                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+        commandList->OMSetStencilRef(0);
+    }
 
-                    DAWN_TRY(bindingTracker->Apply(commandContext));
-                    vertexBufferTracker.Apply(commandList, lastPipeline);
+    RenderPipeline* lastPipeline = nullptr;
+    VertexBufferTracker vertexBufferTracker = {};
 
-                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
-                    ASSERT(buffer != nullptr);
+    auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) -> MaybeError {
+        switch (type) {
+            case Command::Draw: {
+                DrawCmd* draw = iter->NextCommand<DrawCmd>();
 
-                    ComPtr<ID3D12CommandSignature> signature =
-                        lastPipeline->GetDrawIndexedIndirectCommandSignature();
-                    commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
-                                                 draw->indirectOffset, nullptr, 0);
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
-                    const char* label = iter->NextData<char>(cmd->length + 1);
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        // PIX color is 1 byte per channel in ARGB format
-                        constexpr uint64_t kPIXBlackColor = 0xff000000;
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
-                    }
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    iter->NextCommand<PopDebugGroupCmd>();
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixEndEventOnCommandList(commandList);
-                    }
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
-                    const char* label = iter->NextData<char>(cmd->length + 1);
-
-                    if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
-                        // PIX color is 1 byte per channel in ARGB format
-                        constexpr uint64_t kPIXBlackColor = 0xff000000;
-                        ToBackend(GetDevice())
-                            ->GetFunctions()
-                            ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
-                    }
-                    break;
-                }
-
-                case Command::SetRenderPipeline: {
-                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
-                    RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
-                    commandList->SetPipelineState(pipeline->GetPipelineState());
-                    commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
-
-                    bindingTracker->OnSetPipeline(pipeline);
-
-                    lastPipeline = pipeline;
-                    break;
-                }
-
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
-                    BindGroup* group = ToBackend(cmd->group.Get());
-                    uint32_t* dynamicOffsets = nullptr;
-
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-
-                    bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
-                                                   dynamicOffsets);
-                    break;
-                }
-
-                case Command::SetIndexBuffer: {
-                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
-
-                    D3D12_INDEX_BUFFER_VIEW bufferView;
-                    bufferView.Format = DXGIIndexFormat(cmd->format);
-                    bufferView.BufferLocation = ToBackend(cmd->buffer)->GetVA() + cmd->offset;
-                    bufferView.SizeInBytes = cmd->size;
-
-                    commandList->IASetIndexBuffer(&bufferView);
-                    break;
-                }
-
-                case Command::SetVertexBuffer: {
-                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
-
-                    vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
-                                                          cmd->offset, cmd->size);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-                    break;
+                DAWN_TRY(bindingTracker->Apply(commandContext));
+                vertexBufferTracker.Apply(commandList, lastPipeline);
+                RecordFirstIndexOffset(commandList, lastPipeline, draw->firstVertex,
+                                       draw->firstInstance);
+                commandList->DrawInstanced(draw->vertexCount, draw->instanceCount,
+                                           draw->firstVertex, draw->firstInstance);
+                break;
             }
-            return {};
-        };
 
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::EndRenderPass: {
-                    mCommands.NextCommand<EndRenderPassCmd>();
-                    if (useRenderPass) {
-                        commandContext->GetCommandList4()->EndRenderPass();
-                    } else if (renderPass->attachmentState->GetSampleCount() > 1) {
-                        ResolveMultisampledRenderPass(commandContext, renderPass);
-                    }
-                    return {};
-                }
+            case Command::DrawIndexed: {
+                DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
 
-                case Command::SetStencilReference: {
-                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
-
-                    commandList->OMSetStencilRef(cmd->reference);
-                    break;
-                }
-
-                case Command::SetViewport: {
-                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
-                    D3D12_VIEWPORT viewport;
-                    viewport.TopLeftX = cmd->x;
-                    viewport.TopLeftY = cmd->y;
-                    viewport.Width = cmd->width;
-                    viewport.Height = cmd->height;
-                    viewport.MinDepth = cmd->minDepth;
-                    viewport.MaxDepth = cmd->maxDepth;
-
-                    commandList->RSSetViewports(1, &viewport);
-                    break;
-                }
-
-                case Command::SetScissorRect: {
-                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
-                    D3D12_RECT rect;
-                    rect.left = cmd->x;
-                    rect.top = cmd->y;
-                    rect.right = cmd->x + cmd->width;
-                    rect.bottom = cmd->y + cmd->height;
-
-                    commandList->RSSetScissorRects(1, &rect);
-                    break;
-                }
-
-                case Command::SetBlendConstant: {
-                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
-                    const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
-                    commandList->OMSetBlendFactor(color.data());
-                    break;
-                }
-
-                case Command::ExecuteBundles: {
-                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
-                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
-
-                    for (uint32_t i = 0; i < cmd->count; ++i) {
-                        CommandIterator* iter = bundles[i]->GetCommands();
-                        iter->Reset();
-                        while (iter->NextCommandId(&type)) {
-                            DAWN_TRY(EncodeRenderBundleCommand(iter, type));
-                        }
-                    }
-                    break;
-                }
-
-                case Command::BeginOcclusionQuery: {
-                    BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-                    ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
-                           D3D12_QUERY_TYPE_BINARY_OCCLUSION);
-                    commandList->BeginQuery(querySet->GetQueryHeap(),
-                                            D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
-                    break;
-                }
-
-                case Command::EndOcclusionQuery: {
-                    EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-                    ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
-                           D3D12_QUERY_TYPE_BINARY_OCCLUSION);
-                    commandList->EndQuery(querySet->GetQueryHeap(),
-                                          D3D12_QUERY_TYPE_BINARY_OCCLUSION, cmd->queryIndex);
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
-                    RecordWriteTimestampCmd(commandList, cmd);
-                    break;
-                }
-
-                default: {
-                    DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
-                    break;
-                }
+                DAWN_TRY(bindingTracker->Apply(commandContext));
+                vertexBufferTracker.Apply(commandList, lastPipeline);
+                RecordFirstIndexOffset(commandList, lastPipeline, draw->baseVertex,
+                                       draw->firstInstance);
+                commandList->DrawIndexedInstanced(draw->indexCount, draw->instanceCount,
+                                                  draw->firstIndex, draw->baseVertex,
+                                                  draw->firstInstance);
+                break;
             }
+
+            case Command::DrawIndirect: {
+                DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+
+                DAWN_TRY(bindingTracker->Apply(commandContext));
+                vertexBufferTracker.Apply(commandList, lastPipeline);
+
+                Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                ComPtr<ID3D12CommandSignature> signature =
+                    lastPipeline->GetDrawIndirectCommandSignature();
+                commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+                                             draw->indirectOffset, nullptr, 0);
+                break;
+            }
+
+            case Command::DrawIndexedIndirect: {
+                DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                DAWN_TRY(bindingTracker->Apply(commandContext));
+                vertexBufferTracker.Apply(commandList, lastPipeline);
+
+                Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                ASSERT(buffer != nullptr);
+
+                ComPtr<ID3D12CommandSignature> signature =
+                    lastPipeline->GetDrawIndexedIndirectCommandSignature();
+                commandList->ExecuteIndirect(signature.Get(), 1, buffer->GetD3D12Resource(),
+                                             draw->indirectOffset, nullptr, 0);
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                const char* label = iter->NextData<char>(cmd->length + 1);
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    // PIX color is 1 byte per channel in ARGB format
+                    constexpr uint64_t kPIXBlackColor = 0xff000000;
+                    ToBackend(GetDevice())
+                        ->GetFunctions()
+                        ->pixSetMarkerOnCommandList(commandList, kPIXBlackColor, label);
+                }
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                iter->NextCommand<PopDebugGroupCmd>();
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    ToBackend(GetDevice())->GetFunctions()->pixEndEventOnCommandList(commandList);
+                }
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                const char* label = iter->NextData<char>(cmd->length + 1);
+
+                if (ToBackend(GetDevice())->GetFunctions()->IsPIXEventRuntimeLoaded()) {
+                    // PIX color is 1 byte per channel in ARGB format
+                    constexpr uint64_t kPIXBlackColor = 0xff000000;
+                    ToBackend(GetDevice())
+                        ->GetFunctions()
+                        ->pixBeginEventOnCommandList(commandList, kPIXBlackColor, label);
+                }
+                break;
+            }
+
+            case Command::SetRenderPipeline: {
+                SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                commandList->SetPipelineState(pipeline->GetPipelineState());
+                commandList->IASetPrimitiveTopology(pipeline->GetD3D12PrimitiveTopology());
+
+                bindingTracker->OnSetPipeline(pipeline);
+
+                lastPipeline = pipeline;
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                BindGroup* group = ToBackend(cmd->group.Get());
+                uint32_t* dynamicOffsets = nullptr;
+
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                }
+
+                bindingTracker->OnSetBindGroup(cmd->index, group, cmd->dynamicOffsetCount,
+                                               dynamicOffsets);
+                break;
+            }
+
+            case Command::SetIndexBuffer: {
+                SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+
+                D3D12_INDEX_BUFFER_VIEW bufferView;
+                bufferView.Format = DXGIIndexFormat(cmd->format);
+                bufferView.BufferLocation = ToBackend(cmd->buffer)->GetVA() + cmd->offset;
+                bufferView.SizeInBytes = cmd->size;
+
+                commandList->IASetIndexBuffer(&bufferView);
+                break;
+            }
+
+            case Command::SetVertexBuffer: {
+                SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+
+                vertexBufferTracker.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+                                                      cmd->offset, cmd->size);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+                break;
         }
         return {};
+    };
+
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::EndRenderPass: {
+                mCommands.NextCommand<EndRenderPassCmd>();
+                if (useRenderPass) {
+                    commandContext->GetCommandList4()->EndRenderPass();
+                } else if (renderPass->attachmentState->GetSampleCount() > 1) {
+                    ResolveMultisampledRenderPass(commandContext, renderPass);
+                }
+                return {};
+            }
+
+            case Command::SetStencilReference: {
+                SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+
+                commandList->OMSetStencilRef(cmd->reference);
+                break;
+            }
+
+            case Command::SetViewport: {
+                SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                D3D12_VIEWPORT viewport;
+                viewport.TopLeftX = cmd->x;
+                viewport.TopLeftY = cmd->y;
+                viewport.Width = cmd->width;
+                viewport.Height = cmd->height;
+                viewport.MinDepth = cmd->minDepth;
+                viewport.MaxDepth = cmd->maxDepth;
+
+                commandList->RSSetViewports(1, &viewport);
+                break;
+            }
+
+            case Command::SetScissorRect: {
+                SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                D3D12_RECT rect;
+                rect.left = cmd->x;
+                rect.top = cmd->y;
+                rect.right = cmd->x + cmd->width;
+                rect.bottom = cmd->y + cmd->height;
+
+                commandList->RSSetScissorRects(1, &rect);
+                break;
+            }
+
+            case Command::SetBlendConstant: {
+                SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                const std::array<float, 4> color = ConvertToFloatColor(cmd->color);
+                commandList->OMSetBlendFactor(color.data());
+                break;
+            }
+
+            case Command::ExecuteBundles: {
+                ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                for (uint32_t i = 0; i < cmd->count; ++i) {
+                    CommandIterator* iter = bundles[i]->GetCommands();
+                    iter->Reset();
+                    while (iter->NextCommandId(&type)) {
+                        DAWN_TRY(EncodeRenderBundleCommand(iter, type));
+                    }
+                }
+                break;
+            }
+
+            case Command::BeginOcclusionQuery: {
+                BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+                       D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+                commandList->BeginQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_BINARY_OCCLUSION,
+                                        cmd->queryIndex);
+                break;
+            }
+
+            case Command::EndOcclusionQuery: {
+                EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                ASSERT(D3D12QueryType(querySet->GetQueryType()) ==
+                       D3D12_QUERY_TYPE_BINARY_OCCLUSION);
+                commandList->EndQuery(querySet->GetQueryHeap(), D3D12_QUERY_TYPE_BINARY_OCCLUSION,
+                                      cmd->queryIndex);
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                RecordWriteTimestampCmd(commandList, cmd);
+                break;
+            }
+
+            default: {
+                DAWN_TRY(EncodeRenderBundleCommand(&mCommands, type));
+                break;
+            }
+        }
     }
+    return {};
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CommandBufferD3D12.h b/src/dawn/native/d3d12/CommandBufferD3D12.h
index 952d450..ea0bb70 100644
--- a/src/dawn/native/d3d12/CommandBufferD3D12.h
+++ b/src/dawn/native/d3d12/CommandBufferD3D12.h
@@ -19,38 +19,38 @@
 #include "dawn/native/Error.h"
 
 namespace dawn::native {
-    struct BeginRenderPassCmd;
+struct BeginRenderPassCmd;
 }  // namespace dawn::native
 
 namespace dawn::native::d3d12 {
 
-    class BindGroupStateTracker;
-    class CommandRecordingContext;
-    class RenderPassBuilder;
+class BindGroupStateTracker;
+class CommandRecordingContext;
+class RenderPassBuilder;
 
-    class CommandBuffer final : public CommandBufferBase {
-      public:
-        static Ref<CommandBuffer> Create(CommandEncoder* encoder,
-                                         const CommandBufferDescriptor* descriptor);
+class CommandBuffer final : public CommandBufferBase {
+  public:
+    static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+                                     const CommandBufferDescriptor* descriptor);
 
-        MaybeError RecordCommands(CommandRecordingContext* commandContext);
+    MaybeError RecordCommands(CommandRecordingContext* commandContext);
 
-      private:
-        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+  private:
+    CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
 
-        MaybeError RecordComputePass(CommandRecordingContext* commandContext,
-                                     BindGroupStateTracker* bindingTracker,
-                                     const ComputePassResourceUsage& resourceUsages);
-        MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
-                                    BindGroupStateTracker* bindingTracker,
-                                    BeginRenderPassCmd* renderPass,
-                                    bool passHasUAV);
-        MaybeError SetupRenderPass(CommandRecordingContext* commandContext,
-                                   BeginRenderPassCmd* renderPass,
-                                   RenderPassBuilder* renderPassBuilder);
-        void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
-                                    const RenderPassBuilder* renderPassBuilder) const;
-    };
+    MaybeError RecordComputePass(CommandRecordingContext* commandContext,
+                                 BindGroupStateTracker* bindingTracker,
+                                 const ComputePassResourceUsage& resourceUsages);
+    MaybeError RecordRenderPass(CommandRecordingContext* commandContext,
+                                BindGroupStateTracker* bindingTracker,
+                                BeginRenderPassCmd* renderPass,
+                                bool passHasUAV);
+    MaybeError SetupRenderPass(CommandRecordingContext* commandContext,
+                               BeginRenderPassCmd* renderPass,
+                               RenderPassBuilder* renderPassBuilder);
+    void EmulateBeginRenderPass(CommandRecordingContext* commandContext,
+                                const RenderPassBuilder* renderPassBuilder) const;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/CommandRecordingContext.cpp b/src/dawn/native/d3d12/CommandRecordingContext.cpp
index 121b64f..d4fa04d 100644
--- a/src/dawn/native/d3d12/CommandRecordingContext.cpp
+++ b/src/dawn/native/d3d12/CommandRecordingContext.cpp
@@ -30,150 +30,148 @@
 
 namespace dawn::native::d3d12 {
 
-    void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
-        ASSERT(IsOpen());
-        mSharedTextures.insert(texture);
+void CommandRecordingContext::AddToSharedTextureList(Texture* texture) {
+    ASSERT(IsOpen());
+    mSharedTextures.insert(texture);
+}
+
+MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
+                                         CommandAllocatorManager* commandAllocationManager) {
+    ASSERT(!IsOpen());
+    ID3D12CommandAllocator* commandAllocator;
+    DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
+    if (mD3d12CommandList != nullptr) {
+        MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
+                                        "D3D12 resetting command list");
+        if (error.IsError()) {
+            mD3d12CommandList.Reset();
+            DAWN_TRY(std::move(error));
+        }
+    } else {
+        ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
+        DAWN_TRY(CheckHRESULT(
+            d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
+                                           nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
+            "D3D12 creating direct command list"));
+        mD3d12CommandList = std::move(d3d12GraphicsCommandList);
+        // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
+        // pass APIs introduced in Windows build 1809.
+        mD3d12CommandList.As(&mD3d12CommandList4);
     }
 
-    MaybeError CommandRecordingContext::Open(ID3D12Device* d3d12Device,
-                                             CommandAllocatorManager* commandAllocationManager) {
-        ASSERT(!IsOpen());
-        ID3D12CommandAllocator* commandAllocator;
-        DAWN_TRY_ASSIGN(commandAllocator, commandAllocationManager->ReserveCommandAllocator());
-        if (mD3d12CommandList != nullptr) {
-            MaybeError error = CheckHRESULT(mD3d12CommandList->Reset(commandAllocator, nullptr),
-                                            "D3D12 resetting command list");
-            if (error.IsError()) {
-                mD3d12CommandList.Reset();
-                DAWN_TRY(std::move(error));
-            }
-        } else {
-            ComPtr<ID3D12GraphicsCommandList> d3d12GraphicsCommandList;
-            DAWN_TRY(CheckHRESULT(
-                d3d12Device->CreateCommandList(0, D3D12_COMMAND_LIST_TYPE_DIRECT, commandAllocator,
-                                               nullptr, IID_PPV_ARGS(&d3d12GraphicsCommandList)),
-                "D3D12 creating direct command list"));
-            mD3d12CommandList = std::move(d3d12GraphicsCommandList);
-            // Store a cast to ID3D12GraphicsCommandList4. This is required to use the D3D12 render
-            // pass APIs introduced in Windows build 1809.
-            mD3d12CommandList.As(&mD3d12CommandList4);
+    mIsOpen = true;
+
+    return {};
+}
+
+MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
+    if (IsOpen()) {
+        // Shared textures must be transitioned to common state after the last usage in order
+        // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
+        // common state right before command list submission. TransitionUsageNow itself ensures
+        // no unnecessary transitions happen if the resource is already in the common state.
+        for (Texture* texture : mSharedTextures) {
+            DAWN_TRY(texture->AcquireKeyedMutex());
+            texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
         }
 
-        mIsOpen = true;
-
-        return {};
-    }
-
-    MaybeError CommandRecordingContext::ExecuteCommandList(Device* device) {
-        if (IsOpen()) {
-            // Shared textures must be transitioned to common state after the last usage in order
-            // for them to be used by other APIs like D3D11. We ensure this by transitioning to the
-            // common state right before command list submission. TransitionUsageNow itself ensures
-            // no unnecessary transitions happen if the resources is already in the common state.
-            for (Texture* texture : mSharedTextures) {
-                DAWN_TRY(texture->AcquireKeyedMutex());
-                texture->TrackAllUsageAndTransitionNow(this, D3D12_RESOURCE_STATE_COMMON);
-            }
-
-            MaybeError error =
-                CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
-            if (error.IsError()) {
-                Release();
-                DAWN_TRY(std::move(error));
-            }
-            DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(
-                mHeapsPendingUsage.data(), mHeapsPendingUsage.size()));
-
-            if (device->IsToggleEnabled(Toggle::RecordDetailedTimingInTraceEvents)) {
-                uint64_t gpuTimestamp;
-                uint64_t cpuTimestamp;
-                FILETIME fileTimeNonPrecise;
-                SYSTEMTIME systemTimeNonPrecise;
-
-                // Both supported since Windows 2000, have a accuracy of 1ms
-                GetSystemTimeAsFileTime(&fileTimeNonPrecise);
-                GetSystemTime(&systemTimeNonPrecise);
-                // Query CPU and GPU timestamps at almost the same time
-                device->GetCommandQueue()->GetClockCalibration(&gpuTimestamp, &cpuTimestamp);
-
-                uint64_t gpuFrequency;
-                uint64_t cpuFrequency;
-                LARGE_INTEGER cpuFrequencyLargeInteger;
-                device->GetCommandQueue()->GetTimestampFrequency(&gpuFrequency);
-                QueryPerformanceFrequency(
-                    &cpuFrequencyLargeInteger);  // Supported since Windows 2000
-                cpuFrequency = cpuFrequencyLargeInteger.QuadPart;
-
-                std::string timingInfo = absl::StrFormat(
-                    "UTC Time: %u/%u/%u %02u:%02u:%02u.%03u, File Time: %u, CPU "
-                    "Timestamp: %u, GPU Timestamp: %u, CPU Tick Frequency: %u, GPU Tick Frequency: "
-                    "%u",
-                    systemTimeNonPrecise.wYear, systemTimeNonPrecise.wMonth,
-                    systemTimeNonPrecise.wDay, systemTimeNonPrecise.wHour,
-                    systemTimeNonPrecise.wMinute, systemTimeNonPrecise.wSecond,
-                    systemTimeNonPrecise.wMilliseconds,
-                    (static_cast<uint64_t>(fileTimeNonPrecise.dwHighDateTime) << 32) +
-                        fileTimeNonPrecise.dwLowDateTime,
-                    cpuTimestamp, gpuTimestamp, cpuFrequency, gpuFrequency);
-
-                TRACE_EVENT_INSTANT1(
-                    device->GetPlatform(), General,
-                    "d3d12::CommandRecordingContext::ExecuteCommandList Detailed Timing", "Timing",
-                    timingInfo.c_str());
-            }
-
-            ID3D12CommandList* d3d12CommandList = GetCommandList();
-            device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
-
-            for (Texture* texture : mSharedTextures) {
-                texture->ReleaseKeyedMutex();
-            }
-
-            mIsOpen = false;
-            mSharedTextures.clear();
-            mHeapsPendingUsage.clear();
+        MaybeError error =
+            CheckHRESULT(mD3d12CommandList->Close(), "D3D12 closing pending command list");
+        if (error.IsError()) {
+            Release();
+            DAWN_TRY(std::move(error));
         }
-        return {};
-    }
+        DAWN_TRY(device->GetResidencyManager()->EnsureHeapsAreResident(mHeapsPendingUsage.data(),
+                                                                       mHeapsPendingUsage.size()));
 
-    void CommandRecordingContext::TrackHeapUsage(Heap* heap, ExecutionSerial serial) {
-        // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
-        // tracking it more than once.
-        if (heap->GetLastUsage() < serial) {
-            heap->SetLastUsage(serial);
-            mHeapsPendingUsage.push_back(heap);
+        if (device->IsToggleEnabled(Toggle::RecordDetailedTimingInTraceEvents)) {
+            uint64_t gpuTimestamp;
+            uint64_t cpuTimestamp;
+            FILETIME fileTimeNonPrecise;
+            SYSTEMTIME systemTimeNonPrecise;
+
+            // Both supported since Windows 2000, have an accuracy of 1ms
+            GetSystemTimeAsFileTime(&fileTimeNonPrecise);
+            GetSystemTime(&systemTimeNonPrecise);
+            // Query CPU and GPU timestamps at almost the same time
+            device->GetCommandQueue()->GetClockCalibration(&gpuTimestamp, &cpuTimestamp);
+
+            uint64_t gpuFrequency;
+            uint64_t cpuFrequency;
+            LARGE_INTEGER cpuFrequencyLargeInteger;
+            device->GetCommandQueue()->GetTimestampFrequency(&gpuFrequency);
+            QueryPerformanceFrequency(&cpuFrequencyLargeInteger);  // Supported since Windows 2000
+            cpuFrequency = cpuFrequencyLargeInteger.QuadPart;
+
+            std::string timingInfo = absl::StrFormat(
+                "UTC Time: %u/%u/%u %02u:%02u:%02u.%03u, File Time: %u, CPU "
+                "Timestamp: %u, GPU Timestamp: %u, CPU Tick Frequency: %u, GPU Tick Frequency: "
+                "%u",
+                systemTimeNonPrecise.wYear, systemTimeNonPrecise.wMonth, systemTimeNonPrecise.wDay,
+                systemTimeNonPrecise.wHour, systemTimeNonPrecise.wMinute,
+                systemTimeNonPrecise.wSecond, systemTimeNonPrecise.wMilliseconds,
+                (static_cast<uint64_t>(fileTimeNonPrecise.dwHighDateTime) << 32) +
+                    fileTimeNonPrecise.dwLowDateTime,
+                cpuTimestamp, gpuTimestamp, cpuFrequency, gpuFrequency);
+
+            TRACE_EVENT_INSTANT1(
+                device->GetPlatform(), General,
+                "d3d12::CommandRecordingContext::ExecuteCommandList Detailed Timing", "Timing",
+                timingInfo.c_str());
         }
-    }
 
-    ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
-        ASSERT(mD3d12CommandList != nullptr);
-        ASSERT(IsOpen());
-        return mD3d12CommandList.Get();
-    }
+        ID3D12CommandList* d3d12CommandList = GetCommandList();
+        device->GetCommandQueue()->ExecuteCommandLists(1, &d3d12CommandList);
 
-    // This function will fail on Windows versions prior to 1809. Support must be queried through
-    // the device before calling.
-    ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
-        ASSERT(IsOpen());
-        ASSERT(mD3d12CommandList != nullptr);
-        return mD3d12CommandList4.Get();
-    }
+        for (Texture* texture : mSharedTextures) {
+            texture->ReleaseKeyedMutex();
+        }
 
-    void CommandRecordingContext::Release() {
-        mD3d12CommandList.Reset();
-        mD3d12CommandList4.Reset();
         mIsOpen = false;
         mSharedTextures.clear();
         mHeapsPendingUsage.clear();
-        mTempBuffers.clear();
     }
+    return {};
+}
 
-    bool CommandRecordingContext::IsOpen() const {
-        return mIsOpen;
+void CommandRecordingContext::TrackHeapUsage(Heap* heap, ExecutionSerial serial) {
+    // Before tracking the heap, check the last serial it was recorded on to ensure we aren't
+    // tracking it more than once.
+    if (heap->GetLastUsage() < serial) {
+        heap->SetLastUsage(serial);
+        mHeapsPendingUsage.push_back(heap);
     }
+}
 
-    void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
-        mTempBuffers.emplace_back(tempBuffer);
-    }
+ID3D12GraphicsCommandList* CommandRecordingContext::GetCommandList() const {
+    ASSERT(mD3d12CommandList != nullptr);
+    ASSERT(IsOpen());
+    return mD3d12CommandList.Get();
+}
+
+// This function will fail on Windows versions prior to 1809. Support must be queried through
+// the device before calling.
+ID3D12GraphicsCommandList4* CommandRecordingContext::GetCommandList4() const {
+    ASSERT(IsOpen());
+    ASSERT(mD3d12CommandList != nullptr);
+    return mD3d12CommandList4.Get();
+}
+
+void CommandRecordingContext::Release() {
+    mD3d12CommandList.Reset();
+    mD3d12CommandList4.Reset();
+    mIsOpen = false;
+    mSharedTextures.clear();
+    mHeapsPendingUsage.clear();
+    mTempBuffers.clear();
+}
+
+bool CommandRecordingContext::IsOpen() const {
+    return mIsOpen;
+}
+
+void CommandRecordingContext::AddToTempBuffers(Ref<Buffer> tempBuffer) {
+    mTempBuffers.emplace_back(tempBuffer);
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/CommandRecordingContext.h b/src/dawn/native/d3d12/CommandRecordingContext.h
index 610b99e..80b6204 100644
--- a/src/dawn/native/d3d12/CommandRecordingContext.h
+++ b/src/dawn/native/d3d12/CommandRecordingContext.h
@@ -23,37 +23,36 @@
 #include "dawn/native/d3d12/d3d12_platform.h"
 
 namespace dawn::native::d3d12 {
-    class CommandAllocatorManager;
-    class Device;
-    class Heap;
-    class Texture;
+class CommandAllocatorManager;
+class Device;
+class Heap;
+class Texture;
 
-    class CommandRecordingContext {
-      public:
-        void AddToSharedTextureList(Texture* texture);
-        MaybeError Open(ID3D12Device* d3d12Device,
-                        CommandAllocatorManager* commandAllocationManager);
+class CommandRecordingContext {
+  public:
+    void AddToSharedTextureList(Texture* texture);
+    MaybeError Open(ID3D12Device* d3d12Device, CommandAllocatorManager* commandAllocationManager);
 
-        ID3D12GraphicsCommandList* GetCommandList() const;
-        ID3D12GraphicsCommandList4* GetCommandList4() const;
-        void Release();
-        bool IsOpen() const;
+    ID3D12GraphicsCommandList* GetCommandList() const;
+    ID3D12GraphicsCommandList4* GetCommandList4() const;
+    void Release();
+    bool IsOpen() const;
 
-        MaybeError ExecuteCommandList(Device* device);
+    MaybeError ExecuteCommandList(Device* device);
 
-        void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
+    void TrackHeapUsage(Heap* heap, ExecutionSerial serial);
 
-        void AddToTempBuffers(Ref<Buffer> tempBuffer);
+    void AddToTempBuffers(Ref<Buffer> tempBuffer);
 
-      private:
-        ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
-        ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
-        bool mIsOpen = false;
-        std::set<Texture*> mSharedTextures;
-        std::vector<Heap*> mHeapsPendingUsage;
+  private:
+    ComPtr<ID3D12GraphicsCommandList> mD3d12CommandList;
+    ComPtr<ID3D12GraphicsCommandList4> mD3d12CommandList4;
+    bool mIsOpen = false;
+    std::set<Texture*> mSharedTextures;
+    std::vector<Heap*> mHeapsPendingUsage;
 
-        std::vector<Ref<Buffer>> mTempBuffers;
-    };
+    std::vector<Ref<Buffer>> mTempBuffers;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_COMMANDRECORDINGCONTEXT_H_
diff --git a/src/dawn/native/d3d12/ComputePipelineD3D12.cpp b/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
index e3ec315..cad0ce5 100644
--- a/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
+++ b/src/dawn/native/d3d12/ComputePipelineD3D12.cpp
@@ -27,82 +27,81 @@
 
 namespace dawn::native::d3d12 {
 
-    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
-        Device* device,
-        const ComputePipelineDescriptor* descriptor) {
-        return AcquireRef(new ComputePipeline(device, descriptor));
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+    Device* device,
+    const ComputePipelineDescriptor* descriptor) {
+    return AcquireRef(new ComputePipeline(device, descriptor));
+}
+
+MaybeError ComputePipeline::Initialize() {
+    Device* device = ToBackend(GetDevice());
+    uint32_t compileFlags = 0;
+
+    if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+        !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+        compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
     }
 
-    MaybeError ComputePipeline::Initialize() {
-        Device* device = ToBackend(GetDevice());
-        uint32_t compileFlags = 0;
-
-        if (!device->IsToggleEnabled(Toggle::UseDXC) &&
-            !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
-            compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
-        }
-
-        if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
-            compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
-        }
-
-        // SPRIV-cross does matrix multiplication expecting row major matrices
-        compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
-
-        const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
-        ShaderModule* module = ToBackend(computeStage.module.Get());
-
-        D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
-        d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
-
-        CompiledShader compiledShader;
-        DAWN_TRY_ASSIGN(compiledShader, module->Compile(computeStage, SingleShaderStage::Compute,
-                                                        ToBackend(GetLayout()), compileFlags));
-        d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
-        auto* d3d12Device = device->GetD3D12Device();
-        DAWN_TRY(CheckHRESULT(
-            d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
-            "D3D12 creating pipeline state"));
-
-        SetLabelImpl();
-
-        return {};
+    if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+        compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
     }
 
-    ComputePipeline::~ComputePipeline() = default;
+    // SPIRV-Cross does matrix multiplication expecting row major matrices
+    compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
 
-    void ComputePipeline::DestroyImpl() {
-        ComputePipelineBase::DestroyImpl();
-        ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
-    }
+    const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+    ShaderModule* module = ToBackend(computeStage.module.Get());
 
-    ID3D12PipelineState* ComputePipeline::GetPipelineState() const {
-        return mPipelineState.Get();
-    }
+    D3D12_COMPUTE_PIPELINE_STATE_DESC d3dDesc = {};
+    d3dDesc.pRootSignature = ToBackend(GetLayout())->GetRootSignature();
 
-    void ComputePipeline::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline",
-                     GetLabel());
-    }
+    CompiledShader compiledShader;
+    DAWN_TRY_ASSIGN(compiledShader, module->Compile(computeStage, SingleShaderStage::Compute,
+                                                    ToBackend(GetLayout()), compileFlags));
+    d3dDesc.CS = compiledShader.GetD3D12ShaderBytecode();
+    auto* d3d12Device = device->GetD3D12Device();
+    DAWN_TRY(CheckHRESULT(
+        d3d12Device->CreateComputePipelineState(&d3dDesc, IID_PPV_ARGS(&mPipelineState)),
+        "D3D12 creating pipeline state"));
 
-    void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
-                                          WGPUCreateComputePipelineAsyncCallback callback,
-                                          void* userdata) {
-        std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
-            std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
-                                                             userdata);
-        CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
-    }
+    SetLabelImpl();
 
-    bool ComputePipeline::UsesNumWorkgroups() const {
-        return GetStage(SingleShaderStage::Compute).metadata->usesNumWorkgroups;
-    }
+    return {};
+}
 
-    ComPtr<ID3D12CommandSignature> ComputePipeline::GetDispatchIndirectCommandSignature() {
-        if (UsesNumWorkgroups()) {
-            return ToBackend(GetLayout())->GetDispatchIndirectCommandSignatureWithNumWorkgroups();
-        }
-        return ToBackend(GetDevice())->GetDispatchIndirectSignature();
+ComputePipeline::~ComputePipeline() = default;
+
+void ComputePipeline::DestroyImpl() {
+    ComputePipelineBase::DestroyImpl();
+    ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+}
+
+ID3D12PipelineState* ComputePipeline::GetPipelineState() const {
+    return mPipelineState.Get();
+}
+
+void ComputePipeline::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_ComputePipeline", GetLabel());
+}
+
+void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                      WGPUCreateComputePipelineAsyncCallback callback,
+                                      void* userdata) {
+    std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+        std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+                                                         userdata);
+    CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
+
+bool ComputePipeline::UsesNumWorkgroups() const {
+    return GetStage(SingleShaderStage::Compute).metadata->usesNumWorkgroups;
+}
+
+ComPtr<ID3D12CommandSignature> ComputePipeline::GetDispatchIndirectCommandSignature() {
+    if (UsesNumWorkgroups()) {
+        return ToBackend(GetLayout())->GetDispatchIndirectCommandSignatureWithNumWorkgroups();
     }
+    return ToBackend(GetDevice())->GetDispatchIndirectSignature();
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ComputePipelineD3D12.h b/src/dawn/native/d3d12/ComputePipelineD3D12.h
index cf55c13..ef07ced 100644
--- a/src/dawn/native/d3d12/ComputePipelineD3D12.h
+++ b/src/dawn/native/d3d12/ComputePipelineD3D12.h
@@ -21,37 +21,36 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class ComputePipeline final : public ComputePipelineBase {
-      public:
-        static Ref<ComputePipeline> CreateUninitialized(
-            Device* device,
-            const ComputePipelineDescriptor* descriptor);
-        static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
-                                    WGPUCreateComputePipelineAsyncCallback callback,
-                                    void* userdata);
-        ComputePipeline() = delete;
+class ComputePipeline final : public ComputePipelineBase {
+  public:
+    static Ref<ComputePipeline> CreateUninitialized(Device* device,
+                                                    const ComputePipelineDescriptor* descriptor);
+    static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                WGPUCreateComputePipelineAsyncCallback callback,
+                                void* userdata);
+    ComputePipeline() = delete;
 
-        ID3D12PipelineState* GetPipelineState() const;
+    ID3D12PipelineState* GetPipelineState() const;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-        bool UsesNumWorkgroups() const;
+    bool UsesNumWorkgroups() const;
 
-        ComPtr<ID3D12CommandSignature> GetDispatchIndirectCommandSignature();
+    ComPtr<ID3D12CommandSignature> GetDispatchIndirectCommandSignature();
 
-      private:
-        ~ComputePipeline() override;
+  private:
+    ~ComputePipeline() override;
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        using ComputePipelineBase::ComputePipelineBase;
-        ComPtr<ID3D12PipelineState> mPipelineState;
-    };
+    using ComputePipelineBase::ComputePipelineBase;
+    ComPtr<ID3D12PipelineState> mPipelineState;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/D3D11on12Util.cpp b/src/dawn/native/d3d12/D3D11on12Util.cpp
index cca831b..a31c833 100644
--- a/src/dawn/native/d3d12/D3D11on12Util.cpp
+++ b/src/dawn/native/d3d12/D3D11on12Util.cpp
@@ -21,168 +21,164 @@
 
 #include "dawn/common/HashUtils.h"
 #include "dawn/common/Log.h"
+#include "dawn/native/D3D12Backend.h"
 #include "dawn/native/d3d12/D3D12Error.h"
 #include "dawn/native/d3d12/DeviceD3D12.h"
-#include "dawn/native/D3D12Backend.h"
 
 namespace dawn::native::d3d12 {
 
-    void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
-        if (d3d11on12Device == nullptr) {
-            return;
-        }
-
-        ComPtr<ID3D11Device> d3d11Device;
-        if (FAILED(d3d11on12Device.As(&d3d11Device))) {
-            return;
-        }
-
-        ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
-        d3d11Device->GetImmediateContext(&d3d11DeviceContext);
-
-        ASSERT(d3d11DeviceContext != nullptr);
-
-        // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
-        // are not released until work is submitted to the device context and flushed.
-        // The most minimal work we can get away with is issuing a TiledResourceBarrier.
-
-        // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
-        // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
-        ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
-        if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
-            return;
-        }
-
-        d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
-        d3d11DeviceContext2->Flush();
+void Flush11On12DeviceToAvoidLeaks(ComPtr<ID3D11On12Device> d3d11on12Device) {
+    if (d3d11on12Device == nullptr) {
+        return;
     }
 
-    D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
-        ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
-        ComPtr<ID3D11On12Device> d3d11On12Device)
-        : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {
+    ComPtr<ID3D11Device> d3d11Device;
+    if (FAILED(d3d11on12Device.As(&d3d11Device))) {
+        return;
     }
 
-    D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(
-        ComPtr<ID3D11On12Device> d3d11On12Device)
-        : mD3D11on12Device(std::move(d3d11On12Device)) {
+    ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+    d3d11Device->GetImmediateContext(&d3d11DeviceContext);
+
+    ASSERT(d3d11DeviceContext != nullptr);
+
+    // 11on12 has a bug where D3D12 resources used only for keyed shared mutexes
+    // are not released until work is submitted to the device context and flushed.
+    // The most minimal work we can get away with is issuing a TiledResourceBarrier.
+
+    // ID3D11DeviceContext2 is available in Win8.1 and above. This suffices for a
+    // D3D12 backend since both D3D12 and 11on12 first appeared in Windows 10.
+    ComPtr<ID3D11DeviceContext2> d3d11DeviceContext2;
+    if (FAILED(d3d11DeviceContext.As(&d3d11DeviceContext2))) {
+        return;
     }
 
-    D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
-        if (mDXGIKeyedMutex == nullptr) {
-            return;
-        }
+    d3d11DeviceContext2->TiledResourceBarrier(nullptr, nullptr);
+    d3d11DeviceContext2->Flush();
+}
 
-        if (mAcquireCount > 0) {
-            mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
-        }
+D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex,
+                                                         ComPtr<ID3D11On12Device> d3d11On12Device)
+    : mDXGIKeyedMutex(std::move(dxgiKeyedMutex)), mD3D11on12Device(std::move(d3d11On12Device)) {}
 
-        ComPtr<ID3D11Resource> d3d11Resource;
-        if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
-            return;
-        }
+D3D11on12ResourceCacheEntry::D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11On12Device)
+    : mD3D11on12Device(std::move(d3d11On12Device)) {}
 
-        ASSERT(mD3D11on12Device != nullptr);
-
-        ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
-        mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
-
-        d3d11Resource.Reset();
-        mDXGIKeyedMutex.Reset();
-
-        Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
+D3D11on12ResourceCacheEntry::~D3D11on12ResourceCacheEntry() {
+    if (mDXGIKeyedMutex == nullptr) {
+        return;
     }
 
-    MaybeError D3D11on12ResourceCacheEntry::AcquireKeyedMutex() {
-        ASSERT(mDXGIKeyedMutex != nullptr);
-        ASSERT(mAcquireCount >= 0);
-        if (mAcquireCount == 0) {
-            DAWN_TRY(CheckHRESULT(
-                mDXGIKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE),
-                "D3D12 acquiring shared mutex"));
-        }
-        mAcquireCount++;
-        return {};
+    if (mAcquireCount > 0) {
+        mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
     }
 
-    void D3D11on12ResourceCacheEntry::ReleaseKeyedMutex() {
-        ASSERT(mDXGIKeyedMutex != nullptr);
-        ASSERT(mAcquireCount > 0);
-        mAcquireCount--;
-        if (mAcquireCount == 0) {
-            mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
-        }
+    ComPtr<ID3D11Resource> d3d11Resource;
+    if (FAILED(mDXGIKeyedMutex.As(&d3d11Resource))) {
+        return;
     }
 
-    size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
-        const Ref<D3D11on12ResourceCacheEntry> a) const {
-        size_t hash = 0;
-        HashCombine(&hash, a->mD3D11on12Device.Get());
-        return hash;
+    ASSERT(mD3D11on12Device != nullptr);
+
+    ID3D11Resource* d3d11ResourceRaw = d3d11Resource.Get();
+    mD3D11on12Device->ReleaseWrappedResources(&d3d11ResourceRaw, 1);
+
+    d3d11Resource.Reset();
+    mDXGIKeyedMutex.Reset();
+
+    Flush11On12DeviceToAvoidLeaks(std::move(mD3D11on12Device));
+}
+
+MaybeError D3D11on12ResourceCacheEntry::AcquireKeyedMutex() {
+    ASSERT(mDXGIKeyedMutex != nullptr);
+    ASSERT(mAcquireCount >= 0);
+    if (mAcquireCount == 0) {
+        DAWN_TRY(
+            CheckHRESULT(mDXGIKeyedMutex->AcquireSync(kDXGIKeyedMutexAcquireReleaseKey, INFINITE),
+                         "D3D12 acquiring shared mutex"));
+    }
+    mAcquireCount++;
+    return {};
+}
+
+void D3D11on12ResourceCacheEntry::ReleaseKeyedMutex() {
+    ASSERT(mDXGIKeyedMutex != nullptr);
+    ASSERT(mAcquireCount > 0);
+    mAcquireCount--;
+    if (mAcquireCount == 0) {
+        mDXGIKeyedMutex->ReleaseSync(kDXGIKeyedMutexAcquireReleaseKey);
+    }
+}
+
+size_t D3D11on12ResourceCacheEntry::HashFunc::operator()(
+    const Ref<D3D11on12ResourceCacheEntry> a) const {
+    size_t hash = 0;
+    HashCombine(&hash, a->mD3D11on12Device.Get());
+    return hash;
+}
+
+bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
+    const Ref<D3D11on12ResourceCacheEntry> a,
+    const Ref<D3D11on12ResourceCacheEntry> b) const {
+    return a->mD3D11on12Device == b->mD3D11on12Device;
+}
+
+D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
+
+D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
+
+Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
+    WGPUDevice device,
+    ID3D12Resource* d3d12Resource) {
+    Device* backendDevice = reinterpret_cast<Device*>(device);
+    // The Dawn and 11on12 device share the same D3D12 command queue whereas this external image
+    // could be accessed/produced with multiple Dawn devices. To avoid cross-queue sharing
+    // restrictions, the 11 wrapped resource is forbidden to be shared between Dawn devices by
+    // using the 11on12 device as the cache key.
+    ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
+    if (d3d11on12Device == nullptr) {
+        dawn::ErrorLog() << "Unable to create 11on12 device for external image";
+        return nullptr;
     }
 
-    bool D3D11on12ResourceCacheEntry::EqualityFunc::operator()(
-        const Ref<D3D11on12ResourceCacheEntry> a,
-        const Ref<D3D11on12ResourceCacheEntry> b) const {
-        return a->mD3D11on12Device == b->mD3D11on12Device;
+    D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
+    auto iter = mCache.find(&blueprint);
+    if (iter != mCache.end()) {
+        return *iter;
     }
 
-    D3D11on12ResourceCache::D3D11on12ResourceCache() = default;
-
-    D3D11on12ResourceCache::~D3D11on12ResourceCache() = default;
-
-    Ref<D3D11on12ResourceCacheEntry> D3D11on12ResourceCache::GetOrCreateD3D11on12Resource(
-        WGPUDevice device,
-        ID3D12Resource* d3d12Resource) {
-        Device* backendDevice = reinterpret_cast<Device*>(device);
-        // The Dawn and 11on12 device share the same D3D12 command queue whereas this external image
-        // could be accessed/produced with multiple Dawn devices. To avoid cross-queue sharing
-        // restrictions, the 11 wrapped resource is forbidden to be shared between Dawn devices by
-        // using the 11on12 device as the cache key.
-        ComPtr<ID3D11On12Device> d3d11on12Device = backendDevice->GetOrCreateD3D11on12Device();
-        if (d3d11on12Device == nullptr) {
-            dawn::ErrorLog() << "Unable to create 11on12 device for external image";
-            return nullptr;
-        }
-
-        D3D11on12ResourceCacheEntry blueprint(d3d11on12Device);
-        auto iter = mCache.find(&blueprint);
-        if (iter != mCache.end()) {
-            return *iter;
-        }
-
-        // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
-        // are a viable alternative but are, unfortunately, not available on all versions of Windows
-        // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
-        // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
-        ComPtr<ID3D11Texture2D> d3d11Texture;
-        D3D11_RESOURCE_FLAGS resourceFlags;
-        resourceFlags.BindFlags = 0;
-        resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
-        resourceFlags.CPUAccessFlags = 0;
-        resourceFlags.StructureByteStride = 0;
-        if (FAILED(d3d11on12Device->CreateWrappedResource(
-                d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON,
-                D3D12_RESOURCE_STATE_COMMON, IID_PPV_ARGS(&d3d11Texture)))) {
-            return nullptr;
-        }
-
-        ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
-        if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
-            return nullptr;
-        }
-
-        // Keep this cache from growing unbounded.
-        // TODO(dawn:625): Consider using a replacement policy based cache.
-        if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
-            mCache.clear();
-        }
-
-        Ref<D3D11on12ResourceCacheEntry> entry =
-            AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
-        mCache.insert(entry);
-
-        return entry;
+    // We use IDXGIKeyedMutexes to synchronize access between D3D11 and D3D12. D3D11/12 fences
+    // are a viable alternative but are, unfortunately, not available on all versions of Windows
+    // 10. Since D3D12 does not directly support keyed mutexes, we need to wrap the D3D12
+    // resource using 11on12 and QueryInterface the D3D11 representation for the keyed mutex.
+    ComPtr<ID3D11Texture2D> d3d11Texture;
+    D3D11_RESOURCE_FLAGS resourceFlags;
+    resourceFlags.BindFlags = 0;
+    resourceFlags.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+    resourceFlags.CPUAccessFlags = 0;
+    resourceFlags.StructureByteStride = 0;
+    if (FAILED(d3d11on12Device->CreateWrappedResource(
+            d3d12Resource, &resourceFlags, D3D12_RESOURCE_STATE_COMMON, D3D12_RESOURCE_STATE_COMMON,
+            IID_PPV_ARGS(&d3d11Texture)))) {
+        return nullptr;
     }
 
+    ComPtr<IDXGIKeyedMutex> dxgiKeyedMutex;
+    if (FAILED(d3d11Texture.As(&dxgiKeyedMutex))) {
+        return nullptr;
+    }
+
+    // Keep this cache from growing unbounded.
+    // TODO(dawn:625): Consider using a replacement policy based cache.
+    if (mCache.size() > kMaxD3D11on12ResourceCacheSize) {
+        mCache.clear();
+    }
+
+    Ref<D3D11on12ResourceCacheEntry> entry =
+        AcquireRef(new D3D11on12ResourceCacheEntry(dxgiKeyedMutex, std::move(d3d11on12Device)));
+    mCache.insert(entry);
+
+    return entry;
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D11on12Util.h b/src/dawn/native/d3d12/D3D11on12Util.h
index 77b2437..9e28c8b 100644
--- a/src/dawn/native/d3d12/D3D11on12Util.h
+++ b/src/dawn/native/d3d12/D3D11on12Util.h
@@ -28,65 +28,64 @@
 
 namespace dawn::native::d3d12 {
 
-    // Wraps 11 wrapped resources in a cache.
-    class D3D11on12ResourceCacheEntry : public RefCounted {
-      public:
-        explicit D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
-        D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
-                                    ComPtr<ID3D11On12Device> d3d11on12Device);
-        ~D3D11on12ResourceCacheEntry();
+// Wraps 11 wrapped resources in a cache.
+class D3D11on12ResourceCacheEntry : public RefCounted {
+  public:
+    explicit D3D11on12ResourceCacheEntry(ComPtr<ID3D11On12Device> d3d11on12Device);
+    D3D11on12ResourceCacheEntry(ComPtr<IDXGIKeyedMutex> d3d11on12Resource,
+                                ComPtr<ID3D11On12Device> d3d11on12Device);
+    ~D3D11on12ResourceCacheEntry();
 
-        MaybeError AcquireKeyedMutex();
-        void ReleaseKeyedMutex();
+    MaybeError AcquireKeyedMutex();
+    void ReleaseKeyedMutex();
 
-        // Functors necessary for the
-        // unordered_set<D3D11on12ResourceCacheEntry&>-based cache.
-        struct HashFunc {
-            size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
-        };
-
-        struct EqualityFunc {
-            bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
-                            const Ref<D3D11on12ResourceCacheEntry> b) const;
-        };
-
-      private:
-        ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
-        ComPtr<ID3D11On12Device> mD3D11on12Device;
-        int64_t mAcquireCount = 0;
+    // Functors necessary for the
+    // unordered_set<D3D11on12ResourceCacheEntry&>-based cache.
+    struct HashFunc {
+        size_t operator()(const Ref<D3D11on12ResourceCacheEntry> a) const;
     };
 
-    // |D3D11on12ResourceCache| maintains a cache of 11 wrapped resources.
-    // Each entry represents a 11 resource that is exclusively accessed by Dawn device.
-    // Since each Dawn device creates and stores a 11on12 device, the 11on12 device
-    // is used as the key for the cache entry which ensures only the same 11 wrapped
-    // resource is re-used and also fully released.
-    //
-    // The cache is primarily needed to avoid repeatedly calling CreateWrappedResource
-    // and special release code per ProduceTexture(device).
-    class D3D11on12ResourceCache {
-      public:
-        D3D11on12ResourceCache();
-        ~D3D11on12ResourceCache();
-
-        Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(
-            WGPUDevice device,
-            ID3D12Resource* d3d12Resource);
-
-      private:
-        // TODO(dawn:625): Figure out a large enough cache size.
-        static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
-
-        // 11on12 resource cache entries are refcounted to ensure if the ExternalImage outlives the
-        // Dawn texture (or vice-versa), we always fully release the 11 wrapped resource without
-        // waiting until Dawn device to shutdown.
-        using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
-                                         D3D11on12ResourceCacheEntry::HashFunc,
-                                         D3D11on12ResourceCacheEntry::EqualityFunc>;
-
-        Cache mCache;
+    struct EqualityFunc {
+        bool operator()(const Ref<D3D11on12ResourceCacheEntry> a,
+                        const Ref<D3D11on12ResourceCacheEntry> b) const;
     };
 
+  private:
+    ComPtr<IDXGIKeyedMutex> mDXGIKeyedMutex;
+    ComPtr<ID3D11On12Device> mD3D11on12Device;
+    int64_t mAcquireCount = 0;
+};
+
+// |D3D11on12ResourceCache| maintains a cache of 11 wrapped resources.
+// Each entry represents an 11 resource that is exclusively accessed by a Dawn device.
+// Since each Dawn device creates and stores a 11on12 device, the 11on12 device
+// is used as the key for the cache entry which ensures only the same 11 wrapped
+// resource is re-used and also fully released.
+//
+// The cache is primarily needed to avoid repeatedly calling CreateWrappedResource
+// and special release code per ProduceTexture(device).
+class D3D11on12ResourceCache {
+  public:
+    D3D11on12ResourceCache();
+    ~D3D11on12ResourceCache();
+
+    Ref<D3D11on12ResourceCacheEntry> GetOrCreateD3D11on12Resource(WGPUDevice device,
+                                                                  ID3D12Resource* d3d12Resource);
+
+  private:
+    // TODO(dawn:625): Figure out a large enough cache size.
+    static constexpr uint64_t kMaxD3D11on12ResourceCacheSize = 5;
+
+    // 11on12 resource cache entries are refcounted to ensure if the ExternalImage outlives the
+    // Dawn texture (or vice-versa), we always fully release the 11 wrapped resource without
+    // waiting for the Dawn device to shut down.
+    using Cache = std::unordered_set<Ref<D3D11on12ResourceCacheEntry>,
+                                     D3D11on12ResourceCacheEntry::HashFunc,
+                                     D3D11on12ResourceCacheEntry::EqualityFunc>;
+
+    Cache mCache;
+};
+
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_D3D11ON12UTIL_H_
diff --git a/src/dawn/native/d3d12/D3D12Backend.cpp b/src/dawn/native/d3d12/D3D12Backend.cpp
index a08918b..9747b41 100644
--- a/src/dawn/native/d3d12/D3D12Backend.cpp
+++ b/src/dawn/native/d3d12/D3D12Backend.cpp
@@ -31,152 +31,146 @@
 
 namespace dawn::native::d3d12 {
 
-    ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
-        return ToBackend(FromAPI(device))->GetD3D12Device();
+ComPtr<ID3D12Device> GetD3D12Device(WGPUDevice device) {
+    return ToBackend(FromAPI(device))->GetD3D12Device();
+}
+
+DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+
+    DawnSwapChainImplementation impl;
+    impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
+    impl.textureUsage = WGPUTextureUsage_Present;
+
+    return impl;
+}
+
+WGPUTextureFormat GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain) {
+    NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+    return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+}
+
+ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
+    : ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {}
+
+ExternalImageDXGI::ExternalImageDXGI(ComPtr<ID3D12Resource> d3d12Resource,
+                                     const WGPUTextureDescriptor* descriptor)
+    : mD3D12Resource(std::move(d3d12Resource)),
+      mUsage(descriptor->usage),
+      mDimension(descriptor->dimension),
+      mSize(descriptor->size),
+      mFormat(descriptor->format),
+      mMipLevelCount(descriptor->mipLevelCount),
+      mSampleCount(descriptor->sampleCount) {
+    ASSERT(!descriptor->nextInChain ||
+           descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
+    if (descriptor->nextInChain) {
+        mUsageInternal =
+            reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(descriptor->nextInChain)
+                ->internalUsage;
+    }
+    mD3D11on12ResourceCache = std::make_unique<D3D11on12ResourceCache>();
+}
+
+ExternalImageDXGI::~ExternalImageDXGI() = default;
+
+WGPUTexture ExternalImageDXGI::ProduceTexture(
+    WGPUDevice device,
+    const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+
+    // Ensure the texture usage is allowed
+    if (!IsSubset(descriptor->usage, mUsage)) {
+        dawn::ErrorLog() << "Texture usage is not valid for external image";
+        return nullptr;
     }
 
-    DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device, HWND window) {
-        Device* backendDevice = ToBackend(FromAPI(device));
+    TextureDescriptor textureDescriptor = {};
+    textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
+    textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
+    textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
+    textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
+    textureDescriptor.mipLevelCount = mMipLevelCount;
+    textureDescriptor.sampleCount = mSampleCount;
 
-        DawnSwapChainImplementation impl;
-        impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, window));
-        impl.textureUsage = WGPUTextureUsage_Present;
-
-        return impl;
+    DawnTextureInternalUsageDescriptor internalDesc = {};
+    if (mUsageInternal) {
+        textureDescriptor.nextInChain = &internalDesc;
+        internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
+        internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
     }
 
-    WGPUTextureFormat GetNativeSwapChainPreferredFormat(
-        const DawnSwapChainImplementation* swapChain) {
-        NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
-        return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+    Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
+        mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(device, mD3D12Resource.Get());
+    if (d3d11on12Resource == nullptr) {
+        dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
+        return nullptr;
     }
 
-    ExternalImageDescriptorDXGISharedHandle::ExternalImageDescriptorDXGISharedHandle()
-        : ExternalImageDescriptor(ExternalImageType::DXGISharedHandle) {
+    Ref<TextureBase> texture = backendDevice->CreateD3D12ExternalTexture(
+        &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
+        descriptor->isSwapChainTexture, descriptor->isInitialized);
+
+    return ToAPI(texture.Detach());
+}
+
+// static
+std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
+    WGPUDevice device,
+    const ExternalImageDescriptorDXGISharedHandle* descriptor) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+
+    Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
+    if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(descriptor->sharedHandle,
+                                                                 IID_PPV_ARGS(&d3d12Resource)))) {
+        return nullptr;
     }
 
-    ExternalImageDXGI::ExternalImageDXGI(ComPtr<ID3D12Resource> d3d12Resource,
-                                         const WGPUTextureDescriptor* descriptor)
-        : mD3D12Resource(std::move(d3d12Resource)),
-          mUsage(descriptor->usage),
-          mDimension(descriptor->dimension),
-          mSize(descriptor->size),
-          mFormat(descriptor->format),
-          mMipLevelCount(descriptor->mipLevelCount),
-          mSampleCount(descriptor->sampleCount) {
-        ASSERT(!descriptor->nextInChain ||
-               descriptor->nextInChain->sType == WGPUSType_DawnTextureInternalUsageDescriptor);
-        if (descriptor->nextInChain) {
-            mUsageInternal = reinterpret_cast<const WGPUDawnTextureInternalUsageDescriptor*>(
-                                 descriptor->nextInChain)
-                                 ->internalUsage;
-        }
-        mD3D11on12ResourceCache = std::make_unique<D3D11on12ResourceCache>();
+    const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+    if (backendDevice->ConsumedError(ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
+        return nullptr;
     }
 
-    ExternalImageDXGI::~ExternalImageDXGI() = default;
+    if (backendDevice->ConsumedError(
+            ValidateTextureDescriptorCanBeWrapped(textureDescriptor),
+            "validating that a D3D12 external image can be wrapped with %s", textureDescriptor)) {
+        return nullptr;
+    }
 
-    WGPUTexture ExternalImageDXGI::ProduceTexture(
-        WGPUDevice device,
-        const ExternalImageAccessDescriptorDXGIKeyedMutex* descriptor) {
-        Device* backendDevice = ToBackend(FromAPI(device));
+    if (backendDevice->ConsumedError(
+            ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
+        return nullptr;
+    }
 
-        // Ensure the texture usage is allowed
-        if (!IsSubset(descriptor->usage, mUsage)) {
-            dawn::ErrorLog() << "Texture usage is not valid for external image";
+    // Shared handle is assumed to support resource sharing capability. The resource
+    // shared capability tier must agree to share resources between D3D devices.
+    const Format* format =
+        backendDevice->GetInternalFormat(textureDescriptor->format).AcquireSuccess();
+    if (format->IsMultiPlanar()) {
+        if (backendDevice->ConsumedError(ValidateD3D12VideoTextureCanBeShared(
+                backendDevice, D3D12TextureFormat(textureDescriptor->format)))) {
             return nullptr;
         }
-
-        TextureDescriptor textureDescriptor = {};
-        textureDescriptor.usage = static_cast<wgpu::TextureUsage>(descriptor->usage);
-        textureDescriptor.dimension = static_cast<wgpu::TextureDimension>(mDimension);
-        textureDescriptor.size = {mSize.width, mSize.height, mSize.depthOrArrayLayers};
-        textureDescriptor.format = static_cast<wgpu::TextureFormat>(mFormat);
-        textureDescriptor.mipLevelCount = mMipLevelCount;
-        textureDescriptor.sampleCount = mSampleCount;
-
-        DawnTextureInternalUsageDescriptor internalDesc = {};
-        if (mUsageInternal) {
-            textureDescriptor.nextInChain = &internalDesc;
-            internalDesc.internalUsage = static_cast<wgpu::TextureUsage>(mUsageInternal);
-            internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
-        }
-
-        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource =
-            mD3D11on12ResourceCache->GetOrCreateD3D11on12Resource(device, mD3D12Resource.Get());
-        if (d3d11on12Resource == nullptr) {
-            dawn::ErrorLog() << "Unable to create 11on12 resource for external image";
-            return nullptr;
-        }
-
-        Ref<TextureBase> texture = backendDevice->CreateD3D12ExternalTexture(
-            &textureDescriptor, mD3D12Resource, std::move(d3d11on12Resource),
-            descriptor->isSwapChainTexture, descriptor->isInitialized);
-
-        return ToAPI(texture.Detach());
     }
 
-    // static
-    std::unique_ptr<ExternalImageDXGI> ExternalImageDXGI::Create(
-        WGPUDevice device,
-        const ExternalImageDescriptorDXGISharedHandle* descriptor) {
-        Device* backendDevice = ToBackend(FromAPI(device));
+    std::unique_ptr<ExternalImageDXGI> result(
+        new ExternalImageDXGI(std::move(d3d12Resource), descriptor->cTextureDescriptor));
+    return result;
+}
 
-        Microsoft::WRL::ComPtr<ID3D12Resource> d3d12Resource;
-        if (FAILED(backendDevice->GetD3D12Device()->OpenSharedHandle(
-                descriptor->sharedHandle, IID_PPV_ARGS(&d3d12Resource)))) {
-            return nullptr;
-        }
+uint64_t SetExternalMemoryReservation(WGPUDevice device,
+                                      uint64_t requestedReservationSize,
+                                      MemorySegment memorySegment) {
+    Device* backendDevice = ToBackend(FromAPI(device));
 
-        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+    return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
+        memorySegment, requestedReservationSize);
+}
 
-        if (backendDevice->ConsumedError(
-                ValidateTextureDescriptor(backendDevice, textureDescriptor))) {
-            return nullptr;
-        }
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+    : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(nullptr) {}
 
-        if (backendDevice->ConsumedError(
-                ValidateTextureDescriptorCanBeWrapped(textureDescriptor),
-                "validating that a D3D12 external image can be wrapped with %s",
-                textureDescriptor)) {
-            return nullptr;
-        }
-
-        if (backendDevice->ConsumedError(
-                ValidateD3D12TextureCanBeWrapped(d3d12Resource.Get(), textureDescriptor))) {
-            return nullptr;
-        }
-
-        // Shared handle is assumed to support resource sharing capability. The resource
-        // shared capability tier must agree to share resources between D3D devices.
-        const Format* format =
-            backendDevice->GetInternalFormat(textureDescriptor->format).AcquireSuccess();
-        if (format->IsMultiPlanar()) {
-            if (backendDevice->ConsumedError(ValidateD3D12VideoTextureCanBeShared(
-                    backendDevice, D3D12TextureFormat(textureDescriptor->format)))) {
-                return nullptr;
-            }
-        }
-
-        std::unique_ptr<ExternalImageDXGI> result(
-            new ExternalImageDXGI(std::move(d3d12Resource), descriptor->cTextureDescriptor));
-        return result;
-    }
-
-    uint64_t SetExternalMemoryReservation(WGPUDevice device,
-                                          uint64_t requestedReservationSize,
-                                          MemorySegment memorySegment) {
-        Device* backendDevice = ToBackend(FromAPI(device));
-
-        return backendDevice->GetResidencyManager()->SetExternalMemoryReservation(
-            memorySegment, requestedReservationSize);
-    }
-
-    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
-        : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(nullptr) {
-    }
-
-    AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
-        : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {
-    }
+AdapterDiscoveryOptions::AdapterDiscoveryOptions(ComPtr<IDXGIAdapter> adapter)
+    : AdapterDiscoveryOptionsBase(WGPUBackendType_D3D12), dxgiAdapter(std::move(adapter)) {}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D12Error.cpp b/src/dawn/native/d3d12/D3D12Error.cpp
index b26aa63..0fda82b 100644
--- a/src/dawn/native/d3d12/D3D12Error.cpp
+++ b/src/dawn/native/d3d12/D3D12Error.cpp
@@ -19,68 +19,68 @@
 #include <string>
 
 namespace dawn::native::d3d12 {
-    const char* HRESULTAsString(HRESULT result) {
-        // There's a lot of possible HRESULTS, but these ones are the ones specifically listed as
-        // being returned from D3D12, in addition to fake codes used internally for testing.
-        // https://docs.microsoft.com/en-us/windows/win32/direct3d12/d3d12-graphics-reference-returnvalues
-        switch (result) {
-            case S_OK:
-                return "S_OK";
-            case S_FALSE:
-                return "S_FALSE";
+const char* HRESULTAsString(HRESULT result) {
+    // There's a lot of possible HRESULTS, but these ones are the ones specifically listed as
+    // being returned from D3D12, in addition to fake codes used internally for testing.
+    // https://docs.microsoft.com/en-us/windows/win32/direct3d12/d3d12-graphics-reference-returnvalues
+    switch (result) {
+        case S_OK:
+            return "S_OK";
+        case S_FALSE:
+            return "S_FALSE";
 
-            case E_FAIL:
-                return "E_FAIL";
-            case E_INVALIDARG:
-                return "E_INVALIDARG";
-            case E_OUTOFMEMORY:
-                return "E_OUTOFMEMORY";
-            case E_NOTIMPL:
-                return "E_NOTIMPL";
+        case E_FAIL:
+            return "E_FAIL";
+        case E_INVALIDARG:
+            return "E_INVALIDARG";
+        case E_OUTOFMEMORY:
+            return "E_OUTOFMEMORY";
+        case E_NOTIMPL:
+            return "E_NOTIMPL";
 
-            case DXGI_ERROR_INVALID_CALL:
-                return "DXGI_ERROR_INVALID_CALL";
-            case DXGI_ERROR_WAS_STILL_DRAWING:
-                return "DXGI_ERROR_WAS_STILL_DRAWING";
+        case DXGI_ERROR_INVALID_CALL:
+            return "DXGI_ERROR_INVALID_CALL";
+        case DXGI_ERROR_WAS_STILL_DRAWING:
+            return "DXGI_ERROR_WAS_STILL_DRAWING";
 
-            case D3D12_ERROR_ADAPTER_NOT_FOUND:
-                return "D3D12_ERROR_ADAPTER_NOT_FOUND";
-            case D3D12_ERROR_DRIVER_VERSION_MISMATCH:
-                return "D3D12_ERROR_DRIVER_VERSION_MISMATCH";
+        case D3D12_ERROR_ADAPTER_NOT_FOUND:
+            return "D3D12_ERROR_ADAPTER_NOT_FOUND";
+        case D3D12_ERROR_DRIVER_VERSION_MISMATCH:
+            return "D3D12_ERROR_DRIVER_VERSION_MISMATCH";
 
-            case E_FAKE_ERROR_FOR_TESTING:
-                return "E_FAKE_ERROR_FOR_TESTING";
-            case E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING:
-                return "E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING";
+        case E_FAKE_ERROR_FOR_TESTING:
+            return "E_FAKE_ERROR_FOR_TESTING";
+        case E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING:
+            return "E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING";
 
-            default:
-                return "<Unknown HRESULT>";
-        }
+        default:
+            return "<Unknown HRESULT>";
+    }
+}
+
+MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
+    if (DAWN_LIKELY(SUCCEEDED(result))) {
+        return {};
     }
 
-    MaybeError CheckHRESULTImpl(HRESULT result, const char* context) {
-        if (DAWN_LIKELY(SUCCEEDED(result))) {
-            return {};
-        }
+    std::ostringstream messageStream;
+    messageStream << context << " failed with " << HRESULTAsString(result) << " (0x"
+                  << std::uppercase << std::setfill('0') << std::setw(8) << std::hex << result
+                  << ")";
 
-        std::ostringstream messageStream;
-        messageStream << context << " failed with " << HRESULTAsString(result) << " (0x"
-                      << std::uppercase << std::setfill('0') << std::setw(8) << std::hex << result
-                      << ")";
+    if (result == DXGI_ERROR_DEVICE_REMOVED) {
+        return DAWN_DEVICE_LOST_ERROR(messageStream.str());
+    } else {
+        return DAWN_INTERNAL_ERROR(messageStream.str());
+    }
+}
 
-        if (result == DXGI_ERROR_DEVICE_REMOVED) {
-            return DAWN_DEVICE_LOST_ERROR(messageStream.str());
-        } else {
-            return DAWN_INTERNAL_ERROR(messageStream.str());
-        }
+MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
+    if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
+        return DAWN_OUT_OF_MEMORY_ERROR(context);
     }
 
-    MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context) {
-        if (result == E_OUTOFMEMORY || result == E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING) {
-            return DAWN_OUT_OF_MEMORY_ERROR(context);
-        }
-
-        return CheckHRESULTImpl(result, context);
-    }
+    return CheckHRESULTImpl(result, context);
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D12Error.h b/src/dawn/native/d3d12/D3D12Error.h
index fda0ebe..b058c7c 100644
--- a/src/dawn/native/d3d12/D3D12Error.h
+++ b/src/dawn/native/d3d12/D3D12Error.h
@@ -21,15 +21,15 @@
 
 namespace dawn::native::d3d12 {
 
-    constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
-    constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
-        MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
+constexpr HRESULT E_FAKE_ERROR_FOR_TESTING = MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFF);
+constexpr HRESULT E_FAKE_OUTOFMEMORY_ERROR_FOR_TESTING =
+    MAKE_HRESULT(SEVERITY_ERROR, FACILITY_ITF, 0xFE);
 
-    // Returns a success only if result of HResult is success
-    MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
+// Returns a success only if result of HResult is success
+MaybeError CheckHRESULTImpl(HRESULT result, const char* context);
 
-    // Uses CheckRESULT but returns OOM specific error when recoverable.
-    MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
+// Uses CheckRESULT but returns OOM specific error when recoverable.
+MaybeError CheckOutOfMemoryHRESULTImpl(HRESULT result, const char* context);
 
 #define CheckHRESULT(resultIn, contextIn)    \
     ::dawn::native::d3d12::CheckHRESULTImpl( \
diff --git a/src/dawn/native/d3d12/D3D12Info.cpp b/src/dawn/native/d3d12/D3D12Info.cpp
index 110074c..b8fc896 100644
--- a/src/dawn/native/d3d12/D3D12Info.cpp
+++ b/src/dawn/native/d3d12/D3D12Info.cpp
@@ -24,101 +24,101 @@
 
 namespace dawn::native::d3d12 {
 
-    ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
-        D3D12DeviceInfo info = {};
+ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+    D3D12DeviceInfo info = {};
 
-        // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
-        // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
-        // for backwards compat.
-        // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
-        D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
-        DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE,
-                                                                       &arch, sizeof(arch)),
-                              "ID3D12Device::CheckFeatureSupport"));
+    // Newer builds replace D3D_FEATURE_DATA_ARCHITECTURE with
+    // D3D_FEATURE_DATA_ARCHITECTURE1. However, D3D_FEATURE_DATA_ARCHITECTURE can be used
+    // for backwards compat.
+    // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ne-d3d12-d3d12_feature
+    D3D12_FEATURE_DATA_ARCHITECTURE arch = {};
+    DAWN_TRY(CheckHRESULT(
+        adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_ARCHITECTURE, &arch, sizeof(arch)),
+        "ID3D12Device::CheckFeatureSupport"));
 
-        info.isUMA = arch.UMA;
+    info.isUMA = arch.UMA;
 
-        D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
-        DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
-                                                                       &options, sizeof(options)),
-                              "ID3D12Device::CheckFeatureSupport"));
+    D3D12_FEATURE_DATA_D3D12_OPTIONS options = {};
+    DAWN_TRY(CheckHRESULT(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS,
+                                                                   &options, sizeof(options)),
+                          "ID3D12Device::CheckFeatureSupport"));
 
-        info.resourceHeapTier = options.ResourceHeapTier;
+    info.resourceHeapTier = options.ResourceHeapTier;
 
-        // Windows builds 1809 and above can use the D3D12 render pass API. If we query
-        // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
-        // the render pass API.
-        info.supportsRenderPass = false;
-        D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
-        if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
-                D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
-            // Performance regressions been observed when using a render pass on Intel graphics
-            // with RENDER_PASS_TIER_1 available, so fall back to a software emulated render
-            // pass on these platforms.
-            if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
-                !gpu_info::IsIntel(adapter.GetVendorId())) {
-                info.supportsRenderPass = true;
-            }
+    // Windows builds 1809 and above can use the D3D12 render pass API. If we query
+    // CheckFeatureSupport for D3D12_FEATURE_D3D12_OPTIONS5 successfully, then we can use
+    // the render pass API.
+    info.supportsRenderPass = false;
+    D3D12_FEATURE_DATA_D3D12_OPTIONS5 featureOptions5 = {};
+    if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+            D3D12_FEATURE_D3D12_OPTIONS5, &featureOptions5, sizeof(featureOptions5)))) {
+        // Performance regressions been observed when using a render pass on Intel graphics
+        // with RENDER_PASS_TIER_1 available, so fall back to a software emulated render
+        // pass on these platforms.
+        if (featureOptions5.RenderPassesTier < D3D12_RENDER_PASS_TIER_1 ||
+            !gpu_info::IsIntel(adapter.GetVendorId())) {
+            info.supportsRenderPass = true;
         }
-
-        // Used to share resources cross-API. If we query CheckFeatureSupport for
-        // D3D12_FEATURE_D3D12_OPTIONS4 successfully, then we can use cross-API sharing.
-        info.supportsSharedResourceCapabilityTier1 = false;
-        D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureOptions4 = {};
-        if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
-                D3D12_FEATURE_D3D12_OPTIONS4, &featureOptions4, sizeof(featureOptions4)))) {
-            // Tier 1 support additionally enables the NV12 format. Since only the NV12 format
-            // is used by Dawn, check for Tier 1.
-            if (featureOptions4.SharedResourceCompatibilityTier >=
-                D3D12_SHARED_RESOURCE_COMPATIBILITY_TIER_1) {
-                info.supportsSharedResourceCapabilityTier1 = true;
-            }
-        }
-
-        D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {{D3D_SHADER_MODEL_6_2},
-                                                               {D3D_SHADER_MODEL_6_1},
-                                                               {D3D_SHADER_MODEL_6_0},
-                                                               {D3D_SHADER_MODEL_5_1}};
-        uint32_t driverShaderModel = 0;
-        for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
-            if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
-                    D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
-                driverShaderModel = shaderModel.HighestShaderModel;
-                break;
-            }
-        }
-
-        if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
-            return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
-        }
-
-        // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
-        ASSERT(driverShaderModel <= 0xFF);
-        uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
-        uint32_t shaderModelMinor = (driverShaderModel & 0xF);
-
-        ASSERT(shaderModelMajor < 10);
-        ASSERT(shaderModelMinor < 10);
-        info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
-
-        // Profiles are always <stage>s_<minor>_<major> so we build the s_<minor>_major and add
-        // it to each of the stage's suffix.
-        std::wstring profileSuffix = L"s_M_n";
-        profileSuffix[2] = wchar_t('0' + shaderModelMajor);
-        profileSuffix[4] = wchar_t('0' + shaderModelMinor);
-
-        info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
-        info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
-        info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
-
-        D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
-        if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
-                D3D12_FEATURE_D3D12_OPTIONS4, &featureData4, sizeof(featureData4)))) {
-            info.supportsShaderFloat16 = driverShaderModel >= D3D_SHADER_MODEL_6_2 &&
-                                         featureData4.Native16BitShaderOpsSupported;
-        }
-
-        return std::move(info);
     }
 
+    // Used to share resources cross-API. If we query CheckFeatureSupport for
+    // D3D12_FEATURE_D3D12_OPTIONS4 successfully, then we can use cross-API sharing.
+    info.supportsSharedResourceCapabilityTier1 = false;
+    D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureOptions4 = {};
+    if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+            D3D12_FEATURE_D3D12_OPTIONS4, &featureOptions4, sizeof(featureOptions4)))) {
+        // Tier 1 support additionally enables the NV12 format. Since only the NV12 format
+        // is used by Dawn, check for Tier 1.
+        if (featureOptions4.SharedResourceCompatibilityTier >=
+            D3D12_SHARED_RESOURCE_COMPATIBILITY_TIER_1) {
+            info.supportsSharedResourceCapabilityTier1 = true;
+        }
+    }
+
+    D3D12_FEATURE_DATA_SHADER_MODEL knownShaderModels[] = {{D3D_SHADER_MODEL_6_2},
+                                                           {D3D_SHADER_MODEL_6_1},
+                                                           {D3D_SHADER_MODEL_6_0},
+                                                           {D3D_SHADER_MODEL_5_1}};
+    uint32_t driverShaderModel = 0;
+    for (D3D12_FEATURE_DATA_SHADER_MODEL shaderModel : knownShaderModels) {
+        if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(
+                D3D12_FEATURE_SHADER_MODEL, &shaderModel, sizeof(shaderModel)))) {
+            driverShaderModel = shaderModel.HighestShaderModel;
+            break;
+        }
+    }
+
+    if (driverShaderModel < D3D_SHADER_MODEL_5_1) {
+        return DAWN_INTERNAL_ERROR("Driver doesn't support Shader Model 5.1 or higher");
+    }
+
+    // D3D_SHADER_MODEL is encoded as 0xMm with M the major version and m the minor version
+    ASSERT(driverShaderModel <= 0xFF);
+    uint32_t shaderModelMajor = (driverShaderModel & 0xF0) >> 4;
+    uint32_t shaderModelMinor = (driverShaderModel & 0xF);
+
+    ASSERT(shaderModelMajor < 10);
+    ASSERT(shaderModelMinor < 10);
+    info.shaderModel = 10 * shaderModelMajor + shaderModelMinor;
+
+    // Profiles are always <stage>s_<minor>_<major> so we build the s_<minor>_major and add
+    // it to each of the stage's suffix.
+    std::wstring profileSuffix = L"s_M_n";
+    profileSuffix[2] = wchar_t('0' + shaderModelMajor);
+    profileSuffix[4] = wchar_t('0' + shaderModelMinor);
+
+    info.shaderProfiles[SingleShaderStage::Vertex] = L"v" + profileSuffix;
+    info.shaderProfiles[SingleShaderStage::Fragment] = L"p" + profileSuffix;
+    info.shaderProfiles[SingleShaderStage::Compute] = L"c" + profileSuffix;
+
+    D3D12_FEATURE_DATA_D3D12_OPTIONS4 featureData4 = {};
+    if (SUCCEEDED(adapter.GetDevice()->CheckFeatureSupport(D3D12_FEATURE_D3D12_OPTIONS4,
+                                                           &featureData4, sizeof(featureData4)))) {
+        info.supportsShaderFloat16 =
+            driverShaderModel >= D3D_SHADER_MODEL_6_2 && featureData4.Native16BitShaderOpsSupported;
+    }
+
+    return std::move(info);
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/D3D12Info.h b/src/dawn/native/d3d12/D3D12Info.h
index d38c485..c0ffc47 100644
--- a/src/dawn/native/d3d12/D3D12Info.h
+++ b/src/dawn/native/d3d12/D3D12Info.h
@@ -21,21 +21,21 @@
 
 namespace dawn::native::d3d12 {
 
-    class Adapter;
+class Adapter;
 
-    struct D3D12DeviceInfo {
-        bool isUMA;
-        uint32_t resourceHeapTier;
-        bool supportsRenderPass;
-        bool supportsShaderFloat16;
-        // shaderModel indicates the maximum supported shader model, for example, the value 62
-        // indicates that current driver supports the maximum shader model is shader model 6.2.
-        uint32_t shaderModel;
-        PerStage<std::wstring> shaderProfiles;
-        bool supportsSharedResourceCapabilityTier1;
-    };
+struct D3D12DeviceInfo {
+    bool isUMA;
+    uint32_t resourceHeapTier;
+    bool supportsRenderPass;
+    bool supportsShaderFloat16;
+    // shaderModel indicates the maximum supported shader model, for example, the value 62
+    // indicates that current driver supports the maximum shader model is shader model 6.2.
+    uint32_t shaderModel;
+    PerStage<std::wstring> shaderProfiles;
+    bool supportsSharedResourceCapabilityTier1;
+};
 
-    ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+ResultOrError<D3D12DeviceInfo> GatherDeviceInfo(const Adapter& adapter);
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_D3D12INFO_H_
diff --git a/src/dawn/native/d3d12/DeviceD3D12.cpp b/src/dawn/native/d3d12/DeviceD3D12.cpp
index f844286..30f7066 100644
--- a/src/dawn/native/d3d12/DeviceD3D12.cpp
+++ b/src/dawn/native/d3d12/DeviceD3D12.cpp
@@ -51,757 +51,749 @@
 
 namespace dawn::native::d3d12 {
 
-    // TODO(dawn:155): Figure out these values.
-    static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
-    static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
+// TODO(dawn:155): Figure out these values.
+static constexpr uint16_t kShaderVisibleDescriptorHeapSize = 1024;
+static constexpr uint8_t kAttachmentDescriptorHeapSize = 64;
 
-    // Value may change in the future to better accomodate large clears.
-    static constexpr uint64_t kZeroBufferSize = 1024 * 1024 * 4;  // 4 Mb
+// Value may change in the future to better accomodate large clears.
+static constexpr uint64_t kZeroBufferSize = 1024 * 1024 * 4;  // 4 Mb
 
-    static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
+static constexpr uint64_t kMaxDebugMessagesToPrint = 5;
 
-    // static
-    ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
-                                              const DeviceDescriptor* descriptor) {
-        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
-        DAWN_TRY(device->Initialize(descriptor));
-        return device;
+// static
+ResultOrError<Ref<Device>> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
+    Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+    DAWN_TRY(device->Initialize(descriptor));
+    return device;
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+    InitTogglesFromDriver();
+
+    mD3d12Device = ToBackend(GetAdapter())->GetDevice();
+
+    ASSERT(mD3d12Device != nullptr);
+
+    // Create device-global objects
+    D3D12_COMMAND_QUEUE_DESC queueDesc = {};
+    queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
+    queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
+    DAWN_TRY(
+        CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
+                     "D3D12 create command queue"));
+
+    if (IsFeatureEnabled(Feature::TimestampQuery) &&
+        !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+        // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
+        // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
+        // always support timestamps except where there are bugs in Windows container and vGPU
+        // implementations.
+        uint64_t frequency;
+        DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
+                              "D3D12 get timestamp frequency"));
+        // Calculate the period in nanoseconds by the frequency.
+        mTimestampPeriod = static_cast<float>(1e9) / frequency;
     }
 
-    MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
-        InitTogglesFromDriver();
+    // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
+    // value.
+    mCommandQueue.As(&mD3d12SharingContract);
 
-        mD3d12Device = ToBackend(GetAdapter())->GetDevice();
+    DAWN_TRY(CheckHRESULT(mD3d12Device->CreateFence(uint64_t(GetLastSubmittedCommandSerial()),
+                                                    D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence)),
+                          "D3D12 create fence"));
 
-        ASSERT(mD3d12Device != nullptr);
+    mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+    ASSERT(mFenceEvent != nullptr);
 
-        // Create device-global objects
-        D3D12_COMMAND_QUEUE_DESC queueDesc = {};
-        queueDesc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
-        queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
-        DAWN_TRY(
-            CheckHRESULT(mD3d12Device->CreateCommandQueue(&queueDesc, IID_PPV_ARGS(&mCommandQueue)),
-                         "D3D12 create command queue"));
+    // Initialize backend services
+    mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
 
-        if (IsFeatureEnabled(Feature::TimestampQuery) &&
-            !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
-            // Get GPU timestamp counter frequency (in ticks/second). This fails if the specified
-            // command queue doesn't support timestamps. D3D12_COMMAND_LIST_TYPE_DIRECT queues
-            // always support timestamps except where there are bugs in Windows container and vGPU
-            // implementations.
-            uint64_t frequency;
-            DAWN_TRY(CheckHRESULT(mCommandQueue->GetTimestampFrequency(&frequency),
-                                  "D3D12 get timestamp frequency"));
-            // Calculate the period in nanoseconds by the frequency.
-            mTimestampPeriod = static_cast<float>(1e9) / frequency;
-        }
-
-        // If PIX is not attached, the QueryInterface fails. Hence, no need to check the return
-        // value.
-        mCommandQueue.As(&mD3d12SharingContract);
-
-        DAWN_TRY(
-            CheckHRESULT(mD3d12Device->CreateFence(uint64_t(GetLastSubmittedCommandSerial()),
-                                                   D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&mFence)),
-                         "D3D12 create fence"));
-
-        mFenceEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
-        ASSERT(mFenceEvent != nullptr);
-
-        // Initialize backend services
-        mCommandAllocatorManager = std::make_unique<CommandAllocatorManager>(this);
-
-        // Zero sized allocator is never requested and does not need to exist.
-        for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
-            mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
-                this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
-                D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
-        }
-
-        for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
-            mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
-                this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
-                D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
-        }
-
-        mRenderTargetViewAllocator = std::make_unique<StagingDescriptorAllocator>(
-            this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
-
-        mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
-            this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
-
-        mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
-
-        mResidencyManager = std::make_unique<ResidencyManager>(this);
-        mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
-
-        // ShaderVisibleDescriptorAllocators use the ResidencyManager and must be initialized after.
-        DAWN_TRY_ASSIGN(
-            mSamplerShaderVisibleDescriptorAllocator,
-            ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
-
-        DAWN_TRY_ASSIGN(
-            mViewShaderVisibleDescriptorAllocator,
-            ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
-
-        // Initialize indirect commands
-        D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
-        argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
-
-        D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
-        programDesc.ByteStride = 3 * sizeof(uint32_t);
-        programDesc.NumArgumentDescs = 1;
-        programDesc.pArgumentDescs = &argumentDesc;
-
-        GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
-                                                 IID_PPV_ARGS(&mDispatchIndirectSignature));
-
-        argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
-        programDesc.ByteStride = 4 * sizeof(uint32_t);
-
-        GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
-                                                 IID_PPV_ARGS(&mDrawIndirectSignature));
-
-        argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
-        programDesc.ByteStride = 5 * sizeof(uint32_t);
-
-        GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
-                                                 IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
-
-        DAWN_TRY(DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue)));
-        // Device shouldn't be used until after DeviceBase::Initialize so we must wait until after
-        // device initialization to call NextSerial
-        DAWN_TRY(NextSerial());
-
-        // The environment can only use DXC when it's available. Override the decision if it is not
-        // applicable.
-        DAWN_TRY(ApplyUseDxcToggle());
-
-        DAWN_TRY(CreateZeroBuffer());
-
-        SetLabelImpl();
-
-        return {};
+    // Zero sized allocator is never requested and does not need to exist.
+    for (uint32_t countIndex = 0; countIndex < kNumViewDescriptorAllocators; countIndex++) {
+        mViewAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+            this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+            D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
     }
 
-    Device::~Device() {
-        Destroy();
+    for (uint32_t countIndex = 0; countIndex < kNumSamplerDescriptorAllocators; countIndex++) {
+        mSamplerAllocators[countIndex + 1] = std::make_unique<StagingDescriptorAllocator>(
+            this, 1u << countIndex, kShaderVisibleDescriptorHeapSize,
+            D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
     }
 
-    ID3D12Device* Device::GetD3D12Device() const {
-        return mD3d12Device.Get();
+    mRenderTargetViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+        this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
+
+    mDepthStencilViewAllocator = std::make_unique<StagingDescriptorAllocator>(
+        this, 1, kAttachmentDescriptorHeapSize, D3D12_DESCRIPTOR_HEAP_TYPE_DSV);
+
+    mSamplerHeapCache = std::make_unique<SamplerHeapCache>(this);
+
+    mResidencyManager = std::make_unique<ResidencyManager>(this);
+    mResourceAllocatorManager = std::make_unique<ResourceAllocatorManager>(this);
+
+    // ShaderVisibleDescriptorAllocators use the ResidencyManager and must be initialized after.
+    DAWN_TRY_ASSIGN(
+        mSamplerShaderVisibleDescriptorAllocator,
+        ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER));
+
+    DAWN_TRY_ASSIGN(
+        mViewShaderVisibleDescriptorAllocator,
+        ShaderVisibleDescriptorAllocator::Create(this, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV));
+
+    // Initialize indirect commands
+    D3D12_INDIRECT_ARGUMENT_DESC argumentDesc = {};
+    argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
+
+    D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+    programDesc.ByteStride = 3 * sizeof(uint32_t);
+    programDesc.NumArgumentDescs = 1;
+    programDesc.pArgumentDescs = &argumentDesc;
+
+    GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+                                             IID_PPV_ARGS(&mDispatchIndirectSignature));
+
+    argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
+    programDesc.ByteStride = 4 * sizeof(uint32_t);
+
+    GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+                                             IID_PPV_ARGS(&mDrawIndirectSignature));
+
+    argumentDesc.Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
+    programDesc.ByteStride = 5 * sizeof(uint32_t);
+
+    GetD3D12Device()->CreateCommandSignature(&programDesc, NULL,
+                                             IID_PPV_ARGS(&mDrawIndexedIndirectSignature));
+
+    DAWN_TRY(DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue)));
+    // Device shouldn't be used until after DeviceBase::Initialize so we must wait until after
+    // device initialization to call NextSerial
+    DAWN_TRY(NextSerial());
+
+    // The environment can only use DXC when it's available. Override the decision if it is not
+    // applicable.
+    DAWN_TRY(ApplyUseDxcToggle());
+
+    DAWN_TRY(CreateZeroBuffer());
+
+    SetLabelImpl();
+
+    return {};
+}
+
+Device::~Device() {
+    Destroy();
+}
+
+ID3D12Device* Device::GetD3D12Device() const {
+    return mD3d12Device.Get();
+}
+
+ComPtr<ID3D12CommandQueue> Device::GetCommandQueue() const {
+    return mCommandQueue;
+}
+
+ID3D12SharingContract* Device::GetSharingContract() const {
+    return mD3d12SharingContract.Get();
+}
+
+ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
+    return mDispatchIndirectSignature;
+}
+
+ComPtr<ID3D12CommandSignature> Device::GetDrawIndirectSignature() const {
+    return mDrawIndirectSignature;
+}
+
+ComPtr<ID3D12CommandSignature> Device::GetDrawIndexedIndirectSignature() const {
+    return mDrawIndexedIndirectSignature;
+}
+
+ComPtr<IDXGIFactory4> Device::GetFactory() const {
+    return ToBackend(GetAdapter())->GetBackend()->GetFactory();
+}
+
+MaybeError Device::ApplyUseDxcToggle() {
+    if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
+        ForceSetToggle(Toggle::UseDXC, false);
+    } else if (IsFeatureEnabled(Feature::ShaderFloat16)) {
+        // Currently we can only use DXC to compile HLSL shaders using float16.
+        ForceSetToggle(Toggle::UseDXC, true);
     }
 
-    ComPtr<ID3D12CommandQueue> Device::GetCommandQueue() const {
-        return mCommandQueue;
+    if (IsToggleEnabled(Toggle::UseDXC)) {
+        DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
+        DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
+        DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
     }
 
-    ID3D12SharingContract* Device::GetSharingContract() const {
-        return mD3d12SharingContract.Get();
+    return {};
+}
+
+ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
+    return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
+}
+
+ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
+    return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
+}
+
+ComPtr<IDxcValidator> Device::GetDxcValidator() const {
+    return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
+}
+
+const PlatformFunctions* Device::GetFunctions() const {
+    return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
+}
+
+CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
+    return mCommandAllocatorManager.get();
+}
+
+ResidencyManager* Device::GetResidencyManager() const {
+    return mResidencyManager.get();
+}
+
+ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
+    // Callers of GetPendingCommandList do so to record commands. Only reserve a command
+    // allocator when it is needed so we don't submit empty command lists
+    if (!mPendingCommands.IsOpen()) {
+        DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
+    }
+    return &mPendingCommands;
+}
+
+MaybeError Device::CreateZeroBuffer() {
+    BufferDescriptor zeroBufferDescriptor;
+    zeroBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    zeroBufferDescriptor.size = kZeroBufferSize;
+    zeroBufferDescriptor.label = "ZeroBuffer_Internal";
+    DAWN_TRY_ASSIGN(mZeroBuffer, Buffer::Create(this, &zeroBufferDescriptor));
+
+    return {};
+}
+
+MaybeError Device::ClearBufferToZero(CommandRecordingContext* commandContext,
+                                     BufferBase* destination,
+                                     uint64_t offset,
+                                     uint64_t size) {
+    // TODO(crbug.com/dawn/852): It would be ideal to clear the buffer in CreateZeroBuffer, but
+    // the allocation of the staging buffer causes various end2end tests that monitor heap usage
+    // to fail if it's done during device creation. Perhaps ClearUnorderedAccessView*() can be
+    // used to avoid that.
+    if (!mZeroBuffer->IsDataInitialized()) {
+        DynamicUploader* uploader = GetDynamicUploader();
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(uploadHandle, uploader->Allocate(kZeroBufferSize, GetPendingCommandSerial(),
+                                                         kCopyBufferToBufferOffsetAlignment));
+
+        memset(uploadHandle.mappedBuffer, 0u, kZeroBufferSize);
+
+        CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
+                                    uploadHandle.startOffset, mZeroBuffer.Get(), 0,
+                                    kZeroBufferSize);
+
+        mZeroBuffer->SetIsDataInitialized();
     }
 
-    ComPtr<ID3D12CommandSignature> Device::GetDispatchIndirectSignature() const {
-        return mDispatchIndirectSignature;
-    }
+    Buffer* dstBuffer = ToBackend(destination);
 
-    ComPtr<ID3D12CommandSignature> Device::GetDrawIndirectSignature() const {
-        return mDrawIndirectSignature;
-    }
+    // Necessary to ensure residency of the zero buffer.
+    mZeroBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
+    dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
 
-    ComPtr<ID3D12CommandSignature> Device::GetDrawIndexedIndirectSignature() const {
-        return mDrawIndexedIndirectSignature;
-    }
-
-    ComPtr<IDXGIFactory4> Device::GetFactory() const {
-        return ToBackend(GetAdapter())->GetBackend()->GetFactory();
-    }
-
-    MaybeError Device::ApplyUseDxcToggle() {
-        if (!ToBackend(GetAdapter())->GetBackend()->GetFunctions()->IsDXCAvailable()) {
-            ForceSetToggle(Toggle::UseDXC, false);
-        } else if (IsFeatureEnabled(Feature::ShaderFloat16)) {
-            // Currently we can only use DXC to compile HLSL shaders using float16.
-            ForceSetToggle(Toggle::UseDXC, true);
-        }
-
-        if (IsToggleEnabled(Toggle::UseDXC)) {
-            DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcCompiler());
-            DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcLibrary());
-            DAWN_TRY(ToBackend(GetAdapter())->GetBackend()->EnsureDxcValidator());
-        }
-
-        return {};
-    }
-
-    ComPtr<IDxcLibrary> Device::GetDxcLibrary() const {
-        return ToBackend(GetAdapter())->GetBackend()->GetDxcLibrary();
-    }
-
-    ComPtr<IDxcCompiler> Device::GetDxcCompiler() const {
-        return ToBackend(GetAdapter())->GetBackend()->GetDxcCompiler();
-    }
-
-    ComPtr<IDxcValidator> Device::GetDxcValidator() const {
-        return ToBackend(GetAdapter())->GetBackend()->GetDxcValidator();
-    }
-
-    const PlatformFunctions* Device::GetFunctions() const {
-        return ToBackend(GetAdapter())->GetBackend()->GetFunctions();
-    }
-
-    CommandAllocatorManager* Device::GetCommandAllocatorManager() const {
-        return mCommandAllocatorManager.get();
-    }
-
-    ResidencyManager* Device::GetResidencyManager() const {
-        return mResidencyManager.get();
-    }
-
-    ResultOrError<CommandRecordingContext*> Device::GetPendingCommandContext() {
-        // Callers of GetPendingCommandList do so to record commands. Only reserve a command
-        // allocator when it is needed so we don't submit empty command lists
-        if (!mPendingCommands.IsOpen()) {
-            DAWN_TRY(mPendingCommands.Open(mD3d12Device.Get(), mCommandAllocatorManager.get()));
-        }
-        return &mPendingCommands;
-    }
-
-    MaybeError Device::CreateZeroBuffer() {
-        BufferDescriptor zeroBufferDescriptor;
-        zeroBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
-        zeroBufferDescriptor.size = kZeroBufferSize;
-        zeroBufferDescriptor.label = "ZeroBuffer_Internal";
-        DAWN_TRY_ASSIGN(mZeroBuffer, Buffer::Create(this, &zeroBufferDescriptor));
-
-        return {};
-    }
-
-    MaybeError Device::ClearBufferToZero(CommandRecordingContext* commandContext,
-                                         BufferBase* destination,
-                                         uint64_t offset,
-                                         uint64_t size) {
-        // TODO(crbug.com/dawn/852): It would be ideal to clear the buffer in CreateZeroBuffer, but
-        // the allocation of the staging buffer causes various end2end tests that monitor heap usage
-        // to fail if it's done during device creation. Perhaps ClearUnorderedAccessView*() can be
-        // used to avoid that.
-        if (!mZeroBuffer->IsDataInitialized()) {
-            DynamicUploader* uploader = GetDynamicUploader();
-            UploadHandle uploadHandle;
-            DAWN_TRY_ASSIGN(uploadHandle,
-                            uploader->Allocate(kZeroBufferSize, GetPendingCommandSerial(),
-                                               kCopyBufferToBufferOffsetAlignment));
-
-            memset(uploadHandle.mappedBuffer, 0u, kZeroBufferSize);
-
-            CopyFromStagingToBufferImpl(commandContext, uploadHandle.stagingBuffer,
-                                        uploadHandle.startOffset, mZeroBuffer.Get(), 0,
-                                        kZeroBufferSize);
-
-            mZeroBuffer->SetIsDataInitialized();
-        }
-
-        Buffer* dstBuffer = ToBackend(destination);
-
-        // Necessary to ensure residency of the zero buffer.
-        mZeroBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopySrc);
-        dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
-
-        while (size > 0) {
-            uint64_t copySize = std::min(kZeroBufferSize, size);
-            commandContext->GetCommandList()->CopyBufferRegion(
-                dstBuffer->GetD3D12Resource(), offset, mZeroBuffer->GetD3D12Resource(), 0,
-                copySize);
-
-            offset += copySize;
-            size -= copySize;
-        }
-
-        return {};
-    }
-
-    MaybeError Device::TickImpl() {
-        // Perform cleanup operations to free unused objects
-        ExecutionSerial completedSerial = GetCompletedCommandSerial();
-
-        mResourceAllocatorManager->Tick(completedSerial);
-        DAWN_TRY(mCommandAllocatorManager->Tick(completedSerial));
-        mViewShaderVisibleDescriptorAllocator->Tick(completedSerial);
-        mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
-        mRenderTargetViewAllocator->Tick(completedSerial);
-        mDepthStencilViewAllocator->Tick(completedSerial);
-        mUsedComObjectRefs.ClearUpTo(completedSerial);
-
-        if (mPendingCommands.IsOpen()) {
-            DAWN_TRY(ExecutePendingCommandContext());
-            DAWN_TRY(NextSerial());
-        }
-
-        DAWN_TRY(CheckDebugLayerAndGenerateErrors());
-
-        return {};
-    }
-
-    MaybeError Device::NextSerial() {
-        IncrementLastSubmittedCommandSerial();
-
-        TRACE_EVENT1(GetPlatform(), General, "D3D12Device::SignalFence", "serial",
-                     uint64_t(GetLastSubmittedCommandSerial()));
-
-        return CheckHRESULT(
-            mCommandQueue->Signal(mFence.Get(), uint64_t(GetLastSubmittedCommandSerial())),
-            "D3D12 command queue signal fence");
-    }
-
-    MaybeError Device::WaitForSerial(ExecutionSerial serial) {
-        DAWN_TRY(CheckPassedSerials());
-        if (GetCompletedCommandSerial() < serial) {
-            DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
-                                  "D3D12 set event on completion"));
-            WaitForSingleObject(mFenceEvent, INFINITE);
-            DAWN_TRY(CheckPassedSerials());
-        }
-        return {};
-    }
-
-    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
-        ExecutionSerial completedSerial = ExecutionSerial(mFence->GetCompletedValue());
-        if (DAWN_UNLIKELY(completedSerial == ExecutionSerial(UINT64_MAX))) {
-            // GetCompletedValue returns UINT64_MAX if the device was removed.
-            // Try to query the failure reason.
-            DAWN_TRY(CheckHRESULT(mD3d12Device->GetDeviceRemovedReason(),
-                                  "ID3D12Device::GetDeviceRemovedReason"));
-            // Otherwise, return a generic device lost error.
-            return DAWN_DEVICE_LOST_ERROR("Device lost");
-        }
-
-        if (completedSerial <= GetCompletedCommandSerial()) {
-            return ExecutionSerial(0);
-        }
-
-        return completedSerial;
-    }
-
-    void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
-        mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
-    }
-
-    MaybeError Device::ExecutePendingCommandContext() {
-        return mPendingCommands.ExecuteCommandList(this);
-    }
-
-    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
-        const BindGroupDescriptor* descriptor) {
-        return BindGroup::Create(this, descriptor);
-    }
-    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
-    }
-    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
-        return Buffer::Create(this, descriptor);
-    }
-    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
-        CommandEncoder* encoder,
-        const CommandBufferDescriptor* descriptor) {
-        return CommandBuffer::Create(encoder, descriptor);
-    }
-    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
-        const ComputePipelineDescriptor* descriptor) {
-        return ComputePipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
-        const PipelineLayoutDescriptor* descriptor) {
-        return PipelineLayout::Create(this, descriptor);
-    }
-    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
-        const QuerySetDescriptor* descriptor) {
-        return QuerySet::Create(this, descriptor);
-    }
-    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
-        const RenderPipelineDescriptor* descriptor) {
-        return RenderPipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
-        return Sampler::Create(this, descriptor);
-    }
-    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
-        const ShaderModuleDescriptor* descriptor,
-        ShaderModuleParseResult* parseResult) {
-        return ShaderModule::Create(this, descriptor, parseResult);
-    }
-    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
-        const SwapChainDescriptor* descriptor) {
-        return OldSwapChain::Create(this, descriptor);
-    }
-    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
-        Surface* surface,
-        NewSwapChainBase* previousSwapChain,
-        const SwapChainDescriptor* descriptor) {
-        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
-    }
-    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
-        return Texture::Create(this, descriptor);
-    }
-    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
-        TextureBase* texture,
-        const TextureViewDescriptor* descriptor) {
-        return TextureView::Create(texture, descriptor);
-    }
-    void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
-                                                    WGPUCreateComputePipelineAsyncCallback callback,
-                                                    void* userdata) {
-        ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
-    }
-    void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
-                                                   WGPUCreateRenderPipelineAsyncCallback callback,
-                                                   void* userdata) {
-        RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
-    }
-
-    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
-        std::unique_ptr<StagingBufferBase> stagingBuffer =
-            std::make_unique<StagingBuffer>(size, this);
-        DAWN_TRY(stagingBuffer->Initialize());
-        return std::move(stagingBuffer);
-    }
-
-    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
-                                               uint64_t sourceOffset,
-                                               BufferBase* destination,
-                                               uint64_t destinationOffset,
-                                               uint64_t size) {
-        CommandRecordingContext* commandRecordingContext;
-        DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
-
-        Buffer* dstBuffer = ToBackend(destination);
-
-        bool cleared;
-        DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
-                                     commandRecordingContext, destinationOffset, size));
-        DAWN_UNUSED(cleared);
-
-        CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
-                                    destinationOffset, size);
-
-        return {};
-    }
-
-    void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
-                                             StagingBufferBase* source,
-                                             uint64_t sourceOffset,
-                                             BufferBase* destination,
-                                             uint64_t destinationOffset,
-                                             uint64_t size) {
-        ASSERT(commandContext != nullptr);
-        Buffer* dstBuffer = ToBackend(destination);
-        StagingBuffer* srcBuffer = ToBackend(source);
-        dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
-
+    while (size > 0) {
+        uint64_t copySize = std::min(kZeroBufferSize, size);
         commandContext->GetCommandList()->CopyBufferRegion(
-            dstBuffer->GetD3D12Resource(), destinationOffset, srcBuffer->GetResource(),
-            sourceOffset, size);
+            dstBuffer->GetD3D12Resource(), offset, mZeroBuffer->GetD3D12Resource(), 0, copySize);
+
+        offset += copySize;
+        size -= copySize;
     }
 
-    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
-                                                const TextureDataLayout& src,
-                                                TextureCopy* dst,
-                                                const Extent3D& copySizePixels) {
-        CommandRecordingContext* commandContext;
-        DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
-        Texture* texture = ToBackend(dst->texture.Get());
+    return {};
+}
 
-        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+MaybeError Device::TickImpl() {
+    // Perform cleanup operations to free unused objects
+    ExecutionSerial completedSerial = GetCompletedCommandSerial();
 
-        if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
-            texture->SetIsSubresourceContentInitialized(true, range);
-        } else {
-            texture->EnsureSubresourceContentInitialized(commandContext, range);
-        }
+    mResourceAllocatorManager->Tick(completedSerial);
+    DAWN_TRY(mCommandAllocatorManager->Tick(completedSerial));
+    mViewShaderVisibleDescriptorAllocator->Tick(completedSerial);
+    mSamplerShaderVisibleDescriptorAllocator->Tick(completedSerial);
+    mRenderTargetViewAllocator->Tick(completedSerial);
+    mDepthStencilViewAllocator->Tick(completedSerial);
+    mUsedComObjectRefs.ClearUpTo(completedSerial);
 
-        texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
-
-        RecordBufferTextureCopyWithBufferHandle(
-            BufferTextureCopyDirection::B2T, commandContext->GetCommandList(),
-            ToBackend(source)->GetResource(), src.offset, src.bytesPerRow, src.rowsPerImage, *dst,
-            copySizePixels);
-
-        return {};
+    if (mPendingCommands.IsOpen()) {
+        DAWN_TRY(ExecutePendingCommandContext());
+        DAWN_TRY(NextSerial());
     }
 
-    void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
-        mResourceAllocatorManager->DeallocateMemory(allocation);
+    DAWN_TRY(CheckDebugLayerAndGenerateErrors());
+
+    return {};
+}
+
+MaybeError Device::NextSerial() {
+    IncrementLastSubmittedCommandSerial();
+
+    TRACE_EVENT1(GetPlatform(), General, "D3D12Device::SignalFence", "serial",
+                 uint64_t(GetLastSubmittedCommandSerial()));
+
+    return CheckHRESULT(
+        mCommandQueue->Signal(mFence.Get(), uint64_t(GetLastSubmittedCommandSerial())),
+        "D3D12 command queue signal fence");
+}
+
+MaybeError Device::WaitForSerial(ExecutionSerial serial) {
+    DAWN_TRY(CheckPassedSerials());
+    if (GetCompletedCommandSerial() < serial) {
+        DAWN_TRY(CheckHRESULT(mFence->SetEventOnCompletion(uint64_t(serial), mFenceEvent),
+                              "D3D12 set event on completion"));
+        WaitForSingleObject(mFenceEvent, INFINITE);
+        DAWN_TRY(CheckPassedSerials());
+    }
+    return {};
+}
+
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+    ExecutionSerial completedSerial = ExecutionSerial(mFence->GetCompletedValue());
+    if (DAWN_UNLIKELY(completedSerial == ExecutionSerial(UINT64_MAX))) {
+        // GetCompletedValue returns UINT64_MAX if the device was removed.
+        // Try to query the failure reason.
+        DAWN_TRY(CheckHRESULT(mD3d12Device->GetDeviceRemovedReason(),
+                              "ID3D12Device::GetDeviceRemovedReason"));
+        // Otherwise, return a generic device lost error.
+        return DAWN_DEVICE_LOST_ERROR("Device lost");
     }
 
-    ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
-        D3D12_HEAP_TYPE heapType,
-        const D3D12_RESOURCE_DESC& resourceDescriptor,
-        D3D12_RESOURCE_STATES initialUsage) {
-        return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor,
-                                                         initialUsage);
+    if (completedSerial <= GetCompletedCommandSerial()) {
+        return ExecutionSerial(0);
     }
 
-    Ref<TextureBase> Device::CreateD3D12ExternalTexture(
-        const TextureDescriptor* descriptor,
-        ComPtr<ID3D12Resource> d3d12Texture,
-        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
-        bool isSwapChainTexture,
-        bool isInitialized) {
-        Ref<Texture> dawnTexture;
-        if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
-                                                       std::move(d3d11on12Resource),
-                                                       isSwapChainTexture, isInitialized),
-                          &dawnTexture)) {
+    return completedSerial;
+}
+
+void Device::ReferenceUntilUnused(ComPtr<IUnknown> object) {
+    mUsedComObjectRefs.Enqueue(object, GetPendingCommandSerial());
+}
+
+MaybeError Device::ExecutePendingCommandContext() {
+    return mPendingCommands.ExecuteCommandList(this);
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+    const BindGroupDescriptor* descriptor) {
+    return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+    return Buffer::Create(this, descriptor);
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+    CommandEncoder* encoder,
+    const CommandBufferDescriptor* descriptor) {
+    return CommandBuffer::Create(encoder, descriptor);
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+    const ComputePipelineDescriptor* descriptor) {
+    return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+    const PipelineLayoutDescriptor* descriptor) {
+    return PipelineLayout::Create(this, descriptor);
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+    return QuerySet::Create(this, descriptor);
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+    const RenderPipelineDescriptor* descriptor) {
+    return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+    return Sampler::Create(this, descriptor);
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+    const ShaderModuleDescriptor* descriptor,
+    ShaderModuleParseResult* parseResult) {
+    return ShaderModule::Create(this, descriptor, parseResult);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+    const SwapChainDescriptor* descriptor) {
+    return OldSwapChain::Create(this, descriptor);
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+    Surface* surface,
+    NewSwapChainBase* previousSwapChain,
+    const SwapChainDescriptor* descriptor) {
+    return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+    return Texture::Create(this, descriptor);
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+    TextureBase* texture,
+    const TextureViewDescriptor* descriptor) {
+    return TextureView::Create(texture, descriptor);
+}
+void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                WGPUCreateComputePipelineAsyncCallback callback,
+                                                void* userdata) {
+    ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+}
+void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                               WGPUCreateRenderPipelineAsyncCallback callback,
+                                               void* userdata) {
+    RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+}
+
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+    std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+    DAWN_TRY(stagingBuffer->Initialize());
+    return std::move(stagingBuffer);
+}
+
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) {
+    CommandRecordingContext* commandRecordingContext;
+    DAWN_TRY_ASSIGN(commandRecordingContext, GetPendingCommandContext());
+
+    Buffer* dstBuffer = ToBackend(destination);
+
+    bool cleared;
+    DAWN_TRY_ASSIGN(cleared, dstBuffer->EnsureDataInitializedAsDestination(
+                                 commandRecordingContext, destinationOffset, size));
+    DAWN_UNUSED(cleared);
+
+    CopyFromStagingToBufferImpl(commandRecordingContext, source, sourceOffset, destination,
+                                destinationOffset, size);
+
+    return {};
+}
+
+void Device::CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+                                         StagingBufferBase* source,
+                                         uint64_t sourceOffset,
+                                         BufferBase* destination,
+                                         uint64_t destinationOffset,
+                                         uint64_t size) {
+    ASSERT(commandContext != nullptr);
+    Buffer* dstBuffer = ToBackend(destination);
+    StagingBuffer* srcBuffer = ToBackend(source);
+    dstBuffer->TrackUsageAndTransitionNow(commandContext, wgpu::BufferUsage::CopyDst);
+
+    commandContext->GetCommandList()->CopyBufferRegion(dstBuffer->GetD3D12Resource(),
+                                                       destinationOffset, srcBuffer->GetResource(),
+                                                       sourceOffset, size);
+}
+
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) {
+    CommandRecordingContext* commandContext;
+    DAWN_TRY_ASSIGN(commandContext, GetPendingCommandContext());
+    Texture* texture = ToBackend(dst->texture.Get());
+
+    SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+    if (IsCompleteSubresourceCopiedTo(texture, copySizePixels, dst->mipLevel)) {
+        texture->SetIsSubresourceContentInitialized(true, range);
+    } else {
+        texture->EnsureSubresourceContentInitialized(commandContext, range);
+    }
+
+    texture->TrackUsageAndTransitionNow(commandContext, wgpu::TextureUsage::CopyDst, range);
+
+    RecordBufferTextureCopyWithBufferHandle(
+        BufferTextureCopyDirection::B2T, commandContext->GetCommandList(),
+        ToBackend(source)->GetResource(), src.offset, src.bytesPerRow, src.rowsPerImage, *dst,
+        copySizePixels);
+
+    return {};
+}
+
+void Device::DeallocateMemory(ResourceHeapAllocation& allocation) {
+    mResourceAllocatorManager->DeallocateMemory(allocation);
+}
+
+ResultOrError<ResourceHeapAllocation> Device::AllocateMemory(
+    D3D12_HEAP_TYPE heapType,
+    const D3D12_RESOURCE_DESC& resourceDescriptor,
+    D3D12_RESOURCE_STATES initialUsage) {
+    return mResourceAllocatorManager->AllocateMemory(heapType, resourceDescriptor, initialUsage);
+}
+
+Ref<TextureBase> Device::CreateD3D12ExternalTexture(
+    const TextureDescriptor* descriptor,
+    ComPtr<ID3D12Resource> d3d12Texture,
+    Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+    bool isSwapChainTexture,
+    bool isInitialized) {
+    Ref<Texture> dawnTexture;
+    if (ConsumedError(Texture::CreateExternalImage(this, descriptor, std::move(d3d12Texture),
+                                                   std::move(d3d11on12Resource), isSwapChainTexture,
+                                                   isInitialized),
+                      &dawnTexture)) {
+        return nullptr;
+    }
+    return {dawnTexture};
+}
+
+ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
+    if (mD3d11On12Device == nullptr) {
+        ComPtr<ID3D11Device> d3d11Device;
+        D3D_FEATURE_LEVEL d3dFeatureLevel;
+        IUnknown* const iUnknownQueue = mCommandQueue.Get();
+        if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
+                                                         &iUnknownQueue, 1, 1, &d3d11Device,
+                                                         nullptr, &d3dFeatureLevel))) {
             return nullptr;
         }
-        return {dawnTexture};
+
+        ComPtr<ID3D11On12Device> d3d11on12Device;
+        HRESULT hr = d3d11Device.As(&d3d11on12Device);
+        ASSERT(SUCCEEDED(hr));
+
+        mD3d11On12Device = std::move(d3d11on12Device);
     }
+    return mD3d11On12Device;
+}
 
-    ComPtr<ID3D11On12Device> Device::GetOrCreateD3D11on12Device() {
-        if (mD3d11On12Device == nullptr) {
-            ComPtr<ID3D11Device> d3d11Device;
-            D3D_FEATURE_LEVEL d3dFeatureLevel;
-            IUnknown* const iUnknownQueue = mCommandQueue.Get();
-            if (FAILED(GetFunctions()->d3d11on12CreateDevice(mD3d12Device.Get(), 0, nullptr, 0,
-                                                             &iUnknownQueue, 1, 1, &d3d11Device,
-                                                             nullptr, &d3dFeatureLevel))) {
-                return nullptr;
-            }
+const D3D12DeviceInfo& Device::GetDeviceInfo() const {
+    return ToBackend(GetAdapter())->GetDeviceInfo();
+}
 
-            ComPtr<ID3D11On12Device> d3d11on12Device;
-            HRESULT hr = d3d11Device.As(&d3d11on12Device);
-            ASSERT(SUCCEEDED(hr));
+void Device::InitTogglesFromDriver() {
+    const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
+    SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
+    SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
+    SetToggle(Toggle::UseD3D12ResidencyManagement, true);
+    SetToggle(Toggle::UseDXC, false);
 
-            mD3d11On12Device = std::move(d3d11on12Device);
+    // Disable optimizations when using FXC
+    // See https://crbug.com/dawn/1203
+    SetToggle(Toggle::FxcOptimizations, false);
+
+    // By default use the maximum shader-visible heap size allowed.
+    SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
+
+    uint32_t deviceId = GetAdapter()->GetDeviceId();
+    uint32_t vendorId = GetAdapter()->GetVendorId();
+
+    // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
+    // See http://crbug.com/1161355 for more information.
+    if (gpu_info::IsIntel(vendorId) &&
+        (gpu_info::IsSkylake(deviceId) || gpu_info::IsKabylake(deviceId) ||
+         gpu_info::IsCoffeelake(deviceId))) {
+        constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
+        if (gpu_info::CompareD3DDriverVersion(vendorId, ToBackend(GetAdapter())->GetDriverVersion(),
+                                              kFirstDriverVersionWithFix) < 0) {
+            SetToggle(
+                Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
+                true);
         }
-        return mD3d11On12Device;
     }
 
-    const D3D12DeviceInfo& Device::GetDeviceInfo() const {
-        return ToBackend(GetAdapter())->GetDeviceInfo();
-    }
+    // Currently this workaround is needed on any D3D12 backend for some particular situations.
+    // But we may need to limit it if D3D12 runtime fixes the bug on its new release. See
+    // https://crbug.com/dawn/1289 for more information.
+    SetToggle(Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings, true);
+}
 
-    void Device::InitTogglesFromDriver() {
-        const bool useResourceHeapTier2 = (GetDeviceInfo().resourceHeapTier >= 2);
-        SetToggle(Toggle::UseD3D12ResourceHeapTier2, useResourceHeapTier2);
-        SetToggle(Toggle::UseD3D12RenderPass, GetDeviceInfo().supportsRenderPass);
-        SetToggle(Toggle::UseD3D12ResidencyManagement, true);
-        SetToggle(Toggle::UseDXC, false);
+MaybeError Device::WaitForIdleForDestruction() {
+    // Immediately forget about all pending commands
+    mPendingCommands.Release();
 
-        // Disable optimizations when using FXC
-        // See https://crbug.com/dawn/1203
-        SetToggle(Toggle::FxcOptimizations, false);
+    DAWN_TRY(NextSerial());
+    // Wait for all in-flight commands to finish executing
+    DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
 
-        // By default use the maximum shader-visible heap size allowed.
-        SetToggle(Toggle::UseD3D12SmallShaderVisibleHeapForTesting, false);
+    return {};
+}
 
-        uint32_t deviceId = GetAdapter()->GetDeviceId();
-        uint32_t vendorId = GetAdapter()->GetVendorId();
+void AppendDebugLayerMessagesToError(ID3D12InfoQueue* infoQueue,
+                                     uint64_t totalErrors,
+                                     ErrorData* error) {
+    ASSERT(totalErrors > 0);
+    ASSERT(error != nullptr);
 
-        // Currently this workaround is only needed on Intel Gen9 and Gen9.5 GPUs.
-        // See http://crbug.com/1161355 for more information.
-        if (gpu_info::IsIntel(vendorId) &&
-            (gpu_info::IsSkylake(deviceId) || gpu_info::IsKabylake(deviceId) ||
-             gpu_info::IsCoffeelake(deviceId))) {
-            constexpr gpu_info::D3DDriverVersion kFirstDriverVersionWithFix = {30, 0, 100, 9864};
-            if (gpu_info::CompareD3DDriverVersion(vendorId,
-                                                  ToBackend(GetAdapter())->GetDriverVersion(),
-                                                  kFirstDriverVersionWithFix) < 0) {
-                SetToggle(
-                    Toggle::UseTempBufferInSmallFormatTextureToTextureCopyFromGreaterToLessMipLevel,
-                    true);
-            }
+    uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
+    for (uint64_t i = 0; i < errorsToPrint; ++i) {
+        std::ostringstream messageStream;
+        SIZE_T messageLength = 0;
+        HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
+        if (FAILED(hr)) {
+            messageStream << " ID3D12InfoQueue::GetMessage failed with " << hr;
+            error->AppendBackendMessage(messageStream.str());
+            continue;
         }
 
-        // Currently this workaround is needed on any D3D12 backend for some particular situations.
-        // But we may need to limit it if D3D12 runtime fixes the bug on its new release. See
-        // https://crbug.com/dawn/1289 for more information.
-        SetToggle(Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings, true);
+        std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
+        D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
+        hr = infoQueue->GetMessage(i, message, &messageLength);
+        if (FAILED(hr)) {
+            messageStream << " ID3D12InfoQueue::GetMessage failed with " << hr;
+            error->AppendBackendMessage(messageStream.str());
+            continue;
+        }
+
+        messageStream << message->pDescription << " (" << message->ID << ")";
+        error->AppendBackendMessage(messageStream.str());
+    }
+    if (errorsToPrint < totalErrors) {
+        std::ostringstream messages;
+        messages << (totalErrors - errorsToPrint) << " messages silenced";
+        error->AppendBackendMessage(messages.str());
     }
 
-    MaybeError Device::WaitForIdleForDestruction() {
-        // Immediately forget about all pending commands
-        mPendingCommands.Release();
+    // We only print up to the first kMaxDebugMessagesToPrint errors
+    infoQueue->ClearStoredMessages();
+}
 
-        DAWN_TRY(NextSerial());
-        // Wait for all in-flight commands to finish executing
-        DAWN_TRY(WaitForSerial(GetLastSubmittedCommandSerial()));
-
+MaybeError Device::CheckDebugLayerAndGenerateErrors() {
+    if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
         return {};
     }
 
-    void AppendDebugLayerMessagesToError(ID3D12InfoQueue* infoQueue,
-                                         uint64_t totalErrors,
-                                         ErrorData* error) {
-        ASSERT(totalErrors > 0);
-        ASSERT(error != nullptr);
+    ComPtr<ID3D12InfoQueue> infoQueue;
+    DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
+                          "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
+    uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
 
-        uint64_t errorsToPrint = std::min(kMaxDebugMessagesToPrint, totalErrors);
-        for (uint64_t i = 0; i < errorsToPrint; ++i) {
-            std::ostringstream messageStream;
-            SIZE_T messageLength = 0;
-            HRESULT hr = infoQueue->GetMessage(i, nullptr, &messageLength);
-            if (FAILED(hr)) {
-                messageStream << " ID3D12InfoQueue::GetMessage failed with " << hr;
-                error->AppendBackendMessage(messageStream.str());
-                continue;
-            }
-
-            std::unique_ptr<uint8_t[]> messageData(new uint8_t[messageLength]);
-            D3D12_MESSAGE* message = reinterpret_cast<D3D12_MESSAGE*>(messageData.get());
-            hr = infoQueue->GetMessage(i, message, &messageLength);
-            if (FAILED(hr)) {
-                messageStream << " ID3D12InfoQueue::GetMessage failed with " << hr;
-                error->AppendBackendMessage(messageStream.str());
-                continue;
-            }
-
-            messageStream << message->pDescription << " (" << message->ID << ")";
-            error->AppendBackendMessage(messageStream.str());
-        }
-        if (errorsToPrint < totalErrors) {
-            std::ostringstream messages;
-            messages << (totalErrors - errorsToPrint) << " messages silenced";
-            error->AppendBackendMessage(messages.str());
-        }
-
-        // We only print up to the first kMaxDebugMessagesToPrint errors
-        infoQueue->ClearStoredMessages();
+    // Check if any errors have occurred otherwise we would be creating an empty error. Note
+    // that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of GetNumStoredMessages
+    // because we only convert WARNINGS or higher messages to dawn errors.
+    if (totalErrors == 0) {
+        return {};
     }
 
-    MaybeError Device::CheckDebugLayerAndGenerateErrors() {
-        if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
-            return {};
-        }
+    auto error = DAWN_INTERNAL_ERROR("The D3D12 debug layer reported uncaught errors.");
 
-        ComPtr<ID3D12InfoQueue> infoQueue;
-        DAWN_TRY(CheckHRESULT(mD3d12Device.As(&infoQueue),
-                              "D3D12 QueryInterface ID3D12Device to ID3D12InfoQueue"));
-        uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
+    AppendDebugLayerMessagesToError(infoQueue.Get(), totalErrors, error.get());
 
-        // Check if any errors have occurred otherwise we would be creating an empty error. Note
-        // that we use GetNumStoredMessagesAllowedByRetrievalFilter instead of GetNumStoredMessages
-        // because we only convert WARNINGS or higher messages to dawn errors.
-        if (totalErrors == 0) {
-            return {};
-        }
+    return error;
+}
 
-        auto error = DAWN_INTERNAL_ERROR("The D3D12 debug layer reported uncaught errors.");
-
-        AppendDebugLayerMessagesToError(infoQueue.Get(), totalErrors, error.get());
-
-        return error;
+void Device::AppendDebugLayerMessages(ErrorData* error) {
+    if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
+        return;
     }
 
-    void Device::AppendDebugLayerMessages(ErrorData* error) {
-        if (!GetAdapter()->GetInstance()->IsBackendValidationEnabled()) {
-            return;
-        }
+    ComPtr<ID3D12InfoQueue> infoQueue;
+    if (FAILED(mD3d12Device.As(&infoQueue))) {
+        return;
+    }
+    uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
 
-        ComPtr<ID3D12InfoQueue> infoQueue;
-        if (FAILED(mD3d12Device.As(&infoQueue))) {
-            return;
-        }
-        uint64_t totalErrors = infoQueue->GetNumStoredMessagesAllowedByRetrievalFilter();
-
-        if (totalErrors == 0) {
-            return;
-        }
-
-        AppendDebugLayerMessagesToError(infoQueue.Get(), totalErrors, error);
+    if (totalErrors == 0) {
+        return;
     }
 
-    void Device::DestroyImpl() {
-        ASSERT(GetState() == State::Disconnected);
+    AppendDebugLayerMessagesToError(infoQueue.Get(), totalErrors, error);
+}
 
-        // Immediately forget about all pending commands for the case where device is lost on its
-        // own and WaitForIdleForDestruction isn't called.
-        mPendingCommands.Release();
+void Device::DestroyImpl() {
+    ASSERT(GetState() == State::Disconnected);
 
-        if (mFenceEvent != nullptr) {
-            ::CloseHandle(mFenceEvent);
-        }
+    // Immediately forget about all pending commands for the case where device is lost on its
+    // own and WaitForIdleForDestruction isn't called.
+    mPendingCommands.Release();
 
-        // Release recycled resource heaps.
-        if (mResourceAllocatorManager != nullptr) {
-            mResourceAllocatorManager->DestroyPool();
-        }
-
-        // We need to handle clearing up com object refs that were enqeued after TickImpl
-        mUsedComObjectRefs.ClearUpTo(std::numeric_limits<ExecutionSerial>::max());
-
-        ASSERT(mUsedComObjectRefs.Empty());
-        ASSERT(!mPendingCommands.IsOpen());
+    if (mFenceEvent != nullptr) {
+        ::CloseHandle(mFenceEvent);
     }
 
-    ShaderVisibleDescriptorAllocator* Device::GetViewShaderVisibleDescriptorAllocator() const {
-        return mViewShaderVisibleDescriptorAllocator.get();
+    // Release recycled resource heaps.
+    if (mResourceAllocatorManager != nullptr) {
+        mResourceAllocatorManager->DestroyPool();
     }
 
-    ShaderVisibleDescriptorAllocator* Device::GetSamplerShaderVisibleDescriptorAllocator() const {
-        return mSamplerShaderVisibleDescriptorAllocator.get();
-    }
+    // We need to handle clearing up com object refs that were enqeued after TickImpl
+    mUsedComObjectRefs.ClearUpTo(std::numeric_limits<ExecutionSerial>::max());
 
-    StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
-        uint32_t descriptorCount) const {
-        ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
-        // This is Log2 of the next power of two, plus 1.
-        uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
-        return mViewAllocators[allocatorIndex].get();
-    }
+    ASSERT(mUsedComObjectRefs.Empty());
+    ASSERT(!mPendingCommands.IsOpen());
+}
 
-    StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
-        uint32_t descriptorCount) const {
-        ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
-        // This is Log2 of the next power of two, plus 1.
-        uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
-        return mSamplerAllocators[allocatorIndex].get();
-    }
+ShaderVisibleDescriptorAllocator* Device::GetViewShaderVisibleDescriptorAllocator() const {
+    return mViewShaderVisibleDescriptorAllocator.get();
+}
 
-    StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
-        return mRenderTargetViewAllocator.get();
-    }
+ShaderVisibleDescriptorAllocator* Device::GetSamplerShaderVisibleDescriptorAllocator() const {
+    return mSamplerShaderVisibleDescriptorAllocator.get();
+}
 
-    StagingDescriptorAllocator* Device::GetDepthStencilViewAllocator() const {
-        return mDepthStencilViewAllocator.get();
-    }
+StagingDescriptorAllocator* Device::GetViewStagingDescriptorAllocator(
+    uint32_t descriptorCount) const {
+    ASSERT(descriptorCount <= kMaxViewDescriptorsPerBindGroup);
+    // This is Log2 of the next power of two, plus 1.
+    uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+    return mViewAllocators[allocatorIndex].get();
+}
 
-    SamplerHeapCache* Device::GetSamplerHeapCache() {
-        return mSamplerHeapCache.get();
-    }
+StagingDescriptorAllocator* Device::GetSamplerStagingDescriptorAllocator(
+    uint32_t descriptorCount) const {
+    ASSERT(descriptorCount <= kMaxSamplerDescriptorsPerBindGroup);
+    // This is Log2 of the next power of two, plus 1.
+    uint32_t allocatorIndex = descriptorCount == 0 ? 0 : Log2Ceil(descriptorCount) + 1;
+    return mSamplerAllocators[allocatorIndex].get();
+}
 
-    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
-        return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
-    }
+StagingDescriptorAllocator* Device::GetRenderTargetViewAllocator() const {
+    return mRenderTargetViewAllocator.get();
+}
 
-    // TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
-    // should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
-    // Current implementations would try to allocate additional 511 bytes,
-    // so we return 1 and let ComputeTextureCopySplits take care of the alignment.
-    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
-        return 1;
-    }
+StagingDescriptorAllocator* Device::GetDepthStencilViewAllocator() const {
+    return mDepthStencilViewAllocator.get();
+}
 
-    float Device::GetTimestampPeriodInNS() const {
-        return mTimestampPeriod;
-    }
+SamplerHeapCache* Device::GetSamplerHeapCache() {
+    return mSamplerHeapCache.get();
+}
 
-    bool Device::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
-        ComputePipelineBase* computePipeline) const {
-        return ToBackend(computePipeline)->UsesNumWorkgroups();
-    }
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+    return D3D12_TEXTURE_DATA_PITCH_ALIGNMENT;
+}
 
-    void Device::SetLabelImpl() {
-        SetDebugName(this, mD3d12Device.Get(), "Dawn_Device", GetLabel());
-    }
+// TODO(dawn:512): Once we optimize DynamicUploader allocation with offsets we
+// should make this return D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT = 512.
+// Current implementations would try to allocate additional 511 bytes,
+// so we return 1 and let ComputeTextureCopySplits take care of the alignment.
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+    return 1;
+}
 
-    bool Device::MayRequireDuplicationOfIndirectParameters() const {
-        return true;
-    }
+float Device::GetTimestampPeriodInNS() const {
+    return mTimestampPeriod;
+}
 
-    bool Device::ShouldDuplicateParametersForDrawIndirect(
-        const RenderPipelineBase* renderPipelineBase) const {
-        return ToBackend(renderPipelineBase)->UsesVertexOrInstanceIndex();
-    }
+bool Device::ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+    ComputePipelineBase* computePipeline) const {
+    return ToBackend(computePipeline)->UsesNumWorkgroups();
+}
+
+void Device::SetLabelImpl() {
+    SetDebugName(this, mD3d12Device.Get(), "Dawn_Device", GetLabel());
+}
+
+bool Device::MayRequireDuplicationOfIndirectParameters() const {
+    return true;
+}
+
+bool Device::ShouldDuplicateParametersForDrawIndirect(
+    const RenderPipelineBase* renderPipelineBase) const {
+    return ToBackend(renderPipelineBase)->UsesVertexOrInstanceIndex();
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/DeviceD3D12.h b/src/dawn/native/d3d12/DeviceD3D12.h
index fe80f85..fe05559 100644
--- a/src/dawn/native/d3d12/DeviceD3D12.h
+++ b/src/dawn/native/d3d12/DeviceD3D12.h
@@ -26,13 +26,13 @@
 
 namespace dawn::native::d3d12 {
 
-    class CommandAllocatorManager;
-    class PlatformFunctions;
-    class ResidencyManager;
-    class ResourceAllocatorManager;
-    class SamplerHeapCache;
-    class ShaderVisibleDescriptorAllocator;
-    class StagingDescriptorAllocator;
+class CommandAllocatorManager;
+class PlatformFunctions;
+class ResidencyManager;
+class ResourceAllocatorManager;
+class SamplerHeapCache;
+class ShaderVisibleDescriptorAllocator;
+class StagingDescriptorAllocator;
 
 #define ASSERT_SUCCESS(hr)            \
     do {                              \
@@ -40,236 +40,229 @@
         ASSERT(SUCCEEDED(succeeded)); \
     } while (0)
 
-    // Definition of backend types
-    class Device final : public DeviceBase {
-      public:
-        static ResultOrError<Ref<Device>> Create(Adapter* adapter,
-                                                 const DeviceDescriptor* descriptor);
-        ~Device() override;
+// Definition of backend types
+class Device final : public DeviceBase {
+  public:
+    static ResultOrError<Ref<Device>> Create(Adapter* adapter, const DeviceDescriptor* descriptor);
+    ~Device() override;
 
-        MaybeError Initialize(const DeviceDescriptor* descriptor);
+    MaybeError Initialize(const DeviceDescriptor* descriptor);
 
-        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
-            CommandEncoder* encoder,
-            const CommandBufferDescriptor* descriptor) override;
+    ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) override;
 
-        MaybeError TickImpl() override;
+    MaybeError TickImpl() override;
 
-        ID3D12Device* GetD3D12Device() const;
-        ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
-        ID3D12SharingContract* GetSharingContract() const;
+    ID3D12Device* GetD3D12Device() const;
+    ComPtr<ID3D12CommandQueue> GetCommandQueue() const;
+    ID3D12SharingContract* GetSharingContract() const;
 
-        ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
-        ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
-        ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
+    ComPtr<ID3D12CommandSignature> GetDispatchIndirectSignature() const;
+    ComPtr<ID3D12CommandSignature> GetDrawIndirectSignature() const;
+    ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectSignature() const;
 
-        CommandAllocatorManager* GetCommandAllocatorManager() const;
-        ResidencyManager* GetResidencyManager() const;
+    CommandAllocatorManager* GetCommandAllocatorManager() const;
+    ResidencyManager* GetResidencyManager() const;
 
-        const PlatformFunctions* GetFunctions() const;
-        ComPtr<IDXGIFactory4> GetFactory() const;
-        ComPtr<IDxcLibrary> GetDxcLibrary() const;
-        ComPtr<IDxcCompiler> GetDxcCompiler() const;
-        ComPtr<IDxcValidator> GetDxcValidator() const;
+    const PlatformFunctions* GetFunctions() const;
+    ComPtr<IDXGIFactory4> GetFactory() const;
+    ComPtr<IDxcLibrary> GetDxcLibrary() const;
+    ComPtr<IDxcCompiler> GetDxcCompiler() const;
+    ComPtr<IDxcValidator> GetDxcValidator() const;
 
-        ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
+    ResultOrError<CommandRecordingContext*> GetPendingCommandContext();
 
-        MaybeError ClearBufferToZero(CommandRecordingContext* commandContext,
+    MaybeError ClearBufferToZero(CommandRecordingContext* commandContext,
+                                 BufferBase* destination,
+                                 uint64_t destinationOffset,
+                                 uint64_t size);
+
+    const D3D12DeviceInfo& GetDeviceInfo() const;
+
+    MaybeError NextSerial();
+    MaybeError WaitForSerial(ExecutionSerial serial);
+
+    void ReferenceUntilUnused(ComPtr<IUnknown> object);
+
+    MaybeError ExecutePendingCommandContext();
+
+    ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+    MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                       uint64_t sourceOffset,
+                                       BufferBase* destination,
+                                       uint64_t destinationOffset,
+                                       uint64_t size) override;
+
+    void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
+                                     StagingBufferBase* source,
+                                     uint64_t sourceOffset,
                                      BufferBase* destination,
                                      uint64_t destinationOffset,
                                      uint64_t size);
 
-        const D3D12DeviceInfo& GetDeviceInfo() const;
+    MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                        const TextureDataLayout& src,
+                                        TextureCopy* dst,
+                                        const Extent3D& copySizePixels) override;
 
-        MaybeError NextSerial();
-        MaybeError WaitForSerial(ExecutionSerial serial);
+    ResultOrError<ResourceHeapAllocation> AllocateMemory(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        D3D12_RESOURCE_STATES initialUsage);
 
-        void ReferenceUntilUnused(ComPtr<IUnknown> object);
+    void DeallocateMemory(ResourceHeapAllocation& allocation);
 
-        MaybeError ExecutePendingCommandContext();
+    ShaderVisibleDescriptorAllocator* GetViewShaderVisibleDescriptorAllocator() const;
+    ShaderVisibleDescriptorAllocator* GetSamplerShaderVisibleDescriptorAllocator() const;
 
-        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
-        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
-                                           uint64_t sourceOffset,
-                                           BufferBase* destination,
-                                           uint64_t destinationOffset,
-                                           uint64_t size) override;
+    // Returns nullptr when descriptor count is zero.
+    StagingDescriptorAllocator* GetViewStagingDescriptorAllocator(uint32_t descriptorCount) const;
 
-        void CopyFromStagingToBufferImpl(CommandRecordingContext* commandContext,
-                                         StagingBufferBase* source,
-                                         uint64_t sourceOffset,
-                                         BufferBase* destination,
-                                         uint64_t destinationOffset,
-                                         uint64_t size);
+    StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
+        uint32_t descriptorCount) const;
 
-        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
-                                            const TextureDataLayout& src,
-                                            TextureCopy* dst,
-                                            const Extent3D& copySizePixels) override;
+    SamplerHeapCache* GetSamplerHeapCache();
 
-        ResultOrError<ResourceHeapAllocation> AllocateMemory(
-            D3D12_HEAP_TYPE heapType,
-            const D3D12_RESOURCE_DESC& resourceDescriptor,
-            D3D12_RESOURCE_STATES initialUsage);
+    StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
 
-        void DeallocateMemory(ResourceHeapAllocation& allocation);
+    StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
 
-        ShaderVisibleDescriptorAllocator* GetViewShaderVisibleDescriptorAllocator() const;
-        ShaderVisibleDescriptorAllocator* GetSamplerShaderVisibleDescriptorAllocator() const;
+    Ref<TextureBase> CreateD3D12ExternalTexture(const TextureDescriptor* descriptor,
+                                                ComPtr<ID3D12Resource> d3d12Texture,
+                                                Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+                                                bool isSwapChainTexture,
+                                                bool isInitialized);
 
-        // Returns nullptr when descriptor count is zero.
-        StagingDescriptorAllocator* GetViewStagingDescriptorAllocator(
-            uint32_t descriptorCount) const;
+    ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
 
-        StagingDescriptorAllocator* GetSamplerStagingDescriptorAllocator(
-            uint32_t descriptorCount) const;
+    void InitTogglesFromDriver();
 
-        SamplerHeapCache* GetSamplerHeapCache();
+    uint32_t GetOptimalBytesPerRowAlignment() const override;
+    uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
 
-        StagingDescriptorAllocator* GetRenderTargetViewAllocator() const;
+    float GetTimestampPeriodInNS() const override;
 
-        StagingDescriptorAllocator* GetDepthStencilViewAllocator() const;
+    bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
+        ComputePipelineBase* computePipeline) const override;
 
-        Ref<TextureBase> CreateD3D12ExternalTexture(
-            const TextureDescriptor* descriptor,
-            ComPtr<ID3D12Resource> d3d12Texture,
-            Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
-            bool isSwapChainTexture,
-            bool isInitialized);
+    bool MayRequireDuplicationOfIndirectParameters() const override;
 
-        ComPtr<ID3D11On12Device> GetOrCreateD3D11on12Device();
+    bool ShouldDuplicateParametersForDrawIndirect(
+        const RenderPipelineBase* renderPipelineBase) const override;
 
-        void InitTogglesFromDriver();
+    // Dawn APIs
+    void SetLabelImpl() override;
 
-        uint32_t GetOptimalBytesPerRowAlignment() const override;
-        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+  private:
+    using DeviceBase::DeviceBase;
 
-        float GetTimestampPeriodInNS() const override;
+    ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) override;
+    ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) override;
+    ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+    ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) override;
+    ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) override;
+    ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) override;
+    ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) override;
+    Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) override;
+    Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) override;
+    void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                            WGPUCreateComputePipelineAsyncCallback callback,
+                                            void* userdata) override;
+    void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                           WGPUCreateRenderPipelineAsyncCallback callback,
+                                           void* userdata) override;
 
-        bool ShouldDuplicateNumWorkgroupsForDispatchIndirect(
-            ComputePipelineBase* computePipeline) const override;
+    void DestroyImpl() override;
+    MaybeError WaitForIdleForDestruction() override;
 
-        bool MayRequireDuplicationOfIndirectParameters() const override;
+    MaybeError CheckDebugLayerAndGenerateErrors();
+    void AppendDebugLayerMessages(ErrorData* error) override;
 
-        bool ShouldDuplicateParametersForDrawIndirect(
-            const RenderPipelineBase* renderPipelineBase) const override;
+    MaybeError ApplyUseDxcToggle();
 
-        // Dawn APIs
-        void SetLabelImpl() override;
+    MaybeError CreateZeroBuffer();
 
-      private:
-        using DeviceBase::DeviceBase;
+    ComPtr<ID3D12Fence> mFence;
+    HANDLE mFenceEvent = nullptr;
+    ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
 
-        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
-            const BindGroupDescriptor* descriptor) override;
-        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken) override;
-        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-            const BufferDescriptor* descriptor) override;
-        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
-            const PipelineLayoutDescriptor* descriptor) override;
-        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
-            const QuerySetDescriptor* descriptor) override;
-        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
-            const SamplerDescriptor* descriptor) override;
-        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
-            const ShaderModuleDescriptor* descriptor,
-            ShaderModuleParseResult* parseResult) override;
-        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
-            Surface* surface,
-            NewSwapChainBase* previousSwapChain,
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
-            const TextureDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
-            TextureBase* texture,
-            const TextureViewDescriptor* descriptor) override;
-        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
-            const ComputePipelineDescriptor* descriptor) override;
-        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
-            const RenderPipelineDescriptor* descriptor) override;
-        void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
-                                                WGPUCreateComputePipelineAsyncCallback callback,
-                                                void* userdata) override;
-        void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
-                                               WGPUCreateRenderPipelineAsyncCallback callback,
-                                               void* userdata) override;
+    ComPtr<ID3D12Device> mD3d12Device;  // Device is owned by adapter and will not be outlived.
+    ComPtr<ID3D12CommandQueue> mCommandQueue;
+    ComPtr<ID3D12SharingContract> mD3d12SharingContract;
 
-        void DestroyImpl() override;
-        MaybeError WaitForIdleForDestruction() override;
+    // 11on12 device corresponding to mCommandQueue
+    ComPtr<ID3D11On12Device> mD3d11On12Device;
 
-        MaybeError CheckDebugLayerAndGenerateErrors();
-        void AppendDebugLayerMessages(ErrorData* error) override;
+    ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
+    ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
+    ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
 
-        MaybeError ApplyUseDxcToggle();
+    CommandRecordingContext mPendingCommands;
 
-        MaybeError CreateZeroBuffer();
+    SerialQueue<ExecutionSerial, ComPtr<IUnknown>> mUsedComObjectRefs;
 
-        ComPtr<ID3D12Fence> mFence;
-        HANDLE mFenceEvent = nullptr;
-        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+    std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
+    std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
+    std::unique_ptr<ResidencyManager> mResidencyManager;
 
-        ComPtr<ID3D12Device> mD3d12Device;  // Device is owned by adapter and will not be outlived.
-        ComPtr<ID3D12CommandQueue> mCommandQueue;
-        ComPtr<ID3D12SharingContract> mD3d12SharingContract;
+    static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup = 3 * kMaxSamplersPerShaderStage;
+    static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
+        kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
 
-        // 11on12 device corresponding to mCommandQueue
-        ComPtr<ID3D11On12Device> mD3d11On12Device;
+    static constexpr uint32_t kNumSamplerDescriptorAllocators =
+        ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
+    static constexpr uint32_t kNumViewDescriptorAllocators =
+        ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
 
-        ComPtr<ID3D12CommandSignature> mDispatchIndirectSignature;
-        ComPtr<ID3D12CommandSignature> mDrawIndirectSignature;
-        ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectSignature;
+    // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+    // the range [0, kMaxSamplerDescriptorsPerBindGroup].
+    std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
+        mViewAllocators;
 
-        CommandRecordingContext mPendingCommands;
+    // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
+    // the range [0, kMaxViewDescriptorsPerBindGroup].
+    std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
+        mSamplerAllocators;
 
-        SerialQueue<ExecutionSerial, ComPtr<IUnknown>> mUsedComObjectRefs;
+    std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
 
-        std::unique_ptr<CommandAllocatorManager> mCommandAllocatorManager;
-        std::unique_ptr<ResourceAllocatorManager> mResourceAllocatorManager;
-        std::unique_ptr<ResidencyManager> mResidencyManager;
+    std::unique_ptr<StagingDescriptorAllocator> mDepthStencilViewAllocator;
 
-        static constexpr uint32_t kMaxSamplerDescriptorsPerBindGroup =
-            3 * kMaxSamplersPerShaderStage;
-        static constexpr uint32_t kMaxViewDescriptorsPerBindGroup =
-            kMaxBindingsPerPipelineLayout - kMaxSamplerDescriptorsPerBindGroup;
+    std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
 
-        static constexpr uint32_t kNumSamplerDescriptorAllocators =
-            ConstexprLog2Ceil(kMaxSamplerDescriptorsPerBindGroup) + 1;
-        static constexpr uint32_t kNumViewDescriptorAllocators =
-            ConstexprLog2Ceil(kMaxViewDescriptorsPerBindGroup) + 1;
+    std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
 
-        // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
-        // the range [0, kMaxSamplerDescriptorsPerBindGroup].
-        std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumViewDescriptorAllocators + 1>
-            mViewAllocators;
+    // Sampler cache needs to be destroyed before the CPU sampler allocator to ensure the final
+    // release is called.
+    std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
 
-        // Index corresponds to Log2Ceil(descriptorCount) where descriptorCount is in
-        // the range [0, kMaxViewDescriptorsPerBindGroup].
-        std::array<std::unique_ptr<StagingDescriptorAllocator>, kNumSamplerDescriptorAllocators + 1>
-            mSamplerAllocators;
+    // A buffer filled with zeros that is used to copy into other buffers when they need to be
+    // cleared.
+    Ref<Buffer> mZeroBuffer;
 
-        std::unique_ptr<StagingDescriptorAllocator> mRenderTargetViewAllocator;
-
-        std::unique_ptr<StagingDescriptorAllocator> mDepthStencilViewAllocator;
-
-        std::unique_ptr<ShaderVisibleDescriptorAllocator> mViewShaderVisibleDescriptorAllocator;
-
-        std::unique_ptr<ShaderVisibleDescriptorAllocator> mSamplerShaderVisibleDescriptorAllocator;
-
-        // Sampler cache needs to be destroyed before the CPU sampler allocator to ensure the final
-        // release is called.
-        std::unique_ptr<SamplerHeapCache> mSamplerHeapCache;
-
-        // A buffer filled with zeros that is used to copy into other buffers when they need to be
-        // cleared.
-        Ref<Buffer> mZeroBuffer;
-
-        // The number of nanoseconds required for a timestamp query to be incremented by 1
-        float mTimestampPeriod = 1.0f;
-    };
+    // The number of nanoseconds required for a timestamp query to be incremented by 1
+    float mTimestampPeriod = 1.0f;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/Forward.h b/src/dawn/native/d3d12/Forward.h
index 3004f2e..ecb10aa 100644
--- a/src/dawn/native/d3d12/Forward.h
+++ b/src/dawn/native/d3d12/Forward.h
@@ -19,50 +19,50 @@
 
 namespace dawn::native::d3d12 {
 
-    class Adapter;
-    class BindGroup;
-    class BindGroupLayout;
-    class Buffer;
-    class CommandBuffer;
-    class ComputePipeline;
-    class Device;
-    class Heap;
-    class PipelineLayout;
-    class QuerySet;
-    class Queue;
-    class RenderPipeline;
-    class Sampler;
-    class ShaderModule;
-    class StagingBuffer;
-    class SwapChain;
-    class Texture;
-    class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class Heap;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class StagingBuffer;
+class SwapChain;
+class Texture;
+class TextureView;
 
-    struct D3D12BackendTraits {
-        using AdapterType = Adapter;
-        using BindGroupType = BindGroup;
-        using BindGroupLayoutType = BindGroupLayout;
-        using BufferType = Buffer;
-        using CommandBufferType = CommandBuffer;
-        using ComputePipelineType = ComputePipeline;
-        using DeviceType = Device;
-        using PipelineLayoutType = PipelineLayout;
-        using QuerySetType = QuerySet;
-        using QueueType = Queue;
-        using RenderPipelineType = RenderPipeline;
-        using ResourceHeapType = Heap;
-        using SamplerType = Sampler;
-        using ShaderModuleType = ShaderModule;
-        using StagingBufferType = StagingBuffer;
-        using SwapChainType = SwapChain;
-        using TextureType = Texture;
-        using TextureViewType = TextureView;
-    };
+struct D3D12BackendTraits {
+    using AdapterType = Adapter;
+    using BindGroupType = BindGroup;
+    using BindGroupLayoutType = BindGroupLayout;
+    using BufferType = Buffer;
+    using CommandBufferType = CommandBuffer;
+    using ComputePipelineType = ComputePipeline;
+    using DeviceType = Device;
+    using PipelineLayoutType = PipelineLayout;
+    using QuerySetType = QuerySet;
+    using QueueType = Queue;
+    using RenderPipelineType = RenderPipeline;
+    using ResourceHeapType = Heap;
+    using SamplerType = Sampler;
+    using ShaderModuleType = ShaderModule;
+    using StagingBufferType = StagingBuffer;
+    using SwapChainType = SwapChain;
+    using TextureType = Texture;
+    using TextureViewType = TextureView;
+};
 
-    template <typename T>
-    auto ToBackend(T&& common) -> decltype(ToBackendBase<D3D12BackendTraits>(common)) {
-        return ToBackendBase<D3D12BackendTraits>(common);
-    }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<D3D12BackendTraits>(common)) {
+    return ToBackendBase<D3D12BackendTraits>(common);
+}
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
index e5d4fb9..1b3613d 100644
--- a/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
+++ b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.cpp
@@ -16,24 +16,20 @@
 
 namespace dawn::native::d3d12 {
 
-    GPUDescriptorHeapAllocation::GPUDescriptorHeapAllocation(
-        D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
-        ExecutionSerial lastUsageSerial,
-        HeapVersionID heapSerial)
-        : mBaseDescriptor(baseDescriptor),
-          mLastUsageSerial(lastUsageSerial),
-          mHeapSerial(heapSerial) {
-    }
+GPUDescriptorHeapAllocation::GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+                                                         ExecutionSerial lastUsageSerial,
+                                                         HeapVersionID heapSerial)
+    : mBaseDescriptor(baseDescriptor), mLastUsageSerial(lastUsageSerial), mHeapSerial(heapSerial) {}
 
-    D3D12_GPU_DESCRIPTOR_HANDLE GPUDescriptorHeapAllocation::GetBaseDescriptor() const {
-        return mBaseDescriptor;
-    }
+D3D12_GPU_DESCRIPTOR_HANDLE GPUDescriptorHeapAllocation::GetBaseDescriptor() const {
+    return mBaseDescriptor;
+}
 
-    ExecutionSerial GPUDescriptorHeapAllocation::GetLastUsageSerial() const {
-        return mLastUsageSerial;
-    }
+ExecutionSerial GPUDescriptorHeapAllocation::GetLastUsageSerial() const {
+    return mLastUsageSerial;
+}
 
-    HeapVersionID GPUDescriptorHeapAllocation::GetHeapSerial() const {
-        return mHeapSerial;
-    }
+HeapVersionID GPUDescriptorHeapAllocation::GetHeapSerial() const {
+    return mHeapSerial;
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
index a7ac12c..f62286c 100644
--- a/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
+++ b/src/dawn/native/d3d12/GPUDescriptorHeapAllocationD3D12.h
@@ -21,23 +21,23 @@
 
 namespace dawn::native::d3d12 {
 
-    // Wrapper for a handle into a GPU-only descriptor heap.
-    class GPUDescriptorHeapAllocation {
-      public:
-        GPUDescriptorHeapAllocation() = default;
-        GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
-                                    ExecutionSerial lastUsageSerial,
-                                    HeapVersionID heapSerial);
+// Wrapper for a handle into a GPU-only descriptor heap.
+class GPUDescriptorHeapAllocation {
+  public:
+    GPUDescriptorHeapAllocation() = default;
+    GPUDescriptorHeapAllocation(D3D12_GPU_DESCRIPTOR_HANDLE baseDescriptor,
+                                ExecutionSerial lastUsageSerial,
+                                HeapVersionID heapSerial);
 
-        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
-        ExecutionSerial GetLastUsageSerial() const;
-        HeapVersionID GetHeapSerial() const;
+    D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+    ExecutionSerial GetLastUsageSerial() const;
+    HeapVersionID GetHeapSerial() const;
 
-      private:
-        D3D12_GPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
-        ExecutionSerial mLastUsageSerial = ExecutionSerial(0);
-        HeapVersionID mHeapSerial = HeapVersionID(0);
-    };
+  private:
+    D3D12_GPU_DESCRIPTOR_HANDLE mBaseDescriptor = {0};
+    ExecutionSerial mLastUsageSerial = ExecutionSerial(0);
+    HeapVersionID mHeapSerial = HeapVersionID(0);
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp b/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
index dccfceb..2834981 100644
--- a/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
+++ b/src/dawn/native/d3d12/HeapAllocatorD3D12.cpp
@@ -23,52 +23,48 @@
 
 namespace dawn::native::d3d12 {
 
-    HeapAllocator::HeapAllocator(Device* device,
-                                 D3D12_HEAP_TYPE heapType,
-                                 D3D12_HEAP_FLAGS heapFlags,
-                                 MemorySegment memorySegment)
-        : mDevice(device),
-          mHeapType(heapType),
-          mHeapFlags(heapFlags),
-          mMemorySegment(memorySegment) {
-    }
+HeapAllocator::HeapAllocator(Device* device,
+                             D3D12_HEAP_TYPE heapType,
+                             D3D12_HEAP_FLAGS heapFlags,
+                             MemorySegment memorySegment)
+    : mDevice(device), mHeapType(heapType), mHeapFlags(heapFlags), mMemorySegment(memorySegment) {}
 
-    ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
-        uint64_t size) {
-        D3D12_HEAP_DESC heapDesc;
-        heapDesc.SizeInBytes = size;
-        heapDesc.Properties.Type = mHeapType;
-        heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
-        heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
-        heapDesc.Properties.CreationNodeMask = 0;
-        heapDesc.Properties.VisibleNodeMask = 0;
-        // It is preferred to use a size that is a multiple of the alignment.
-        // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
-        // if the heap size is too small, the VMM would fragment.
-        // TODO(crbug.com/dawn/849): Consider having MSAA vs non-MSAA heaps.
-        heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
-        heapDesc.Flags = mHeapFlags;
+ResultOrError<std::unique_ptr<ResourceHeapBase>> HeapAllocator::AllocateResourceHeap(
+    uint64_t size) {
+    D3D12_HEAP_DESC heapDesc;
+    heapDesc.SizeInBytes = size;
+    heapDesc.Properties.Type = mHeapType;
+    heapDesc.Properties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+    heapDesc.Properties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+    heapDesc.Properties.CreationNodeMask = 0;
+    heapDesc.Properties.VisibleNodeMask = 0;
+    // It is preferred to use a size that is a multiple of the alignment.
+    // However, MSAA heaps are always aligned to 4MB instead of 64KB. This means
+    // if the heap size is too small, the VMM would fragment.
+    // TODO(crbug.com/dawn/849): Consider having MSAA vs non-MSAA heaps.
+    heapDesc.Alignment = D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT;
+    heapDesc.Flags = mHeapFlags;
 
-        // CreateHeap will implicitly make the created heap resident. We must ensure enough free
-        // memory exists before allocating to avoid an out-of-memory error when overcommitted.
-        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(size, mMemorySegment));
+    // CreateHeap will implicitly make the created heap resident. We must ensure enough free
+    // memory exists before allocating to avoid an out-of-memory error when overcommitted.
+    DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(size, mMemorySegment));
 
-        ComPtr<ID3D12Heap> d3d12Heap;
-        DAWN_TRY(CheckOutOfMemoryHRESULT(
-            mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
-            "ID3D12Device::CreateHeap"));
+    ComPtr<ID3D12Heap> d3d12Heap;
+    DAWN_TRY(CheckOutOfMemoryHRESULT(
+        mDevice->GetD3D12Device()->CreateHeap(&heapDesc, IID_PPV_ARGS(&d3d12Heap)),
+        "ID3D12Device::CreateHeap"));
 
-        std::unique_ptr<ResourceHeapBase> heapBase =
-            std::make_unique<Heap>(std::move(d3d12Heap), mMemorySegment, size);
+    std::unique_ptr<ResourceHeapBase> heapBase =
+        std::make_unique<Heap>(std::move(d3d12Heap), mMemorySegment, size);
 
-        // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
-        // avoid calling MakeResident a second time.
-        mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
-        return std::move(heapBase);
-    }
+    // Calling CreateHeap implicitly calls MakeResident on the new heap. We must track this to
+    // avoid calling MakeResident a second time.
+    mDevice->GetResidencyManager()->TrackResidentAllocation(ToBackend(heapBase.get()));
+    return std::move(heapBase);
+}
 
-    void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
-        mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
-    }
+void HeapAllocator::DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> heap) {
+    mDevice->ReferenceUntilUnused(static_cast<Heap*>(heap.get())->GetD3D12Heap());
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/HeapAllocatorD3D12.h b/src/dawn/native/d3d12/HeapAllocatorD3D12.h
index e2e2a56..a297f04 100644
--- a/src/dawn/native/d3d12/HeapAllocatorD3D12.h
+++ b/src/dawn/native/d3d12/HeapAllocatorD3D12.h
@@ -23,27 +23,26 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    // Wrapper to allocate a D3D12 heap.
-    class HeapAllocator : public ResourceHeapAllocator {
-      public:
-        HeapAllocator(Device* device,
-                      D3D12_HEAP_TYPE heapType,
-                      D3D12_HEAP_FLAGS heapFlags,
-                      MemorySegment memorySegment);
-        ~HeapAllocator() override = default;
+// Wrapper to allocate a D3D12 heap.
+class HeapAllocator : public ResourceHeapAllocator {
+  public:
+    HeapAllocator(Device* device,
+                  D3D12_HEAP_TYPE heapType,
+                  D3D12_HEAP_FLAGS heapFlags,
+                  MemorySegment memorySegment);
+    ~HeapAllocator() override = default;
 
-        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-            uint64_t size) override;
-        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override;
+    void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override;
 
-      private:
-        Device* mDevice;
-        D3D12_HEAP_TYPE mHeapType;
-        D3D12_HEAP_FLAGS mHeapFlags;
-        MemorySegment mMemorySegment;
-    };
+  private:
+    Device* mDevice;
+    D3D12_HEAP_TYPE mHeapType;
+    D3D12_HEAP_FLAGS mHeapFlags;
+    MemorySegment mMemorySegment;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/HeapD3D12.cpp b/src/dawn/native/d3d12/HeapD3D12.cpp
index ef17695..61aa03b 100644
--- a/src/dawn/native/d3d12/HeapD3D12.cpp
+++ b/src/dawn/native/d3d12/HeapD3D12.cpp
@@ -17,17 +17,17 @@
 #include <utility>
 
 namespace dawn::native::d3d12 {
-    Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
-        : Pageable(std::move(d3d12Pageable), memorySegment, size) {
-        mD3d12Pageable.As(&mD3d12Heap);
-    }
+Heap::Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
+    : Pageable(std::move(d3d12Pageable), memorySegment, size) {
+    mD3d12Pageable.As(&mD3d12Heap);
+}
 
-    // This function should only be used when mD3D12Pageable was initialized from a
-    // ID3D12Pageable that was initially created as an ID3D12Heap (i.e. SubAllocation). If the
-    // ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
-    // use GetD3D12Pageable().
-    ID3D12Heap* Heap::GetD3D12Heap() const {
-        return mD3d12Heap.Get();
-    }
+// This function should only be used when mD3D12Pageable was initialized from a
+// ID3D12Pageable that was initially created as an ID3D12Heap (i.e. SubAllocation). If the
+// ID3D12Pageable was initially created as an ID3D12Resource (i.e. DirectAllocation), then
+// use GetD3D12Pageable().
+ID3D12Heap* Heap::GetD3D12Heap() const {
+    return mD3d12Heap.Get();
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/HeapD3D12.h b/src/dawn/native/d3d12/HeapD3D12.h
index b1efe5e..ebce289 100644
--- a/src/dawn/native/d3d12/HeapD3D12.h
+++ b/src/dawn/native/d3d12/HeapD3D12.h
@@ -21,20 +21,20 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    // This class is used to represent ID3D12Heap allocations, as well as an implicit heap
-    // representing a directly allocated resource. It inherits from Pageable because each Heap must
-    // be represented in the ResidencyManager.
-    class Heap : public ResourceHeapBase, public Pageable {
-      public:
-        Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+// This class is used to represent ID3D12Heap allocations, as well as an implicit heap
+// representing a directly allocated resource. It inherits from Pageable because each Heap must
+// be represented in the ResidencyManager.
+class Heap : public ResourceHeapBase, public Pageable {
+  public:
+    Heap(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
 
-        ID3D12Heap* GetD3D12Heap() const;
+    ID3D12Heap* GetD3D12Heap() const;
 
-      private:
-        ComPtr<ID3D12Heap> mD3d12Heap;
-    };
+  private:
+    ComPtr<ID3D12Heap> mD3d12Heap;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_HEAPD3D12_H_
diff --git a/src/dawn/native/d3d12/IntegerTypes.h b/src/dawn/native/d3d12/IntegerTypes.h
index 6c72c07..a92cfae 100644
--- a/src/dawn/native/d3d12/IntegerTypes.h
+++ b/src/dawn/native/d3d12/IntegerTypes.h
@@ -22,9 +22,9 @@
 
 namespace dawn::native::d3d12 {
 
-    // An ID used to desambiguate between multiple uses of the same descriptor heap in the
-    // BindGroup allocations.
-    using HeapVersionID = TypedInteger<struct HeapVersionIDT, uint64_t>;
+// An ID used to disambiguate between multiple uses of the same descriptor heap in the
+// BindGroup allocations.
+using HeapVersionID = TypedInteger<struct HeapVersionIDT, uint64_t>;
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
index 5156af5..de70081 100644
--- a/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
+++ b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.cpp
@@ -20,101 +20,98 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
-            DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
-            if (allowedUsages & WGPUTextureUsage_TextureBinding) {
-                usage |= DXGI_USAGE_SHADER_INPUT;
-            }
-            if (allowedUsages & WGPUTextureUsage_StorageBinding) {
-                usage |= DXGI_USAGE_UNORDERED_ACCESS;
-            }
-            if (allowedUsages & WGPUTextureUsage_RenderAttachment) {
-                usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
-            }
-            return usage;
-        }
+namespace {
+DXGI_USAGE D3D12SwapChainBufferUsage(WGPUTextureUsage allowedUsages) {
+    DXGI_USAGE usage = DXGI_CPU_ACCESS_NONE;
+    if (allowedUsages & WGPUTextureUsage_TextureBinding) {
+        usage |= DXGI_USAGE_SHADER_INPUT;
+    }
+    if (allowedUsages & WGPUTextureUsage_StorageBinding) {
+        usage |= DXGI_USAGE_UNORDERED_ACCESS;
+    }
+    if (allowedUsages & WGPUTextureUsage_RenderAttachment) {
+        usage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+    }
+    return usage;
+}
 
-        static constexpr unsigned int kFrameCount = 3;
-    }  // anonymous namespace
+static constexpr unsigned int kFrameCount = 3;
+}  // anonymous namespace
 
-    NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
-        : mWindow(window), mDevice(device), mInterval(1) {
+NativeSwapChainImpl::NativeSwapChainImpl(Device* device, HWND window)
+    : mWindow(window), mDevice(device), mInterval(1) {}
+
+NativeSwapChainImpl::~NativeSwapChainImpl() {}
+
+void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {}
+
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                  WGPUTextureUsage usage,
+                                                  uint32_t width,
+                                                  uint32_t height) {
+    ASSERT(width > 0);
+    ASSERT(height > 0);
+    ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+
+    ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
+    ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
+
+    mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
+
+    // Create the D3D12 swapchain, assuming only two buffers for now
+    DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+    swapChainDesc.Width = width;
+    swapChainDesc.Height = height;
+    swapChainDesc.Format = D3D12TextureFormat(GetPreferredFormat());
+    swapChainDesc.BufferUsage = D3D12SwapChainBufferUsage(usage);
+    swapChainDesc.BufferCount = kFrameCount;
+    swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+    swapChainDesc.SampleDesc.Count = 1;
+    swapChainDesc.SampleDesc.Quality = 0;
+
+    ComPtr<IDXGISwapChain1> swapChain1;
+    ASSERT_SUCCESS(factory->CreateSwapChainForHwnd(queue.Get(), mWindow, &swapChainDesc, nullptr,
+                                                   nullptr, &swapChain1));
+
+    ASSERT_SUCCESS(swapChain1.As(&mSwapChain));
+
+    // Gather the resources that will be used to present to the swapchain
+    mBuffers.resize(kFrameCount);
+    for (uint32_t i = 0; i < kFrameCount; ++i) {
+        ASSERT_SUCCESS(mSwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])));
     }
 
-    NativeSwapChainImpl::~NativeSwapChainImpl() {
-    }
+    // Set the initial serial of buffers to 0 so that we don't wait on them when they are first
+    // used
+    mBufferSerials.resize(kFrameCount, ExecutionSerial(0));
 
-    void NativeSwapChainImpl::Init(DawnWSIContextD3D12* /*context*/) {
-    }
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
-                                                      WGPUTextureUsage usage,
-                                                      uint32_t width,
-                                                      uint32_t height) {
-        ASSERT(width > 0);
-        ASSERT(height > 0);
-        ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+    mCurrentBuffer = mSwapChain->GetCurrentBackBufferIndex();
+    nextTexture->texture.ptr = mBuffers[mCurrentBuffer].Get();
 
-        ComPtr<IDXGIFactory4> factory = mDevice->GetFactory();
-        ComPtr<ID3D12CommandQueue> queue = mDevice->GetCommandQueue();
+    // TODO(crbug.com/dawn/269) Currently we force the CPU to wait for the GPU to be finished
+    // with the buffer. Ideally the synchronization should be all done on the GPU.
+    ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
 
-        mInterval = mDevice->IsToggleEnabled(Toggle::TurnOffVsync) == true ? 0 : 1;
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-        // Create the D3D12 swapchain, assuming only two buffers for now
-        DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
-        swapChainDesc.Width = width;
-        swapChainDesc.Height = height;
-        swapChainDesc.Format = D3D12TextureFormat(GetPreferredFormat());
-        swapChainDesc.BufferUsage = D3D12SwapChainBufferUsage(usage);
-        swapChainDesc.BufferCount = kFrameCount;
-        swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
-        swapChainDesc.SampleDesc.Count = 1;
-        swapChainDesc.SampleDesc.Quality = 0;
+DawnSwapChainError NativeSwapChainImpl::Present() {
+    // This assumes the texture has already been transitioned to the PRESENT state.
 
-        ComPtr<IDXGISwapChain1> swapChain1;
-        ASSERT_SUCCESS(factory->CreateSwapChainForHwnd(queue.Get(), mWindow, &swapChainDesc,
-                                                       nullptr, nullptr, &swapChain1));
+    ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
+    // TODO(crbug.com/dawn/833): Make the serial ticking implicit.
+    ASSERT(mDevice->NextSerial().IsSuccess());
 
-        ASSERT_SUCCESS(swapChain1.As(&mSwapChain));
+    mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-        // Gather the resources that will be used to present to the swapchain
-        mBuffers.resize(kFrameCount);
-        for (uint32_t i = 0; i < kFrameCount; ++i) {
-            ASSERT_SUCCESS(mSwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])));
-        }
-
-        // Set the initial serial of buffers to 0 so that we don't wait on them when they are first
-        // used
-        mBufferSerials.resize(kFrameCount, ExecutionSerial(0));
-
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
-        mCurrentBuffer = mSwapChain->GetCurrentBackBufferIndex();
-        nextTexture->texture.ptr = mBuffers[mCurrentBuffer].Get();
-
-        // TODO(crbug.com/dawn/269) Currently we force the CPU to wait for the GPU to be finished
-        // with the buffer. Ideally the synchronization should be all done on the GPU.
-        ASSERT(mDevice->WaitForSerial(mBufferSerials[mCurrentBuffer]).IsSuccess());
-
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    DawnSwapChainError NativeSwapChainImpl::Present() {
-        // This assumes the texture has already been transition to the PRESENT state.
-
-        ASSERT_SUCCESS(mSwapChain->Present(mInterval, 0));
-        // TODO(crbug.com/dawn/833): Make the serial ticking implicit.
-        ASSERT(mDevice->NextSerial().IsSuccess());
-
-        mBufferSerials[mCurrentBuffer] = mDevice->GetPendingCommandSerial();
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
-        return wgpu::TextureFormat::RGBA8Unorm;
-    }
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+    return wgpu::TextureFormat::RGBA8Unorm;
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
index a8ab3be..6bedd2d 100644
--- a/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
+++ b/src/dawn/native/d3d12/NativeSwapChainImplD3D12.h
@@ -25,35 +25,35 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class NativeSwapChainImpl {
-      public:
-        using WSIContext = DawnWSIContextD3D12;
+class NativeSwapChainImpl {
+  public:
+    using WSIContext = DawnWSIContextD3D12;
 
-        NativeSwapChainImpl(Device* device, HWND window);
-        ~NativeSwapChainImpl();
+    NativeSwapChainImpl(Device* device, HWND window);
+    ~NativeSwapChainImpl();
 
-        void Init(DawnWSIContextD3D12* context);
-        DawnSwapChainError Configure(WGPUTextureFormat format,
-                                     WGPUTextureUsage,
-                                     uint32_t width,
-                                     uint32_t height);
-        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
-        DawnSwapChainError Present();
+    void Init(DawnWSIContextD3D12* context);
+    DawnSwapChainError Configure(WGPUTextureFormat format,
+                                 WGPUTextureUsage,
+                                 uint32_t width,
+                                 uint32_t height);
+    DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+    DawnSwapChainError Present();
 
-        wgpu::TextureFormat GetPreferredFormat() const;
+    wgpu::TextureFormat GetPreferredFormat() const;
 
-      private:
-        HWND mWindow = nullptr;
-        Device* mDevice = nullptr;
-        UINT mInterval;
+  private:
+    HWND mWindow = nullptr;
+    Device* mDevice = nullptr;
+    UINT mInterval;
 
-        ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
-        std::vector<ComPtr<ID3D12Resource>> mBuffers;
-        std::vector<ExecutionSerial> mBufferSerials;
-        uint32_t mCurrentBuffer;
-    };
+    ComPtr<IDXGISwapChain3> mSwapChain = nullptr;
+    std::vector<ComPtr<ID3D12Resource>> mBuffers;
+    std::vector<ExecutionSerial> mBufferSerials;
+    uint32_t mCurrentBuffer;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/PageableD3D12.cpp b/src/dawn/native/d3d12/PageableD3D12.cpp
index 0d7e3bd1..72f70c0 100644
--- a/src/dawn/native/d3d12/PageableD3D12.cpp
+++ b/src/dawn/native/d3d12/PageableD3D12.cpp
@@ -17,62 +17,59 @@
 #include <utility>
 
 namespace dawn::native::d3d12 {
-    Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable,
-                       MemorySegment memorySegment,
-                       uint64_t size)
-        : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {
-    }
+Pageable::Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size)
+    : mD3d12Pageable(std::move(d3d12Pageable)), mMemorySegment(memorySegment), mSize(size) {}
 
-    // When a pageable is destroyed, it no longer resides in resident memory, so we must evict
-    // it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
-    // ResidencyManager will attempt to use it after it has been deallocated.
-    Pageable::~Pageable() {
-        if (IsInResidencyLRUCache()) {
-            RemoveFromList();
-        }
+// When a pageable is destroyed, it no longer resides in resident memory, so we must evict
+// it from the LRU cache. If this heap is not manually removed from the LRU-cache, the
+// ResidencyManager will attempt to use it after it has been deallocated.
+Pageable::~Pageable() {
+    if (IsInResidencyLRUCache()) {
+        RemoveFromList();
     }
+}
 
-    ID3D12Pageable* Pageable::GetD3D12Pageable() const {
-        return mD3d12Pageable.Get();
-    }
+ID3D12Pageable* Pageable::GetD3D12Pageable() const {
+    return mD3d12Pageable.Get();
+}
 
-    ExecutionSerial Pageable::GetLastUsage() const {
-        return mLastUsage;
-    }
+ExecutionSerial Pageable::GetLastUsage() const {
+    return mLastUsage;
+}
 
-    void Pageable::SetLastUsage(ExecutionSerial serial) {
-        mLastUsage = serial;
-    }
+void Pageable::SetLastUsage(ExecutionSerial serial) {
+    mLastUsage = serial;
+}
 
-    ExecutionSerial Pageable::GetLastSubmission() const {
-        return mLastSubmission;
-    }
+ExecutionSerial Pageable::GetLastSubmission() const {
+    return mLastSubmission;
+}
 
-    void Pageable::SetLastSubmission(ExecutionSerial serial) {
-        mLastSubmission = serial;
-    }
+void Pageable::SetLastSubmission(ExecutionSerial serial) {
+    mLastSubmission = serial;
+}
 
-    MemorySegment Pageable::GetMemorySegment() const {
-        return mMemorySegment;
-    }
+MemorySegment Pageable::GetMemorySegment() const {
+    return mMemorySegment;
+}
 
-    uint64_t Pageable::GetSize() const {
-        return mSize;
-    }
+uint64_t Pageable::GetSize() const {
+    return mSize;
+}
 
-    bool Pageable::IsInResidencyLRUCache() const {
-        return IsInList();
-    }
+bool Pageable::IsInResidencyLRUCache() const {
+    return IsInList();
+}
 
-    void Pageable::IncrementResidencyLock() {
-        mResidencyLockRefCount++;
-    }
+void Pageable::IncrementResidencyLock() {
+    mResidencyLockRefCount++;
+}
 
-    void Pageable::DecrementResidencyLock() {
-        mResidencyLockRefCount--;
-    }
+void Pageable::DecrementResidencyLock() {
+    mResidencyLockRefCount--;
+}
 
-    bool Pageable::IsResidencyLocked() const {
-        return mResidencyLockRefCount != 0;
-    }
+bool Pageable::IsResidencyLocked() const {
+    return mResidencyLockRefCount != 0;
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/PageableD3D12.h b/src/dawn/native/d3d12/PageableD3D12.h
index a52a8a1..c11aba6 100644
--- a/src/dawn/native/d3d12/PageableD3D12.h
+++ b/src/dawn/native/d3d12/PageableD3D12.h
@@ -21,60 +21,60 @@
 #include "dawn/native/d3d12/d3d12_platform.h"
 
 namespace dawn::native::d3d12 {
-    // This class is used to represent ID3D12Pageable allocations, and also serves as a node within
-    // the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
-    // allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
-    // LRU cache when it is evicted from resident memory due to budget constraints, or when the
-    // pageable allocation is released.
-    class Pageable : public LinkNode<Pageable> {
-      public:
-        Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
-        ~Pageable();
+// This class is used to represent ID3D12Pageable allocations, and also serves as a node within
+// the ResidencyManager's LRU cache. This node is inserted into the LRU-cache when it is first
+// allocated, and any time it is scheduled to be used by the GPU. This node is removed from the
+// LRU cache when it is evicted from resident memory due to budget constraints, or when the
+// pageable allocation is released.
+class Pageable : public LinkNode<Pageable> {
+  public:
+    Pageable(ComPtr<ID3D12Pageable> d3d12Pageable, MemorySegment memorySegment, uint64_t size);
+    ~Pageable();
 
-        ID3D12Pageable* GetD3D12Pageable() const;
+    ID3D12Pageable* GetD3D12Pageable() const;
 
-        // We set mLastRecordingSerial to denote the serial this pageable was last recorded to be
-        // used. We must check this serial against the current serial when recording usages to
-        // ensure we do not process residency for this pageable multiple times.
-        ExecutionSerial GetLastUsage() const;
-        void SetLastUsage(ExecutionSerial serial);
+    // We set mLastRecordingSerial to denote the serial this pageable was last recorded to be
+    // used. We must check this serial against the current serial when recording usages to
+    // ensure we do not process residency for this pageable multiple times.
+    ExecutionSerial GetLastUsage() const;
+    void SetLastUsage(ExecutionSerial serial);
 
-        // The residency manager must know the last serial that any portion of the pageable was
-        // submitted to be used so that we can ensure this pageable stays resident in memory at
-        // least until that serial has completed.
-        ExecutionSerial GetLastSubmission() const;
-        void SetLastSubmission(ExecutionSerial serial);
+    // The residency manager must know the last serial that any portion of the pageable was
+    // submitted to be used so that we can ensure this pageable stays resident in memory at
+    // least until that serial has completed.
+    ExecutionSerial GetLastSubmission() const;
+    void SetLastSubmission(ExecutionSerial serial);
 
-        MemorySegment GetMemorySegment() const;
+    MemorySegment GetMemorySegment() const;
 
-        uint64_t GetSize() const;
+    uint64_t GetSize() const;
 
-        bool IsInResidencyLRUCache() const;
+    bool IsInResidencyLRUCache() const;
 
-        // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
-        // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
-        // mapped in a single heap, we must track the number of resources currently locked.
-        void IncrementResidencyLock();
-        void DecrementResidencyLock();
-        bool IsResidencyLocked() const;
+    // In some scenarios, such as async buffer mapping or descriptor heaps, we must lock
+    // residency to ensure the pageable cannot be evicted. Because multiple buffers may be
+    // mapped in a single heap, we must track the number of resources currently locked.
+    void IncrementResidencyLock();
+    void DecrementResidencyLock();
+    bool IsResidencyLocked() const;
 
-      protected:
-        ComPtr<ID3D12Pageable> mD3d12Pageable;
+  protected:
+    ComPtr<ID3D12Pageable> mD3d12Pageable;
 
-      private:
-        // mLastUsage denotes the last time this pageable was recorded for use.
-        ExecutionSerial mLastUsage = ExecutionSerial(0);
-        // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
-        // although this variable often contains the same value as mLastUsage, it can differ in some
-        // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
-        // updated upon the call, but the backend operation is deferred until the next submission
-        // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
-        // accurately identify when a pageable can be evicted.
-        ExecutionSerial mLastSubmission = ExecutionSerial(0);
-        MemorySegment mMemorySegment;
-        uint32_t mResidencyLockRefCount = 0;
-        uint64_t mSize = 0;
-    };
+  private:
+    // mLastUsage denotes the last time this pageable was recorded for use.
+    ExecutionSerial mLastUsage = ExecutionSerial(0);
+    // mLastSubmission denotes the last time this pageable was submitted to the GPU. Note that
+    // although this variable often contains the same value as mLastUsage, it can differ in some
+    // situations. When some asynchronous APIs (like WriteBuffer) are called, mLastUsage is
+    // updated upon the call, but the backend operation is deferred until the next submission
+    // to the GPU. This makes mLastSubmission unique from mLastUsage, and allows us to
+    // accurately identify when a pageable can be evicted.
+    ExecutionSerial mLastSubmission = ExecutionSerial(0);
+    MemorySegment mMemorySegment;
+    uint32_t mResidencyLockRefCount = 0;
+    uint64_t mSize = 0;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_PAGEABLED3D12_H_
diff --git a/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp b/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
index 8f1dbed..b710069 100644
--- a/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
+++ b/src/dawn/native/d3d12/PipelineLayoutD3D12.cpp
@@ -27,419 +27,414 @@
 using Microsoft::WRL::ComPtr;
 
 namespace dawn::native::d3d12 {
-    namespace {
+namespace {
 
-        // Reserve register names for internal use. This registers map to bindings in the shader,
-        // but are not directly related to allocation of the root signature.
-        // In the root signature, it the index of the root parameter where these registers are
-        // used that determines the layout of the root signature.
-        static constexpr uint32_t kRenderOrComputeInternalRegisterSpace = kMaxBindGroups + 1;
-        static constexpr uint32_t kRenderOrComputeInternalBaseRegister = 0;
+// Reserve register names for internal use. These registers map to bindings in the shader,
+// but are not directly related to allocation of the root signature.
+// In the root signature, it is the index of the root parameter where these registers are
+// used that determines the layout of the root signature.
+static constexpr uint32_t kRenderOrComputeInternalRegisterSpace = kMaxBindGroups + 1;
+static constexpr uint32_t kRenderOrComputeInternalBaseRegister = 0;
 
-        static constexpr uint32_t kDynamicStorageBufferLengthsRegisterSpace = kMaxBindGroups + 2;
-        static constexpr uint32_t kDynamicStorageBufferLengthsBaseRegister = 0;
+static constexpr uint32_t kDynamicStorageBufferLengthsRegisterSpace = kMaxBindGroups + 2;
+static constexpr uint32_t kDynamicStorageBufferLengthsBaseRegister = 0;
 
-        static constexpr uint32_t kInvalidDynamicStorageBufferLengthsParameterIndex =
-            std::numeric_limits<uint32_t>::max();
+static constexpr uint32_t kInvalidDynamicStorageBufferLengthsParameterIndex =
+    std::numeric_limits<uint32_t>::max();
 
-        D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
-            ASSERT(visibility != wgpu::ShaderStage::None);
+D3D12_SHADER_VISIBILITY ShaderVisibilityType(wgpu::ShaderStage visibility) {
+    ASSERT(visibility != wgpu::ShaderStage::None);
 
-            if (visibility == wgpu::ShaderStage::Vertex) {
-                return D3D12_SHADER_VISIBILITY_VERTEX;
+    if (visibility == wgpu::ShaderStage::Vertex) {
+        return D3D12_SHADER_VISIBILITY_VERTEX;
+    }
+
+    if (visibility == wgpu::ShaderStage::Fragment) {
+        return D3D12_SHADER_VISIBILITY_PIXEL;
+    }
+
+    // For compute or any combination of two stages, visibility must be ALL
+    return D3D12_SHADER_VISIBILITY_ALL;
+}
+
+D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BufferBindingType type) {
+    switch (type) {
+        case wgpu::BufferBindingType::Uniform:
+            return D3D12_ROOT_PARAMETER_TYPE_CBV;
+        case wgpu::BufferBindingType::Storage:
+        case kInternalStorageBufferBinding:
+            return D3D12_ROOT_PARAMETER_TYPE_UAV;
+        case wgpu::BufferBindingType::ReadOnlyStorage:
+            return D3D12_ROOT_PARAMETER_TYPE_SRV;
+        case wgpu::BufferBindingType::Undefined:
+            UNREACHABLE();
+    }
+}
+
+}  // anonymous namespace
+
+ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+    Device* device,
+    const PipelineLayoutDescriptor* descriptor) {
+    Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+    DAWN_TRY(layout->Initialize());
+    return layout;
+}
+
+MaybeError PipelineLayout::Initialize() {
+    Device* device = ToBackend(GetDevice());
+    // Parameters are D3D12_ROOT_PARAMETER_TYPE which is either a root table, constant, or
+    // descriptor.
+    std::vector<D3D12_ROOT_PARAMETER> rootParameters;
+
+    size_t rangesCount = 0;
+    for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+        const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+        rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
+                       bindGroupLayout->GetSamplerDescriptorRanges().size();
+    }
+
+    // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
+    std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
+
+    uint32_t rangeIndex = 0;
+
+    for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+        const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
+
+        // Set the root descriptor table parameter and copy ranges. Ranges are offset by the
+        // bind group index. Returns whether or not the parameter was set. A root parameter is
+        // not set if the number of ranges is 0.
+        auto SetRootDescriptorTable =
+            [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
+            auto rangeCount = descriptorRanges.size();
+            if (rangeCount == 0) {
+                return false;
             }
 
-            if (visibility == wgpu::ShaderStage::Fragment) {
-                return D3D12_SHADER_VISIBILITY_PIXEL;
+            D3D12_ROOT_PARAMETER rootParameter = {};
+            rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
+            rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+            rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
+            rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
+
+            for (auto& range : descriptorRanges) {
+                ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
+                ranges[rangeIndex] = range;
+                ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
+                rangeIndex++;
             }
 
-            // For compute or any two combination of stages, visibility must be ALL
-            return D3D12_SHADER_VISIBILITY_ALL;
+            rootParameters.emplace_back(rootParameter);
+
+            return true;
+        };
+
+        if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
+            mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
+        }
+        if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
+            mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
         }
 
-        D3D12_ROOT_PARAMETER_TYPE RootParameterType(wgpu::BufferBindingType type) {
-            switch (type) {
-                case wgpu::BufferBindingType::Uniform:
-                    return D3D12_ROOT_PARAMETER_TYPE_CBV;
-                case wgpu::BufferBindingType::Storage:
-                case kInternalStorageBufferBinding:
-                    return D3D12_ROOT_PARAMETER_TYPE_UAV;
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                    return D3D12_ROOT_PARAMETER_TYPE_SRV;
-                case wgpu::BufferBindingType::Undefined:
-                    UNREACHABLE();
+        // Init root descriptors in root signatures for dynamic buffer bindings.
+        // These are packed at the beginning of the layout binding info.
+        for (BindingIndex dynamicBindingIndex{0};
+             dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
+             ++dynamicBindingIndex) {
+            const BindingInfo& bindingInfo = bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
+
+            if (bindingInfo.visibility == wgpu::ShaderStage::None) {
+                // Skip dynamic buffers that are not visible. D3D12 does not have None
+                // visibility.
+                continue;
+            }
+
+            D3D12_ROOT_PARAMETER rootParameter = {};
+
+            // Setup root descriptor.
+            D3D12_ROOT_DESCRIPTOR rootDescriptor;
+            rootDescriptor.ShaderRegister = bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
+            rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
+
+            // Set root descriptors in root signatures.
+            rootParameter.Descriptor = rootDescriptor;
+            mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
+
+            // Set parameter types according to bind group layout descriptor.
+            rootParameter.ParameterType = RootParameterType(bindingInfo.buffer.type);
+
+            // Set visibilities according to bind group layout descriptor.
+            rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
+
+            rootParameters.emplace_back(rootParameter);
+        }
+    }
+
+    // Make sure that we added exactly the number of elements we expected. If we added more,
+    // |ranges| will have resized and the pointers in the |rootParameter|s will be invalid.
+    ASSERT(rangeIndex == rangesCount);
+
+    D3D12_ROOT_PARAMETER renderOrComputeInternalConstants{};
+    renderOrComputeInternalConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+    renderOrComputeInternalConstants.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+    // Always allocate 3 constants for either:
+    //  - vertex_index and instance_index
+    //  - num_workgroups_x, num_workgroups_y and num_workgroups_z
+    // NOTE: We should consider delaying root signature creation until we know how many values
+    // we need
+    renderOrComputeInternalConstants.Constants.Num32BitValues = 3;
+    renderOrComputeInternalConstants.Constants.RegisterSpace =
+        kRenderOrComputeInternalRegisterSpace;
+    renderOrComputeInternalConstants.Constants.ShaderRegister =
+        kRenderOrComputeInternalBaseRegister;
+    mFirstIndexOffsetParameterIndex = rootParameters.size();
+    mNumWorkgroupsParameterIndex = rootParameters.size();
+    // NOTE: We should consider moving this entry to earlier in the root signature since offsets
+    // would need to be updated often
+    rootParameters.emplace_back(renderOrComputeInternalConstants);
+
+    // Loop over all of the dynamic storage buffer bindings in the layout and build
+    // a mapping from the binding to the next offset into the root constant array where
+    // that dynamic storage buffer's binding size will be stored. The next register offset
+    // to use is tracked with |dynamicStorageBufferLengthsShaderRegisterOffset|.
+    // This data will be used by shader translation to emit a load from the root constant
+    // array to use as the binding's size in runtime array calculations.
+    // Each bind group's length data is stored contiguously in the root constant array,
+    // so the loop also computes the first register offset for each group where the
+    // data should start.
+    uint32_t dynamicStorageBufferLengthsShaderRegisterOffset = 0;
+    for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+        const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+
+        mDynamicStorageBufferLengthInfo[group].firstRegisterOffset =
+            dynamicStorageBufferLengthsShaderRegisterOffset;
+        mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.reserve(
+            bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+
+        for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
+             ++bindingIndex) {
+            if (bgl->IsStorageBufferBinding(bindingIndex)) {
+                mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.push_back(
+                    {bgl->GetBindingInfo(bindingIndex).binding,
+                     dynamicStorageBufferLengthsShaderRegisterOffset++});
             }
         }
 
-    }  // anonymous namespace
+        ASSERT(mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.size() ==
+               bgl->GetBindingCountInfo().dynamicStorageBufferCount);
+    }
+    ASSERT(dynamicStorageBufferLengthsShaderRegisterOffset <=
+           kMaxDynamicStorageBuffersPerPipelineLayout);
 
-    ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
-        Device* device,
-        const PipelineLayoutDescriptor* descriptor) {
-        Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
-        DAWN_TRY(layout->Initialize());
-        return layout;
+    if (dynamicStorageBufferLengthsShaderRegisterOffset > 0) {
+        D3D12_ROOT_PARAMETER dynamicStorageBufferLengthConstants{};
+        dynamicStorageBufferLengthConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
+        dynamicStorageBufferLengthConstants.ParameterType =
+            D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
+        dynamicStorageBufferLengthConstants.Constants.Num32BitValues =
+            dynamicStorageBufferLengthsShaderRegisterOffset;
+        dynamicStorageBufferLengthConstants.Constants.RegisterSpace =
+            kDynamicStorageBufferLengthsRegisterSpace;
+        dynamicStorageBufferLengthConstants.Constants.ShaderRegister =
+            kDynamicStorageBufferLengthsBaseRegister;
+        mDynamicStorageBufferLengthsParameterIndex = rootParameters.size();
+        rootParameters.emplace_back(dynamicStorageBufferLengthConstants);
+    } else {
+        mDynamicStorageBufferLengthsParameterIndex =
+            kInvalidDynamicStorageBufferLengthsParameterIndex;
     }
 
-    MaybeError PipelineLayout::Initialize() {
-        Device* device = ToBackend(GetDevice());
-        // Parameters are D3D12_ROOT_PARAMETER_TYPE which is either a root table, constant, or
-        // descriptor.
-        std::vector<D3D12_ROOT_PARAMETER> rootParameters;
+    D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
+    rootSignatureDescriptor.NumParameters = rootParameters.size();
+    rootSignatureDescriptor.pParameters = rootParameters.data();
+    rootSignatureDescriptor.NumStaticSamplers = 0;
+    rootSignatureDescriptor.pStaticSamplers = nullptr;
+    rootSignatureDescriptor.Flags = D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
 
-        size_t rangesCount = 0;
-        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
-            const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
-            rangesCount += bindGroupLayout->GetCbvUavSrvDescriptorRanges().size() +
-                           bindGroupLayout->GetSamplerDescriptorRanges().size();
+    ComPtr<ID3DBlob> signature;
+    ComPtr<ID3DBlob> error;
+    HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
+        &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
+    if (DAWN_UNLIKELY(FAILED(hr))) {
+        std::ostringstream messageStream;
+        if (error) {
+            messageStream << static_cast<const char*>(error->GetBufferPointer());
+
+            // |error| is observed to always end with a \n, but is not
+            // specified to do so, so we add an extra newline just in case.
+            messageStream << std::endl;
         }
-
-        // We are taking pointers to `ranges`, so we cannot let it resize while we're pushing to it.
-        std::vector<D3D12_DESCRIPTOR_RANGE> ranges(rangesCount);
-
-        uint32_t rangeIndex = 0;
-
-        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
-            const BindGroupLayout* bindGroupLayout = ToBackend(GetBindGroupLayout(group));
-
-            // Set the root descriptor table parameter and copy ranges. Ranges are offset by the
-            // bind group index Returns whether or not the parameter was set. A root parameter is
-            // not set if the number of ranges is 0
-            auto SetRootDescriptorTable =
-                [&](const std::vector<D3D12_DESCRIPTOR_RANGE>& descriptorRanges) -> bool {
-                auto rangeCount = descriptorRanges.size();
-                if (rangeCount == 0) {
-                    return false;
-                }
-
-                D3D12_ROOT_PARAMETER rootParameter = {};
-                rootParameter.ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
-                rootParameter.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
-                rootParameter.DescriptorTable.NumDescriptorRanges = rangeCount;
-                rootParameter.DescriptorTable.pDescriptorRanges = &ranges[rangeIndex];
-
-                for (auto& range : descriptorRanges) {
-                    ASSERT(range.RegisterSpace == kRegisterSpacePlaceholder);
-                    ranges[rangeIndex] = range;
-                    ranges[rangeIndex].RegisterSpace = static_cast<uint32_t>(group);
-                    rangeIndex++;
-                }
-
-                rootParameters.emplace_back(rootParameter);
-
-                return true;
-            };
-
-            if (SetRootDescriptorTable(bindGroupLayout->GetCbvUavSrvDescriptorRanges())) {
-                mCbvUavSrvRootParameterInfo[group] = rootParameters.size() - 1;
-            }
-            if (SetRootDescriptorTable(bindGroupLayout->GetSamplerDescriptorRanges())) {
-                mSamplerRootParameterInfo[group] = rootParameters.size() - 1;
-            }
-
-            // Init root descriptors in root signatures for dynamic buffer bindings.
-            // These are packed at the beginning of the layout binding info.
-            for (BindingIndex dynamicBindingIndex{0};
-                 dynamicBindingIndex < bindGroupLayout->GetDynamicBufferCount();
-                 ++dynamicBindingIndex) {
-                const BindingInfo& bindingInfo =
-                    bindGroupLayout->GetBindingInfo(dynamicBindingIndex);
-
-                if (bindingInfo.visibility == wgpu::ShaderStage::None) {
-                    // Skip dynamic buffers that are not visible. D3D12 does not have None
-                    // visibility.
-                    continue;
-                }
-
-                D3D12_ROOT_PARAMETER rootParameter = {};
-
-                // Setup root descriptor.
-                D3D12_ROOT_DESCRIPTOR rootDescriptor;
-                rootDescriptor.ShaderRegister =
-                    bindGroupLayout->GetShaderRegister(dynamicBindingIndex);
-                rootDescriptor.RegisterSpace = static_cast<uint32_t>(group);
-
-                // Set root descriptors in root signatures.
-                rootParameter.Descriptor = rootDescriptor;
-                mDynamicRootParameterIndices[group][dynamicBindingIndex] = rootParameters.size();
-
-                // Set parameter types according to bind group layout descriptor.
-                rootParameter.ParameterType = RootParameterType(bindingInfo.buffer.type);
-
-                // Set visibilities according to bind group layout descriptor.
-                rootParameter.ShaderVisibility = ShaderVisibilityType(bindingInfo.visibility);
-
-                rootParameters.emplace_back(rootParameter);
-            }
-        }
-
-        // Make sure that we added exactly the number of elements we expected. If we added more,
-        // |ranges| will have resized and the pointers in the |rootParameter|s will be invalid.
-        ASSERT(rangeIndex == rangesCount);
-
-        D3D12_ROOT_PARAMETER renderOrComputeInternalConstants{};
-        renderOrComputeInternalConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
-        renderOrComputeInternalConstants.ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
-        // Always allocate 3 constants for either:
-        //  - vertex_index and instance_index
-        //  - num_workgroups_x, num_workgroups_y and num_workgroups_z
-        // NOTE: We should consider delaying root signature creation until we know how many values
-        // we need
-        renderOrComputeInternalConstants.Constants.Num32BitValues = 3;
-        renderOrComputeInternalConstants.Constants.RegisterSpace =
-            kRenderOrComputeInternalRegisterSpace;
-        renderOrComputeInternalConstants.Constants.ShaderRegister =
-            kRenderOrComputeInternalBaseRegister;
-        mFirstIndexOffsetParameterIndex = rootParameters.size();
-        mNumWorkgroupsParameterIndex = rootParameters.size();
-        // NOTE: We should consider moving this entry to earlier in the root signature since offsets
-        // would need to be updated often
-        rootParameters.emplace_back(renderOrComputeInternalConstants);
-
-        // Loops over all of the dynamic storage buffer bindings in the layout and build
-        // a mapping from the binding to the next offset into the root constant array where
-        // that dynamic storage buffer's binding size will be stored. The next register offset
-        // to use is tracked with |dynamicStorageBufferLengthsShaderRegisterOffset|.
-        // This data will be used by shader translation to emit a load from the root constant
-        // array to use as the binding's size in runtime array calculations.
-        // Each bind group's length data is stored contiguously in the root constant array,
-        // so the loop also computes the first register offset for each group where the
-        // data should start.
-        uint32_t dynamicStorageBufferLengthsShaderRegisterOffset = 0;
-        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
-            const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
-
-            mDynamicStorageBufferLengthInfo[group].firstRegisterOffset =
-                dynamicStorageBufferLengthsShaderRegisterOffset;
-            mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.reserve(
-                bgl->GetBindingCountInfo().dynamicStorageBufferCount);
-
-            for (BindingIndex bindingIndex(0); bindingIndex < bgl->GetDynamicBufferCount();
-                 ++bindingIndex) {
-                if (bgl->IsStorageBufferBinding(bindingIndex)) {
-                    mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.push_back(
-                        {bgl->GetBindingInfo(bindingIndex).binding,
-                         dynamicStorageBufferLengthsShaderRegisterOffset++});
-                }
-            }
-
-            ASSERT(mDynamicStorageBufferLengthInfo[group].bindingAndRegisterOffsets.size() ==
-                   bgl->GetBindingCountInfo().dynamicStorageBufferCount);
-        }
-        ASSERT(dynamicStorageBufferLengthsShaderRegisterOffset <=
-               kMaxDynamicStorageBuffersPerPipelineLayout);
-
-        if (dynamicStorageBufferLengthsShaderRegisterOffset > 0) {
-            D3D12_ROOT_PARAMETER dynamicStorageBufferLengthConstants{};
-            dynamicStorageBufferLengthConstants.ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
-            dynamicStorageBufferLengthConstants.ParameterType =
-                D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
-            dynamicStorageBufferLengthConstants.Constants.Num32BitValues =
-                dynamicStorageBufferLengthsShaderRegisterOffset;
-            dynamicStorageBufferLengthConstants.Constants.RegisterSpace =
-                kDynamicStorageBufferLengthsRegisterSpace;
-            dynamicStorageBufferLengthConstants.Constants.ShaderRegister =
-                kDynamicStorageBufferLengthsBaseRegister;
-            mDynamicStorageBufferLengthsParameterIndex = rootParameters.size();
-            rootParameters.emplace_back(dynamicStorageBufferLengthConstants);
-        } else {
-            mDynamicStorageBufferLengthsParameterIndex =
-                kInvalidDynamicStorageBufferLengthsParameterIndex;
-        }
-
-        D3D12_ROOT_SIGNATURE_DESC rootSignatureDescriptor;
-        rootSignatureDescriptor.NumParameters = rootParameters.size();
-        rootSignatureDescriptor.pParameters = rootParameters.data();
-        rootSignatureDescriptor.NumStaticSamplers = 0;
-        rootSignatureDescriptor.pStaticSamplers = nullptr;
-        rootSignatureDescriptor.Flags =
-            D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
-
-        ComPtr<ID3DBlob> signature;
-        ComPtr<ID3DBlob> error;
-        HRESULT hr = device->GetFunctions()->d3d12SerializeRootSignature(
-            &rootSignatureDescriptor, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error);
-        if (DAWN_UNLIKELY(FAILED(hr))) {
-            std::ostringstream messageStream;
-            if (error) {
-                messageStream << static_cast<const char*>(error->GetBufferPointer());
-
-                // |error| is observed to always end with a \n, but is not
-                // specified to do so, so we add an extra newline just in case.
-                messageStream << std::endl;
-            }
-            messageStream << "D3D12 serialize root signature";
-            DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
-        }
-        DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
-                                  0, signature->GetBufferPointer(), signature->GetBufferSize(),
-                                  IID_PPV_ARGS(&mRootSignature)),
-                              "D3D12 create root signature"));
-        return {};
+        messageStream << "D3D12 serialize root signature";
+        DAWN_TRY(CheckHRESULT(hr, messageStream.str().c_str()));
     }
+    DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateRootSignature(
+                              0, signature->GetBufferPointer(), signature->GetBufferSize(),
+                              IID_PPV_ARGS(&mRootSignature)),
+                          "D3D12 create root signature"));
+    return {};
+}
 
-    uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
-        ASSERT(group < kMaxBindGroupsTyped);
-        return mCbvUavSrvRootParameterInfo[group];
-    }
+uint32_t PipelineLayout::GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const {
+    ASSERT(group < kMaxBindGroupsTyped);
+    return mCbvUavSrvRootParameterInfo[group];
+}
 
-    uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
-        ASSERT(group < kMaxBindGroupsTyped);
-        return mSamplerRootParameterInfo[group];
-    }
+uint32_t PipelineLayout::GetSamplerRootParameterIndex(BindGroupIndex group) const {
+    ASSERT(group < kMaxBindGroupsTyped);
+    return mSamplerRootParameterInfo[group];
+}
 
-    ID3D12RootSignature* PipelineLayout::GetRootSignature() const {
-        return mRootSignature.Get();
-    }
+ID3D12RootSignature* PipelineLayout::GetRootSignature() const {
+    return mRootSignature.Get();
+}
 
-    const PipelineLayout::DynamicStorageBufferLengthInfo&
-    PipelineLayout::GetDynamicStorageBufferLengthInfo() const {
-        return mDynamicStorageBufferLengthInfo;
-    }
+const PipelineLayout::DynamicStorageBufferLengthInfo&
+PipelineLayout::GetDynamicStorageBufferLengthInfo() const {
+    return mDynamicStorageBufferLengthInfo;
+}
 
-    uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
-                                                          BindingIndex bindingIndex) const {
-        ASSERT(group < kMaxBindGroupsTyped);
-        ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
-        ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).buffer.hasDynamicOffset);
-        ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
-               wgpu::ShaderStage::None);
-        return mDynamicRootParameterIndices[group][bindingIndex];
-    }
+uint32_t PipelineLayout::GetDynamicRootParameterIndex(BindGroupIndex group,
+                                                      BindingIndex bindingIndex) const {
+    ASSERT(group < kMaxBindGroupsTyped);
+    ASSERT(bindingIndex < kMaxDynamicBuffersPerPipelineLayoutTyped);
+    ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).buffer.hasDynamicOffset);
+    ASSERT(GetBindGroupLayout(group)->GetBindingInfo(bindingIndex).visibility !=
+           wgpu::ShaderStage::None);
+    return mDynamicRootParameterIndices[group][bindingIndex];
+}
 
-    uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
-        return kRenderOrComputeInternalRegisterSpace;
-    }
+uint32_t PipelineLayout::GetFirstIndexOffsetRegisterSpace() const {
+    return kRenderOrComputeInternalRegisterSpace;
+}
 
-    uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
-        return kRenderOrComputeInternalBaseRegister;
-    }
+uint32_t PipelineLayout::GetFirstIndexOffsetShaderRegister() const {
+    return kRenderOrComputeInternalBaseRegister;
+}
 
-    uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
-        return mFirstIndexOffsetParameterIndex;
-    }
+uint32_t PipelineLayout::GetFirstIndexOffsetParameterIndex() const {
+    return mFirstIndexOffsetParameterIndex;
+}
 
-    uint32_t PipelineLayout::GetNumWorkgroupsRegisterSpace() const {
-        return kRenderOrComputeInternalRegisterSpace;
-    }
+uint32_t PipelineLayout::GetNumWorkgroupsRegisterSpace() const {
+    return kRenderOrComputeInternalRegisterSpace;
+}
 
-    uint32_t PipelineLayout::GetNumWorkgroupsShaderRegister() const {
-        return kRenderOrComputeInternalBaseRegister;
-    }
+uint32_t PipelineLayout::GetNumWorkgroupsShaderRegister() const {
+    return kRenderOrComputeInternalBaseRegister;
+}
 
-    uint32_t PipelineLayout::GetNumWorkgroupsParameterIndex() const {
-        return mNumWorkgroupsParameterIndex;
-    }
+uint32_t PipelineLayout::GetNumWorkgroupsParameterIndex() const {
+    return mNumWorkgroupsParameterIndex;
+}
 
-    uint32_t PipelineLayout::GetDynamicStorageBufferLengthsRegisterSpace() const {
-        return kDynamicStorageBufferLengthsRegisterSpace;
-    }
+uint32_t PipelineLayout::GetDynamicStorageBufferLengthsRegisterSpace() const {
+    return kDynamicStorageBufferLengthsRegisterSpace;
+}
 
-    uint32_t PipelineLayout::GetDynamicStorageBufferLengthsShaderRegister() const {
-        return kDynamicStorageBufferLengthsBaseRegister;
-    }
+uint32_t PipelineLayout::GetDynamicStorageBufferLengthsShaderRegister() const {
+    return kDynamicStorageBufferLengthsBaseRegister;
+}
 
-    uint32_t PipelineLayout::GetDynamicStorageBufferLengthsParameterIndex() const {
-        ASSERT(mDynamicStorageBufferLengthsParameterIndex !=
-               kInvalidDynamicStorageBufferLengthsParameterIndex);
-        return mDynamicStorageBufferLengthsParameterIndex;
-    }
+uint32_t PipelineLayout::GetDynamicStorageBufferLengthsParameterIndex() const {
+    ASSERT(mDynamicStorageBufferLengthsParameterIndex !=
+           kInvalidDynamicStorageBufferLengthsParameterIndex);
+    return mDynamicStorageBufferLengthsParameterIndex;
+}
 
-    ID3D12CommandSignature* PipelineLayout::GetDispatchIndirectCommandSignatureWithNumWorkgroups() {
-        // mDispatchIndirectCommandSignatureWithNumWorkgroups won't be created until it is needed.
-        if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get() != nullptr) {
-            return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
-        }
-
-        D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
-        argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
-        argumentDescs[0].Constant.RootParameterIndex = GetNumWorkgroupsParameterIndex();
-        argumentDescs[0].Constant.Num32BitValuesToSet = 3;
-        argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
-
-        // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
-        // command. That command must come last.
-        argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
-
-        D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
-        programDesc.ByteStride = 6 * sizeof(uint32_t);
-        programDesc.NumArgumentDescs = 2;
-        programDesc.pArgumentDescs = argumentDescs;
-
-        // The root signature must be specified if and only if the command signature changes one of
-        // the root arguments.
-        ToBackend(GetDevice())
-            ->GetD3D12Device()
-            ->CreateCommandSignature(
-                &programDesc, GetRootSignature(),
-                IID_PPV_ARGS(&mDispatchIndirectCommandSignatureWithNumWorkgroups));
+ID3D12CommandSignature* PipelineLayout::GetDispatchIndirectCommandSignatureWithNumWorkgroups() {
+    // mDispatchIndirectCommandSignatureWithNumWorkgroups won't be created until it is needed.
+    if (mDispatchIndirectCommandSignatureWithNumWorkgroups.Get() != nullptr) {
         return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
     }
 
-    ID3D12CommandSignature*
-    PipelineLayout::GetDrawIndirectCommandSignatureWithInstanceVertexOffsets() {
-        // mDrawIndirectCommandSignatureWithInstanceVertexOffsets won't be created until it is
-        // needed.
-        if (mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get() != nullptr) {
-            return mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get();
-        }
+    D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+    argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+    argumentDescs[0].Constant.RootParameterIndex = GetNumWorkgroupsParameterIndex();
+    argumentDescs[0].Constant.Num32BitValuesToSet = 3;
+    argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
 
-        D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
-        argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
-        argumentDescs[0].Constant.RootParameterIndex = GetFirstIndexOffsetParameterIndex();
-        argumentDescs[0].Constant.Num32BitValuesToSet = 2;
-        argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+    // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+    // command. That command must come last.
+    argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH;
 
-        // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
-        // command. That command must come last.
-        argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
+    D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+    programDesc.ByteStride = 6 * sizeof(uint32_t);
+    programDesc.NumArgumentDescs = 2;
+    programDesc.pArgumentDescs = argumentDescs;
 
-        D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
-        programDesc.ByteStride = 6 * sizeof(uint32_t);
-        programDesc.NumArgumentDescs = 2;
-        programDesc.pArgumentDescs = argumentDescs;
+    // The root signature must be specified if and only if the command signature changes one of
+    // the root arguments.
+    ToBackend(GetDevice())
+        ->GetD3D12Device()
+        ->CreateCommandSignature(&programDesc, GetRootSignature(),
+                                 IID_PPV_ARGS(&mDispatchIndirectCommandSignatureWithNumWorkgroups));
+    return mDispatchIndirectCommandSignatureWithNumWorkgroups.Get();
+}
 
-        // The root signature must be specified if and only if the command signature changes one of
-        // the root arguments.
-        ToBackend(GetDevice())
-            ->GetD3D12Device()
-            ->CreateCommandSignature(
-                &programDesc, GetRootSignature(),
-                IID_PPV_ARGS(&mDrawIndirectCommandSignatureWithInstanceVertexOffsets));
+ID3D12CommandSignature* PipelineLayout::GetDrawIndirectCommandSignatureWithInstanceVertexOffsets() {
+    // mDrawIndirectCommandSignatureWithInstanceVertexOffsets won't be created until it is
+    // needed.
+    if (mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get() != nullptr) {
         return mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get();
     }
 
-    ID3D12CommandSignature*
-    PipelineLayout::GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets() {
-        // mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets won't be created until it
-        // is needed.
-        if (mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get() != nullptr) {
-            return mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get();
-        }
+    D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+    argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+    argumentDescs[0].Constant.RootParameterIndex = GetFirstIndexOffsetParameterIndex();
+    argumentDescs[0].Constant.Num32BitValuesToSet = 2;
+    argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
 
-        D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
-        argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
-        argumentDescs[0].Constant.RootParameterIndex = GetFirstIndexOffsetParameterIndex();
-        argumentDescs[0].Constant.Num32BitValuesToSet = 2;
-        argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+    // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+    // command. That command must come last.
+    argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW;
 
-        // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
-        // command. That command must come last.
-        argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
+    D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+    programDesc.ByteStride = 6 * sizeof(uint32_t);
+    programDesc.NumArgumentDescs = 2;
+    programDesc.pArgumentDescs = argumentDescs;
 
-        D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
-        programDesc.ByteStride = 7 * sizeof(uint32_t);
-        programDesc.NumArgumentDescs = 2;
-        programDesc.pArgumentDescs = argumentDescs;
+    // The root signature must be specified if and only if the command signature changes one of
+    // the root arguments.
+    ToBackend(GetDevice())
+        ->GetD3D12Device()
+        ->CreateCommandSignature(
+            &programDesc, GetRootSignature(),
+            IID_PPV_ARGS(&mDrawIndirectCommandSignatureWithInstanceVertexOffsets));
+    return mDrawIndirectCommandSignatureWithInstanceVertexOffsets.Get();
+}
 
-        // The root signature must be specified if and only if the command signature changes one of
-        // the root arguments.
-        ToBackend(GetDevice())
-            ->GetD3D12Device()
-            ->CreateCommandSignature(
-                &programDesc, GetRootSignature(),
-                IID_PPV_ARGS(&mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets));
+ID3D12CommandSignature*
+PipelineLayout::GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets() {
+    // mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets won't be created until it
+    // is needed.
+    if (mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get() != nullptr) {
         return mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get();
     }
 
+    D3D12_INDIRECT_ARGUMENT_DESC argumentDescs[2] = {};
+    argumentDescs[0].Type = D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT;
+    argumentDescs[0].Constant.RootParameterIndex = GetFirstIndexOffsetParameterIndex();
+    argumentDescs[0].Constant.Num32BitValuesToSet = 2;
+    argumentDescs[0].Constant.DestOffsetIn32BitValues = 0;
+
+    // A command signature must contain exactly 1 Draw / Dispatch / DispatchMesh / DispatchRays
+    // command. That command must come last.
+    argumentDescs[1].Type = D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED;
+
+    D3D12_COMMAND_SIGNATURE_DESC programDesc = {};
+    programDesc.ByteStride = 7 * sizeof(uint32_t);
+    programDesc.NumArgumentDescs = 2;
+    programDesc.pArgumentDescs = argumentDescs;
+
+    // The root signature must be specified if and only if the command signature changes one of
+    // the root arguments.
+    ToBackend(GetDevice())
+        ->GetD3D12Device()
+        ->CreateCommandSignature(
+            &programDesc, GetRootSignature(),
+            IID_PPV_ARGS(&mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets));
+    return mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets.Get();
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/PipelineLayoutD3D12.h b/src/dawn/native/d3d12/PipelineLayoutD3D12.h
index d9bb664..69d9095 100644
--- a/src/dawn/native/d3d12/PipelineLayoutD3D12.h
+++ b/src/dawn/native/d3d12/PipelineLayoutD3D12.h
@@ -25,85 +25,82 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class PipelineLayout final : public PipelineLayoutBase {
-      public:
-        static ResultOrError<Ref<PipelineLayout>> Create(
-            Device* device,
-            const PipelineLayoutDescriptor* descriptor);
+class PipelineLayout final : public PipelineLayoutBase {
+  public:
+    static ResultOrError<Ref<PipelineLayout>> Create(Device* device,
+                                                     const PipelineLayoutDescriptor* descriptor);
 
-        uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
-        uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
+    uint32_t GetCbvUavSrvRootParameterIndex(BindGroupIndex group) const;
+    uint32_t GetSamplerRootParameterIndex(BindGroupIndex group) const;
 
-        // Returns the index of the root parameter reserved for a dynamic buffer binding
-        uint32_t GetDynamicRootParameterIndex(BindGroupIndex group,
-                                              BindingIndex bindingIndex) const;
+    // Returns the index of the root parameter reserved for a dynamic buffer binding
+    uint32_t GetDynamicRootParameterIndex(BindGroupIndex group, BindingIndex bindingIndex) const;
 
-        uint32_t GetFirstIndexOffsetRegisterSpace() const;
-        uint32_t GetFirstIndexOffsetShaderRegister() const;
-        uint32_t GetFirstIndexOffsetParameterIndex() const;
+    uint32_t GetFirstIndexOffsetRegisterSpace() const;
+    uint32_t GetFirstIndexOffsetShaderRegister() const;
+    uint32_t GetFirstIndexOffsetParameterIndex() const;
 
-        uint32_t GetNumWorkgroupsRegisterSpace() const;
-        uint32_t GetNumWorkgroupsShaderRegister() const;
-        uint32_t GetNumWorkgroupsParameterIndex() const;
+    uint32_t GetNumWorkgroupsRegisterSpace() const;
+    uint32_t GetNumWorkgroupsShaderRegister() const;
+    uint32_t GetNumWorkgroupsParameterIndex() const;
 
-        uint32_t GetDynamicStorageBufferLengthsRegisterSpace() const;
-        uint32_t GetDynamicStorageBufferLengthsShaderRegister() const;
-        uint32_t GetDynamicStorageBufferLengthsParameterIndex() const;
+    uint32_t GetDynamicStorageBufferLengthsRegisterSpace() const;
+    uint32_t GetDynamicStorageBufferLengthsShaderRegister() const;
+    uint32_t GetDynamicStorageBufferLengthsParameterIndex() const;
 
-        ID3D12RootSignature* GetRootSignature() const;
+    ID3D12RootSignature* GetRootSignature() const;
 
-        ID3D12CommandSignature* GetDispatchIndirectCommandSignatureWithNumWorkgroups();
+    ID3D12CommandSignature* GetDispatchIndirectCommandSignatureWithNumWorkgroups();
 
-        ID3D12CommandSignature* GetDrawIndirectCommandSignatureWithInstanceVertexOffsets();
+    ID3D12CommandSignature* GetDrawIndirectCommandSignatureWithInstanceVertexOffsets();
 
-        ID3D12CommandSignature* GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets();
+    ID3D12CommandSignature* GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets();
 
-        struct PerBindGroupDynamicStorageBufferLengthInfo {
-            // First register offset for a bind group's dynamic storage buffer lengths.
-            // This is the index into the array of root constants where this bind group's
-            // lengths start.
-            uint32_t firstRegisterOffset;
+    struct PerBindGroupDynamicStorageBufferLengthInfo {
+        // First register offset for a bind group's dynamic storage buffer lengths.
+        // This is the index into the array of root constants where this bind group's
+        // lengths start.
+        uint32_t firstRegisterOffset;
 
-            struct BindingAndRegisterOffset {
-                BindingNumber binding;
-                uint32_t registerOffset;
-            };
-            // Associative list of (BindingNumber,registerOffset) pairs, which is passed into
-            // the shader to map the BindingPoint(thisGroup, BindingNumber) to the registerOffset
-            // into the root constant array which holds the dynamic storage buffer lengths.
-            std::vector<BindingAndRegisterOffset> bindingAndRegisterOffsets;
+        struct BindingAndRegisterOffset {
+            BindingNumber binding;
+            uint32_t registerOffset;
         };
-
-        // Flat map from bind group index to the list of (BindingNumber,Register) pairs.
-        // Each pair is used in shader translation to
-        using DynamicStorageBufferLengthInfo =
-            ityp::array<BindGroupIndex, PerBindGroupDynamicStorageBufferLengthInfo, kMaxBindGroups>;
-
-        const DynamicStorageBufferLengthInfo& GetDynamicStorageBufferLengthInfo() const;
-
-      private:
-        ~PipelineLayout() override = default;
-        using PipelineLayoutBase::PipelineLayoutBase;
-        MaybeError Initialize();
-        ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
-        ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
-        ityp::array<BindGroupIndex,
-                    ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
-                    kMaxBindGroups>
-            mDynamicRootParameterIndices;
-        DynamicStorageBufferLengthInfo mDynamicStorageBufferLengthInfo;
-        uint32_t mFirstIndexOffsetParameterIndex;
-        uint32_t mNumWorkgroupsParameterIndex;
-        uint32_t mDynamicStorageBufferLengthsParameterIndex;
-        ComPtr<ID3D12RootSignature> mRootSignature;
-        ComPtr<ID3D12CommandSignature> mDispatchIndirectCommandSignatureWithNumWorkgroups;
-        ComPtr<ID3D12CommandSignature> mDrawIndirectCommandSignatureWithInstanceVertexOffsets;
-        ComPtr<ID3D12CommandSignature>
-            mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets;
+        // Associative list of (BindingNumber,registerOffset) pairs, which is passed into
+        // the shader to map the BindingPoint(thisGroup, BindingNumber) to the registerOffset
+        // into the root constant array which holds the dynamic storage buffer lengths.
+        std::vector<BindingAndRegisterOffset> bindingAndRegisterOffsets;
     };
 
+    // Flat map from bind group index to the list of (BindingNumber,Register) pairs.
+    // Each pair is used in shader translation to
+    using DynamicStorageBufferLengthInfo =
+        ityp::array<BindGroupIndex, PerBindGroupDynamicStorageBufferLengthInfo, kMaxBindGroups>;
+
+    const DynamicStorageBufferLengthInfo& GetDynamicStorageBufferLengthInfo() const;
+
+  private:
+    ~PipelineLayout() override = default;
+    using PipelineLayoutBase::PipelineLayoutBase;
+    MaybeError Initialize();
+    ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mCbvUavSrvRootParameterInfo;
+    ityp::array<BindGroupIndex, uint32_t, kMaxBindGroups> mSamplerRootParameterInfo;
+    ityp::array<BindGroupIndex,
+                ityp::array<BindingIndex, uint32_t, kMaxDynamicBuffersPerPipelineLayout>,
+                kMaxBindGroups>
+        mDynamicRootParameterIndices;
+    DynamicStorageBufferLengthInfo mDynamicStorageBufferLengthInfo;
+    uint32_t mFirstIndexOffsetParameterIndex;
+    uint32_t mNumWorkgroupsParameterIndex;
+    uint32_t mDynamicStorageBufferLengthsParameterIndex;
+    ComPtr<ID3D12RootSignature> mRootSignature;
+    ComPtr<ID3D12CommandSignature> mDispatchIndirectCommandSignatureWithNumWorkgroups;
+    ComPtr<ID3D12CommandSignature> mDrawIndirectCommandSignatureWithInstanceVertexOffsets;
+    ComPtr<ID3D12CommandSignature> mDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets;
+};
+
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_PIPELINELAYOUTD3D12_H_
diff --git a/src/dawn/native/d3d12/PlatformFunctions.cpp b/src/dawn/native/d3d12/PlatformFunctions.cpp
index 5bcc182..ffd65c3 100644
--- a/src/dawn/native/d3d12/PlatformFunctions.cpp
+++ b/src/dawn/native/d3d12/PlatformFunctions.cpp
@@ -24,251 +24,242 @@
 #include "dawn/common/DynamicLib.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
-        // Extract Version from "10.0.{Version}.0" if possible, otherwise return 0.
-        uint32_t GetWindowsSDKVersionFromDirectoryName(const char* directoryName) {
-            constexpr char kPrefix[] = "10.0.";
-            constexpr char kPostfix[] = ".0";
+namespace {
+// Extract Version from "10.0.{Version}.0" if possible, otherwise return 0.
+uint32_t GetWindowsSDKVersionFromDirectoryName(const char* directoryName) {
+    constexpr char kPrefix[] = "10.0.";
+    constexpr char kPostfix[] = ".0";
 
-            constexpr uint32_t kPrefixLen = sizeof(kPrefix) - 1;
-            constexpr uint32_t kPostfixLen = sizeof(kPostfix) - 1;
-            const uint32_t directoryNameLen = strlen(directoryName);
+    constexpr uint32_t kPrefixLen = sizeof(kPrefix) - 1;
+    constexpr uint32_t kPostfixLen = sizeof(kPostfix) - 1;
+    const uint32_t directoryNameLen = strlen(directoryName);
 
-            if (directoryNameLen < kPrefixLen + kPostfixLen + 1) {
-                return 0;
-            }
-
-            // Check if directoryName starts with "10.0.".
-            if (strncmp(directoryName, kPrefix, kPrefixLen) != 0) {
-                return 0;
-            }
-
-            // Check if directoryName ends with ".0".
-            if (strncmp(directoryName + (directoryNameLen - kPostfixLen), kPostfix, kPostfixLen) !=
-                0) {
-                return 0;
-            }
-
-            // Extract Version from "10.0.{Version}.0" and convert Version into an integer.
-            return atoi(directoryName + kPrefixLen);
-        }
-
-        class ScopedFileHandle final {
-          public:
-            explicit ScopedFileHandle(HANDLE handle) : mHandle(handle) {
-            }
-            ~ScopedFileHandle() {
-                if (mHandle != INVALID_HANDLE_VALUE) {
-                    ASSERT(FindClose(mHandle));
-                }
-            }
-            HANDLE GetHandle() const {
-                return mHandle;
-            }
-
-          private:
-            HANDLE mHandle;
-        };
-
-        std::string GetWindowsSDKBasePath() {
-            const char* kDefaultWindowsSDKPath =
-                "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\*";
-            WIN32_FIND_DATAA fileData;
-            ScopedFileHandle handle(FindFirstFileA(kDefaultWindowsSDKPath, &fileData));
-            if (handle.GetHandle() == INVALID_HANDLE_VALUE) {
-                return "";
-            }
-
-            uint32_t highestWindowsSDKVersion = 0;
-            do {
-                if (!(fileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
-                    continue;
-                }
-
-                highestWindowsSDKVersion =
-                    std::max(highestWindowsSDKVersion,
-                             GetWindowsSDKVersionFromDirectoryName(fileData.cFileName));
-            } while (FindNextFileA(handle.GetHandle(), &fileData));
-
-            if (highestWindowsSDKVersion == 0) {
-                return "";
-            }
-
-            // Currently we only support using DXC on x64.
-            std::ostringstream ostream;
-            ostream << "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0."
-                    << highestWindowsSDKVersion << ".0\\x64\\";
-
-            return ostream.str();
-        }
-    }  // anonymous namespace
-
-    PlatformFunctions::PlatformFunctions() = default;
-    PlatformFunctions::~PlatformFunctions() = default;
-
-    MaybeError PlatformFunctions::LoadFunctions() {
-        DAWN_TRY(LoadD3D12());
-        DAWN_TRY(LoadDXGI());
-        LoadDXCLibraries();
-        DAWN_TRY(LoadFXCompiler());
-        DAWN_TRY(LoadD3D11());
-        LoadPIXRuntime();
-        return {};
+    if (directoryNameLen < kPrefixLen + kPostfixLen + 1) {
+        return 0;
     }
 
-    MaybeError PlatformFunctions::LoadD3D12() {
-#if DAWN_PLATFORM_WINUWP
-        d3d12CreateDevice = &D3D12CreateDevice;
-        d3d12GetDebugInterface = &D3D12GetDebugInterface;
-        d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
-        d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
-        d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
-        d3d12CreateVersionedRootSignatureDeserializer =
-            &D3D12CreateVersionedRootSignatureDeserializer;
-#else
-        std::string error;
-        if (!mD3D12Lib.Open("d3d12.dll", &error) ||
-            !mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
-            !mD3D12Lib.GetProc(&d3d12GetDebugInterface, "D3D12GetDebugInterface", &error) ||
-            !mD3D12Lib.GetProc(&d3d12SerializeRootSignature, "D3D12SerializeRootSignature",
-                               &error) ||
-            !mD3D12Lib.GetProc(&d3d12CreateRootSignatureDeserializer,
-                               "D3D12CreateRootSignatureDeserializer", &error) ||
-            !mD3D12Lib.GetProc(&d3d12SerializeVersionedRootSignature,
-                               "D3D12SerializeVersionedRootSignature", &error) ||
-            !mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
-                               "D3D12CreateVersionedRootSignatureDeserializer", &error)) {
-            return DAWN_INTERNAL_ERROR(error.c_str());
+    // Check if directoryName starts with "10.0.".
+    if (strncmp(directoryName, kPrefix, kPrefixLen) != 0) {
+        return 0;
+    }
+
+    // Check if directoryName ends with ".0".
+    if (strncmp(directoryName + (directoryNameLen - kPostfixLen), kPostfix, kPostfixLen) != 0) {
+        return 0;
+    }
+
+    // Extract Version from "10.0.{Version}.0" and convert Version into an integer.
+    return atoi(directoryName + kPrefixLen);
+}
+
+class ScopedFileHandle final {
+  public:
+    explicit ScopedFileHandle(HANDLE handle) : mHandle(handle) {}
+    ~ScopedFileHandle() {
+        if (mHandle != INVALID_HANDLE_VALUE) {
+            ASSERT(FindClose(mHandle));
         }
+    }
+    HANDLE GetHandle() const { return mHandle; }
+
+  private:
+    HANDLE mHandle;
+};
+
+std::string GetWindowsSDKBasePath() {
+    const char* kDefaultWindowsSDKPath = "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\*";
+    WIN32_FIND_DATAA fileData;
+    ScopedFileHandle handle(FindFirstFileA(kDefaultWindowsSDKPath, &fileData));
+    if (handle.GetHandle() == INVALID_HANDLE_VALUE) {
+        return "";
+    }
+
+    uint32_t highestWindowsSDKVersion = 0;
+    do {
+        if (!(fileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+            continue;
+        }
+
+        highestWindowsSDKVersion = std::max(
+            highestWindowsSDKVersion, GetWindowsSDKVersionFromDirectoryName(fileData.cFileName));
+    } while (FindNextFileA(handle.GetHandle(), &fileData));
+
+    if (highestWindowsSDKVersion == 0) {
+        return "";
+    }
+
+    // Currently we only support using DXC on x64.
+    std::ostringstream ostream;
+    ostream << "C:\\Program Files (x86)\\Windows Kits\\10\\bin\\10.0." << highestWindowsSDKVersion
+            << ".0\\x64\\";
+
+    return ostream.str();
+}
+}  // anonymous namespace
+
+PlatformFunctions::PlatformFunctions() = default;
+PlatformFunctions::~PlatformFunctions() = default;
+
+MaybeError PlatformFunctions::LoadFunctions() {
+    DAWN_TRY(LoadD3D12());
+    DAWN_TRY(LoadDXGI());
+    LoadDXCLibraries();
+    DAWN_TRY(LoadFXCompiler());
+    DAWN_TRY(LoadD3D11());
+    LoadPIXRuntime();
+    return {};
+}
+
+MaybeError PlatformFunctions::LoadD3D12() {
+#if DAWN_PLATFORM_WINUWP
+    d3d12CreateDevice = &D3D12CreateDevice;
+    d3d12GetDebugInterface = &D3D12GetDebugInterface;
+    d3d12SerializeRootSignature = &D3D12SerializeRootSignature;
+    d3d12CreateRootSignatureDeserializer = &D3D12CreateRootSignatureDeserializer;
+    d3d12SerializeVersionedRootSignature = &D3D12SerializeVersionedRootSignature;
+    d3d12CreateVersionedRootSignatureDeserializer = &D3D12CreateVersionedRootSignatureDeserializer;
+#else
+    std::string error;
+    if (!mD3D12Lib.Open("d3d12.dll", &error) ||
+        !mD3D12Lib.GetProc(&d3d12CreateDevice, "D3D12CreateDevice", &error) ||
+        !mD3D12Lib.GetProc(&d3d12GetDebugInterface, "D3D12GetDebugInterface", &error) ||
+        !mD3D12Lib.GetProc(&d3d12SerializeRootSignature, "D3D12SerializeRootSignature", &error) ||
+        !mD3D12Lib.GetProc(&d3d12CreateRootSignatureDeserializer,
+                           "D3D12CreateRootSignatureDeserializer", &error) ||
+        !mD3D12Lib.GetProc(&d3d12SerializeVersionedRootSignature,
+                           "D3D12SerializeVersionedRootSignature", &error) ||
+        !mD3D12Lib.GetProc(&d3d12CreateVersionedRootSignatureDeserializer,
+                           "D3D12CreateVersionedRootSignatureDeserializer", &error)) {
+        return DAWN_INTERNAL_ERROR(error.c_str());
+    }
 #endif
 
-        return {};
-    }
+    return {};
+}
 
-    MaybeError PlatformFunctions::LoadD3D11() {
+MaybeError PlatformFunctions::LoadD3D11() {
 #if DAWN_PLATFORM_WINUWP
-        d3d11on12CreateDevice = &D3D11On12CreateDevice;
+    d3d11on12CreateDevice = &D3D11On12CreateDevice;
 #else
-        std::string error;
-        if (!mD3D11Lib.Open("d3d11.dll", &error) ||
-            !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
-            return DAWN_INTERNAL_ERROR(error.c_str());
-        }
+    std::string error;
+    if (!mD3D11Lib.Open("d3d11.dll", &error) ||
+        !mD3D11Lib.GetProc(&d3d11on12CreateDevice, "D3D11On12CreateDevice", &error)) {
+        return DAWN_INTERNAL_ERROR(error.c_str());
+    }
 #endif
 
-        return {};
-    }
+    return {};
+}
 
-    MaybeError PlatformFunctions::LoadDXGI() {
+MaybeError PlatformFunctions::LoadDXGI() {
 #if DAWN_PLATFORM_WINUWP
-#    if defined(_DEBUG)
-        // DXGIGetDebugInterface1 is tagged as a development-only capability
-        // which implies that linking to this function will cause
-        // the application to fail Windows store certification
-        // But we need it when debuging using VS Graphics Diagnostics or PIX
-        // So we only link to it in debug build
-        dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
-#    endif
-        createDxgiFactory2 = &CreateDXGIFactory2;
+#if defined(_DEBUG)
+    // DXGIGetDebugInterface1 is tagged as a development-only capability
+    // which implies that linking to this function will cause
+    // the application to fail Windows store certification
+    // But we need it when debugging using VS Graphics Diagnostics or PIX
+    // So we only link to it in debug build
+    dxgiGetDebugInterface1 = &DXGIGetDebugInterface1;
+#endif
+    createDxgiFactory2 = &CreateDXGIFactory2;
 #else
-        std::string error;
-        if (!mDXGILib.Open("dxgi.dll", &error) ||
-            !mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
-            !mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
-            return DAWN_INTERNAL_ERROR(error.c_str());
-        }
+    std::string error;
+    if (!mDXGILib.Open("dxgi.dll", &error) ||
+        !mDXGILib.GetProc(&dxgiGetDebugInterface1, "DXGIGetDebugInterface1", &error) ||
+        !mDXGILib.GetProc(&createDxgiFactory2, "CreateDXGIFactory2", &error)) {
+        return DAWN_INTERNAL_ERROR(error.c_str());
+    }
 #endif
 
-        return {};
-    }
+    return {};
+}
 
-    void PlatformFunctions::LoadDXCLibraries() {
-        // TODO(dawn:766)
-        // Statically linked with dxcompiler.lib in UWP
-        // currently linked with dxcompiler.lib making CoreApp unable to activate
-        // LoadDXIL and LoadDXCompiler will fail in UWP, but LoadFunctions() can still be
-        // successfully executed.
+void PlatformFunctions::LoadDXCLibraries() {
+    // TODO(dawn:766)
+    // Statically linked with dxcompiler.lib in UWP
+    // currently linked with dxcompiler.lib making CoreApp unable to activate
+    // LoadDXIL and LoadDXCompiler will fail in UWP, but LoadFunctions() can still be
+    // successfully executed.
 
-        const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
+    const std::string& windowsSDKBasePath = GetWindowsSDKBasePath();
 
-        LoadDXIL(windowsSDKBasePath);
-        LoadDXCompiler(windowsSDKBasePath);
-    }
+    LoadDXIL(windowsSDKBasePath);
+    LoadDXCompiler(windowsSDKBasePath);
+}
 
-    void PlatformFunctions::LoadDXIL(const std::string& baseWindowsSDKPath) {
-        const char* dxilDLLName = "dxil.dll";
-        const std::array<std::string, 2> kDxilDLLPaths = {
-            {dxilDLLName, baseWindowsSDKPath + dxilDLLName}};
+void PlatformFunctions::LoadDXIL(const std::string& baseWindowsSDKPath) {
+    const char* dxilDLLName = "dxil.dll";
+    const std::array<std::string, 2> kDxilDLLPaths = {
+        {dxilDLLName, baseWindowsSDKPath + dxilDLLName}};
 
-        for (const std::string& dxilDLLPath : kDxilDLLPaths) {
-            if (mDXILLib.Open(dxilDLLPath, nullptr)) {
-                return;
-            }
-        }
-        ASSERT(!mDXILLib.Valid());
-    }
-
-    void PlatformFunctions::LoadDXCompiler(const std::string& baseWindowsSDKPath) {
-        // DXIL must be loaded before DXC, otherwise shader signing is unavailable
-        if (!mDXILLib.Valid()) {
+    for (const std::string& dxilDLLPath : kDxilDLLPaths) {
+        if (mDXILLib.Open(dxilDLLPath, nullptr)) {
             return;
         }
+    }
+    ASSERT(!mDXILLib.Valid());
+}
 
-        const char* dxCompilerDLLName = "dxcompiler.dll";
-        const std::array<std::string, 2> kDxCompilerDLLPaths = {
-            {dxCompilerDLLName, baseWindowsSDKPath + dxCompilerDLLName}};
+void PlatformFunctions::LoadDXCompiler(const std::string& baseWindowsSDKPath) {
+    // DXIL must be loaded before DXC, otherwise shader signing is unavailable
+    if (!mDXILLib.Valid()) {
+        return;
+    }
 
-        DynamicLib dxCompilerLib;
-        for (const std::string& dxCompilerDLLName : kDxCompilerDLLPaths) {
-            if (dxCompilerLib.Open(dxCompilerDLLName, nullptr)) {
-                break;
-            }
-        }
+    const char* dxCompilerDLLName = "dxcompiler.dll";
+    const std::array<std::string, 2> kDxCompilerDLLPaths = {
+        {dxCompilerDLLName, baseWindowsSDKPath + dxCompilerDLLName}};
 
-        if (dxCompilerLib.Valid() &&
-            dxCompilerLib.GetProc(&dxcCreateInstance, "DxcCreateInstance", nullptr)) {
-            mDXCompilerLib = std::move(dxCompilerLib);
-        } else {
-            mDXILLib.Close();
+    DynamicLib dxCompilerLib;
+    for (const std::string& dxCompilerDLLName : kDxCompilerDLLPaths) {
+        if (dxCompilerLib.Open(dxCompilerDLLName, nullptr)) {
+            break;
         }
     }
 
-    MaybeError PlatformFunctions::LoadFXCompiler() {
+    if (dxCompilerLib.Valid() &&
+        dxCompilerLib.GetProc(&dxcCreateInstance, "DxcCreateInstance", nullptr)) {
+        mDXCompilerLib = std::move(dxCompilerLib);
+    } else {
+        mDXILLib.Close();
+    }
+}
+
+MaybeError PlatformFunctions::LoadFXCompiler() {
 #if DAWN_PLATFORM_WINUWP
-        d3dCompile = &D3DCompile;
-        d3dDisassemble = &D3DDisassemble;
+    d3dCompile = &D3DCompile;
+    d3dDisassemble = &D3DDisassemble;
 #else
-        std::string error;
-        if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
-            !mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error) ||
-            !mFXCompilerLib.GetProc(&d3dDisassemble, "D3DDisassemble", &error)) {
-            return DAWN_INTERNAL_ERROR(error.c_str());
-        }
+    std::string error;
+    if (!mFXCompilerLib.Open("d3dcompiler_47.dll", &error) ||
+        !mFXCompilerLib.GetProc(&d3dCompile, "D3DCompile", &error) ||
+        !mFXCompilerLib.GetProc(&d3dDisassemble, "D3DDisassemble", &error)) {
+        return DAWN_INTERNAL_ERROR(error.c_str());
+    }
 #endif
-        return {};
-    }
+    return {};
+}
 
-    bool PlatformFunctions::IsPIXEventRuntimeLoaded() const {
-        return mPIXEventRuntimeLib.Valid();
-    }
+bool PlatformFunctions::IsPIXEventRuntimeLoaded() const {
+    return mPIXEventRuntimeLib.Valid();
+}
 
-    bool PlatformFunctions::IsDXCAvailable() const {
-        return mDXILLib.Valid() && mDXCompilerLib.Valid();
-    }
+bool PlatformFunctions::IsDXCAvailable() const {
+    return mDXILLib.Valid() && mDXCompilerLib.Valid();
+}
 
-    void PlatformFunctions::LoadPIXRuntime() {
-        // TODO(dawn:766):
-        // In UWP PIX should be statically linked WinPixEventRuntime_UAP.lib
-        // So maybe we should put WinPixEventRuntime as a third party package
-        // Currently PIX is not going to be loaded in UWP since the following
-        // mPIXEventRuntimeLib.Open will fail.
-        if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
-            !mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList,
-                                         "PIXBeginEventOnCommandList") ||
-            !mPIXEventRuntimeLib.GetProc(&pixEndEventOnCommandList, "PIXEndEventOnCommandList") ||
-            !mPIXEventRuntimeLib.GetProc(&pixSetMarkerOnCommandList, "PIXSetMarkerOnCommandList")) {
-            mPIXEventRuntimeLib.Close();
-        }
+void PlatformFunctions::LoadPIXRuntime() {
+    // TODO(dawn:766):
+    // In UWP PIX should be statically linked WinPixEventRuntime_UAP.lib
+    // So maybe we should put WinPixEventRuntime as a third party package
+    // Currently PIX is not going to be loaded in UWP since the following
+    // mPIXEventRuntimeLib.Open will fail.
+    if (!mPIXEventRuntimeLib.Open("WinPixEventRuntime.dll") ||
+        !mPIXEventRuntimeLib.GetProc(&pixBeginEventOnCommandList, "PIXBeginEventOnCommandList") ||
+        !mPIXEventRuntimeLib.GetProc(&pixEndEventOnCommandList, "PIXEndEventOnCommandList") ||
+        !mPIXEventRuntimeLib.GetProc(&pixSetMarkerOnCommandList, "PIXSetMarkerOnCommandList")) {
+        mPIXEventRuntimeLib.Close();
     }
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/PlatformFunctions.h b/src/dawn/native/d3d12/PlatformFunctions.h
index f8e14e4..6d2e222 100644
--- a/src/dawn/native/d3d12/PlatformFunctions.h
+++ b/src/dawn/native/d3d12/PlatformFunctions.h
@@ -26,86 +26,87 @@
 
 namespace dawn::native::d3d12 {
 
-    // Loads the functions required from the platform dynamically so that we don't need to rely on
-    // them being present in the system. For example linking against d3d12.lib would prevent
-    // dawn_native from loading on Windows 7 system where d3d12.dll doesn't exist.
-    class PlatformFunctions {
-      public:
-        PlatformFunctions();
-        ~PlatformFunctions();
+// Loads the functions required from the platform dynamically so that we don't need to rely on
+// them being present in the system. For example linking against d3d12.lib would prevent
+// dawn_native from loading on Windows 7 system where d3d12.dll doesn't exist.
+class PlatformFunctions {
+  public:
+    PlatformFunctions();
+    ~PlatformFunctions();
 
-        MaybeError LoadFunctions();
-        bool IsPIXEventRuntimeLoaded() const;
-        bool IsDXCAvailable() const;
+    MaybeError LoadFunctions();
+    bool IsPIXEventRuntimeLoaded() const;
+    bool IsDXCAvailable() const;
 
-        // Functions from d3d12.dll
-        PFN_D3D12_CREATE_DEVICE d3d12CreateDevice = nullptr;
-        PFN_D3D12_GET_DEBUG_INTERFACE d3d12GetDebugInterface = nullptr;
+    // Functions from d3d12.dll
+    PFN_D3D12_CREATE_DEVICE d3d12CreateDevice = nullptr;
+    PFN_D3D12_GET_DEBUG_INTERFACE d3d12GetDebugInterface = nullptr;
 
-        PFN_D3D12_SERIALIZE_ROOT_SIGNATURE d3d12SerializeRootSignature = nullptr;
-        PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER d3d12CreateRootSignatureDeserializer = nullptr;
-        PFN_D3D12_SERIALIZE_VERSIONED_ROOT_SIGNATURE d3d12SerializeVersionedRootSignature = nullptr;
-        PFN_D3D12_CREATE_VERSIONED_ROOT_SIGNATURE_DESERIALIZER
-        d3d12CreateVersionedRootSignatureDeserializer = nullptr;
+    PFN_D3D12_SERIALIZE_ROOT_SIGNATURE d3d12SerializeRootSignature = nullptr;
+    PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER d3d12CreateRootSignatureDeserializer = nullptr;
+    PFN_D3D12_SERIALIZE_VERSIONED_ROOT_SIGNATURE d3d12SerializeVersionedRootSignature = nullptr;
+    PFN_D3D12_CREATE_VERSIONED_ROOT_SIGNATURE_DESERIALIZER
+    d3d12CreateVersionedRootSignatureDeserializer = nullptr;
 
-        // Functions from dxgi.dll
-        using PFN_DXGI_GET_DEBUG_INTERFACE1 = HRESULT(WINAPI*)(UINT Flags,
-                                                               REFIID riid,
-                                                               _COM_Outptr_ void** pDebug);
-        PFN_DXGI_GET_DEBUG_INTERFACE1 dxgiGetDebugInterface1 = nullptr;
+    // Functions from dxgi.dll
+    using PFN_DXGI_GET_DEBUG_INTERFACE1 = HRESULT(WINAPI*)(UINT Flags,
+                                                           REFIID riid,
+                                                           _COM_Outptr_ void** pDebug);
+    PFN_DXGI_GET_DEBUG_INTERFACE1 dxgiGetDebugInterface1 = nullptr;
 
-        using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags,
-                                                          REFIID riid,
-                                                          _COM_Outptr_ void** ppFactory);
-        PFN_CREATE_DXGI_FACTORY2 createDxgiFactory2 = nullptr;
+    using PFN_CREATE_DXGI_FACTORY2 = HRESULT(WINAPI*)(UINT Flags,
+                                                      REFIID riid,
+                                                      _COM_Outptr_ void** ppFactory);
+    PFN_CREATE_DXGI_FACTORY2 createDxgiFactory2 = nullptr;
 
-        // Functions from dxcompiler.dll
-        using PFN_DXC_CREATE_INSTANCE = HRESULT(WINAPI*)(REFCLSID rclsid,
-                                                         REFIID riid,
-                                                         _COM_Outptr_ void** ppCompiler);
-        PFN_DXC_CREATE_INSTANCE dxcCreateInstance = nullptr;
+    // Functions from dxcompiler.dll
+    using PFN_DXC_CREATE_INSTANCE = HRESULT(WINAPI*)(REFCLSID rclsid,
+                                                     REFIID riid,
+                                                     _COM_Outptr_ void** ppCompiler);
+    PFN_DXC_CREATE_INSTANCE dxcCreateInstance = nullptr;
 
-        // Functions from d3d3compiler.dll
-        pD3DCompile d3dCompile = nullptr;
-        pD3DDisassemble d3dDisassemble = nullptr;
+    // Functions from d3dcompiler.dll
+    pD3DCompile d3dCompile = nullptr;
+    pD3DDisassemble d3dDisassemble = nullptr;
 
-        // Functions from WinPixEventRuntime.dll
-        using PFN_PIX_END_EVENT_ON_COMMAND_LIST =
-            HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList);
+    // Functions from WinPixEventRuntime.dll
+    using PFN_PIX_END_EVENT_ON_COMMAND_LIST =
+        HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList);
 
-        PFN_PIX_END_EVENT_ON_COMMAND_LIST pixEndEventOnCommandList = nullptr;
+    PFN_PIX_END_EVENT_ON_COMMAND_LIST pixEndEventOnCommandList = nullptr;
 
-        using PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST = HRESULT(
-            WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
+    using PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST = HRESULT(
+        WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
 
-        PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST pixBeginEventOnCommandList = nullptr;
+    PFN_PIX_BEGIN_EVENT_ON_COMMAND_LIST pixBeginEventOnCommandList = nullptr;
 
-        using PFN_SET_MARKER_ON_COMMAND_LIST = HRESULT(
-            WINAPI*)(ID3D12GraphicsCommandList* commandList, UINT64 color, _In_ PCSTR formatString);
+    using PFN_SET_MARKER_ON_COMMAND_LIST = HRESULT(WINAPI*)(ID3D12GraphicsCommandList* commandList,
+                                                            UINT64 color,
+                                                            _In_ PCSTR formatString);
 
-        PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
+    PFN_SET_MARKER_ON_COMMAND_LIST pixSetMarkerOnCommandList = nullptr;
 
-        // Functions from D3D11.dll
-        PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
+    // Functions from D3D11.dll
+    PFN_D3D11ON12_CREATE_DEVICE d3d11on12CreateDevice = nullptr;
 
-      private:
-        MaybeError LoadD3D12();
-        MaybeError LoadD3D11();
-        MaybeError LoadDXGI();
-        void LoadDXCLibraries();
-        void LoadDXIL(const std::string& baseWindowsSDKPath);
-        void LoadDXCompiler(const std::string& baseWindowsSDKPath);
-        MaybeError LoadFXCompiler();
-        void LoadPIXRuntime();
+  private:
+    MaybeError LoadD3D12();
+    MaybeError LoadD3D11();
+    MaybeError LoadDXGI();
+    void LoadDXCLibraries();
+    void LoadDXIL(const std::string& baseWindowsSDKPath);
+    void LoadDXCompiler(const std::string& baseWindowsSDKPath);
+    MaybeError LoadFXCompiler();
+    void LoadPIXRuntime();
 
-        DynamicLib mD3D12Lib;
-        DynamicLib mD3D11Lib;
-        DynamicLib mDXGILib;
-        DynamicLib mDXILLib;
-        DynamicLib mDXCompilerLib;
-        DynamicLib mFXCompilerLib;
-        DynamicLib mPIXEventRuntimeLib;
-    };
+    DynamicLib mD3D12Lib;
+    DynamicLib mD3D11Lib;
+    DynamicLib mDXGILib;
+    DynamicLib mDXILLib;
+    DynamicLib mDXCompilerLib;
+    DynamicLib mFXCompilerLib;
+    DynamicLib mPIXEventRuntimeLib;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/QuerySetD3D12.cpp b/src/dawn/native/d3d12/QuerySetD3D12.cpp
index d3264ac..2f8ea05 100644
--- a/src/dawn/native/d3d12/QuerySetD3D12.cpp
+++ b/src/dawn/native/d3d12/QuerySetD3D12.cpp
@@ -22,56 +22,56 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
-            switch (type) {
-                case wgpu::QueryType::Occlusion:
-                    return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
-                case wgpu::QueryType::PipelineStatistics:
-                    return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
-                case wgpu::QueryType::Timestamp:
-                    return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
-            }
-        }
-    }  // anonymous namespace
-
-    // static
-    ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
-                                                  const QuerySetDescriptor* descriptor) {
-        Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
-        DAWN_TRY(querySet->Initialize());
-        return querySet;
+namespace {
+D3D12_QUERY_HEAP_TYPE D3D12QueryHeapType(wgpu::QueryType type) {
+    switch (type) {
+        case wgpu::QueryType::Occlusion:
+            return D3D12_QUERY_HEAP_TYPE_OCCLUSION;
+        case wgpu::QueryType::PipelineStatistics:
+            return D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS;
+        case wgpu::QueryType::Timestamp:
+            return D3D12_QUERY_HEAP_TYPE_TIMESTAMP;
     }
+}
+}  // anonymous namespace
 
-    MaybeError QuerySet::Initialize() {
-        D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
-        queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
-        queryHeapDesc.Count = std::max(GetQueryCount(), uint32_t(1u));
+// static
+ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+                                              const QuerySetDescriptor* descriptor) {
+    Ref<QuerySet> querySet = AcquireRef(new QuerySet(device, descriptor));
+    DAWN_TRY(querySet->Initialize());
+    return querySet;
+}
 
-        ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
-        DAWN_TRY(CheckOutOfMemoryHRESULT(
-            d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
-            "ID3D12Device::CreateQueryHeap"));
+MaybeError QuerySet::Initialize() {
+    D3D12_QUERY_HEAP_DESC queryHeapDesc = {};
+    queryHeapDesc.Type = D3D12QueryHeapType(GetQueryType());
+    queryHeapDesc.Count = std::max(GetQueryCount(), uint32_t(1u));
 
-        SetLabelImpl();
+    ID3D12Device* d3d12Device = ToBackend(GetDevice())->GetD3D12Device();
+    DAWN_TRY(CheckOutOfMemoryHRESULT(
+        d3d12Device->CreateQueryHeap(&queryHeapDesc, IID_PPV_ARGS(&mQueryHeap)),
+        "ID3D12Device::CreateQueryHeap"));
 
-        return {};
-    }
+    SetLabelImpl();
 
-    ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
-        return mQueryHeap.Get();
-    }
+    return {};
+}
 
-    QuerySet::~QuerySet() = default;
+ID3D12QueryHeap* QuerySet::GetQueryHeap() const {
+    return mQueryHeap.Get();
+}
 
-    void QuerySet::DestroyImpl() {
-        QuerySetBase::DestroyImpl();
-        ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
-        mQueryHeap = nullptr;
-    }
+QuerySet::~QuerySet() = default;
 
-    void QuerySet::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mQueryHeap.Get(), "Dawn_QuerySet", GetLabel());
-    }
+void QuerySet::DestroyImpl() {
+    QuerySetBase::DestroyImpl();
+    ToBackend(GetDevice())->ReferenceUntilUnused(mQueryHeap);
+    mQueryHeap = nullptr;
+}
+
+void QuerySet::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mQueryHeap.Get(), "Dawn_QuerySet", GetLabel());
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/QuerySetD3D12.h b/src/dawn/native/d3d12/QuerySetD3D12.h
index e67b12f..0380543 100644
--- a/src/dawn/native/d3d12/QuerySetD3D12.h
+++ b/src/dawn/native/d3d12/QuerySetD3D12.h
@@ -20,26 +20,26 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class QuerySet : public QuerySetBase {
-      public:
-        static ResultOrError<Ref<QuerySet>> Create(Device* device,
-                                                   const QuerySetDescriptor* descriptor);
+class QuerySet : public QuerySetBase {
+  public:
+    static ResultOrError<Ref<QuerySet>> Create(Device* device,
+                                               const QuerySetDescriptor* descriptor);
 
-        ID3D12QueryHeap* GetQueryHeap() const;
+    ID3D12QueryHeap* GetQueryHeap() const;
 
-      private:
-        ~QuerySet() override;
-        using QuerySetBase::QuerySetBase;
-        MaybeError Initialize();
+  private:
+    ~QuerySet() override;
+    using QuerySetBase::QuerySetBase;
+    MaybeError Initialize();
 
-        // Dawn API
-        void DestroyImpl() override;
-        void SetLabelImpl() override;
+    // Dawn API
+    void DestroyImpl() override;
+    void SetLabelImpl() override;
 
-        ComPtr<ID3D12QueryHeap> mQueryHeap;
-    };
+    ComPtr<ID3D12QueryHeap> mQueryHeap;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/QueueD3D12.cpp b/src/dawn/native/d3d12/QueueD3D12.cpp
index 19b6ac5..e3539a7 100644
--- a/src/dawn/native/d3d12/QueueD3D12.cpp
+++ b/src/dawn/native/d3d12/QueueD3D12.cpp
@@ -27,50 +27,46 @@
 
 namespace dawn::native::d3d12 {
 
-    // static
-    Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
-        Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
-        queue->Initialize();
-        return queue;
+// static
+Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
+    Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
+    queue->Initialize();
+    return queue;
+}
+
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
+
+void Queue::Initialize() {
+    SetLabelImpl();
+}
+
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+    Device* device = ToBackend(GetDevice());
+
+    DAWN_TRY(device->Tick());
+
+    CommandRecordingContext* commandContext;
+    DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+    TRACE_EVENT_BEGIN1(GetDevice()->GetPlatform(), Recording, "CommandBufferD3D12::RecordCommands",
+                       "serial", uint64_t(GetDevice()->GetPendingCommandSerial()));
+    for (uint32_t i = 0; i < commandCount; ++i) {
+        DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
     }
+    TRACE_EVENT_END1(GetDevice()->GetPlatform(), Recording, "CommandBufferD3D12::RecordCommands",
+                     "serial", uint64_t(GetDevice()->GetPendingCommandSerial()));
 
-    Queue::Queue(Device* device, const QueueDescriptor* descriptor)
-        : QueueBase(device, descriptor) {
-    }
+    DAWN_TRY(device->ExecutePendingCommandContext());
 
-    void Queue::Initialize() {
-        SetLabelImpl();
-    }
+    DAWN_TRY(device->NextSerial());
+    return {};
+}
 
-    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
-        Device* device = ToBackend(GetDevice());
-
-        DAWN_TRY(device->Tick());
-
-        CommandRecordingContext* commandContext;
-        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
-
-        TRACE_EVENT_BEGIN1(GetDevice()->GetPlatform(), Recording,
-                           "CommandBufferD3D12::RecordCommands", "serial",
-                           uint64_t(GetDevice()->GetPendingCommandSerial()));
-        for (uint32_t i = 0; i < commandCount; ++i) {
-            DAWN_TRY(ToBackend(commands[i])->RecordCommands(commandContext));
-        }
-        TRACE_EVENT_END1(GetDevice()->GetPlatform(), Recording,
-                         "CommandBufferD3D12::RecordCommands", "serial",
-                         uint64_t(GetDevice()->GetPendingCommandSerial()));
-
-        DAWN_TRY(device->ExecutePendingCommandContext());
-
-        DAWN_TRY(device->NextSerial());
-        return {};
-    }
-
-    void Queue::SetLabelImpl() {
-        Device* device = ToBackend(GetDevice());
-        // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
-        // so it doesn't always change the default queue's label.
-        SetDebugName(device, device->GetCommandQueue().Get(), "Dawn_Queue", GetLabel());
-    }
+void Queue::SetLabelImpl() {
+    Device* device = ToBackend(GetDevice());
+    // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
+    // so it doesn't always change the default queue's label.
+    SetDebugName(device, device->GetCommandQueue().Get(), "Dawn_Queue", GetLabel());
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/QueueD3D12.h b/src/dawn/native/d3d12/QueueD3D12.h
index 9d35f1c..a0f8b46 100644
--- a/src/dawn/native/d3d12/QueueD3D12.h
+++ b/src/dawn/native/d3d12/QueueD3D12.h
@@ -22,22 +22,22 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class Queue final : public QueueBase {
-      public:
-        static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+  public:
+    static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
 
-      private:
-        Queue(Device* device, const QueueDescriptor* descriptor);
+  private:
+    Queue(Device* device, const QueueDescriptor* descriptor);
 
-        void Initialize();
+    void Initialize();
 
-        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
 
-        // Dawn API
-        void SetLabelImpl() override;
-    };
+    // Dawn API
+    void SetLabelImpl() override;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp b/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
index 9e72e75..b2f9c95 100644
--- a/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
+++ b/src/dawn/native/d3d12/RenderPassBuilderD3D12.cpp
@@ -24,218 +24,216 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
-            switch (loadOp) {
-                case wgpu::LoadOp::Clear:
-                    return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
-                case wgpu::LoadOp::Load:
-                    return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
-                case wgpu::LoadOp::Undefined:
-                    UNREACHABLE();
-                    break;
-            }
-        }
+namespace {
+D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE D3D12BeginningAccessType(wgpu::LoadOp loadOp) {
+    switch (loadOp) {
+        case wgpu::LoadOp::Clear:
+            return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_CLEAR;
+        case wgpu::LoadOp::Load:
+            return D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_PRESERVE;
+        case wgpu::LoadOp::Undefined:
+            UNREACHABLE();
+            break;
+    }
+}
 
-        D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
-            switch (storeOp) {
-                case wgpu::StoreOp::Discard:
-                    return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
-                case wgpu::StoreOp::Store:
-                    return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
-                case wgpu::StoreOp::Undefined:
-                    UNREACHABLE();
-                    break;
-            }
-        }
+D3D12_RENDER_PASS_ENDING_ACCESS_TYPE D3D12EndingAccessType(wgpu::StoreOp storeOp) {
+    switch (storeOp) {
+        case wgpu::StoreOp::Discard:
+            return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_DISCARD;
+        case wgpu::StoreOp::Store:
+            return D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_PRESERVE;
+        case wgpu::StoreOp::Undefined:
+            UNREACHABLE();
+            break;
+    }
+}
 
-        D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
-            wgpu::StoreOp storeOp,
-            TextureView* resolveSource,
-            TextureView* resolveDestination) {
-            D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
+D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS D3D12EndingAccessResolveParameters(
+    wgpu::StoreOp storeOp,
+    TextureView* resolveSource,
+    TextureView* resolveDestination) {
+    D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_PARAMETERS resolveParameters;
 
-            resolveParameters.Format = resolveDestination->GetD3D12Format();
-            resolveParameters.pSrcResource =
-                ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
-            resolveParameters.pDstResource =
-                ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
+    resolveParameters.Format = resolveDestination->GetD3D12Format();
+    resolveParameters.pSrcResource = ToBackend(resolveSource->GetTexture())->GetD3D12Resource();
+    resolveParameters.pDstResource =
+        ToBackend(resolveDestination->GetTexture())->GetD3D12Resource();
 
-            // Clear or preserve the resolve source.
-            if (storeOp == wgpu::StoreOp::Discard) {
-                resolveParameters.PreserveResolveSource = false;
-            } else if (storeOp == wgpu::StoreOp::Store) {
-                resolveParameters.PreserveResolveSource = true;
-            }
-
-            // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
-            ASSERT(resolveDestination->GetFormat().GetAspectInfo(Aspect::Color).baseType ==
-                   wgpu::TextureComponentType::Float);
-            resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
-
-            resolveParameters.SubresourceCount = 1;
-
-            return resolveParameters;
-        }
-
-        D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
-        D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
-            D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
-            Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
-            ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
-
-            subresourceParameters.DstX = 0;
-            subresourceParameters.DstY = 0;
-            subresourceParameters.SrcSubresource = 0;
-            subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
-                resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
-                Aspect::Color);
-            // Resolving a specified sub-rect is only valid on hardware that supports sample
-            // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
-            // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
-            // "empty" to resolve the entire region.
-            subresourceParameters.SrcRect = {0, 0, 0, 0};
-
-            return subresourceParameters;
-        }
-    }  // anonymous namespace
-
-    RenderPassBuilder::RenderPassBuilder(bool hasUAV) {
-        if (hasUAV) {
-            mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
-        }
+    // Clear or preserve the resolve source.
+    if (storeOp == wgpu::StoreOp::Discard) {
+        resolveParameters.PreserveResolveSource = false;
+    } else if (storeOp == wgpu::StoreOp::Store) {
+        resolveParameters.PreserveResolveSource = true;
     }
 
-    void RenderPassBuilder::SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
-                                                D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
-                                                bool isNullRTV) {
-        mRenderTargetViews[attachmentIndex] = baseDescriptor;
-        mRenderPassRenderTargetDescriptors[attachmentIndex].cpuDescriptor = baseDescriptor;
-        if (!isNullRTV) {
-            mHighestColorAttachmentIndexPlusOne =
-                std::max(mHighestColorAttachmentIndexPlusOne,
-                         ColorAttachmentIndex{
-                             static_cast<uint8_t>(static_cast<uint8_t>(attachmentIndex) + 1u)});
-        }
+    // RESOLVE_MODE_AVERAGE is only valid for non-integer formats.
+    ASSERT(resolveDestination->GetFormat().GetAspectInfo(Aspect::Color).baseType ==
+           wgpu::TextureComponentType::Float);
+    resolveParameters.ResolveMode = D3D12_RESOLVE_MODE_AVERAGE;
+
+    resolveParameters.SubresourceCount = 1;
+
+    return resolveParameters;
+}
+
+D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS
+D3D12EndingAccessResolveSubresourceParameters(TextureView* resolveDestination) {
+    D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS subresourceParameters;
+    Texture* resolveDestinationTexture = ToBackend(resolveDestination->GetTexture());
+    ASSERT(resolveDestinationTexture->GetFormat().aspects == Aspect::Color);
+
+    subresourceParameters.DstX = 0;
+    subresourceParameters.DstY = 0;
+    subresourceParameters.SrcSubresource = 0;
+    subresourceParameters.DstSubresource = resolveDestinationTexture->GetSubresourceIndex(
+        resolveDestination->GetBaseMipLevel(), resolveDestination->GetBaseArrayLayer(),
+        Aspect::Color);
+    // Resolving a specified sub-rect is only valid on hardware that supports sample
+    // positions. This means even {0, 0, width, height} would be invalid if unsupported. To
+    // avoid this, we assume sub-rect resolves never work by setting them to all zeros or
+    // "empty" to resolve the entire region.
+    subresourceParameters.SrcRect = {0, 0, 0, 0};
+
+    return subresourceParameters;
+}
+}  // anonymous namespace
+
+RenderPassBuilder::RenderPassBuilder(bool hasUAV) {
+    if (hasUAV) {
+        mRenderPassFlags = D3D12_RENDER_PASS_FLAG_ALLOW_UAV_WRITES;
     }
+}
 
-    void RenderPassBuilder::SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
-        mRenderPassDepthStencilDesc.cpuDescriptor = baseDescriptor;
+void RenderPassBuilder::SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+                                            D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+                                            bool isNullRTV) {
+    mRenderTargetViews[attachmentIndex] = baseDescriptor;
+    mRenderPassRenderTargetDescriptors[attachmentIndex].cpuDescriptor = baseDescriptor;
+    if (!isNullRTV) {
+        mHighestColorAttachmentIndexPlusOne = std::max(
+            mHighestColorAttachmentIndexPlusOne,
+            ColorAttachmentIndex{static_cast<uint8_t>(static_cast<uint8_t>(attachmentIndex) + 1u)});
     }
+}
 
-    ColorAttachmentIndex RenderPassBuilder::GetHighestColorAttachmentIndexPlusOne() const {
-        return mHighestColorAttachmentIndexPlusOne;
+void RenderPassBuilder::SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor) {
+    mRenderPassDepthStencilDesc.cpuDescriptor = baseDescriptor;
+}
+
+ColorAttachmentIndex RenderPassBuilder::GetHighestColorAttachmentIndexPlusOne() const {
+    return mHighestColorAttachmentIndexPlusOne;
+}
+
+bool RenderPassBuilder::HasDepthOrStencil() const {
+    return mHasDepthOrStencil;
+}
+
+ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
+    return {mRenderPassRenderTargetDescriptors.data(), mHighestColorAttachmentIndexPlusOne};
+}
+
+const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* RenderPassBuilder::GetRenderPassDepthStencilDescriptor()
+    const {
+    return &mRenderPassDepthStencilDesc;
+}
+
+D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
+    return mRenderPassFlags;
+}
+
+const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
+    return mRenderTargetViews.data();
+}
+
+void RenderPassBuilder::SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+                                                       wgpu::LoadOp loadOp,
+                                                       dawn::native::Color clearColor,
+                                                       DXGI_FORMAT format) {
+    mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
+        D3D12BeginningAccessType(loadOp);
+    if (loadOp == wgpu::LoadOp::Clear) {
+        mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[0] =
+            clearColor.r;
+        mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[1] =
+            clearColor.g;
+        mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[2] =
+            clearColor.b;
+        mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Color[3] =
+            clearColor.a;
+        mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
+            format;
     }
+}
 
-    bool RenderPassBuilder::HasDepthOrStencil() const {
-        return mHasDepthOrStencil;
+void RenderPassBuilder::SetRenderTargetEndingAccess(ColorAttachmentIndex attachment,
+                                                    wgpu::StoreOp storeOp) {
+    mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+        D3D12EndingAccessType(storeOp);
+}
+
+void RenderPassBuilder::SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+                                                           wgpu::StoreOp storeOp,
+                                                           TextureView* resolveSource,
+                                                           TextureView* resolveDestination) {
+    mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
+        D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
+    mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
+        D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
+
+    mSubresourceParams[attachment] =
+        D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
+
+    mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
+        &mSubresourceParams[attachment];
+}
+
+void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
+                                       wgpu::StoreOp storeOp,
+                                       float clearDepth,
+                                       DXGI_FORMAT format) {
+    mHasDepthOrStencil = true;
+    mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+    if (loadOp == wgpu::LoadOp::Clear) {
+        mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
+            clearDepth;
+        mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
     }
+    mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
+}
 
-    ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
-    RenderPassBuilder::GetRenderPassRenderTargetDescriptors() const {
-        return {mRenderPassRenderTargetDescriptors.data(), mHighestColorAttachmentIndexPlusOne};
+void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
+                                         wgpu::StoreOp storeOp,
+                                         uint8_t clearStencil,
+                                         DXGI_FORMAT format) {
+    mHasDepthOrStencil = true;
+    mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
+    if (loadOp == wgpu::LoadOp::Clear) {
+        mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil.Stencil =
+            clearStencil;
+        mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
     }
+    mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
+}
 
-    const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC*
-    RenderPassBuilder::GetRenderPassDepthStencilDescriptor() const {
-        return &mRenderPassDepthStencilDesc;
-    }
+void RenderPassBuilder::SetDepthNoAccess() {
+    mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
+        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+    mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
+        D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+}
 
-    D3D12_RENDER_PASS_FLAGS RenderPassBuilder::GetRenderPassFlags() const {
-        return mRenderPassFlags;
-    }
+void RenderPassBuilder::SetDepthStencilNoAccess() {
+    SetDepthNoAccess();
+    SetStencilNoAccess();
+}
 
-    const D3D12_CPU_DESCRIPTOR_HANDLE* RenderPassBuilder::GetRenderTargetViews() const {
-        return mRenderTargetViews.data();
-    }
-
-    void RenderPassBuilder::SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
-                                                           wgpu::LoadOp loadOp,
-                                                           dawn::native::Color clearColor,
-                                                           DXGI_FORMAT format) {
-        mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Type =
-            D3D12BeginningAccessType(loadOp);
-        if (loadOp == wgpu::LoadOp::Clear) {
-            mRenderPassRenderTargetDescriptors[attachment]
-                .BeginningAccess.Clear.ClearValue.Color[0] = clearColor.r;
-            mRenderPassRenderTargetDescriptors[attachment]
-                .BeginningAccess.Clear.ClearValue.Color[1] = clearColor.g;
-            mRenderPassRenderTargetDescriptors[attachment]
-                .BeginningAccess.Clear.ClearValue.Color[2] = clearColor.b;
-            mRenderPassRenderTargetDescriptors[attachment]
-                .BeginningAccess.Clear.ClearValue.Color[3] = clearColor.a;
-            mRenderPassRenderTargetDescriptors[attachment].BeginningAccess.Clear.ClearValue.Format =
-                format;
-        }
-    }
-
-    void RenderPassBuilder::SetRenderTargetEndingAccess(ColorAttachmentIndex attachment,
-                                                        wgpu::StoreOp storeOp) {
-        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
-            D3D12EndingAccessType(storeOp);
-    }
-
-    void RenderPassBuilder::SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
-                                                               wgpu::StoreOp storeOp,
-                                                               TextureView* resolveSource,
-                                                               TextureView* resolveDestination) {
-        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Type =
-            D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_RESOLVE;
-        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve =
-            D3D12EndingAccessResolveParameters(storeOp, resolveSource, resolveDestination);
-
-        mSubresourceParams[attachment] =
-            D3D12EndingAccessResolveSubresourceParameters(resolveDestination);
-
-        mRenderPassRenderTargetDescriptors[attachment].EndingAccess.Resolve.pSubresourceParameters =
-            &mSubresourceParams[attachment];
-    }
-
-    void RenderPassBuilder::SetDepthAccess(wgpu::LoadOp loadOp,
-                                           wgpu::StoreOp storeOp,
-                                           float clearDepth,
-                                           DXGI_FORMAT format) {
-        mHasDepthOrStencil = true;
-        mRenderPassDepthStencilDesc.DepthBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
-        if (loadOp == wgpu::LoadOp::Clear) {
-            mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.DepthStencil.Depth =
-                clearDepth;
-            mRenderPassDepthStencilDesc.DepthBeginningAccess.Clear.ClearValue.Format = format;
-        }
-        mRenderPassDepthStencilDesc.DepthEndingAccess.Type = D3D12EndingAccessType(storeOp);
-    }
-
-    void RenderPassBuilder::SetStencilAccess(wgpu::LoadOp loadOp,
-                                             wgpu::StoreOp storeOp,
-                                             uint8_t clearStencil,
-                                             DXGI_FORMAT format) {
-        mHasDepthOrStencil = true;
-        mRenderPassDepthStencilDesc.StencilBeginningAccess.Type = D3D12BeginningAccessType(loadOp);
-        if (loadOp == wgpu::LoadOp::Clear) {
-            mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.DepthStencil
-                .Stencil = clearStencil;
-            mRenderPassDepthStencilDesc.StencilBeginningAccess.Clear.ClearValue.Format = format;
-        }
-        mRenderPassDepthStencilDesc.StencilEndingAccess.Type = D3D12EndingAccessType(storeOp);
-    }
-
-    void RenderPassBuilder::SetDepthNoAccess() {
-        mRenderPassDepthStencilDesc.DepthBeginningAccess.Type =
-            D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
-        mRenderPassDepthStencilDesc.DepthEndingAccess.Type =
-            D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
-    }
-
-    void RenderPassBuilder::SetDepthStencilNoAccess() {
-        SetDepthNoAccess();
-        SetStencilNoAccess();
-    }
-
-    void RenderPassBuilder::SetStencilNoAccess() {
-        mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
-            D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
-        mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
-            D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
-    }
+void RenderPassBuilder::SetStencilNoAccess() {
+    mRenderPassDepthStencilDesc.StencilBeginningAccess.Type =
+        D3D12_RENDER_PASS_BEGINNING_ACCESS_TYPE_NO_ACCESS;
+    mRenderPassDepthStencilDesc.StencilEndingAccess.Type =
+        D3D12_RENDER_PASS_ENDING_ACCESS_TYPE_NO_ACCESS;
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/RenderPassBuilderD3D12.h b/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
index da6cd45..42c0fc5 100644
--- a/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
+++ b/src/dawn/native/d3d12/RenderPassBuilderD3D12.h
@@ -26,76 +26,75 @@
 
 namespace dawn::native::d3d12 {
 
-    class TextureView;
+class TextureView;
 
-    // RenderPassBuilder stores parameters related to render pass load and store operations.
-    // When the D3D12 render pass API is available, the needed descriptors can be fetched
-    // directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
-    // descriptors are still fetched and any information necessary to emulate the load and store
-    // operations is extracted from the descriptors.
-    class RenderPassBuilder {
-      public:
-        explicit RenderPassBuilder(bool hasUAV);
+// RenderPassBuilder stores parameters related to render pass load and store operations.
+// When the D3D12 render pass API is available, the needed descriptors can be fetched
+// directly from the RenderPassBuilder. When the D3D12 render pass API is not available, the
+// descriptors are still fetched and any information necessary to emulate the load and store
+// operations is extracted from the descriptors.
+class RenderPassBuilder {
+  public:
+    explicit RenderPassBuilder(bool hasUAV);
 
-        // Returns the highest color attachment index + 1. If there is no color attachment, returns
-        // 0. Range: [0, kMaxColorAttachments + 1)
-        ColorAttachmentIndex GetHighestColorAttachmentIndexPlusOne() const;
+    // Returns the highest color attachment index + 1. If there is no color attachment, returns
+    // 0. Range: [0, kMaxColorAttachments + 1)
+    ColorAttachmentIndex GetHighestColorAttachmentIndexPlusOne() const;
 
-        // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
-        // storage if D3D12 render pass API is unavailable.
-        ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
-        GetRenderPassRenderTargetDescriptors() const;
-        const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
+    // Returns descriptors that are fed directly to BeginRenderPass, or are used as parameter
+    // storage if D3D12 render pass API is unavailable.
+    ityp::span<ColorAttachmentIndex, const D3D12_RENDER_PASS_RENDER_TARGET_DESC>
+    GetRenderPassRenderTargetDescriptors() const;
+    const D3D12_RENDER_PASS_DEPTH_STENCIL_DESC* GetRenderPassDepthStencilDescriptor() const;
 
-        D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
+    D3D12_RENDER_PASS_FLAGS GetRenderPassFlags() const;
 
-        // Returns attachment RTVs to use with OMSetRenderTargets.
-        const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
+    // Returns attachment RTVs to use with OMSetRenderTargets.
+    const D3D12_CPU_DESCRIPTOR_HANDLE* GetRenderTargetViews() const;
 
-        bool HasDepthOrStencil() const;
+    bool HasDepthOrStencil() const;
 
-        // Functions that set the appropriate values in the render pass descriptors.
-        void SetDepthAccess(wgpu::LoadOp loadOp,
-                            wgpu::StoreOp storeOp,
-                            float clearDepth,
-                            DXGI_FORMAT format);
-        void SetDepthNoAccess();
-        void SetDepthStencilNoAccess();
-        void SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
-                                            wgpu::LoadOp loadOp,
-                                            dawn::native::Color clearColor,
-                                            DXGI_FORMAT format);
-        void SetRenderTargetEndingAccess(ColorAttachmentIndex attachment, wgpu::StoreOp storeOp);
-        void SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
-                                                wgpu::StoreOp storeOp,
-                                                TextureView* resolveSource,
-                                                TextureView* resolveDestination);
-        void SetStencilAccess(wgpu::LoadOp loadOp,
-                              wgpu::StoreOp storeOp,
-                              uint8_t clearStencil,
-                              DXGI_FORMAT format);
-        void SetStencilNoAccess();
+    // Functions that set the appropriate values in the render pass descriptors.
+    void SetDepthAccess(wgpu::LoadOp loadOp,
+                        wgpu::StoreOp storeOp,
+                        float clearDepth,
+                        DXGI_FORMAT format);
+    void SetDepthNoAccess();
+    void SetDepthStencilNoAccess();
+    void SetRenderTargetBeginningAccess(ColorAttachmentIndex attachment,
+                                        wgpu::LoadOp loadOp,
+                                        dawn::native::Color clearColor,
+                                        DXGI_FORMAT format);
+    void SetRenderTargetEndingAccess(ColorAttachmentIndex attachment, wgpu::StoreOp storeOp);
+    void SetRenderTargetEndingAccessResolve(ColorAttachmentIndex attachment,
+                                            wgpu::StoreOp storeOp,
+                                            TextureView* resolveSource,
+                                            TextureView* resolveDestination);
+    void SetStencilAccess(wgpu::LoadOp loadOp,
+                          wgpu::StoreOp storeOp,
+                          uint8_t clearStencil,
+                          DXGI_FORMAT format);
+    void SetStencilNoAccess();
 
-        void SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
-                                 D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
-                                 bool isNullRTV);
-        void SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
+    void SetRenderTargetView(ColorAttachmentIndex attachmentIndex,
+                             D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor,
+                             bool isNullRTV);
+    void SetDepthStencilView(D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor);
 
-      private:
-        ColorAttachmentIndex mHighestColorAttachmentIndexPlusOne{uint8_t(0)};
-        bool mHasDepthOrStencil = false;
-        D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
-        D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
-        ityp::
-            array<ColorAttachmentIndex, D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
-                mRenderPassRenderTargetDescriptors;
-        ityp::array<ColorAttachmentIndex, D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments>
-            mRenderTargetViews;
-        ityp::array<ColorAttachmentIndex,
-                    D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
-                    kMaxColorAttachments>
-            mSubresourceParams;
-    };
+  private:
+    ColorAttachmentIndex mHighestColorAttachmentIndexPlusOne{uint8_t(0)};
+    bool mHasDepthOrStencil = false;
+    D3D12_RENDER_PASS_FLAGS mRenderPassFlags = D3D12_RENDER_PASS_FLAG_NONE;
+    D3D12_RENDER_PASS_DEPTH_STENCIL_DESC mRenderPassDepthStencilDesc;
+    ityp::array<ColorAttachmentIndex, D3D12_RENDER_PASS_RENDER_TARGET_DESC, kMaxColorAttachments>
+        mRenderPassRenderTargetDescriptors;
+    ityp::array<ColorAttachmentIndex, D3D12_CPU_DESCRIPTOR_HANDLE, kMaxColorAttachments>
+        mRenderTargetViews;
+    ityp::array<ColorAttachmentIndex,
+                D3D12_RENDER_PASS_ENDING_ACCESS_RESOLVE_SUBRESOURCE_PARAMETERS,
+                kMaxColorAttachments>
+        mSubresourceParams;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_RENDERPASSBUILDERD3D12_H_
diff --git a/src/dawn/native/d3d12/RenderPipelineD3D12.cpp b/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
index 1eae0fd..9f513cf 100644
--- a/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
+++ b/src/dawn/native/d3d12/RenderPipelineD3D12.cpp
@@ -32,496 +32,489 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
-            switch (format) {
-                case wgpu::VertexFormat::Uint8x2:
-                    return DXGI_FORMAT_R8G8_UINT;
-                case wgpu::VertexFormat::Uint8x4:
-                    return DXGI_FORMAT_R8G8B8A8_UINT;
-                case wgpu::VertexFormat::Sint8x2:
-                    return DXGI_FORMAT_R8G8_SINT;
-                case wgpu::VertexFormat::Sint8x4:
-                    return DXGI_FORMAT_R8G8B8A8_SINT;
-                case wgpu::VertexFormat::Unorm8x2:
-                    return DXGI_FORMAT_R8G8_UNORM;
-                case wgpu::VertexFormat::Unorm8x4:
-                    return DXGI_FORMAT_R8G8B8A8_UNORM;
-                case wgpu::VertexFormat::Snorm8x2:
-                    return DXGI_FORMAT_R8G8_SNORM;
-                case wgpu::VertexFormat::Snorm8x4:
-                    return DXGI_FORMAT_R8G8B8A8_SNORM;
-                case wgpu::VertexFormat::Uint16x2:
-                    return DXGI_FORMAT_R16G16_UINT;
-                case wgpu::VertexFormat::Uint16x4:
-                    return DXGI_FORMAT_R16G16B16A16_UINT;
-                case wgpu::VertexFormat::Sint16x2:
-                    return DXGI_FORMAT_R16G16_SINT;
-                case wgpu::VertexFormat::Sint16x4:
-                    return DXGI_FORMAT_R16G16B16A16_SINT;
-                case wgpu::VertexFormat::Unorm16x2:
-                    return DXGI_FORMAT_R16G16_UNORM;
-                case wgpu::VertexFormat::Unorm16x4:
-                    return DXGI_FORMAT_R16G16B16A16_UNORM;
-                case wgpu::VertexFormat::Snorm16x2:
-                    return DXGI_FORMAT_R16G16_SNORM;
-                case wgpu::VertexFormat::Snorm16x4:
-                    return DXGI_FORMAT_R16G16B16A16_SNORM;
-                case wgpu::VertexFormat::Float16x2:
-                    return DXGI_FORMAT_R16G16_FLOAT;
-                case wgpu::VertexFormat::Float16x4:
-                    return DXGI_FORMAT_R16G16B16A16_FLOAT;
-                case wgpu::VertexFormat::Float32:
-                    return DXGI_FORMAT_R32_FLOAT;
-                case wgpu::VertexFormat::Float32x2:
-                    return DXGI_FORMAT_R32G32_FLOAT;
-                case wgpu::VertexFormat::Float32x3:
-                    return DXGI_FORMAT_R32G32B32_FLOAT;
-                case wgpu::VertexFormat::Float32x4:
-                    return DXGI_FORMAT_R32G32B32A32_FLOAT;
-                case wgpu::VertexFormat::Uint32:
-                    return DXGI_FORMAT_R32_UINT;
-                case wgpu::VertexFormat::Uint32x2:
-                    return DXGI_FORMAT_R32G32_UINT;
-                case wgpu::VertexFormat::Uint32x3:
-                    return DXGI_FORMAT_R32G32B32_UINT;
-                case wgpu::VertexFormat::Uint32x4:
-                    return DXGI_FORMAT_R32G32B32A32_UINT;
-                case wgpu::VertexFormat::Sint32:
-                    return DXGI_FORMAT_R32_SINT;
-                case wgpu::VertexFormat::Sint32x2:
-                    return DXGI_FORMAT_R32G32_SINT;
-                case wgpu::VertexFormat::Sint32x3:
-                    return DXGI_FORMAT_R32G32B32_SINT;
-                case wgpu::VertexFormat::Sint32x4:
-                    return DXGI_FORMAT_R32G32B32A32_SINT;
-                default:
-                    UNREACHABLE();
-            }
-        }
+namespace {
+DXGI_FORMAT VertexFormatType(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Uint8x2:
+            return DXGI_FORMAT_R8G8_UINT;
+        case wgpu::VertexFormat::Uint8x4:
+            return DXGI_FORMAT_R8G8B8A8_UINT;
+        case wgpu::VertexFormat::Sint8x2:
+            return DXGI_FORMAT_R8G8_SINT;
+        case wgpu::VertexFormat::Sint8x4:
+            return DXGI_FORMAT_R8G8B8A8_SINT;
+        case wgpu::VertexFormat::Unorm8x2:
+            return DXGI_FORMAT_R8G8_UNORM;
+        case wgpu::VertexFormat::Unorm8x4:
+            return DXGI_FORMAT_R8G8B8A8_UNORM;
+        case wgpu::VertexFormat::Snorm8x2:
+            return DXGI_FORMAT_R8G8_SNORM;
+        case wgpu::VertexFormat::Snorm8x4:
+            return DXGI_FORMAT_R8G8B8A8_SNORM;
+        case wgpu::VertexFormat::Uint16x2:
+            return DXGI_FORMAT_R16G16_UINT;
+        case wgpu::VertexFormat::Uint16x4:
+            return DXGI_FORMAT_R16G16B16A16_UINT;
+        case wgpu::VertexFormat::Sint16x2:
+            return DXGI_FORMAT_R16G16_SINT;
+        case wgpu::VertexFormat::Sint16x4:
+            return DXGI_FORMAT_R16G16B16A16_SINT;
+        case wgpu::VertexFormat::Unorm16x2:
+            return DXGI_FORMAT_R16G16_UNORM;
+        case wgpu::VertexFormat::Unorm16x4:
+            return DXGI_FORMAT_R16G16B16A16_UNORM;
+        case wgpu::VertexFormat::Snorm16x2:
+            return DXGI_FORMAT_R16G16_SNORM;
+        case wgpu::VertexFormat::Snorm16x4:
+            return DXGI_FORMAT_R16G16B16A16_SNORM;
+        case wgpu::VertexFormat::Float16x2:
+            return DXGI_FORMAT_R16G16_FLOAT;
+        case wgpu::VertexFormat::Float16x4:
+            return DXGI_FORMAT_R16G16B16A16_FLOAT;
+        case wgpu::VertexFormat::Float32:
+            return DXGI_FORMAT_R32_FLOAT;
+        case wgpu::VertexFormat::Float32x2:
+            return DXGI_FORMAT_R32G32_FLOAT;
+        case wgpu::VertexFormat::Float32x3:
+            return DXGI_FORMAT_R32G32B32_FLOAT;
+        case wgpu::VertexFormat::Float32x4:
+            return DXGI_FORMAT_R32G32B32A32_FLOAT;
+        case wgpu::VertexFormat::Uint32:
+            return DXGI_FORMAT_R32_UINT;
+        case wgpu::VertexFormat::Uint32x2:
+            return DXGI_FORMAT_R32G32_UINT;
+        case wgpu::VertexFormat::Uint32x3:
+            return DXGI_FORMAT_R32G32B32_UINT;
+        case wgpu::VertexFormat::Uint32x4:
+            return DXGI_FORMAT_R32G32B32A32_UINT;
+        case wgpu::VertexFormat::Sint32:
+            return DXGI_FORMAT_R32_SINT;
+        case wgpu::VertexFormat::Sint32x2:
+            return DXGI_FORMAT_R32G32_SINT;
+        case wgpu::VertexFormat::Sint32x3:
+            return DXGI_FORMAT_R32G32B32_SINT;
+        case wgpu::VertexFormat::Sint32x4:
+            return DXGI_FORMAT_R32G32B32A32_SINT;
+        default:
+            UNREACHABLE();
+    }
+}
 
-        D3D12_INPUT_CLASSIFICATION VertexStepModeFunction(wgpu::VertexStepMode mode) {
-            switch (mode) {
-                case wgpu::VertexStepMode::Vertex:
-                    return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
-                case wgpu::VertexStepMode::Instance:
-                    return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
-            }
-        }
+D3D12_INPUT_CLASSIFICATION VertexStepModeFunction(wgpu::VertexStepMode mode) {
+    switch (mode) {
+        case wgpu::VertexStepMode::Vertex:
+            return D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
+        case wgpu::VertexStepMode::Instance:
+            return D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA;
+    }
+}
 
-        D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
-            switch (primitiveTopology) {
-                case wgpu::PrimitiveTopology::PointList:
-                    return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
-                case wgpu::PrimitiveTopology::LineList:
-                    return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
-                case wgpu::PrimitiveTopology::LineStrip:
-                    return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
-                case wgpu::PrimitiveTopology::TriangleList:
-                    return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
-                case wgpu::PrimitiveTopology::TriangleStrip:
-                    return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
-            }
-        }
+D3D12_PRIMITIVE_TOPOLOGY D3D12PrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+    switch (primitiveTopology) {
+        case wgpu::PrimitiveTopology::PointList:
+            return D3D_PRIMITIVE_TOPOLOGY_POINTLIST;
+        case wgpu::PrimitiveTopology::LineList:
+            return D3D_PRIMITIVE_TOPOLOGY_LINELIST;
+        case wgpu::PrimitiveTopology::LineStrip:
+            return D3D_PRIMITIVE_TOPOLOGY_LINESTRIP;
+        case wgpu::PrimitiveTopology::TriangleList:
+            return D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST;
+        case wgpu::PrimitiveTopology::TriangleStrip:
+            return D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP;
+    }
+}
 
-        D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
-            wgpu::PrimitiveTopology primitiveTopology) {
-            switch (primitiveTopology) {
-                case wgpu::PrimitiveTopology::PointList:
-                    return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
-                case wgpu::PrimitiveTopology::LineList:
-                case wgpu::PrimitiveTopology::LineStrip:
-                    return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
-                case wgpu::PrimitiveTopology::TriangleList:
-                case wgpu::PrimitiveTopology::TriangleStrip:
-                    return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
-            }
-        }
+D3D12_PRIMITIVE_TOPOLOGY_TYPE D3D12PrimitiveTopologyType(
+    wgpu::PrimitiveTopology primitiveTopology) {
+    switch (primitiveTopology) {
+        case wgpu::PrimitiveTopology::PointList:
+            return D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT;
+        case wgpu::PrimitiveTopology::LineList:
+        case wgpu::PrimitiveTopology::LineStrip:
+            return D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE;
+        case wgpu::PrimitiveTopology::TriangleList:
+        case wgpu::PrimitiveTopology::TriangleStrip:
+            return D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
+    }
+}
 
-        D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
-            switch (mode) {
-                case wgpu::CullMode::None:
-                    return D3D12_CULL_MODE_NONE;
-                case wgpu::CullMode::Front:
-                    return D3D12_CULL_MODE_FRONT;
-                case wgpu::CullMode::Back:
-                    return D3D12_CULL_MODE_BACK;
-            }
-        }
+D3D12_CULL_MODE D3D12CullMode(wgpu::CullMode mode) {
+    switch (mode) {
+        case wgpu::CullMode::None:
+            return D3D12_CULL_MODE_NONE;
+        case wgpu::CullMode::Front:
+            return D3D12_CULL_MODE_FRONT;
+        case wgpu::CullMode::Back:
+            return D3D12_CULL_MODE_BACK;
+    }
+}
 
-        D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
-            switch (factor) {
-                case wgpu::BlendFactor::Zero:
-                    return D3D12_BLEND_ZERO;
-                case wgpu::BlendFactor::One:
-                    return D3D12_BLEND_ONE;
-                case wgpu::BlendFactor::Src:
-                    return D3D12_BLEND_SRC_COLOR;
-                case wgpu::BlendFactor::OneMinusSrc:
-                    return D3D12_BLEND_INV_SRC_COLOR;
-                case wgpu::BlendFactor::SrcAlpha:
-                    return D3D12_BLEND_SRC_ALPHA;
-                case wgpu::BlendFactor::OneMinusSrcAlpha:
-                    return D3D12_BLEND_INV_SRC_ALPHA;
-                case wgpu::BlendFactor::Dst:
-                    return D3D12_BLEND_DEST_COLOR;
-                case wgpu::BlendFactor::OneMinusDst:
-                    return D3D12_BLEND_INV_DEST_COLOR;
-                case wgpu::BlendFactor::DstAlpha:
-                    return D3D12_BLEND_DEST_ALPHA;
-                case wgpu::BlendFactor::OneMinusDstAlpha:
-                    return D3D12_BLEND_INV_DEST_ALPHA;
-                case wgpu::BlendFactor::SrcAlphaSaturated:
-                    return D3D12_BLEND_SRC_ALPHA_SAT;
-                case wgpu::BlendFactor::Constant:
-                    return D3D12_BLEND_BLEND_FACTOR;
-                case wgpu::BlendFactor::OneMinusConstant:
-                    return D3D12_BLEND_INV_BLEND_FACTOR;
-            }
-        }
+D3D12_BLEND D3D12Blend(wgpu::BlendFactor factor) {
+    switch (factor) {
+        case wgpu::BlendFactor::Zero:
+            return D3D12_BLEND_ZERO;
+        case wgpu::BlendFactor::One:
+            return D3D12_BLEND_ONE;
+        case wgpu::BlendFactor::Src:
+            return D3D12_BLEND_SRC_COLOR;
+        case wgpu::BlendFactor::OneMinusSrc:
+            return D3D12_BLEND_INV_SRC_COLOR;
+        case wgpu::BlendFactor::SrcAlpha:
+            return D3D12_BLEND_SRC_ALPHA;
+        case wgpu::BlendFactor::OneMinusSrcAlpha:
+            return D3D12_BLEND_INV_SRC_ALPHA;
+        case wgpu::BlendFactor::Dst:
+            return D3D12_BLEND_DEST_COLOR;
+        case wgpu::BlendFactor::OneMinusDst:
+            return D3D12_BLEND_INV_DEST_COLOR;
+        case wgpu::BlendFactor::DstAlpha:
+            return D3D12_BLEND_DEST_ALPHA;
+        case wgpu::BlendFactor::OneMinusDstAlpha:
+            return D3D12_BLEND_INV_DEST_ALPHA;
+        case wgpu::BlendFactor::SrcAlphaSaturated:
+            return D3D12_BLEND_SRC_ALPHA_SAT;
+        case wgpu::BlendFactor::Constant:
+            return D3D12_BLEND_BLEND_FACTOR;
+        case wgpu::BlendFactor::OneMinusConstant:
+            return D3D12_BLEND_INV_BLEND_FACTOR;
+    }
+}
 
-        // When a blend factor is defined for the alpha channel, any of the factors that don't
-        // explicitly state that they apply to alpha should be treated as their explicitly-alpha
-        // equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
-        D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
-            switch (factor) {
-                case wgpu::BlendFactor::Src:
-                    return D3D12_BLEND_SRC_ALPHA;
-                case wgpu::BlendFactor::OneMinusSrc:
-                    return D3D12_BLEND_INV_SRC_ALPHA;
-                case wgpu::BlendFactor::Dst:
-                    return D3D12_BLEND_DEST_ALPHA;
-                case wgpu::BlendFactor::OneMinusDst:
-                    return D3D12_BLEND_INV_DEST_ALPHA;
+// When a blend factor is defined for the alpha channel, any of the factors that don't
+// explicitly state that they apply to alpha should be treated as their explicitly-alpha
+// equivalents. See: https://github.com/gpuweb/gpuweb/issues/65
+D3D12_BLEND D3D12AlphaBlend(wgpu::BlendFactor factor) {
+    switch (factor) {
+        case wgpu::BlendFactor::Src:
+            return D3D12_BLEND_SRC_ALPHA;
+        case wgpu::BlendFactor::OneMinusSrc:
+            return D3D12_BLEND_INV_SRC_ALPHA;
+        case wgpu::BlendFactor::Dst:
+            return D3D12_BLEND_DEST_ALPHA;
+        case wgpu::BlendFactor::OneMinusDst:
+            return D3D12_BLEND_INV_DEST_ALPHA;
 
-                // Other blend factors translate to the same D3D12 enum as the color blend factors.
-                default:
-                    return D3D12Blend(factor);
-            }
-        }
+        // Other blend factors translate to the same D3D12 enum as the color blend factors.
+        default:
+            return D3D12Blend(factor);
+    }
+}
 
-        D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
-            switch (operation) {
-                case wgpu::BlendOperation::Add:
-                    return D3D12_BLEND_OP_ADD;
-                case wgpu::BlendOperation::Subtract:
-                    return D3D12_BLEND_OP_SUBTRACT;
-                case wgpu::BlendOperation::ReverseSubtract:
-                    return D3D12_BLEND_OP_REV_SUBTRACT;
-                case wgpu::BlendOperation::Min:
-                    return D3D12_BLEND_OP_MIN;
-                case wgpu::BlendOperation::Max:
-                    return D3D12_BLEND_OP_MAX;
-            }
-        }
+D3D12_BLEND_OP D3D12BlendOperation(wgpu::BlendOperation operation) {
+    switch (operation) {
+        case wgpu::BlendOperation::Add:
+            return D3D12_BLEND_OP_ADD;
+        case wgpu::BlendOperation::Subtract:
+            return D3D12_BLEND_OP_SUBTRACT;
+        case wgpu::BlendOperation::ReverseSubtract:
+            return D3D12_BLEND_OP_REV_SUBTRACT;
+        case wgpu::BlendOperation::Min:
+            return D3D12_BLEND_OP_MIN;
+        case wgpu::BlendOperation::Max:
+            return D3D12_BLEND_OP_MAX;
+    }
+}
 
-        uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
-            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
-                              D3D12_COLOR_WRITE_ENABLE_RED,
-                          "ColorWriteMask values must match");
-            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
-                              D3D12_COLOR_WRITE_ENABLE_GREEN,
-                          "ColorWriteMask values must match");
-            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
-                              D3D12_COLOR_WRITE_ENABLE_BLUE,
-                          "ColorWriteMask values must match");
-            static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
-                              D3D12_COLOR_WRITE_ENABLE_ALPHA,
-                          "ColorWriteMask values must match");
-            return static_cast<uint8_t>(writeMask);
-        }
+uint8_t D3D12RenderTargetWriteMask(wgpu::ColorWriteMask writeMask) {
+    static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Red) ==
+                      D3D12_COLOR_WRITE_ENABLE_RED,
+                  "ColorWriteMask values must match");
+    static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Green) ==
+                      D3D12_COLOR_WRITE_ENABLE_GREEN,
+                  "ColorWriteMask values must match");
+    static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Blue) ==
+                      D3D12_COLOR_WRITE_ENABLE_BLUE,
+                  "ColorWriteMask values must match");
+    static_assert(static_cast<D3D12_COLOR_WRITE_ENABLE>(wgpu::ColorWriteMask::Alpha) ==
+                      D3D12_COLOR_WRITE_ENABLE_ALPHA,
+                  "ColorWriteMask values must match");
+    return static_cast<uint8_t>(writeMask);
+}
 
-        D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
-            D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
-            blendDesc.BlendEnable = state->blend != nullptr;
-            if (blendDesc.BlendEnable) {
-                blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
-                blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
-                blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
-                blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
-                blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
-                blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
-            }
-            blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
-            blendDesc.LogicOpEnable = false;
-            blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
-            return blendDesc;
-        }
+D3D12_RENDER_TARGET_BLEND_DESC ComputeColorDesc(const ColorTargetState* state) {
+    D3D12_RENDER_TARGET_BLEND_DESC blendDesc;
+    blendDesc.BlendEnable = state->blend != nullptr;
+    if (blendDesc.BlendEnable) {
+        blendDesc.SrcBlend = D3D12Blend(state->blend->color.srcFactor);
+        blendDesc.DestBlend = D3D12Blend(state->blend->color.dstFactor);
+        blendDesc.BlendOp = D3D12BlendOperation(state->blend->color.operation);
+        blendDesc.SrcBlendAlpha = D3D12AlphaBlend(state->blend->alpha.srcFactor);
+        blendDesc.DestBlendAlpha = D3D12AlphaBlend(state->blend->alpha.dstFactor);
+        blendDesc.BlendOpAlpha = D3D12BlendOperation(state->blend->alpha.operation);
+    }
+    blendDesc.RenderTargetWriteMask = D3D12RenderTargetWriteMask(state->writeMask);
+    blendDesc.LogicOpEnable = false;
+    blendDesc.LogicOp = D3D12_LOGIC_OP_NOOP;
+    return blendDesc;
+}
 
-        D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
-            switch (op) {
-                case wgpu::StencilOperation::Keep:
-                    return D3D12_STENCIL_OP_KEEP;
-                case wgpu::StencilOperation::Zero:
-                    return D3D12_STENCIL_OP_ZERO;
-                case wgpu::StencilOperation::Replace:
-                    return D3D12_STENCIL_OP_REPLACE;
-                case wgpu::StencilOperation::IncrementClamp:
-                    return D3D12_STENCIL_OP_INCR_SAT;
-                case wgpu::StencilOperation::DecrementClamp:
-                    return D3D12_STENCIL_OP_DECR_SAT;
-                case wgpu::StencilOperation::Invert:
-                    return D3D12_STENCIL_OP_INVERT;
-                case wgpu::StencilOperation::IncrementWrap:
-                    return D3D12_STENCIL_OP_INCR;
-                case wgpu::StencilOperation::DecrementWrap:
-                    return D3D12_STENCIL_OP_DECR;
-            }
-        }
+D3D12_STENCIL_OP StencilOp(wgpu::StencilOperation op) {
+    switch (op) {
+        case wgpu::StencilOperation::Keep:
+            return D3D12_STENCIL_OP_KEEP;
+        case wgpu::StencilOperation::Zero:
+            return D3D12_STENCIL_OP_ZERO;
+        case wgpu::StencilOperation::Replace:
+            return D3D12_STENCIL_OP_REPLACE;
+        case wgpu::StencilOperation::IncrementClamp:
+            return D3D12_STENCIL_OP_INCR_SAT;
+        case wgpu::StencilOperation::DecrementClamp:
+            return D3D12_STENCIL_OP_DECR_SAT;
+        case wgpu::StencilOperation::Invert:
+            return D3D12_STENCIL_OP_INVERT;
+        case wgpu::StencilOperation::IncrementWrap:
+            return D3D12_STENCIL_OP_INCR;
+        case wgpu::StencilOperation::DecrementWrap:
+            return D3D12_STENCIL_OP_DECR;
+    }
+}
 
-        D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
-            D3D12_DEPTH_STENCILOP_DESC desc;
+D3D12_DEPTH_STENCILOP_DESC StencilOpDesc(const StencilFaceState& descriptor) {
+    D3D12_DEPTH_STENCILOP_DESC desc;
 
-            desc.StencilFailOp = StencilOp(descriptor.failOp);
-            desc.StencilDepthFailOp = StencilOp(descriptor.depthFailOp);
-            desc.StencilPassOp = StencilOp(descriptor.passOp);
-            desc.StencilFunc = ToD3D12ComparisonFunc(descriptor.compare);
+    desc.StencilFailOp = StencilOp(descriptor.failOp);
+    desc.StencilDepthFailOp = StencilOp(descriptor.depthFailOp);
+    desc.StencilPassOp = StencilOp(descriptor.passOp);
+    desc.StencilFunc = ToD3D12ComparisonFunc(descriptor.compare);
 
-            return desc;
-        }
+    return desc;
+}
 
-        D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
-            D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
-            mDepthStencilDescriptor.DepthEnable =
-                (descriptor->depthCompare == wgpu::CompareFunction::Always &&
-                 !descriptor->depthWriteEnabled)
-                    ? FALSE
-                    : TRUE;
-            mDepthStencilDescriptor.DepthWriteMask = descriptor->depthWriteEnabled
-                                                         ? D3D12_DEPTH_WRITE_MASK_ALL
-                                                         : D3D12_DEPTH_WRITE_MASK_ZERO;
-            mDepthStencilDescriptor.DepthFunc = ToD3D12ComparisonFunc(descriptor->depthCompare);
+D3D12_DEPTH_STENCIL_DESC ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
+    D3D12_DEPTH_STENCIL_DESC mDepthStencilDescriptor;
+    mDepthStencilDescriptor.DepthEnable =
+        (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+         !descriptor->depthWriteEnabled)
+            ? FALSE
+            : TRUE;
+    mDepthStencilDescriptor.DepthWriteMask =
+        descriptor->depthWriteEnabled ? D3D12_DEPTH_WRITE_MASK_ALL : D3D12_DEPTH_WRITE_MASK_ZERO;
+    mDepthStencilDescriptor.DepthFunc = ToD3D12ComparisonFunc(descriptor->depthCompare);
 
-            mDepthStencilDescriptor.StencilEnable = StencilTestEnabled(descriptor) ? TRUE : FALSE;
-            mDepthStencilDescriptor.StencilReadMask =
-                static_cast<UINT8>(descriptor->stencilReadMask);
-            mDepthStencilDescriptor.StencilWriteMask =
-                static_cast<UINT8>(descriptor->stencilWriteMask);
+    mDepthStencilDescriptor.StencilEnable = StencilTestEnabled(descriptor) ? TRUE : FALSE;
+    mDepthStencilDescriptor.StencilReadMask = static_cast<UINT8>(descriptor->stencilReadMask);
+    mDepthStencilDescriptor.StencilWriteMask = static_cast<UINT8>(descriptor->stencilWriteMask);
 
-            mDepthStencilDescriptor.FrontFace = StencilOpDesc(descriptor->stencilFront);
-            mDepthStencilDescriptor.BackFace = StencilOpDesc(descriptor->stencilBack);
-            return mDepthStencilDescriptor;
-        }
+    mDepthStencilDescriptor.FrontFace = StencilOpDesc(descriptor->stencilFront);
+    mDepthStencilDescriptor.BackFace = StencilOpDesc(descriptor->stencilBack);
+    return mDepthStencilDescriptor;
+}
 
-        D3D12_INDEX_BUFFER_STRIP_CUT_VALUE ComputeIndexBufferStripCutValue(
-            wgpu::PrimitiveTopology primitiveTopology,
-            wgpu::IndexFormat indexFormat) {
-            if (primitiveTopology != wgpu::PrimitiveTopology::TriangleStrip &&
-                primitiveTopology != wgpu::PrimitiveTopology::LineStrip) {
-                return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
-            }
-
-            switch (indexFormat) {
-                case wgpu::IndexFormat::Uint16:
-                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
-                case wgpu::IndexFormat::Uint32:
-                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF;
-                case wgpu::IndexFormat::Undefined:
-                    return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
-            }
-        }
-
-    }  // anonymous namespace
-
-    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
-        Device* device,
-        const RenderPipelineDescriptor* descriptor) {
-        return AcquireRef(new RenderPipeline(device, descriptor));
+D3D12_INDEX_BUFFER_STRIP_CUT_VALUE ComputeIndexBufferStripCutValue(
+    wgpu::PrimitiveTopology primitiveTopology,
+    wgpu::IndexFormat indexFormat) {
+    if (primitiveTopology != wgpu::PrimitiveTopology::TriangleStrip &&
+        primitiveTopology != wgpu::PrimitiveTopology::LineStrip) {
+        return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
     }
 
-    MaybeError RenderPipeline::Initialize() {
-        Device* device = ToBackend(GetDevice());
-        uint32_t compileFlags = 0;
+    switch (indexFormat) {
+        case wgpu::IndexFormat::Uint16:
+            return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF;
+        case wgpu::IndexFormat::Uint32:
+            return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF;
+        case wgpu::IndexFormat::Undefined:
+            return D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED;
+    }
+}
 
-        if (!device->IsToggleEnabled(Toggle::UseDXC) &&
-            !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
-            compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
-        }
+}  // anonymous namespace
 
-        if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
-            compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
-        }
+Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+    Device* device,
+    const RenderPipelineDescriptor* descriptor) {
+    return AcquireRef(new RenderPipeline(device, descriptor));
+}
 
-        // SPRIV-cross does matrix multiplication expecting row major matrices
-        compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+MaybeError RenderPipeline::Initialize() {
+    Device* device = ToBackend(GetDevice());
+    uint32_t compileFlags = 0;
 
-        // FXC can miscompile code that depends on special float values (NaN, INF, etc) when IEEE
-        // strictness is not enabled. See crbug.com/tint/976.
-        compileFlags |= D3DCOMPILE_IEEE_STRICTNESS;
-
-        D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
-
-        PerStage<ProgrammableStage> pipelineStages = GetAllStages();
-
-        PerStage<D3D12_SHADER_BYTECODE*> shaders;
-        shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
-        shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
-
-        PerStage<CompiledShader> compiledShader;
-
-        for (auto stage : IterateStages(GetStageMask())) {
-            DAWN_TRY_ASSIGN(
-                compiledShader[stage],
-                ToBackend(pipelineStages[stage].module)
-                    ->Compile(pipelineStages[stage], stage, ToBackend(GetLayout()), compileFlags));
-            *shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
-        }
-
-        mUsesVertexOrInstanceIndex =
-            compiledShader[SingleShaderStage::Vertex].usesVertexOrInstanceIndex;
-
-        PipelineLayout* layout = ToBackend(GetLayout());
-
-        descriptorD3D12.pRootSignature = layout->GetRootSignature();
-
-        // D3D12 logs warnings if any empty input state is used
-        std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
-        if (GetAttributeLocationsUsed().any()) {
-            descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
-        }
-
-        descriptorD3D12.IBStripCutValue =
-            ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
-
-        descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
-        descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
-        descriptorD3D12.RasterizerState.FrontCounterClockwise =
-            (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
-        descriptorD3D12.RasterizerState.DepthBias = GetDepthBias();
-        descriptorD3D12.RasterizerState.DepthBiasClamp = GetDepthBiasClamp();
-        descriptorD3D12.RasterizerState.SlopeScaledDepthBias = GetDepthBiasSlopeScale();
-        descriptorD3D12.RasterizerState.DepthClipEnable = TRUE;
-        descriptorD3D12.RasterizerState.MultisampleEnable = (GetSampleCount() > 1) ? TRUE : FALSE;
-        descriptorD3D12.RasterizerState.AntialiasedLineEnable = FALSE;
-        descriptorD3D12.RasterizerState.ForcedSampleCount = 0;
-        descriptorD3D12.RasterizerState.ConservativeRaster =
-            D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
-
-        if (HasDepthStencilAttachment()) {
-            descriptorD3D12.DSVFormat = D3D12TextureFormat(GetDepthStencilFormat());
-        }
-
-        static_assert(kMaxColorAttachments == 8);
-        for (uint8_t i = 0; i < kMaxColorAttachments; i++) {
-            descriptorD3D12.RTVFormats[i] = DXGI_FORMAT_UNKNOWN;
-            descriptorD3D12.BlendState.RenderTarget[i].BlendEnable = false;
-            descriptorD3D12.BlendState.RenderTarget[i].RenderTargetWriteMask = 0;
-            descriptorD3D12.BlendState.RenderTarget[i].LogicOpEnable = false;
-            descriptorD3D12.BlendState.RenderTarget[i].LogicOp = D3D12_LOGIC_OP_NOOP;
-        }
-        ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
-            GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
-        for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
-            descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
-                D3D12TextureFormat(GetColorAttachmentFormat(i));
-            descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
-                ComputeColorDesc(GetColorTargetState(i));
-        }
-        ASSERT(highestColorAttachmentIndexPlusOne <= kMaxColorAttachmentsTyped);
-        descriptorD3D12.NumRenderTargets = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
-
-        descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
-        descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
-
-        descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
-
-        descriptorD3D12.SampleMask = GetSampleMask();
-        descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
-        descriptorD3D12.SampleDesc.Count = GetSampleCount();
-        descriptorD3D12.SampleDesc.Quality = 0;
-
-        mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
-
-        DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
-                                  &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
-                              "D3D12 create graphics pipeline state"));
-
-        SetLabelImpl();
-
-        return {};
+    if (!device->IsToggleEnabled(Toggle::UseDXC) &&
+        !device->IsToggleEnabled(Toggle::FxcOptimizations)) {
+        compileFlags |= D3DCOMPILE_OPTIMIZATION_LEVEL0;
     }
 
-    RenderPipeline::~RenderPipeline() = default;
-
-    void RenderPipeline::DestroyImpl() {
-        RenderPipelineBase::DestroyImpl();
-        ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+    if (device->IsToggleEnabled(Toggle::EmitHLSLDebugSymbols)) {
+        compileFlags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION;
     }
 
-    D3D12_PRIMITIVE_TOPOLOGY RenderPipeline::GetD3D12PrimitiveTopology() const {
-        return mD3d12PrimitiveTopology;
+    // SPIRV-Cross does matrix multiplication expecting row major matrices
+    compileFlags |= D3DCOMPILE_PACK_MATRIX_ROW_MAJOR;
+
+    // FXC can miscompile code that depends on special float values (NaN, INF, etc) when IEEE
+    // strictness is not enabled. See crbug.com/tint/976.
+    compileFlags |= D3DCOMPILE_IEEE_STRICTNESS;
+
+    D3D12_GRAPHICS_PIPELINE_STATE_DESC descriptorD3D12 = {};
+
+    PerStage<ProgrammableStage> pipelineStages = GetAllStages();
+
+    PerStage<D3D12_SHADER_BYTECODE*> shaders;
+    shaders[SingleShaderStage::Vertex] = &descriptorD3D12.VS;
+    shaders[SingleShaderStage::Fragment] = &descriptorD3D12.PS;
+
+    PerStage<CompiledShader> compiledShader;
+
+    for (auto stage : IterateStages(GetStageMask())) {
+        DAWN_TRY_ASSIGN(compiledShader[stage], ToBackend(pipelineStages[stage].module)
+                                                   ->Compile(pipelineStages[stage], stage,
+                                                             ToBackend(GetLayout()), compileFlags));
+        *shaders[stage] = compiledShader[stage].GetD3D12ShaderBytecode();
     }
 
-    ID3D12PipelineState* RenderPipeline::GetPipelineState() const {
-        return mPipelineState.Get();
+    mUsesVertexOrInstanceIndex =
+        compiledShader[SingleShaderStage::Vertex].usesVertexOrInstanceIndex;
+
+    PipelineLayout* layout = ToBackend(GetLayout());
+
+    descriptorD3D12.pRootSignature = layout->GetRootSignature();
+
+    // D3D12 logs warnings if any empty input state is used
+    std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes> inputElementDescriptors;
+    if (GetAttributeLocationsUsed().any()) {
+        descriptorD3D12.InputLayout = ComputeInputLayout(&inputElementDescriptors);
     }
 
-    bool RenderPipeline::UsesVertexOrInstanceIndex() const {
-        return mUsesVertexOrInstanceIndex;
+    descriptorD3D12.IBStripCutValue =
+        ComputeIndexBufferStripCutValue(GetPrimitiveTopology(), GetStripIndexFormat());
+
+    descriptorD3D12.RasterizerState.FillMode = D3D12_FILL_MODE_SOLID;
+    descriptorD3D12.RasterizerState.CullMode = D3D12CullMode(GetCullMode());
+    descriptorD3D12.RasterizerState.FrontCounterClockwise =
+        (GetFrontFace() == wgpu::FrontFace::CCW) ? TRUE : FALSE;
+    descriptorD3D12.RasterizerState.DepthBias = GetDepthBias();
+    descriptorD3D12.RasterizerState.DepthBiasClamp = GetDepthBiasClamp();
+    descriptorD3D12.RasterizerState.SlopeScaledDepthBias = GetDepthBiasSlopeScale();
+    descriptorD3D12.RasterizerState.DepthClipEnable = TRUE;
+    descriptorD3D12.RasterizerState.MultisampleEnable = (GetSampleCount() > 1) ? TRUE : FALSE;
+    descriptorD3D12.RasterizerState.AntialiasedLineEnable = FALSE;
+    descriptorD3D12.RasterizerState.ForcedSampleCount = 0;
+    descriptorD3D12.RasterizerState.ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF;
+
+    if (HasDepthStencilAttachment()) {
+        descriptorD3D12.DSVFormat = D3D12TextureFormat(GetDepthStencilFormat());
     }
 
-    void RenderPipeline::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
+    static_assert(kMaxColorAttachments == 8);
+    for (uint8_t i = 0; i < kMaxColorAttachments; i++) {
+        descriptorD3D12.RTVFormats[i] = DXGI_FORMAT_UNKNOWN;
+        descriptorD3D12.BlendState.RenderTarget[i].BlendEnable = false;
+        descriptorD3D12.BlendState.RenderTarget[i].RenderTargetWriteMask = 0;
+        descriptorD3D12.BlendState.RenderTarget[i].LogicOpEnable = false;
+        descriptorD3D12.BlendState.RenderTarget[i].LogicOp = D3D12_LOGIC_OP_NOOP;
+    }
+    ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
+        GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
+    for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+        descriptorD3D12.RTVFormats[static_cast<uint8_t>(i)] =
+            D3D12TextureFormat(GetColorAttachmentFormat(i));
+        descriptorD3D12.BlendState.RenderTarget[static_cast<uint8_t>(i)] =
+            ComputeColorDesc(GetColorTargetState(i));
+    }
+    ASSERT(highestColorAttachmentIndexPlusOne <= kMaxColorAttachmentsTyped);
+    descriptorD3D12.NumRenderTargets = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+
+    descriptorD3D12.BlendState.AlphaToCoverageEnable = IsAlphaToCoverageEnabled();
+    descriptorD3D12.BlendState.IndependentBlendEnable = TRUE;
+
+    descriptorD3D12.DepthStencilState = ComputeDepthStencilDesc(GetDepthStencilState());
+
+    descriptorD3D12.SampleMask = GetSampleMask();
+    descriptorD3D12.PrimitiveTopologyType = D3D12PrimitiveTopologyType(GetPrimitiveTopology());
+    descriptorD3D12.SampleDesc.Count = GetSampleCount();
+    descriptorD3D12.SampleDesc.Quality = 0;
+
+    mD3d12PrimitiveTopology = D3D12PrimitiveTopology(GetPrimitiveTopology());
+
+    DAWN_TRY(CheckHRESULT(device->GetD3D12Device()->CreateGraphicsPipelineState(
+                              &descriptorD3D12, IID_PPV_ARGS(&mPipelineState)),
+                          "D3D12 create graphics pipeline state"));
+
+    SetLabelImpl();
+
+    return {};
+}
+
+RenderPipeline::~RenderPipeline() = default;
+
+void RenderPipeline::DestroyImpl() {
+    RenderPipelineBase::DestroyImpl();
+    ToBackend(GetDevice())->ReferenceUntilUnused(mPipelineState);
+}
+
+D3D12_PRIMITIVE_TOPOLOGY RenderPipeline::GetD3D12PrimitiveTopology() const {
+    return mD3d12PrimitiveTopology;
+}
+
+ID3D12PipelineState* RenderPipeline::GetPipelineState() const {
+    return mPipelineState.Get();
+}
+
+bool RenderPipeline::UsesVertexOrInstanceIndex() const {
+    return mUsesVertexOrInstanceIndex;
+}
+
+void RenderPipeline::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), GetPipelineState(), "Dawn_RenderPipeline", GetLabel());
+}
+
+ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndirectCommandSignature() {
+    if (mUsesVertexOrInstanceIndex) {
+        return ToBackend(GetLayout())->GetDrawIndirectCommandSignatureWithInstanceVertexOffsets();
     }
 
-    ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndirectCommandSignature() {
-        if (mUsesVertexOrInstanceIndex) {
-            return ToBackend(GetLayout())
-                ->GetDrawIndirectCommandSignatureWithInstanceVertexOffsets();
+    return ToBackend(GetDevice())->GetDrawIndirectSignature();
+}
+
+ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndexedIndirectCommandSignature() {
+    if (mUsesVertexOrInstanceIndex) {
+        return ToBackend(GetLayout())
+            ->GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets();
+    }
+
+    return ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
+}
+
+D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
+    std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
+    unsigned int count = 0;
+    for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+        D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
+
+        const VertexAttributeInfo& attribute = GetAttribute(loc);
+
+        // If the HLSL semantic is TEXCOORDN the SemanticName should be "TEXCOORD" and the
+        // SemanticIndex N
+        inputElementDescriptor.SemanticName = "TEXCOORD";
+        inputElementDescriptor.SemanticIndex = static_cast<uint8_t>(loc);
+        inputElementDescriptor.Format = VertexFormatType(attribute.format);
+        inputElementDescriptor.InputSlot = static_cast<uint8_t>(attribute.vertexBufferSlot);
+
+        const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
+
+        inputElementDescriptor.AlignedByteOffset = attribute.offset;
+        inputElementDescriptor.InputSlotClass = VertexStepModeFunction(input.stepMode);
+        if (inputElementDescriptor.InputSlotClass == D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA) {
+            inputElementDescriptor.InstanceDataStepRate = 0;
+        } else {
+            inputElementDescriptor.InstanceDataStepRate = 1;
         }
-
-        return ToBackend(GetDevice())->GetDrawIndirectSignature();
     }
 
-    ComPtr<ID3D12CommandSignature> RenderPipeline::GetDrawIndexedIndirectCommandSignature() {
-        if (mUsesVertexOrInstanceIndex) {
-            return ToBackend(GetLayout())
-                ->GetDrawIndexedIndirectCommandSignatureWithInstanceVertexOffsets();
-        }
+    D3D12_INPUT_LAYOUT_DESC inputLayoutDescriptor;
+    inputLayoutDescriptor.pInputElementDescs = &(*inputElementDescriptors)[0];
+    inputLayoutDescriptor.NumElements = count;
+    return inputLayoutDescriptor;
+}
 
-        return ToBackend(GetDevice())->GetDrawIndexedIndirectSignature();
-    }
-
-    D3D12_INPUT_LAYOUT_DESC RenderPipeline::ComputeInputLayout(
-        std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors) {
-        unsigned int count = 0;
-        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
-            D3D12_INPUT_ELEMENT_DESC& inputElementDescriptor = (*inputElementDescriptors)[count++];
-
-            const VertexAttributeInfo& attribute = GetAttribute(loc);
-
-            // If the HLSL semantic is TEXCOORDN the SemanticName should be "TEXCOORD" and the
-            // SemanticIndex N
-            inputElementDescriptor.SemanticName = "TEXCOORD";
-            inputElementDescriptor.SemanticIndex = static_cast<uint8_t>(loc);
-            inputElementDescriptor.Format = VertexFormatType(attribute.format);
-            inputElementDescriptor.InputSlot = static_cast<uint8_t>(attribute.vertexBufferSlot);
-
-            const VertexBufferInfo& input = GetVertexBuffer(attribute.vertexBufferSlot);
-
-            inputElementDescriptor.AlignedByteOffset = attribute.offset;
-            inputElementDescriptor.InputSlotClass = VertexStepModeFunction(input.stepMode);
-            if (inputElementDescriptor.InputSlotClass ==
-                D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA) {
-                inputElementDescriptor.InstanceDataStepRate = 0;
-            } else {
-                inputElementDescriptor.InstanceDataStepRate = 1;
-            }
-        }
-
-        D3D12_INPUT_LAYOUT_DESC inputLayoutDescriptor;
-        inputLayoutDescriptor.pInputElementDescs = &(*inputElementDescriptors)[0];
-        inputLayoutDescriptor.NumElements = count;
-        return inputLayoutDescriptor;
-    }
-
-    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
-                                         WGPUCreateRenderPipelineAsyncCallback callback,
-                                         void* userdata) {
-        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
-            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
-                                                            userdata);
-        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
-    }
+void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                     WGPUCreateRenderPipelineAsyncCallback callback,
+                                     void* userdata) {
+    std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+        std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                        userdata);
+    CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/RenderPipelineD3D12.h b/src/dawn/native/d3d12/RenderPipelineD3D12.h
index 0af5e1d..51a9361 100644
--- a/src/dawn/native/d3d12/RenderPipelineD3D12.h
+++ b/src/dawn/native/d3d12/RenderPipelineD3D12.h
@@ -22,44 +22,44 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class RenderPipeline final : public RenderPipelineBase {
-      public:
-        static Ref<RenderPipeline> CreateUninitialized(Device* device,
-                                                       const RenderPipelineDescriptor* descriptor);
-        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
-                                    WGPUCreateRenderPipelineAsyncCallback callback,
-                                    void* userdata);
-        RenderPipeline() = delete;
+class RenderPipeline final : public RenderPipelineBase {
+  public:
+    static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                   const RenderPipelineDescriptor* descriptor);
+    static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                WGPUCreateRenderPipelineAsyncCallback callback,
+                                void* userdata);
+    RenderPipeline() = delete;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-        D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
-        ID3D12PipelineState* GetPipelineState() const;
+    D3D12_PRIMITIVE_TOPOLOGY GetD3D12PrimitiveTopology() const;
+    ID3D12PipelineState* GetPipelineState() const;
 
-        bool UsesVertexOrInstanceIndex() const;
+    bool UsesVertexOrInstanceIndex() const;
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-        ComPtr<ID3D12CommandSignature> GetDrawIndirectCommandSignature();
+    ComPtr<ID3D12CommandSignature> GetDrawIndirectCommandSignature();
 
-        ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectCommandSignature();
+    ComPtr<ID3D12CommandSignature> GetDrawIndexedIndirectCommandSignature();
 
-      private:
-        ~RenderPipeline() override;
+  private:
+    ~RenderPipeline() override;
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        using RenderPipelineBase::RenderPipelineBase;
-        D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
-            std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
+    using RenderPipelineBase::RenderPipelineBase;
+    D3D12_INPUT_LAYOUT_DESC ComputeInputLayout(
+        std::array<D3D12_INPUT_ELEMENT_DESC, kMaxVertexAttributes>* inputElementDescriptors);
 
-        D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
-        ComPtr<ID3D12PipelineState> mPipelineState;
-        bool mUsesVertexOrInstanceIndex;
-    };
+    D3D12_PRIMITIVE_TOPOLOGY mD3d12PrimitiveTopology;
+    ComPtr<ID3D12PipelineState> mPipelineState;
+    bool mUsesVertexOrInstanceIndex;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp b/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
index 67c1ba6..2952505 100644
--- a/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
+++ b/src/dawn/native/d3d12/ResidencyManagerD3D12.cpp
@@ -25,350 +25,346 @@
 
 namespace dawn::native::d3d12 {
 
-    ResidencyManager::ResidencyManager(Device* device)
-        : mDevice(device),
-          mResidencyManagementEnabled(
-              device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
-        UpdateVideoMemoryInfo();
-    }
+ResidencyManager::ResidencyManager(Device* device)
+    : mDevice(device),
+      mResidencyManagementEnabled(device->IsToggleEnabled(Toggle::UseD3D12ResidencyManagement)) {
+    UpdateVideoMemoryInfo();
+}
 
-    // Increments number of locks on a heap to ensure the heap remains resident.
-    MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
-        if (!mResidencyManagementEnabled) {
-            return {};
-        }
-
-        // If the heap isn't already resident, make it resident.
-        if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
-            ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
-            uint64_t size = pageable->GetSize();
-
-            DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()),
-                                             size, 1, &d3d12Pageable));
-        }
-
-        // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
-        if (pageable->IsInResidencyLRUCache()) {
-            pageable->RemoveFromList();
-        }
-
-        pageable->IncrementResidencyLock();
-
+// Increments number of locks on a heap to ensure the heap remains resident.
+MaybeError ResidencyManager::LockAllocation(Pageable* pageable) {
+    if (!mResidencyManagementEnabled) {
         return {};
     }
 
-    // Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
-    // inserted into the LRU cache and becomes eligible for eviction.
-    void ResidencyManager::UnlockAllocation(Pageable* pageable) {
-        if (!mResidencyManagementEnabled) {
-            return;
-        }
+    // If the heap isn't already resident, make it resident.
+    if (!pageable->IsInResidencyLRUCache() && !pageable->IsResidencyLocked()) {
+        ID3D12Pageable* d3d12Pageable = pageable->GetD3D12Pageable();
+        uint64_t size = pageable->GetSize();
 
-        ASSERT(pageable->IsResidencyLocked());
-        ASSERT(!pageable->IsInResidencyLRUCache());
-        pageable->DecrementResidencyLock();
-
-        // If another lock still exists on the heap, nothing further should be done.
-        if (pageable->IsResidencyLocked()) {
-            return;
-        }
-
-        // When all locks have been removed, the resource remains resident and becomes tracked in
-        // the corresponding LRU.
-        TrackResidentAllocation(pageable);
+        DAWN_TRY(MakeAllocationsResident(GetMemorySegmentInfo(pageable->GetMemorySegment()), size,
+                                         1, &d3d12Pageable));
     }
 
-    // Returns the appropriate MemorySegmentInfo for a given MemorySegment.
-    ResidencyManager::MemorySegmentInfo* ResidencyManager::GetMemorySegmentInfo(
-        MemorySegment memorySegment) {
-        switch (memorySegment) {
-            case MemorySegment::Local:
-                return &mVideoMemoryInfo.local;
-            case MemorySegment::NonLocal:
-                ASSERT(!mDevice->GetDeviceInfo().isUMA);
-                return &mVideoMemoryInfo.nonLocal;
-            default:
-                UNREACHABLE();
-        }
-    }
-
-    // Allows an application component external to Dawn to cap Dawn's residency budgets to prevent
-    // competition for device memory. Returns the amount of memory reserved, which may be less
-    // that the requested reservation when under pressure.
-    uint64_t ResidencyManager::SetExternalMemoryReservation(MemorySegment segment,
-                                                            uint64_t requestedReservationSize) {
-        MemorySegmentInfo* segmentInfo = GetMemorySegmentInfo(segment);
-
-        segmentInfo->externalRequest = requestedReservationSize;
-
-        UpdateMemorySegmentInfo(segmentInfo);
-
-        return segmentInfo->externalReservation;
-    }
-
-    void ResidencyManager::UpdateVideoMemoryInfo() {
-        UpdateMemorySegmentInfo(&mVideoMemoryInfo.local);
-        if (!mDevice->GetDeviceInfo().isUMA) {
-            UpdateMemorySegmentInfo(&mVideoMemoryInfo.nonLocal);
-        }
-    }
-
-    void ResidencyManager::UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo) {
-        DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
-
-        ToBackend(mDevice->GetAdapter())
-            ->GetHardwareAdapter()
-            ->QueryVideoMemoryInfo(0, segmentInfo->dxgiSegment, &queryVideoMemoryInfo);
-
-        // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
-        // system, and may be lower than expected in certain scenarios. Under memory pressure, we
-        // cap the external reservation to half the available budget, which prevents the external
-        // component from consuming a disproportionate share of memory and ensures that Dawn can
-        // continue to make forward progress. Note the choice to halve memory is arbitrarily chosen
-        // and subject to future experimentation.
-        segmentInfo->externalReservation =
-            std::min(queryVideoMemoryInfo.Budget / 2, segmentInfo->externalRequest);
-
-        segmentInfo->usage = queryVideoMemoryInfo.CurrentUsage - segmentInfo->externalReservation;
-
-        // If we're restricting the budget for testing, leave the budget as is.
-        if (mRestrictBudgetForTesting) {
-            return;
-        }
-
-        // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
-        // decreases fluctuations in the operating-system-defined budget, which improves stability
-        // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
-        // chosen and subject to future experimentation.
-        static constexpr float kBudgetCap = 0.95;
-        segmentInfo->budget =
-            (queryVideoMemoryInfo.Budget - segmentInfo->externalReservation) * kBudgetCap;
-    }
-
-    // Removes a heap from the LRU and returns the least recently used heap when possible. Returns
-    // nullptr when nothing further can be evicted.
-    ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
-        MemorySegmentInfo* memorySegment) {
-        // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
-        // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
-        // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
-        // the process budget and starving Dawn, which will cause thrash.
-        if (memorySegment->lruCache.empty()) {
-            return nullptr;
-        }
-
-        Pageable* pageable = memorySegment->lruCache.head()->value();
-
-        ExecutionSerial lastSubmissionSerial = pageable->GetLastSubmission();
-
-        // If the next candidate for eviction was inserted into the LRU during the current serial,
-        // it is because more memory is being used in a single command list than is available.
-        // In this scenario, we cannot make any more resources resident and thrashing must occur.
-        if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
-            return nullptr;
-        }
-
-        // We must ensure that any previous use of a resource has completed before the resource can
-        // be evicted.
-        if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
-            DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
-        }
-
+    // Since we can't evict the heap, it's unnecessary to track the heap in the LRU Cache.
+    if (pageable->IsInResidencyLRUCache()) {
         pageable->RemoveFromList();
-        return pageable;
     }
 
-    MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
-                                                   MemorySegment memorySegment) {
-        if (!mResidencyManagementEnabled) {
-            return {};
-        }
+    pageable->IncrementResidencyLock();
 
-        uint64_t bytesEvicted;
-        DAWN_TRY_ASSIGN(bytesEvicted,
-                        EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
-        DAWN_UNUSED(bytesEvicted);
+    return {};
+}
 
-        return {};
+// Decrements number of locks on a heap. When the number of locks becomes zero, the heap is
+// inserted into the LRU cache and becomes eligible for eviction.
+void ResidencyManager::UnlockAllocation(Pageable* pageable) {
+    if (!mResidencyManagementEnabled) {
+        return;
     }
 
-    // Any time we need to make something resident, we must check that we have enough free memory to
-    // make the new object resident while also staying within budget. If there isn't enough
-    // memory, we should evict until there is. Returns the number of bytes evicted.
-    ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(
-        uint64_t sizeToMakeResident,
-        MemorySegmentInfo* memorySegment) {
-        ASSERT(mResidencyManagementEnabled);
+    ASSERT(pageable->IsResidencyLocked());
+    ASSERT(!pageable->IsInResidencyLRUCache());
+    pageable->DecrementResidencyLock();
 
-        UpdateMemorySegmentInfo(memorySegment);
-
-        uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + memorySegment->usage;
-
-        // Return when we can call MakeResident and remain under budget.
-        if (memoryUsageAfterMakeResident < memorySegment->budget) {
-            return 0;
-        }
-
-        std::vector<ID3D12Pageable*> resourcesToEvict;
-        uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
-        uint64_t sizeEvicted = 0;
-        while (sizeEvicted < sizeNeededToBeUnderBudget) {
-            Pageable* pageable;
-            DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
-
-            // If no heap was returned, then nothing more can be evicted.
-            if (pageable == nullptr) {
-                break;
-            }
-
-            sizeEvicted += pageable->GetSize();
-            resourcesToEvict.push_back(pageable->GetD3D12Pageable());
-        }
-
-        if (resourcesToEvict.size() > 0) {
-            DAWN_TRY(CheckHRESULT(
-                mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
-                "Evicting resident heaps to free memory"));
-        }
-
-        return sizeEvicted;
+    // If another lock still exists on the heap, nothing further should be done.
+    if (pageable->IsResidencyLocked()) {
+        return;
     }
 
-    // Given a list of heaps that are pending usage, this function will estimate memory needed,
-    // evict resources until enough space is available, then make resident any heaps scheduled for
-    // usage.
-    MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
-        if (!mResidencyManagementEnabled) {
-            return {};
-        }
+    // When all locks have been removed, the resource remains resident and becomes tracked in
+    // the corresponding LRU.
+    TrackResidentAllocation(pageable);
+}
 
-        std::vector<ID3D12Pageable*> localHeapsToMakeResident;
-        std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
-        uint64_t localSizeToMakeResident = 0;
-        uint64_t nonLocalSizeToMakeResident = 0;
-
-        ExecutionSerial pendingCommandSerial = mDevice->GetPendingCommandSerial();
-        for (size_t i = 0; i < heapCount; i++) {
-            Heap* heap = heaps[i];
-
-            // Heaps that are locked resident are not tracked in the LRU cache.
-            if (heap->IsResidencyLocked()) {
-                continue;
-            }
-
-            if (heap->IsInResidencyLRUCache()) {
-                // If the heap is already in the LRU, we must remove it and append again below to
-                // update its position in the LRU.
-                heap->RemoveFromList();
-            } else {
-                if (heap->GetMemorySegment() == MemorySegment::Local) {
-                    localSizeToMakeResident += heap->GetSize();
-                    localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
-                } else {
-                    nonLocalSizeToMakeResident += heap->GetSize();
-                    nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
-                }
-            }
-
-            // If we submit a command list to the GPU, we must ensure that heaps referenced by that
-            // command list stay resident at least until that command list has finished execution.
-            // Setting this serial unnecessarily can leave the LRU in a state where nothing is
-            // eligible for eviction, even though some evictions may be possible.
-            heap->SetLastSubmission(pendingCommandSerial);
-
-            // Insert the heap into the appropriate LRU.
-            TrackResidentAllocation(heap);
-        }
-
-        if (localSizeToMakeResident > 0) {
-            return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
-                                           localHeapsToMakeResident.size(),
-                                           localHeapsToMakeResident.data());
-        }
-
-        if (nonLocalSizeToMakeResident > 0) {
+// Returns the appropriate MemorySegmentInfo for a given MemorySegment.
+ResidencyManager::MemorySegmentInfo* ResidencyManager::GetMemorySegmentInfo(
+    MemorySegment memorySegment) {
+    switch (memorySegment) {
+        case MemorySegment::Local:
+            return &mVideoMemoryInfo.local;
+        case MemorySegment::NonLocal:
             ASSERT(!mDevice->GetDeviceInfo().isUMA);
-            return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
-                                           nonLocalHeapsToMakeResident.size(),
-                                           nonLocalHeapsToMakeResident.data());
-        }
+            return &mVideoMemoryInfo.nonLocal;
+        default:
+            UNREACHABLE();
+    }
+}
 
+// Allows an application component external to Dawn to cap Dawn's residency budgets to prevent
+// competition for device memory. Returns the amount of memory reserved, which may be less
+// than the requested reservation when under pressure.
+uint64_t ResidencyManager::SetExternalMemoryReservation(MemorySegment segment,
+                                                        uint64_t requestedReservationSize) {
+    MemorySegmentInfo* segmentInfo = GetMemorySegmentInfo(segment);
+
+    segmentInfo->externalRequest = requestedReservationSize;
+
+    UpdateMemorySegmentInfo(segmentInfo);
+
+    return segmentInfo->externalReservation;
+}
+
+void ResidencyManager::UpdateVideoMemoryInfo() {
+    UpdateMemorySegmentInfo(&mVideoMemoryInfo.local);
+    if (!mDevice->GetDeviceInfo().isUMA) {
+        UpdateMemorySegmentInfo(&mVideoMemoryInfo.nonLocal);
+    }
+}
+
+void ResidencyManager::UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo) {
+    DXGI_QUERY_VIDEO_MEMORY_INFO queryVideoMemoryInfo;
+
+    ToBackend(mDevice->GetAdapter())
+        ->GetHardwareAdapter()
+        ->QueryVideoMemoryInfo(0, segmentInfo->dxgiSegment, &queryVideoMemoryInfo);
+
+    // The video memory budget provided by QueryVideoMemoryInfo is defined by the operating
+    // system, and may be lower than expected in certain scenarios. Under memory pressure, we
+    // cap the external reservation to half the available budget, which prevents the external
+    // component from consuming a disproportionate share of memory and ensures that Dawn can
+    // continue to make forward progress. Note the choice to halve memory is arbitrarily chosen
+    // and subject to future experimentation.
+    segmentInfo->externalReservation =
+        std::min(queryVideoMemoryInfo.Budget / 2, segmentInfo->externalRequest);
+
+    segmentInfo->usage = queryVideoMemoryInfo.CurrentUsage - segmentInfo->externalReservation;
+
+    // If we're restricting the budget for testing, leave the budget as is.
+    if (mRestrictBudgetForTesting) {
+        return;
+    }
+
+    // We cap Dawn's budget to 95% of the provided budget. Leaving some budget unused
+    // decreases fluctuations in the operating-system-defined budget, which improves stability
+    // for both Dawn and other applications on the system. Note the value of 95% is arbitrarily
+    // chosen and subject to future experimentation.
+    static constexpr float kBudgetCap = 0.95;
+    segmentInfo->budget =
+        (queryVideoMemoryInfo.Budget - segmentInfo->externalReservation) * kBudgetCap;
+}
+
+// Removes a heap from the LRU and returns the least recently used heap when possible. Returns
+// nullptr when nothing further can be evicted.
+ResultOrError<Pageable*> ResidencyManager::RemoveSingleEntryFromLRU(
+    MemorySegmentInfo* memorySegment) {
+    // If the LRU is empty, return nullptr to allow execution to continue. Note that fully
+    // emptying the LRU is undesirable, because it can mean either 1) the LRU is not accurately
+    // accounting for Dawn's GPU allocations, or 2) a component external to Dawn is using all of
+    // the process budget and starving Dawn, which will cause thrash.
+    if (memorySegment->lruCache.empty()) {
+        return nullptr;
+    }
+
+    Pageable* pageable = memorySegment->lruCache.head()->value();
+
+    ExecutionSerial lastSubmissionSerial = pageable->GetLastSubmission();
+
+    // If the next candidate for eviction was inserted into the LRU during the current serial,
+    // it is because more memory is being used in a single command list than is available.
+    // In this scenario, we cannot make any more resources resident and thrashing must occur.
+    if (lastSubmissionSerial == mDevice->GetPendingCommandSerial()) {
+        return nullptr;
+    }
+
+    // We must ensure that any previous use of a resource has completed before the resource can
+    // be evicted.
+    if (lastSubmissionSerial > mDevice->GetCompletedCommandSerial()) {
+        DAWN_TRY(mDevice->WaitForSerial(lastSubmissionSerial));
+    }
+
+    pageable->RemoveFromList();
+    return pageable;
+}
+
+MaybeError ResidencyManager::EnsureCanAllocate(uint64_t allocationSize,
+                                               MemorySegment memorySegment) {
+    if (!mResidencyManagementEnabled) {
         return {};
     }
 
-    MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
-                                                         uint64_t sizeToMakeResident,
-                                                         uint64_t numberOfObjectsToMakeResident,
-                                                         ID3D12Pageable** allocations) {
-        uint64_t bytesEvicted;
-        DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
-        DAWN_UNUSED(bytesEvicted);
+    uint64_t bytesEvicted;
+    DAWN_TRY_ASSIGN(bytesEvicted,
+                    EnsureCanMakeResident(allocationSize, GetMemorySegmentInfo(memorySegment)));
+    DAWN_UNUSED(bytesEvicted);
 
-        // Note that MakeResident is a synchronous function and can add a significant
-        // overhead to command recording. In the future, it may be possible to decrease this
-        // overhead by using MakeResident on a secondary thread, or by instead making use of
-        // the EnqueueMakeResident function (which is not available on all Windows 10
-        // platforms).
-        HRESULT hr =
-            mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+    return {};
+}
 
-        // A MakeResident call can fail if there's not enough available memory. This
-        // could occur when there's significant fragmentation or if the allocation size
-        // estimates are incorrect. We may be able to continue execution by evicting some
-        // more memory and calling MakeResident again.
-        while (FAILED(hr)) {
-            constexpr uint32_t kAdditonalSizeToEvict = 50000000;  // 50MB
+// Any time we need to make something resident, we must check that we have enough free memory to
+// make the new object resident while also staying within budget. If there isn't enough
+// memory, we should evict until there is. Returns the number of bytes evicted.
+ResultOrError<uint64_t> ResidencyManager::EnsureCanMakeResident(uint64_t sizeToMakeResident,
+                                                                MemorySegmentInfo* memorySegment) {
+    ASSERT(mResidencyManagementEnabled);
 
-            uint64_t sizeEvicted = 0;
+    UpdateMemorySegmentInfo(memorySegment);
 
-            DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditonalSizeToEvict, segment));
+    uint64_t memoryUsageAfterMakeResident = sizeToMakeResident + memorySegment->usage;
 
-            // If nothing can be evicted after MakeResident has failed, we cannot continue
-            // execution and must throw a fatal error.
-            if (sizeEvicted == 0) {
-                return DAWN_OUT_OF_MEMORY_ERROR(
-                    "MakeResident has failed due to excessive video memory usage.");
+    // Return when we can call MakeResident and remain under budget.
+    if (memoryUsageAfterMakeResident < memorySegment->budget) {
+        return 0;
+    }
+
+    std::vector<ID3D12Pageable*> resourcesToEvict;
+    uint64_t sizeNeededToBeUnderBudget = memoryUsageAfterMakeResident - memorySegment->budget;
+    uint64_t sizeEvicted = 0;
+    while (sizeEvicted < sizeNeededToBeUnderBudget) {
+        Pageable* pageable;
+        DAWN_TRY_ASSIGN(pageable, RemoveSingleEntryFromLRU(memorySegment));
+
+        // If no heap was returned, then nothing more can be evicted.
+        if (pageable == nullptr) {
+            break;
+        }
+
+        sizeEvicted += pageable->GetSize();
+        resourcesToEvict.push_back(pageable->GetD3D12Pageable());
+    }
+
+    if (resourcesToEvict.size() > 0) {
+        DAWN_TRY(CheckHRESULT(
+            mDevice->GetD3D12Device()->Evict(resourcesToEvict.size(), resourcesToEvict.data()),
+            "Evicting resident heaps to free memory"));
+    }
+
+    return sizeEvicted;
+}
+
+// Given a list of heaps that are pending usage, this function will estimate memory needed,
+// evict resources until enough space is available, then make resident any heaps scheduled for
+// usage.
+MaybeError ResidencyManager::EnsureHeapsAreResident(Heap** heaps, size_t heapCount) {
+    if (!mResidencyManagementEnabled) {
+        return {};
+    }
+
+    std::vector<ID3D12Pageable*> localHeapsToMakeResident;
+    std::vector<ID3D12Pageable*> nonLocalHeapsToMakeResident;
+    uint64_t localSizeToMakeResident = 0;
+    uint64_t nonLocalSizeToMakeResident = 0;
+
+    ExecutionSerial pendingCommandSerial = mDevice->GetPendingCommandSerial();
+    for (size_t i = 0; i < heapCount; i++) {
+        Heap* heap = heaps[i];
+
+        // Heaps that are locked resident are not tracked in the LRU cache.
+        if (heap->IsResidencyLocked()) {
+            continue;
+        }
+
+        if (heap->IsInResidencyLRUCache()) {
+            // If the heap is already in the LRU, we must remove it and append again below to
+            // update its position in the LRU.
+            heap->RemoveFromList();
+        } else {
+            if (heap->GetMemorySegment() == MemorySegment::Local) {
+                localSizeToMakeResident += heap->GetSize();
+                localHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
+            } else {
+                nonLocalSizeToMakeResident += heap->GetSize();
+                nonLocalHeapsToMakeResident.push_back(heap->GetD3D12Pageable());
             }
-
-            hr =
-                mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
         }
 
-        return {};
+        // If we submit a command list to the GPU, we must ensure that heaps referenced by that
+        // command list stay resident at least until that command list has finished execution.
+        // Setting this serial unnecessarily can leave the LRU in a state where nothing is
+        // eligible for eviction, even though some evictions may be possible.
+        heap->SetLastSubmission(pendingCommandSerial);
+
+        // Insert the heap into the appropriate LRU.
+        TrackResidentAllocation(heap);
     }
 
-    // Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
-    // become resident within the current serial. Failing to call this function when an allocation
-    // is implicitly made resident will cause the residency manager to view the allocation as
-    // non-resident and call MakeResident - which will make D3D12's internal residency refcount on
-    // the allocation out of sync with Dawn.
-    void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
-        if (!mResidencyManagementEnabled) {
-            return;
+    if (localSizeToMakeResident > 0) {
+        return MakeAllocationsResident(&mVideoMemoryInfo.local, localSizeToMakeResident,
+                                       localHeapsToMakeResident.size(),
+                                       localHeapsToMakeResident.data());
+    }
+
+    if (nonLocalSizeToMakeResident > 0) {
+        ASSERT(!mDevice->GetDeviceInfo().isUMA);
+        return MakeAllocationsResident(&mVideoMemoryInfo.nonLocal, nonLocalSizeToMakeResident,
+                                       nonLocalHeapsToMakeResident.size(),
+                                       nonLocalHeapsToMakeResident.data());
+    }
+
+    return {};
+}
+
+MaybeError ResidencyManager::MakeAllocationsResident(MemorySegmentInfo* segment,
+                                                     uint64_t sizeToMakeResident,
+                                                     uint64_t numberOfObjectsToMakeResident,
+                                                     ID3D12Pageable** allocations) {
+    uint64_t bytesEvicted;
+    DAWN_TRY_ASSIGN(bytesEvicted, EnsureCanMakeResident(sizeToMakeResident, segment));
+    DAWN_UNUSED(bytesEvicted);
+
+    // Note that MakeResident is a synchronous function and can add a significant
+    // overhead to command recording. In the future, it may be possible to decrease this
+    // overhead by using MakeResident on a secondary thread, or by instead making use of
+    // the EnqueueMakeResident function (which is not available on all Windows 10
+    // platforms).
+    HRESULT hr =
+        mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
+
+    // A MakeResident call can fail if there's not enough available memory. This
+    // could occur when there's significant fragmentation or if the allocation size
+    // estimates are incorrect. We may be able to continue execution by evicting some
+    // more memory and calling MakeResident again.
+    while (FAILED(hr)) {
+        constexpr uint32_t kAdditonalSizeToEvict = 50000000;  // 50MB
+
+        uint64_t sizeEvicted = 0;
+
+        DAWN_TRY_ASSIGN(sizeEvicted, EnsureCanMakeResident(kAdditonalSizeToEvict, segment));
+
+        // If nothing can be evicted after MakeResident has failed, we cannot continue
+        // execution and must throw a fatal error.
+        if (sizeEvicted == 0) {
+            return DAWN_OUT_OF_MEMORY_ERROR(
+                "MakeResident has failed due to excessive video memory usage.");
         }
 
-        ASSERT(pageable->IsInList() == false);
-        GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
+        hr = mDevice->GetD3D12Device()->MakeResident(numberOfObjectsToMakeResident, allocations);
     }
 
-    // Places an artifical cap on Dawn's budget so we can test in a predictable manner. If used,
-    // this function must be called before any resources have been created.
-    void ResidencyManager::RestrictBudgetForTesting(uint64_t artificialBudgetCap) {
-        ASSERT(mVideoMemoryInfo.nonLocal.lruCache.empty());
-        ASSERT(!mRestrictBudgetForTesting);
+    return {};
+}
 
-        mRestrictBudgetForTesting = true;
-        UpdateVideoMemoryInfo();
-
-        // Dawn has a non-zero memory usage even before any resources have been created, and this
-        // value can vary depending on the environment Dawn is running in. By adding this in
-        // addition to the artificial budget cap, we can create a predictable and reproducible
-        // budget for testing.
-        mVideoMemoryInfo.local.budget = mVideoMemoryInfo.local.usage + artificialBudgetCap;
-        if (!mDevice->GetDeviceInfo().isUMA) {
-            mVideoMemoryInfo.nonLocal.budget =
-                mVideoMemoryInfo.nonLocal.usage + artificialBudgetCap;
-        }
+// Inserts a heap at the bottom of the LRU. The passed heap must be resident or scheduled to
+// become resident within the current serial. Failing to call this function when an allocation
+// is implicitly made resident will cause the residency manager to view the allocation as
+// non-resident and call MakeResident - which will make D3D12's internal residency refcount on
+// the allocation out of sync with Dawn.
+void ResidencyManager::TrackResidentAllocation(Pageable* pageable) {
+    if (!mResidencyManagementEnabled) {
+        return;
     }
 
+    ASSERT(pageable->IsInList() == false);
+    GetMemorySegmentInfo(pageable->GetMemorySegment())->lruCache.Append(pageable);
+}
+
+// Places an artificial cap on Dawn's budget so we can test in a predictable manner. If used,
+// this function must be called before any resources have been created.
+void ResidencyManager::RestrictBudgetForTesting(uint64_t artificialBudgetCap) {
+    ASSERT(mVideoMemoryInfo.nonLocal.lruCache.empty());
+    ASSERT(!mRestrictBudgetForTesting);
+
+    mRestrictBudgetForTesting = true;
+    UpdateVideoMemoryInfo();
+
+    // Dawn has a non-zero memory usage even before any resources have been created, and this
+    // value can vary depending on the environment Dawn is running in. By adding this in
+    // addition to the artificial budget cap, we can create a predictable and reproducible
+    // budget for testing.
+    mVideoMemoryInfo.local.budget = mVideoMemoryInfo.local.usage + artificialBudgetCap;
+    if (!mDevice->GetDeviceInfo().isUMA) {
+        mVideoMemoryInfo.nonLocal.budget = mVideoMemoryInfo.nonLocal.usage + artificialBudgetCap;
+    }
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ResidencyManagerD3D12.h b/src/dawn/native/d3d12/ResidencyManagerD3D12.h
index e425545..97ff434 100644
--- a/src/dawn/native/d3d12/ResidencyManagerD3D12.h
+++ b/src/dawn/native/d3d12/ResidencyManagerD3D12.h
@@ -24,59 +24,58 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
-    class Heap;
-    class Pageable;
+class Device;
+class Heap;
+class Pageable;
 
-    class ResidencyManager {
-      public:
-        explicit ResidencyManager(Device* device);
+class ResidencyManager {
+  public:
+    explicit ResidencyManager(Device* device);
 
-        MaybeError LockAllocation(Pageable* pageable);
-        void UnlockAllocation(Pageable* pageable);
+    MaybeError LockAllocation(Pageable* pageable);
+    void UnlockAllocation(Pageable* pageable);
 
-        MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
-        MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
+    MaybeError EnsureCanAllocate(uint64_t allocationSize, MemorySegment memorySegment);
+    MaybeError EnsureHeapsAreResident(Heap** heaps, size_t heapCount);
 
-        uint64_t SetExternalMemoryReservation(MemorySegment segment,
-                                              uint64_t requestedReservationSize);
+    uint64_t SetExternalMemoryReservation(MemorySegment segment, uint64_t requestedReservationSize);
 
-        void TrackResidentAllocation(Pageable* pageable);
+    void TrackResidentAllocation(Pageable* pageable);
 
-        void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
+    void RestrictBudgetForTesting(uint64_t artificialBudgetCap);
 
-      private:
-        struct MemorySegmentInfo {
-            const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
-            LinkedList<Pageable> lruCache = {};
-            uint64_t budget = 0;
-            uint64_t usage = 0;
-            uint64_t externalReservation = 0;
-            uint64_t externalRequest = 0;
-        };
-
-        struct VideoMemoryInfo {
-            MemorySegmentInfo local = {DXGI_MEMORY_SEGMENT_GROUP_LOCAL};
-            MemorySegmentInfo nonLocal = {DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL};
-        };
-
-        MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
-        ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
-                                                      MemorySegmentInfo* memorySegment);
-        ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
-        MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
-                                           uint64_t sizeToMakeResident,
-                                           uint64_t numberOfObjectsToMakeResident,
-                                           ID3D12Pageable** allocations);
-        void UpdateVideoMemoryInfo();
-        void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
-
-        Device* mDevice;
-        bool mResidencyManagementEnabled = false;
-        bool mRestrictBudgetForTesting = false;
-        VideoMemoryInfo mVideoMemoryInfo = {};
+  private:
+    struct MemorySegmentInfo {
+        const DXGI_MEMORY_SEGMENT_GROUP dxgiSegment;
+        LinkedList<Pageable> lruCache = {};
+        uint64_t budget = 0;
+        uint64_t usage = 0;
+        uint64_t externalReservation = 0;
+        uint64_t externalRequest = 0;
     };
 
+    struct VideoMemoryInfo {
+        MemorySegmentInfo local = {DXGI_MEMORY_SEGMENT_GROUP_LOCAL};
+        MemorySegmentInfo nonLocal = {DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL};
+    };
+
+    MemorySegmentInfo* GetMemorySegmentInfo(MemorySegment memorySegment);
+    ResultOrError<uint64_t> EnsureCanMakeResident(uint64_t allocationSize,
+                                                  MemorySegmentInfo* memorySegment);
+    ResultOrError<Pageable*> RemoveSingleEntryFromLRU(MemorySegmentInfo* memorySegment);
+    MaybeError MakeAllocationsResident(MemorySegmentInfo* segment,
+                                       uint64_t sizeToMakeResident,
+                                       uint64_t numberOfObjectsToMakeResident,
+                                       ID3D12Pageable** allocations);
+    void UpdateVideoMemoryInfo();
+    void UpdateMemorySegmentInfo(MemorySegmentInfo* segmentInfo);
+
+    Device* mDevice;
+    bool mResidencyManagementEnabled = false;
+    bool mRestrictBudgetForTesting = false;
+    VideoMemoryInfo mVideoMemoryInfo = {};
+};
+
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_RESIDENCYMANAGERD3D12_H_
diff --git a/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
index 40d1731..dbcea82 100644
--- a/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
+++ b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.cpp
@@ -25,396 +25,393 @@
 #include "dawn/native/d3d12/UtilsD3D12.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
-        MemorySegment GetMemorySegment(Device* device, D3D12_HEAP_TYPE heapType) {
-            if (device->GetDeviceInfo().isUMA) {
-                return MemorySegment::Local;
-            }
+namespace {
+MemorySegment GetMemorySegment(Device* device, D3D12_HEAP_TYPE heapType) {
+    if (device->GetDeviceInfo().isUMA) {
+        return MemorySegment::Local;
+    }
 
-            D3D12_HEAP_PROPERTIES heapProperties =
-                device->GetD3D12Device()->GetCustomHeapProperties(0, heapType);
+    D3D12_HEAP_PROPERTIES heapProperties =
+        device->GetD3D12Device()->GetCustomHeapProperties(0, heapType);
 
-            if (heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1) {
-                return MemorySegment::Local;
-            }
+    if (heapProperties.MemoryPoolPreference == D3D12_MEMORY_POOL_L1) {
+        return MemorySegment::Local;
+    }
 
-            return MemorySegment::NonLocal;
+    return MemorySegment::NonLocal;
+}
+
+D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
+    switch (resourceHeapKind) {
+        case Readback_OnlyBuffers:
+        case Readback_AllBuffersAndTextures:
+            return D3D12_HEAP_TYPE_READBACK;
+        case Default_AllBuffersAndTextures:
+        case Default_OnlyBuffers:
+        case Default_OnlyNonRenderableOrDepthTextures:
+        case Default_OnlyRenderableOrDepthTextures:
+            return D3D12_HEAP_TYPE_DEFAULT;
+        case Upload_OnlyBuffers:
+        case Upload_AllBuffersAndTextures:
+            return D3D12_HEAP_TYPE_UPLOAD;
+        case EnumCount:
+            UNREACHABLE();
+    }
+}
+
+D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
+    switch (resourceHeapKind) {
+        case Default_AllBuffersAndTextures:
+        case Readback_AllBuffersAndTextures:
+        case Upload_AllBuffersAndTextures:
+            return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
+        case Default_OnlyBuffers:
+        case Readback_OnlyBuffers:
+        case Upload_OnlyBuffers:
+            return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
+        case Default_OnlyNonRenderableOrDepthTextures:
+            return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
+        case Default_OnlyRenderableOrDepthTextures:
+            return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
+        case EnumCount:
+            UNREACHABLE();
+    }
+}
+
+ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
+                                     D3D12_HEAP_TYPE heapType,
+                                     D3D12_RESOURCE_FLAGS flags,
+                                     uint32_t resourceHeapTier) {
+    if (resourceHeapTier >= 2) {
+        switch (heapType) {
+            case D3D12_HEAP_TYPE_UPLOAD:
+                return Upload_AllBuffersAndTextures;
+            case D3D12_HEAP_TYPE_DEFAULT:
+                return Default_AllBuffersAndTextures;
+            case D3D12_HEAP_TYPE_READBACK:
+                return Readback_AllBuffersAndTextures;
+            default:
+                UNREACHABLE();
         }
+    }
 
-        D3D12_HEAP_TYPE GetD3D12HeapType(ResourceHeapKind resourceHeapKind) {
-            switch (resourceHeapKind) {
-                case Readback_OnlyBuffers:
-                case Readback_AllBuffersAndTextures:
-                    return D3D12_HEAP_TYPE_READBACK;
-                case Default_AllBuffersAndTextures:
-                case Default_OnlyBuffers:
-                case Default_OnlyNonRenderableOrDepthTextures:
-                case Default_OnlyRenderableOrDepthTextures:
-                    return D3D12_HEAP_TYPE_DEFAULT;
-                case Upload_OnlyBuffers:
-                case Upload_AllBuffersAndTextures:
-                    return D3D12_HEAP_TYPE_UPLOAD;
-                case EnumCount:
-                    UNREACHABLE();
-            }
-        }
-
-        D3D12_HEAP_FLAGS GetD3D12HeapFlags(ResourceHeapKind resourceHeapKind) {
-            switch (resourceHeapKind) {
-                case Default_AllBuffersAndTextures:
-                case Readback_AllBuffersAndTextures:
-                case Upload_AllBuffersAndTextures:
-                    return D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES;
-                case Default_OnlyBuffers:
-                case Readback_OnlyBuffers:
-                case Upload_OnlyBuffers:
-                    return D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS;
-                case Default_OnlyNonRenderableOrDepthTextures:
-                    return D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES;
-                case Default_OnlyRenderableOrDepthTextures:
-                    return D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES;
-                case EnumCount:
-                    UNREACHABLE();
-            }
-        }
-
-        ResourceHeapKind GetResourceHeapKind(D3D12_RESOURCE_DIMENSION dimension,
-                                             D3D12_HEAP_TYPE heapType,
-                                             D3D12_RESOURCE_FLAGS flags,
-                                             uint32_t resourceHeapTier) {
-            if (resourceHeapTier >= 2) {
-                switch (heapType) {
-                    case D3D12_HEAP_TYPE_UPLOAD:
-                        return Upload_AllBuffersAndTextures;
-                    case D3D12_HEAP_TYPE_DEFAULT:
-                        return Default_AllBuffersAndTextures;
-                    case D3D12_HEAP_TYPE_READBACK:
-                        return Readback_AllBuffersAndTextures;
-                    default:
-                        UNREACHABLE();
-                }
-            }
-
-            switch (dimension) {
-                case D3D12_RESOURCE_DIMENSION_BUFFER: {
-                    switch (heapType) {
-                        case D3D12_HEAP_TYPE_UPLOAD:
-                            return Upload_OnlyBuffers;
-                        case D3D12_HEAP_TYPE_DEFAULT:
-                            return Default_OnlyBuffers;
-                        case D3D12_HEAP_TYPE_READBACK:
-                            return Readback_OnlyBuffers;
-                        default:
-                            UNREACHABLE();
-                    }
-                    break;
-                }
-                case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
-                case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
-                case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
-                    switch (heapType) {
-                        case D3D12_HEAP_TYPE_DEFAULT: {
-                            if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
-                                (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
-                                return Default_OnlyRenderableOrDepthTextures;
-                            }
-                            return Default_OnlyNonRenderableOrDepthTextures;
-                        }
-
-                        default:
-                            UNREACHABLE();
-                    }
-                    break;
-                }
+    switch (dimension) {
+        case D3D12_RESOURCE_DIMENSION_BUFFER: {
+            switch (heapType) {
+                case D3D12_HEAP_TYPE_UPLOAD:
+                    return Upload_OnlyBuffers;
+                case D3D12_HEAP_TYPE_DEFAULT:
+                    return Default_OnlyBuffers;
+                case D3D12_HEAP_TYPE_READBACK:
+                    return Readback_OnlyBuffers;
                 default:
                     UNREACHABLE();
             }
+            break;
         }
+        case D3D12_RESOURCE_DIMENSION_TEXTURE1D:
+        case D3D12_RESOURCE_DIMENSION_TEXTURE2D:
+        case D3D12_RESOURCE_DIMENSION_TEXTURE3D: {
+            switch (heapType) {
+                case D3D12_HEAP_TYPE_DEFAULT: {
+                    if ((flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+                        (flags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET)) {
+                        return Default_OnlyRenderableOrDepthTextures;
+                    }
+                    return Default_OnlyNonRenderableOrDepthTextures;
+                }
 
-        uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
-                                               uint32_t sampleCount,
-                                               uint64_t requestedAlignment) {
-            switch (resourceHeapKind) {
-                // Small resources can take advantage of smaller alignments. For example,
-                // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
-                // Must be non-depth or without render-target to use small resource alignment.
-                // This also applies to MSAA textures (4MB => 64KB).
-                //
-                // Note: Only known to be used for small textures; however, MSDN suggests
-                // it could be extended for more cases. If so, this could default to always
-                // attempt small resource placement.
-                // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
-                case Default_OnlyNonRenderableOrDepthTextures:
-                    return (sampleCount > 1) ? D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT
-                                             : D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
                 default:
-                    return requestedAlignment;
+                    UNREACHABLE();
             }
+            break;
         }
+        default:
+            UNREACHABLE();
+    }
+}
 
-        bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
-            // Optimized clear color cannot be set on buffers, non-render-target/depth-stencil
-            // textures, or typeless resources
-            // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
-            // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
-            return !IsTypeless(resourceDescriptor.Format) &&
-                   resourceDescriptor.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
-                   (resourceDescriptor.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
-                                                D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
-        }
+uint64_t GetResourcePlacementAlignment(ResourceHeapKind resourceHeapKind,
+                                       uint32_t sampleCount,
+                                       uint64_t requestedAlignment) {
+    switch (resourceHeapKind) {
+        // Small resources can take advantage of smaller alignments. For example,
+        // if the most detailed mip can fit under 64KB, 4KB alignments can be used.
+        // Must be non-depth or without render-target to use small resource alignment.
+        // This also applies to MSAA textures (4MB => 64KB).
+        //
+        // Note: Only known to be used for small textures; however, MSDN suggests
+        // it could be extended for more cases. If so, this could default to always
+        // attempt small resource placement.
+        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_resource_desc
+        case Default_OnlyNonRenderableOrDepthTextures:
+            return (sampleCount > 1) ? D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT
+                                     : D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT;
+        default:
+            return requestedAlignment;
+    }
+}
 
-    }  // namespace
+bool IsClearValueOptimizable(const D3D12_RESOURCE_DESC& resourceDescriptor) {
+    // Optimized clear color cannot be set on buffers, non-render-target/depth-stencil
+    // textures, or typeless resources
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createcommittedresource
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+    return !IsTypeless(resourceDescriptor.Format) &&
+           resourceDescriptor.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER &&
+           (resourceDescriptor.Flags & (D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET |
+                                        D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL)) != 0;
+}
 
-    ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
-        mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
-                                ? mDevice->GetDeviceInfo().resourceHeapTier
-                                : 1;
+}  // namespace
 
-        for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
-            const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
-            mHeapAllocators[i] = std::make_unique<HeapAllocator>(
-                mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
-                GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
-            mPooledHeapAllocators[i] =
-                std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
-            mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
-                kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
+ResourceAllocatorManager::ResourceAllocatorManager(Device* device) : mDevice(device) {
+    mResourceHeapTier = (mDevice->IsToggleEnabled(Toggle::UseD3D12ResourceHeapTier2))
+                            ? mDevice->GetDeviceInfo().resourceHeapTier
+                            : 1;
+
+    for (uint32_t i = 0; i < ResourceHeapKind::EnumCount; i++) {
+        const ResourceHeapKind resourceHeapKind = static_cast<ResourceHeapKind>(i);
+        mHeapAllocators[i] = std::make_unique<HeapAllocator>(
+            mDevice, GetD3D12HeapType(resourceHeapKind), GetD3D12HeapFlags(resourceHeapKind),
+            GetMemorySegment(device, GetD3D12HeapType(resourceHeapKind)));
+        mPooledHeapAllocators[i] =
+            std::make_unique<PooledResourceMemoryAllocator>(mHeapAllocators[i].get());
+        mSubAllocatedResourceAllocators[i] = std::make_unique<BuddyMemoryAllocator>(
+            kMaxHeapSize, kMinHeapSize, mPooledHeapAllocators[i].get());
+    }
+}
+
+ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
+    D3D12_HEAP_TYPE heapType,
+    const D3D12_RESOURCE_DESC& resourceDescriptor,
+    D3D12_RESOURCE_STATES initialUsage) {
+    // In order to suppress a warning in the D3D12 debug layer, we need to specify an
+    // optimized clear value. As there are no negative consequences when picking a mismatched
+    // clear value, we use zero as the optimized clear value. This also enables fast clears on
+    // some architectures.
+    D3D12_CLEAR_VALUE zero{};
+    D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
+    if (IsClearValueOptimizable(resourceDescriptor)) {
+        zero.Format = resourceDescriptor.Format;
+        optimizedClearValue = &zero;
+    }
+
+    // TODO(crbug.com/dawn/849): Conditionally disable sub-allocation.
+    // For very large resources, there is no benefit to suballocate.
+    // For very small resources, it is inefficient to suballocate given the min. heap
+    // size could be much larger than the resource allocation.
+    // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
+    if (!mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
+        ResourceHeapAllocation subAllocation;
+        DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
+                                                            optimizedClearValue, initialUsage));
+        if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+            return std::move(subAllocation);
         }
     }
 
-    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::AllocateMemory(
-        D3D12_HEAP_TYPE heapType,
-        const D3D12_RESOURCE_DESC& resourceDescriptor,
-        D3D12_RESOURCE_STATES initialUsage) {
-        // In order to suppress a warning in the D3D12 debug layer, we need to specify an
-        // optimized clear value. As there are no negative consequences when picking a mismatched
-        // clear value, we use zero as the optimized clear value. This also enables fast clears on
-        // some architectures.
-        D3D12_CLEAR_VALUE zero{};
-        D3D12_CLEAR_VALUE* optimizedClearValue = nullptr;
-        if (IsClearValueOptimizable(resourceDescriptor)) {
-            zero.Format = resourceDescriptor.Format;
-            optimizedClearValue = &zero;
-        }
-
-        // TODO(crbug.com/dawn/849): Conditionally disable sub-allocation.
-        // For very large resources, there is no benefit to suballocate.
-        // For very small resources, it is inefficent to suballocate given the min. heap
-        // size could be much larger then the resource allocation.
-        // Attempt to satisfy the request using sub-allocation (placed resource in a heap).
-        if (!mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
-            ResourceHeapAllocation subAllocation;
-            DAWN_TRY_ASSIGN(subAllocation, CreatePlacedResource(heapType, resourceDescriptor,
-                                                                optimizedClearValue, initialUsage));
-            if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
-                return std::move(subAllocation);
-            }
-        }
-
-        // If sub-allocation fails, fall-back to direct allocation (committed resource).
-        ResourceHeapAllocation directAllocation;
-        DAWN_TRY_ASSIGN(directAllocation,
-                        CreateCommittedResource(heapType, resourceDescriptor, optimizedClearValue,
-                                                initialUsage));
-        if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
-            return std::move(directAllocation);
-        }
-
-        // If direct allocation fails, the system is probably out of memory.
-        return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
+    // If sub-allocation fails, fall-back to direct allocation (committed resource).
+    ResourceHeapAllocation directAllocation;
+    DAWN_TRY_ASSIGN(directAllocation, CreateCommittedResource(heapType, resourceDescriptor,
+                                                              optimizedClearValue, initialUsage));
+    if (directAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+        return std::move(directAllocation);
     }
 
-    void ResourceAllocatorManager::Tick(ExecutionSerial completedSerial) {
-        for (ResourceHeapAllocation& allocation :
-             mAllocationsToDelete.IterateUpTo(completedSerial)) {
-            if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
-                FreeMemory(allocation);
-            }
+    // If direct allocation fails, the system is probably out of memory.
+    return DAWN_OUT_OF_MEMORY_ERROR("Allocation failed");
+}
+
+void ResourceAllocatorManager::Tick(ExecutionSerial completedSerial) {
+    for (ResourceHeapAllocation& allocation : mAllocationsToDelete.IterateUpTo(completedSerial)) {
+        if (allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated) {
+            FreeMemory(allocation);
         }
-        mAllocationsToDelete.ClearUpTo(completedSerial);
-        mHeapsToDelete.ClearUpTo(completedSerial);
+    }
+    mAllocationsToDelete.ClearUpTo(completedSerial);
+    mHeapsToDelete.ClearUpTo(completedSerial);
+}
+
+void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
+    if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+        return;
     }
 
-    void ResourceAllocatorManager::DeallocateMemory(ResourceHeapAllocation& allocation) {
-        if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
-            return;
-        }
+    mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
 
-        mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
-
-        // Directly allocated ResourceHeapAllocations are created with a heap object that must be
-        // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
-        // for more information. Acquire this heap as a unique_ptr and add it to the queue of heaps
-        // to delete. It cannot be deleted immediately because it may be in use by in-flight or
-        // pending commands.
-        if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
-            mHeapsToDelete.Enqueue(std::unique_ptr<ResourceHeapBase>(allocation.GetResourceHeap()),
-                                   mDevice->GetPendingCommandSerial());
-        }
-
-        // Invalidate the allocation immediately in case one accidentally
-        // calls DeallocateMemory again using the same allocation.
-        allocation.Invalidate();
-
-        ASSERT(allocation.GetD3D12Resource() == nullptr);
+    // Directly allocated ResourceHeapAllocations are created with a heap object that must be
+    // manually deleted upon deallocation. See ResourceAllocatorManager::CreateCommittedResource
+    // for more information. Acquire this heap as a unique_ptr and add it to the queue of heaps
+    // to delete. It cannot be deleted immediately because it may be in use by in-flight or
+    // pending commands.
+    if (allocation.GetInfo().mMethod == AllocationMethod::kDirect) {
+        mHeapsToDelete.Enqueue(std::unique_ptr<ResourceHeapBase>(allocation.GetResourceHeap()),
+                               mDevice->GetPendingCommandSerial());
     }
 
-    void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
-        ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+    // Invalidate the allocation immediately in case one accidentally
+    // calls DeallocateMemory again using the same allocation.
+    allocation.Invalidate();
 
-        D3D12_HEAP_PROPERTIES heapProp;
-        allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
+    ASSERT(allocation.GetD3D12Resource() == nullptr);
+}
 
-        const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
+void ResourceAllocatorManager::FreeMemory(ResourceHeapAllocation& allocation) {
+    ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
 
-        const size_t resourceHeapKindIndex =
-            GetResourceHeapKind(resourceDescriptor.Dimension, heapProp.Type,
-                                resourceDescriptor.Flags, mResourceHeapTier);
+    D3D12_HEAP_PROPERTIES heapProp;
+    allocation.GetD3D12Resource()->GetHeapProperties(&heapProp, nullptr);
 
-        mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
-    }
+    const D3D12_RESOURCE_DESC resourceDescriptor = allocation.GetD3D12Resource()->GetDesc();
 
-    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
-        D3D12_HEAP_TYPE heapType,
-        const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
-        const D3D12_CLEAR_VALUE* optimizedClearValue,
-        D3D12_RESOURCE_STATES initialUsage) {
-        const ResourceHeapKind resourceHeapKind =
-            GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
-                                requestedResourceDescriptor.Flags, mResourceHeapTier);
+    const size_t resourceHeapKindIndex = GetResourceHeapKind(
+        resourceDescriptor.Dimension, heapProp.Type, resourceDescriptor.Flags, mResourceHeapTier);
 
-        D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
-        resourceDescriptor.Alignment = GetResourcePlacementAlignment(
-            resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
-            requestedResourceDescriptor.Alignment);
+    mSubAllocatedResourceAllocators[resourceHeapKindIndex]->Deallocate(allocation);
+}
 
-        // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
-        // twice.
-        D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreatePlacedResource(
+    D3D12_HEAP_TYPE heapType,
+    const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+    const D3D12_CLEAR_VALUE* optimizedClearValue,
+    D3D12_RESOURCE_STATES initialUsage) {
+    const ResourceHeapKind resourceHeapKind =
+        GetResourceHeapKind(requestedResourceDescriptor.Dimension, heapType,
+                            requestedResourceDescriptor.Flags, mResourceHeapTier);
+
+    D3D12_RESOURCE_DESC resourceDescriptor = requestedResourceDescriptor;
+    resourceDescriptor.Alignment = GetResourcePlacementAlignment(
+        resourceHeapKind, requestedResourceDescriptor.SampleDesc.Count,
+        requestedResourceDescriptor.Alignment);
+
+    // TODO(bryan.bernhart): Figure out how to compute the alignment without calling this
+    // twice.
+    D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+        mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+
+    // If the requested resource alignment was rejected, let D3D tell us what the
+    // required alignment is for this resource.
+    if (resourceDescriptor.Alignment != 0 &&
+        resourceDescriptor.Alignment != resourceInfo.Alignment) {
+        resourceDescriptor.Alignment = 0;
+        resourceInfo =
             mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
-
-        // If the requested resource alignment was rejected, let D3D tell us what the
-        // required alignment is for this resource.
-        if (resourceDescriptor.Alignment != 0 &&
-            resourceDescriptor.Alignment != resourceInfo.Alignment) {
-            resourceDescriptor.Alignment = 0;
-            resourceInfo =
-                mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
-        }
-
-        // If d3d tells us the resource size is invalid, treat the error as OOM.
-        // Otherwise, creating the resource could cause a device loss (too large).
-        // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
-        // incorrectly allocate a mismatched size.
-        if (resourceInfo.SizeInBytes == 0 ||
-            resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
-            return DAWN_OUT_OF_MEMORY_ERROR(absl::StrFormat(
-                "Resource allocation size (%u) was invalid.", resourceInfo.SizeInBytes));
-        }
-
-        BuddyMemoryAllocator* allocator =
-            mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
-
-        ResourceMemoryAllocation allocation;
-        DAWN_TRY_ASSIGN(allocation,
-                        allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
-        if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
-            return ResourceHeapAllocation{};  // invalid
-        }
-
-        Heap* heap = ToBackend(allocation.GetResourceHeap());
-
-        // Before calling CreatePlacedResource, we must ensure the target heap is resident.
-        // CreatePlacedResource will fail if it is not.
-        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
-
-        // With placed resources, a single heap can be reused.
-        // The resource placed at an offset is only reclaimed
-        // upon Tick or after the last command list using the resource has completed
-        // on the GPU. This means the same physical memory is not reused
-        // within the same command-list and does not require additional synchronization (aliasing
-        // barrier).
-        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
-        ComPtr<ID3D12Resource> placedResource;
-        DAWN_TRY(CheckOutOfMemoryHRESULT(
-            mDevice->GetD3D12Device()->CreatePlacedResource(
-                heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
-                optimizedClearValue, IID_PPV_ARGS(&placedResource)),
-            "ID3D12Device::CreatePlacedResource"));
-
-        // After CreatePlacedResource has finished, the heap can be unlocked from residency. This
-        // will insert it into the residency LRU.
-        mDevice->GetResidencyManager()->UnlockAllocation(heap);
-
-        return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
-                                      std::move(placedResource), heap};
     }
 
-    ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
-        D3D12_HEAP_TYPE heapType,
-        const D3D12_RESOURCE_DESC& resourceDescriptor,
-        const D3D12_CLEAR_VALUE* optimizedClearValue,
-        D3D12_RESOURCE_STATES initialUsage) {
-        D3D12_HEAP_PROPERTIES heapProperties;
-        heapProperties.Type = heapType;
-        heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
-        heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
-        heapProperties.CreationNodeMask = 0;
-        heapProperties.VisibleNodeMask = 0;
-
-        // If d3d tells us the resource size is invalid, treat the error as OOM.
-        // Otherwise, creating the resource could cause a device loss (too large).
-        // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
-        // incorrectly allocate a mismatched size.
-        D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
-            mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
-        if (resourceInfo.SizeInBytes == 0 ||
-            resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
-        }
-
-        if (resourceInfo.SizeInBytes > kMaxHeapSize) {
-            return ResourceHeapAllocation{};  // Invalid
-        }
-
-        // CreateCommittedResource will implicitly make the created resource resident. We must
-        // ensure enough free memory exists before allocating to avoid an out-of-memory error when
-        // overcommitted.
-        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(
-            resourceInfo.SizeInBytes, GetMemorySegment(mDevice, heapType)));
-
-        // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
-        // provided to CreateCommittedResource.
-        ComPtr<ID3D12Resource> committedResource;
-        DAWN_TRY(CheckOutOfMemoryHRESULT(
-            mDevice->GetD3D12Device()->CreateCommittedResource(
-                &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
-                optimizedClearValue, IID_PPV_ARGS(&committedResource)),
-            "ID3D12Device::CreateCommittedResource"));
-
-        // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
-        // resource allocation. Because Dawn's memory residency management occurs at the resource
-        // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
-        // object. This object is created manually, and must be deleted manually upon deallocation
-        // of the committed resource.
-        Heap* heap = new Heap(committedResource, GetMemorySegment(mDevice, heapType),
-                              resourceInfo.SizeInBytes);
-
-        // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
-        // track this to avoid calling MakeResident a second time.
-        mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
-
-        AllocationInfo info;
-        info.mMethod = AllocationMethod::kDirect;
-
-        return ResourceHeapAllocation{info,
-                                      /*offset*/ 0, std::move(committedResource), heap};
+    // If d3d tells us the resource size is invalid, treat the error as OOM.
+    // Otherwise, creating the resource could cause a device loss (too large).
+    // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+    // incorrectly allocate a mismatched size.
+    if (resourceInfo.SizeInBytes == 0 ||
+        resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+        return DAWN_OUT_OF_MEMORY_ERROR(absl::StrFormat(
+            "Resource allocation size (%u) was invalid.", resourceInfo.SizeInBytes));
     }
 
-    void ResourceAllocatorManager::DestroyPool() {
-        for (auto& alloc : mPooledHeapAllocators) {
-            alloc->DestroyPool();
-        }
+    BuddyMemoryAllocator* allocator =
+        mSubAllocatedResourceAllocators[static_cast<size_t>(resourceHeapKind)].get();
+
+    ResourceMemoryAllocation allocation;
+    DAWN_TRY_ASSIGN(allocation,
+                    allocator->Allocate(resourceInfo.SizeInBytes, resourceInfo.Alignment));
+    if (allocation.GetInfo().mMethod == AllocationMethod::kInvalid) {
+        return ResourceHeapAllocation{};  // invalid
     }
 
+    Heap* heap = ToBackend(allocation.GetResourceHeap());
+
+    // Before calling CreatePlacedResource, we must ensure the target heap is resident.
+    // CreatePlacedResource will fail if it is not.
+    DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(heap));
+
+    // With placed resources, a single heap can be reused.
+    // The resource placed at an offset is only reclaimed
+    // upon Tick or after the last command list using the resource has completed
+    // on the GPU. This means the same physical memory is not reused
+    // within the same command-list and does not require additional synchronization (aliasing
+    // barrier).
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12device-createplacedresource
+    ComPtr<ID3D12Resource> placedResource;
+    DAWN_TRY(CheckOutOfMemoryHRESULT(
+        mDevice->GetD3D12Device()->CreatePlacedResource(
+            heap->GetD3D12Heap(), allocation.GetOffset(), &resourceDescriptor, initialUsage,
+            optimizedClearValue, IID_PPV_ARGS(&placedResource)),
+        "ID3D12Device::CreatePlacedResource"));
+
+    // After CreatePlacedResource has finished, the heap can be unlocked from residency. This
+    // will insert it into the residency LRU.
+    mDevice->GetResidencyManager()->UnlockAllocation(heap);
+
+    return ResourceHeapAllocation{allocation.GetInfo(), allocation.GetOffset(),
+                                  std::move(placedResource), heap};
+}
+
+ResultOrError<ResourceHeapAllocation> ResourceAllocatorManager::CreateCommittedResource(
+    D3D12_HEAP_TYPE heapType,
+    const D3D12_RESOURCE_DESC& resourceDescriptor,
+    const D3D12_CLEAR_VALUE* optimizedClearValue,
+    D3D12_RESOURCE_STATES initialUsage) {
+    D3D12_HEAP_PROPERTIES heapProperties;
+    heapProperties.Type = heapType;
+    heapProperties.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
+    heapProperties.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
+    heapProperties.CreationNodeMask = 0;
+    heapProperties.VisibleNodeMask = 0;
+
+    // If d3d tells us the resource size is invalid, treat the error as OOM.
+    // Otherwise, creating the resource could cause a device loss (too large).
+    // This is because NextPowerOfTwo(UINT64_MAX) overflows and proceeds to
+    // incorrectly allocate a mismatched size.
+    D3D12_RESOURCE_ALLOCATION_INFO resourceInfo =
+        mDevice->GetD3D12Device()->GetResourceAllocationInfo(0, 1, &resourceDescriptor);
+    if (resourceInfo.SizeInBytes == 0 ||
+        resourceInfo.SizeInBytes == std::numeric_limits<uint64_t>::max()) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Resource allocation size was invalid.");
+    }
+
+    if (resourceInfo.SizeInBytes > kMaxHeapSize) {
+        return ResourceHeapAllocation{};  // Invalid
+    }
+
+    // CreateCommittedResource will implicitly make the created resource resident. We must
+    // ensure enough free memory exists before allocating to avoid an out-of-memory error when
+    // overcommitted.
+    DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(
+        resourceInfo.SizeInBytes, GetMemorySegment(mDevice, heapType)));
+
+    // Note: Heap flags are inferred by the resource descriptor and do not need to be explicitly
+    // provided to CreateCommittedResource.
+    ComPtr<ID3D12Resource> committedResource;
+    DAWN_TRY(CheckOutOfMemoryHRESULT(
+        mDevice->GetD3D12Device()->CreateCommittedResource(
+            &heapProperties, D3D12_HEAP_FLAG_NONE, &resourceDescriptor, initialUsage,
+            optimizedClearValue, IID_PPV_ARGS(&committedResource)),
+        "ID3D12Device::CreateCommittedResource"));
+
+    // When using CreateCommittedResource, D3D12 creates an implicit heap that contains the
+    // resource allocation. Because Dawn's memory residency management occurs at the resource
+    // heap granularity, every directly allocated ResourceHeapAllocation also stores a Heap
+    // object. This object is created manually, and must be deleted manually upon deallocation
+    // of the committed resource.
+    Heap* heap =
+        new Heap(committedResource, GetMemorySegment(mDevice, heapType), resourceInfo.SizeInBytes);
+
+    // Calling CreateCommittedResource implicitly calls MakeResident on the resource. We must
+    // track this to avoid calling MakeResident a second time.
+    mDevice->GetResidencyManager()->TrackResidentAllocation(heap);
+
+    AllocationInfo info;
+    info.mMethod = AllocationMethod::kDirect;
+
+    return ResourceHeapAllocation{info,
+                                  /*offset*/ 0, std::move(committedResource), heap};
+}
+
+void ResourceAllocatorManager::DestroyPool() {
+    for (auto& alloc : mPooledHeapAllocators) {
+        alloc->DestroyPool();
+    }
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
index 0bde8fc..5b6dfd8 100644
--- a/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
+++ b/src/dawn/native/d3d12/ResourceAllocatorManagerD3D12.h
@@ -27,81 +27,81 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    // Resource heap types + flags combinations are named after the D3D constants.
-    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
-    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
-    enum ResourceHeapKind {
-        // Resource heap tier 2
-        // Allows resource heaps to contain all buffer and textures types.
-        // This enables better heap re-use by avoiding the need for separate heaps and
-        // also reduces fragmentation.
-        Readback_AllBuffersAndTextures,
-        Upload_AllBuffersAndTextures,
-        Default_AllBuffersAndTextures,
+// Resource heap types + flags combinations are named after the D3D constants.
+// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_flags
+// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_heap_type
+enum ResourceHeapKind {
+    // Resource heap tier 2
+    // Allows resource heaps to contain all buffer and textures types.
+    // This enables better heap re-use by avoiding the need for separate heaps and
+    // also reduces fragmentation.
+    Readback_AllBuffersAndTextures,
+    Upload_AllBuffersAndTextures,
+    Default_AllBuffersAndTextures,
 
-        // Resource heap tier 1
-        // Resource heaps only support types from a single resource category.
-        Readback_OnlyBuffers,
-        Upload_OnlyBuffers,
-        Default_OnlyBuffers,
+    // Resource heap tier 1
+    // Resource heaps only support types from a single resource category.
+    Readback_OnlyBuffers,
+    Upload_OnlyBuffers,
+    Default_OnlyBuffers,
 
-        Default_OnlyNonRenderableOrDepthTextures,
-        Default_OnlyRenderableOrDepthTextures,
+    Default_OnlyNonRenderableOrDepthTextures,
+    Default_OnlyRenderableOrDepthTextures,
 
-        EnumCount,
-        InvalidEnum = EnumCount,
-    };
+    EnumCount,
+    InvalidEnum = EnumCount,
+};
 
-    // Manages a list of resource allocators used by the device to create resources using
-    // multiple allocation methods.
-    class ResourceAllocatorManager {
-      public:
-        explicit ResourceAllocatorManager(Device* device);
+// Manages a list of resource allocators used by the device to create resources using
+// multiple allocation methods.
+class ResourceAllocatorManager {
+  public:
+    explicit ResourceAllocatorManager(Device* device);
 
-        ResultOrError<ResourceHeapAllocation> AllocateMemory(
-            D3D12_HEAP_TYPE heapType,
-            const D3D12_RESOURCE_DESC& resourceDescriptor,
-            D3D12_RESOURCE_STATES initialUsage);
+    ResultOrError<ResourceHeapAllocation> AllocateMemory(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        D3D12_RESOURCE_STATES initialUsage);
 
-        void DeallocateMemory(ResourceHeapAllocation& allocation);
+    void DeallocateMemory(ResourceHeapAllocation& allocation);
 
-        void Tick(ExecutionSerial lastCompletedSerial);
+    void Tick(ExecutionSerial lastCompletedSerial);
 
-        void DestroyPool();
+    void DestroyPool();
 
-      private:
-        void FreeMemory(ResourceHeapAllocation& allocation);
+  private:
+    void FreeMemory(ResourceHeapAllocation& allocation);
 
-        ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
-            D3D12_HEAP_TYPE heapType,
-            const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
-            const D3D12_CLEAR_VALUE* optimizedClearValue,
-            D3D12_RESOURCE_STATES initialUsage);
+    ResultOrError<ResourceHeapAllocation> CreatePlacedResource(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& requestedResourceDescriptor,
+        const D3D12_CLEAR_VALUE* optimizedClearValue,
+        D3D12_RESOURCE_STATES initialUsage);
 
-        ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
-            D3D12_HEAP_TYPE heapType,
-            const D3D12_RESOURCE_DESC& resourceDescriptor,
-            const D3D12_CLEAR_VALUE* optimizedClearValue,
-            D3D12_RESOURCE_STATES initialUsage);
+    ResultOrError<ResourceHeapAllocation> CreateCommittedResource(
+        D3D12_HEAP_TYPE heapType,
+        const D3D12_RESOURCE_DESC& resourceDescriptor,
+        const D3D12_CLEAR_VALUE* optimizedClearValue,
+        D3D12_RESOURCE_STATES initialUsage);
 
-        Device* mDevice;
-        uint32_t mResourceHeapTier;
+    Device* mDevice;
+    uint32_t mResourceHeapTier;
 
-        static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll;  // 32GB
-        static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll;            // 4MB
+    static constexpr uint64_t kMaxHeapSize = 32ll * 1024ll * 1024ll * 1024ll;  // 32GB
+    static constexpr uint64_t kMinHeapSize = 4ll * 1024ll * 1024ll;            // 4MB
 
-        std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
-            mSubAllocatedResourceAllocators;
-        std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
+    std::array<std::unique_ptr<BuddyMemoryAllocator>, ResourceHeapKind::EnumCount>
+        mSubAllocatedResourceAllocators;
+    std::array<std::unique_ptr<HeapAllocator>, ResourceHeapKind::EnumCount> mHeapAllocators;
 
-        std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
-            mPooledHeapAllocators;
+    std::array<std::unique_ptr<PooledResourceMemoryAllocator>, ResourceHeapKind::EnumCount>
+        mPooledHeapAllocators;
 
-        SerialQueue<ExecutionSerial, ResourceHeapAllocation> mAllocationsToDelete;
-        SerialQueue<ExecutionSerial, std::unique_ptr<ResourceHeapBase>> mHeapsToDelete;
-    };
+    SerialQueue<ExecutionSerial, ResourceHeapAllocation> mAllocationsToDelete;
+    SerialQueue<ExecutionSerial, std::unique_ptr<ResourceHeapBase>> mHeapsToDelete;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
index 296ce77..862f87b 100644
--- a/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
+++ b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.cpp
@@ -20,24 +20,24 @@
 #include "dawn/native/d3d12/HeapD3D12.h"
 
 namespace dawn::native::d3d12 {
-    ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
-                                                   uint64_t offset,
-                                                   ComPtr<ID3D12Resource> resource,
-                                                   Heap* heap)
-        : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
-        ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
-    }
+ResourceHeapAllocation::ResourceHeapAllocation(const AllocationInfo& info,
+                                               uint64_t offset,
+                                               ComPtr<ID3D12Resource> resource,
+                                               Heap* heap)
+    : ResourceMemoryAllocation(info, offset, heap), mResource(std::move(resource)) {
+    ASSERT((info.mMethod == AllocationMethod::kExternal) == (heap == nullptr));
+}
 
-    void ResourceHeapAllocation::Invalidate() {
-        ResourceMemoryAllocation::Invalidate();
-        mResource.Reset();
-    }
+void ResourceHeapAllocation::Invalidate() {
+    ResourceMemoryAllocation::Invalidate();
+    mResource.Reset();
+}
 
-    ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
-        return mResource.Get();
-    }
+ID3D12Resource* ResourceHeapAllocation::GetD3D12Resource() const {
+    return mResource.Get();
+}
 
-    D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
-        return mResource->GetGPUVirtualAddress();
-    }
+D3D12_GPU_VIRTUAL_ADDRESS ResourceHeapAllocation::GetGPUPointer() const {
+    return mResource->GetGPUVirtualAddress();
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
index 7cd765c..9215199 100644
--- a/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
+++ b/src/dawn/native/d3d12/ResourceHeapAllocationD3D12.h
@@ -21,27 +21,27 @@
 
 namespace dawn::native::d3d12 {
 
-    class Heap;
+class Heap;
 
-    class ResourceHeapAllocation : public ResourceMemoryAllocation {
-      public:
-        ResourceHeapAllocation() = default;
-        ResourceHeapAllocation(const AllocationInfo& info,
-                               uint64_t offset,
-                               ComPtr<ID3D12Resource> resource,
-                               Heap* heap);
-        ~ResourceHeapAllocation() override = default;
-        ResourceHeapAllocation(const ResourceHeapAllocation&) = default;
-        ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
+class ResourceHeapAllocation : public ResourceMemoryAllocation {
+  public:
+    ResourceHeapAllocation() = default;
+    ResourceHeapAllocation(const AllocationInfo& info,
+                           uint64_t offset,
+                           ComPtr<ID3D12Resource> resource,
+                           Heap* heap);
+    ~ResourceHeapAllocation() override = default;
+    ResourceHeapAllocation(const ResourceHeapAllocation&) = default;
+    ResourceHeapAllocation& operator=(const ResourceHeapAllocation&) = default;
 
-        void Invalidate() override;
+    void Invalidate() override;
 
-        ID3D12Resource* GetD3D12Resource() const;
-        D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
+    ID3D12Resource* GetD3D12Resource() const;
+    D3D12_GPU_VIRTUAL_ADDRESS GetGPUPointer() const;
 
-      private:
-        ComPtr<ID3D12Resource> mResource;
-    };
+  private:
+    ComPtr<ID3D12Resource> mResource;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/SamplerD3D12.cpp b/src/dawn/native/d3d12/SamplerD3D12.cpp
index d0c6bbc..e9e4be6 100644
--- a/src/dawn/native/d3d12/SamplerD3D12.cpp
+++ b/src/dawn/native/d3d12/SamplerD3D12.cpp
@@ -21,88 +21,87 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
-            switch (mode) {
-                case wgpu::AddressMode::Repeat:
-                    return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
-                case wgpu::AddressMode::MirrorRepeat:
-                    return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
-                case wgpu::AddressMode::ClampToEdge:
-                    return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
-            }
-        }
-    }  // namespace
+namespace {
+D3D12_TEXTURE_ADDRESS_MODE AddressMode(wgpu::AddressMode mode) {
+    switch (mode) {
+        case wgpu::AddressMode::Repeat:
+            return D3D12_TEXTURE_ADDRESS_MODE_WRAP;
+        case wgpu::AddressMode::MirrorRepeat:
+            return D3D12_TEXTURE_ADDRESS_MODE_MIRROR;
+        case wgpu::AddressMode::ClampToEdge:
+            return D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
+    }
+}
+}  // namespace
 
-    // static
-    Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
-        return AcquireRef(new Sampler(device, descriptor));
+// static
+Ref<Sampler> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+    return AcquireRef(new Sampler(device, descriptor));
+}
+
+Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+    : SamplerBase(device, descriptor) {
+    D3D12_FILTER_TYPE minFilter;
+    switch (descriptor->minFilter) {
+        case wgpu::FilterMode::Nearest:
+            minFilter = D3D12_FILTER_TYPE_POINT;
+            break;
+        case wgpu::FilterMode::Linear:
+            minFilter = D3D12_FILTER_TYPE_LINEAR;
+            break;
     }
 
-    Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
-        : SamplerBase(device, descriptor) {
-        D3D12_FILTER_TYPE minFilter;
-        switch (descriptor->minFilter) {
-            case wgpu::FilterMode::Nearest:
-                minFilter = D3D12_FILTER_TYPE_POINT;
-                break;
-            case wgpu::FilterMode::Linear:
-                minFilter = D3D12_FILTER_TYPE_LINEAR;
-                break;
-        }
-
-        D3D12_FILTER_TYPE magFilter;
-        switch (descriptor->magFilter) {
-            case wgpu::FilterMode::Nearest:
-                magFilter = D3D12_FILTER_TYPE_POINT;
-                break;
-            case wgpu::FilterMode::Linear:
-                magFilter = D3D12_FILTER_TYPE_LINEAR;
-                break;
-        }
-
-        D3D12_FILTER_TYPE mipmapFilter;
-        switch (descriptor->mipmapFilter) {
-            case wgpu::FilterMode::Nearest:
-                mipmapFilter = D3D12_FILTER_TYPE_POINT;
-                break;
-            case wgpu::FilterMode::Linear:
-                mipmapFilter = D3D12_FILTER_TYPE_LINEAR;
-                break;
-        }
-
-        D3D12_FILTER_REDUCTION_TYPE reduction =
-            descriptor->compare == wgpu::CompareFunction::Undefined
-                ? D3D12_FILTER_REDUCTION_TYPE_STANDARD
-                : D3D12_FILTER_REDUCTION_TYPE_COMPARISON;
-
-        // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_sampler_desc
-        mSamplerDesc.MaxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
-
-        if (mSamplerDesc.MaxAnisotropy > 1) {
-            mSamplerDesc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(reduction);
-        } else {
-            mSamplerDesc.Filter =
-                D3D12_ENCODE_BASIC_FILTER(minFilter, magFilter, mipmapFilter, reduction);
-        }
-
-        mSamplerDesc.AddressU = AddressMode(descriptor->addressModeU);
-        mSamplerDesc.AddressV = AddressMode(descriptor->addressModeV);
-        mSamplerDesc.AddressW = AddressMode(descriptor->addressModeW);
-        mSamplerDesc.MipLODBias = 0.f;
-
-        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
-            mSamplerDesc.ComparisonFunc = ToD3D12ComparisonFunc(descriptor->compare);
-        } else {
-            // Still set the function so it's not garbage.
-            mSamplerDesc.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
-        }
-        mSamplerDesc.MinLOD = descriptor->lodMinClamp;
-        mSamplerDesc.MaxLOD = descriptor->lodMaxClamp;
+    D3D12_FILTER_TYPE magFilter;
+    switch (descriptor->magFilter) {
+        case wgpu::FilterMode::Nearest:
+            magFilter = D3D12_FILTER_TYPE_POINT;
+            break;
+        case wgpu::FilterMode::Linear:
+            magFilter = D3D12_FILTER_TYPE_LINEAR;
+            break;
     }
 
-    const D3D12_SAMPLER_DESC& Sampler::GetSamplerDescriptor() const {
-        return mSamplerDesc;
+    D3D12_FILTER_TYPE mipmapFilter;
+    switch (descriptor->mipmapFilter) {
+        case wgpu::FilterMode::Nearest:
+            mipmapFilter = D3D12_FILTER_TYPE_POINT;
+            break;
+        case wgpu::FilterMode::Linear:
+            mipmapFilter = D3D12_FILTER_TYPE_LINEAR;
+            break;
     }
 
+    D3D12_FILTER_REDUCTION_TYPE reduction = descriptor->compare == wgpu::CompareFunction::Undefined
+                                                ? D3D12_FILTER_REDUCTION_TYPE_STANDARD
+                                                : D3D12_FILTER_REDUCTION_TYPE_COMPARISON;
+
+    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ns-d3d12-d3d12_sampler_desc
+    mSamplerDesc.MaxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+    if (mSamplerDesc.MaxAnisotropy > 1) {
+        mSamplerDesc.Filter = D3D12_ENCODE_ANISOTROPIC_FILTER(reduction);
+    } else {
+        mSamplerDesc.Filter =
+            D3D12_ENCODE_BASIC_FILTER(minFilter, magFilter, mipmapFilter, reduction);
+    }
+
+    mSamplerDesc.AddressU = AddressMode(descriptor->addressModeU);
+    mSamplerDesc.AddressV = AddressMode(descriptor->addressModeV);
+    mSamplerDesc.AddressW = AddressMode(descriptor->addressModeW);
+    mSamplerDesc.MipLODBias = 0.f;
+
+    if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+        mSamplerDesc.ComparisonFunc = ToD3D12ComparisonFunc(descriptor->compare);
+    } else {
+        // Still set the function so it's not garbage.
+        mSamplerDesc.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
+    }
+    mSamplerDesc.MinLOD = descriptor->lodMinClamp;
+    mSamplerDesc.MaxLOD = descriptor->lodMaxClamp;
+}
+
+const D3D12_SAMPLER_DESC& Sampler::GetSamplerDescriptor() const {
+    return mSamplerDesc;
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/SamplerD3D12.h b/src/dawn/native/d3d12/SamplerD3D12.h
index c1013c1..530720c 100644
--- a/src/dawn/native/d3d12/SamplerD3D12.h
+++ b/src/dawn/native/d3d12/SamplerD3D12.h
@@ -21,19 +21,19 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class Sampler final : public SamplerBase {
-      public:
-        static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+  public:
+    static Ref<Sampler> Create(Device* device, const SamplerDescriptor* descriptor);
 
-        const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;
+    const D3D12_SAMPLER_DESC& GetSamplerDescriptor() const;
 
-      private:
-        Sampler(Device* device, const SamplerDescriptor* descriptor);
-        ~Sampler() override = default;
-        D3D12_SAMPLER_DESC mSamplerDesc = {};
-    };
+  private:
+    Sampler(Device* device, const SamplerDescriptor* descriptor);
+    ~Sampler() override = default;
+    D3D12_SAMPLER_DESC mSamplerDesc = {};
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
index 62e455d..f1d12e3 100644
--- a/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
+++ b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.cpp
@@ -28,141 +28,137 @@
 
 namespace dawn::native::d3d12 {
 
-    SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
-        : mSamplers(std::move(samplers)) {
+SamplerHeapCacheEntry::SamplerHeapCacheEntry(std::vector<Sampler*> samplers)
+    : mSamplers(std::move(samplers)) {}
+
+SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
+                                             StagingDescriptorAllocator* allocator,
+                                             std::vector<Sampler*> samplers,
+                                             CPUDescriptorHeapAllocation allocation)
+    : mCPUAllocation(std::move(allocation)),
+      mSamplers(std::move(samplers)),
+      mAllocator(allocator),
+      mCache(cache) {
+    ASSERT(mCache != nullptr);
+    ASSERT(mCPUAllocation.IsValid());
+    ASSERT(!mSamplers.empty());
+}
+
+std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
+    return std::move(mSamplers);
+}
+
+SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
+    // If this is a blueprint then the CPU allocation cannot exist and has no entry to remove.
+    if (mCPUAllocation.IsValid()) {
+        mCache->RemoveCacheEntry(this);
+        mAllocator->Deallocate(&mCPUAllocation);
     }
 
-    SamplerHeapCacheEntry::SamplerHeapCacheEntry(SamplerHeapCache* cache,
-                                                 StagingDescriptorAllocator* allocator,
-                                                 std::vector<Sampler*> samplers,
-                                                 CPUDescriptorHeapAllocation allocation)
-        : mCPUAllocation(std::move(allocation)),
-          mSamplers(std::move(samplers)),
-          mAllocator(allocator),
-          mCache(cache) {
-        ASSERT(mCache != nullptr);
-        ASSERT(mCPUAllocation.IsValid());
-        ASSERT(!mSamplers.empty());
-    }
+    ASSERT(!mCPUAllocation.IsValid());
+}
 
-    std::vector<Sampler*>&& SamplerHeapCacheEntry::AcquireSamplers() {
-        return std::move(mSamplers);
-    }
-
-    SamplerHeapCacheEntry::~SamplerHeapCacheEntry() {
-        // If this is a blueprint then the CPU allocation cannot exist and has no entry to remove.
-        if (mCPUAllocation.IsValid()) {
-            mCache->RemoveCacheEntry(this);
-            mAllocator->Deallocate(&mCPUAllocation);
-        }
-
-        ASSERT(!mCPUAllocation.IsValid());
-    }
-
-    bool SamplerHeapCacheEntry::Populate(Device* device,
-                                         ShaderVisibleDescriptorAllocator* allocator) {
-        if (allocator->IsAllocationStillValid(mGPUAllocation)) {
-            return true;
-        }
-
-        ASSERT(!mSamplers.empty());
-
-        // Attempt to allocate descriptors for the currently bound shader-visible heaps.
-        // If either failed, return early to re-allocate and switch the heaps.
-        const uint32_t descriptorCount = mSamplers.size();
-        D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
-        if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
-                                               &baseCPUDescriptor, &mGPUAllocation)) {
-            return false;
-        }
-
-        // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
-        // simple copies per bindgroup, a single non-simple copy could be issued.
-        // TODO(dawn:155): Consider doing this optimization.
-        device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
-                                                        mCPUAllocation.GetBaseDescriptor(),
-                                                        D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
-
+bool SamplerHeapCacheEntry::Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator) {
+    if (allocator->IsAllocationStillValid(mGPUAllocation)) {
         return true;
     }
 
-    D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
-        return mGPUAllocation.GetBaseDescriptor();
+    ASSERT(!mSamplers.empty());
+
+    // Attempt to allocate descriptors for the currently bound shader-visible heaps.
+    // If either failed, return early to re-allocate and switch the heaps.
+    const uint32_t descriptorCount = mSamplers.size();
+    D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor;
+    if (!allocator->AllocateGPUDescriptors(descriptorCount, device->GetPendingCommandSerial(),
+                                           &baseCPUDescriptor, &mGPUAllocation)) {
+        return false;
     }
 
-    ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
-        const BindGroup* group,
-        StagingDescriptorAllocator* samplerAllocator) {
-        const BindGroupLayout* bgl = ToBackend(group->GetLayout());
+    // CPU bindgroups are sparsely allocated across CPU heaps. Instead of doing
+    // simple copies per bindgroup, a single non-simple copy could be issued.
+    // TODO(dawn:155): Consider doing this optimization.
+    device->GetD3D12Device()->CopyDescriptorsSimple(descriptorCount, baseCPUDescriptor,
+                                                    mCPUAllocation.GetBaseDescriptor(),
+                                                    D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
 
-        // If a previously created bindgroup used the same samplers, the backing sampler heap
-        // allocation can be reused. The packed list of samplers acts as the key to lookup the
-        // allocation in a cache.
-        // TODO(dawn:155): Avoid re-allocating the vector each lookup.
-        std::vector<Sampler*> samplers;
-        samplers.reserve(bgl->GetSamplerDescriptorCount());
+    return true;
+}
 
-        for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
-             bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
-            const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
-            if (bindingInfo.bindingType == BindingInfoType::Sampler) {
-                samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
-            }
+D3D12_GPU_DESCRIPTOR_HANDLE SamplerHeapCacheEntry::GetBaseDescriptor() const {
+    return mGPUAllocation.GetBaseDescriptor();
+}
+
+ResultOrError<Ref<SamplerHeapCacheEntry>> SamplerHeapCache::GetOrCreate(
+    const BindGroup* group,
+    StagingDescriptorAllocator* samplerAllocator) {
+    const BindGroupLayout* bgl = ToBackend(group->GetLayout());
+
+    // If a previously created bindgroup used the same samplers, the backing sampler heap
+    // allocation can be reused. The packed list of samplers acts as the key to lookup the
+    // allocation in a cache.
+    // TODO(dawn:155): Avoid re-allocating the vector each lookup.
+    std::vector<Sampler*> samplers;
+    samplers.reserve(bgl->GetSamplerDescriptorCount());
+
+    for (BindingIndex bindingIndex = bgl->GetDynamicBufferCount();
+         bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+        const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+        if (bindingInfo.bindingType == BindingInfoType::Sampler) {
+            samplers.push_back(ToBackend(group->GetBindingAsSampler(bindingIndex)));
         }
-
-        // Check the cache if there exists a sampler heap allocation that corresponds to the
-        // samplers.
-        SamplerHeapCacheEntry blueprint(std::move(samplers));
-        auto iter = mCache.find(&blueprint);
-        if (iter != mCache.end()) {
-            return Ref<SamplerHeapCacheEntry>(*iter);
-        }
-
-        // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
-        // real entry below.
-        samplers = std::move(blueprint.AcquireSamplers());
-
-        CPUDescriptorHeapAllocation allocation;
-        DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
-
-        const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
-        ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
-
-        for (uint32_t i = 0; i < samplers.size(); ++i) {
-            const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
-            d3d12Device->CreateSampler(&samplerDesc,
-                                       allocation.OffsetFrom(samplerSizeIncrement, i));
-        }
-
-        Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
-            this, samplerAllocator, std::move(samplers), std::move(allocation)));
-        mCache.insert(entry.Get());
-        return std::move(entry);
     }
 
-    SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {
+    // Check the cache if there exists a sampler heap allocation that corresponds to the
+    // samplers.
+    SamplerHeapCacheEntry blueprint(std::move(samplers));
+    auto iter = mCache.find(&blueprint);
+    if (iter != mCache.end()) {
+        return Ref<SamplerHeapCacheEntry>(*iter);
     }
 
-    SamplerHeapCache::~SamplerHeapCache() {
-        ASSERT(mCache.empty());
+    // Steal the sampler vector back from the blueprint to avoid creating a new copy for the
+    // real entry below.
+    samplers = std::move(blueprint.AcquireSamplers());
+
+    CPUDescriptorHeapAllocation allocation;
+    DAWN_TRY_ASSIGN(allocation, samplerAllocator->AllocateCPUDescriptors());
+
+    const uint32_t samplerSizeIncrement = samplerAllocator->GetSizeIncrement();
+    ID3D12Device* d3d12Device = mDevice->GetD3D12Device();
+
+    for (uint32_t i = 0; i < samplers.size(); ++i) {
+        const auto& samplerDesc = samplers[i]->GetSamplerDescriptor();
+        d3d12Device->CreateSampler(&samplerDesc, allocation.OffsetFrom(samplerSizeIncrement, i));
     }
 
-    void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
-        ASSERT(entry->GetRefCountForTesting() == 0);
-        size_t removedCount = mCache.erase(entry);
-        ASSERT(removedCount == 1);
-    }
+    Ref<SamplerHeapCacheEntry> entry = AcquireRef(new SamplerHeapCacheEntry(
+        this, samplerAllocator, std::move(samplers), std::move(allocation)));
+    mCache.insert(entry.Get());
+    return std::move(entry);
+}
 
-    size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
-        size_t hash = 0;
-        for (const Sampler* sampler : entry->mSamplers) {
-            HashCombine(&hash, sampler);
-        }
-        return hash;
-    }
+SamplerHeapCache::SamplerHeapCache(Device* device) : mDevice(device) {}
 
-    bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
-                                                         const SamplerHeapCacheEntry* b) const {
-        return a->mSamplers == b->mSamplers;
+SamplerHeapCache::~SamplerHeapCache() {
+    ASSERT(mCache.empty());
+}
+
+void SamplerHeapCache::RemoveCacheEntry(SamplerHeapCacheEntry* entry) {
+    ASSERT(entry->GetRefCountForTesting() == 0);
+    size_t removedCount = mCache.erase(entry);
+    ASSERT(removedCount == 1);
+}
+
+size_t SamplerHeapCacheEntry::HashFunc::operator()(const SamplerHeapCacheEntry* entry) const {
+    size_t hash = 0;
+    for (const Sampler* sampler : entry->mSamplers) {
+        HashCombine(&hash, sampler);
     }
+    return hash;
+}
+
+bool SamplerHeapCacheEntry::EqualityFunc::operator()(const SamplerHeapCacheEntry* a,
+                                                     const SamplerHeapCacheEntry* b) const {
+    return a->mSamplers == b->mSamplers;
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
index f58a970..e701957 100644
--- a/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
+++ b/src/dawn/native/d3d12/SamplerHeapCacheD3D12.h
@@ -35,74 +35,74 @@
 // and switches incur expensive pipeline flushes.
 namespace dawn::native::d3d12 {
 
-    class BindGroup;
-    class Device;
-    class Sampler;
-    class SamplerHeapCache;
-    class StagingDescriptorAllocator;
-    class ShaderVisibleDescriptorAllocator;
+class BindGroup;
+class Device;
+class Sampler;
+class SamplerHeapCache;
+class StagingDescriptorAllocator;
+class ShaderVisibleDescriptorAllocator;
 
-    // Wraps sampler descriptor heap allocations in a cache.
-    class SamplerHeapCacheEntry : public RefCounted {
-      public:
-        SamplerHeapCacheEntry() = default;
-        explicit SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
-        SamplerHeapCacheEntry(SamplerHeapCache* cache,
-                              StagingDescriptorAllocator* allocator,
-                              std::vector<Sampler*> samplers,
-                              CPUDescriptorHeapAllocation allocation);
-        ~SamplerHeapCacheEntry() override;
+// Wraps sampler descriptor heap allocations in a cache.
+class SamplerHeapCacheEntry : public RefCounted {
+  public:
+    SamplerHeapCacheEntry() = default;
+    explicit SamplerHeapCacheEntry(std::vector<Sampler*> samplers);
+    SamplerHeapCacheEntry(SamplerHeapCache* cache,
+                          StagingDescriptorAllocator* allocator,
+                          std::vector<Sampler*> samplers,
+                          CPUDescriptorHeapAllocation allocation);
+    ~SamplerHeapCacheEntry() override;
 
-        D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
+    D3D12_GPU_DESCRIPTOR_HANDLE GetBaseDescriptor() const;
 
-        std::vector<Sampler*>&& AcquireSamplers();
+    std::vector<Sampler*>&& AcquireSamplers();
 
-        bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);
+    bool Populate(Device* device, ShaderVisibleDescriptorAllocator* allocator);
 
-        // Functors necessary for the unordered_map<SamplerHeapCacheEntry*>-based cache.
-        struct HashFunc {
-            size_t operator()(const SamplerHeapCacheEntry* entry) const;
-        };
-
-        struct EqualityFunc {
-            bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
-        };
-
-      private:
-        CPUDescriptorHeapAllocation mCPUAllocation;
-        GPUDescriptorHeapAllocation mGPUAllocation;
-
-        // Storing raw pointer because the sampler object will be already hashed
-        // by the device and will already be unique.
-        std::vector<Sampler*> mSamplers;
-
-        StagingDescriptorAllocator* mAllocator = nullptr;
-        SamplerHeapCache* mCache = nullptr;
+    // Functors necessary for the unordered_map<SamplerHeapCacheEntry*>-based cache.
+    struct HashFunc {
+        size_t operator()(const SamplerHeapCacheEntry* entry) const;
     };
 
-    // Cache descriptor heap allocations so that we don't create duplicate ones for every
-    // BindGroup.
-    class SamplerHeapCache {
-      public:
-        explicit SamplerHeapCache(Device* device);
-        ~SamplerHeapCache();
-
-        ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
-            const BindGroup* group,
-            StagingDescriptorAllocator* samplerAllocator);
-
-        void RemoveCacheEntry(SamplerHeapCacheEntry* entry);
-
-      private:
-        Device* mDevice;
-
-        using Cache = std::unordered_set<SamplerHeapCacheEntry*,
-                                         SamplerHeapCacheEntry::HashFunc,
-                                         SamplerHeapCacheEntry::EqualityFunc>;
-
-        Cache mCache;
+    struct EqualityFunc {
+        bool operator()(const SamplerHeapCacheEntry* a, const SamplerHeapCacheEntry* b) const;
     };
 
+  private:
+    CPUDescriptorHeapAllocation mCPUAllocation;
+    GPUDescriptorHeapAllocation mGPUAllocation;
+
+    // Storing raw pointer because the sampler object will be already hashed
+    // by the device and will already be unique.
+    std::vector<Sampler*> mSamplers;
+
+    StagingDescriptorAllocator* mAllocator = nullptr;
+    SamplerHeapCache* mCache = nullptr;
+};
+
+// Cache descriptor heap allocations so that we don't create duplicate ones for every
+// BindGroup.
+class SamplerHeapCache {
+  public:
+    explicit SamplerHeapCache(Device* device);
+    ~SamplerHeapCache();
+
+    ResultOrError<Ref<SamplerHeapCacheEntry>> GetOrCreate(
+        const BindGroup* group,
+        StagingDescriptorAllocator* samplerAllocator);
+
+    void RemoveCacheEntry(SamplerHeapCacheEntry* entry);
+
+  private:
+    Device* mDevice;
+
+    using Cache = std::unordered_set<SamplerHeapCacheEntry*,
+                                     SamplerHeapCacheEntry::HashFunc,
+                                     SamplerHeapCacheEntry::EqualityFunc>;
+
+    Cache mCache;
+};
+
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_SAMPLERHEAPCACHED3D12_H_
diff --git a/src/dawn/native/d3d12/ShaderModuleD3D12.cpp b/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
index cffb38d..efb6487 100644
--- a/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
+++ b/src/dawn/native/d3d12/ShaderModuleD3D12.cpp
@@ -44,787 +44,774 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        ResultOrError<uint64_t> GetDXCompilerVersion(ComPtr<IDxcValidator> dxcValidator) {
-            ComPtr<IDxcVersionInfo> versionInfo;
-            DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
-                                  "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
+namespace {
+ResultOrError<uint64_t> GetDXCompilerVersion(ComPtr<IDxcValidator> dxcValidator) {
+    ComPtr<IDxcVersionInfo> versionInfo;
+    DAWN_TRY(CheckHRESULT(dxcValidator.As(&versionInfo),
+                          "D3D12 QueryInterface IDxcValidator to IDxcVersionInfo"));
 
-            uint32_t compilerMajor, compilerMinor;
-            DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
-                                  "IDxcVersionInfo::GetVersion"));
+    uint32_t compilerMajor, compilerMinor;
+    DAWN_TRY(CheckHRESULT(versionInfo->GetVersion(&compilerMajor, &compilerMinor),
+                          "IDxcVersionInfo::GetVersion"));
 
-            // Pack both into a single version number.
-            return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
+    // Pack both into a single version number.
+    return (uint64_t(compilerMajor) << uint64_t(32)) + compilerMinor;
+}
+
+uint64_t GetD3DCompilerVersion() {
+    return D3D_COMPILER_VERSION;
+}
+
+struct CompareBindingPoint {
+    constexpr bool operator()(const tint::transform::BindingPoint& lhs,
+                              const tint::transform::BindingPoint& rhs) const {
+        if (lhs.group != rhs.group) {
+            return lhs.group < rhs.group;
+        } else {
+            return lhs.binding < rhs.binding;
+        }
+    }
+};
+
+void Serialize(std::stringstream& output, const tint::ast::Access& access) {
+    output << access;
+}
+
+void Serialize(std::stringstream& output, const tint::transform::BindingPoint& binding_point) {
+    output << "(BindingPoint";
+    output << " group=" << binding_point.group;
+    output << " binding=" << binding_point.binding;
+    output << ")";
+}
+
+template <typename T, typename = typename std::enable_if<std::is_fundamental<T>::value>::type>
+void Serialize(std::stringstream& output, const T& val) {
+    output << val;
+}
+
+template <typename T>
+void Serialize(std::stringstream& output,
+               const std::unordered_map<tint::transform::BindingPoint, T>& map) {
+    output << "(map";
+
+    std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(), map.end());
+    for (auto& [bindingPoint, value] : sorted) {
+        output << " ";
+        Serialize(output, bindingPoint);
+        output << "=";
+        Serialize(output, value);
+    }
+    output << ")";
+}
+
+void Serialize(std::stringstream& output,
+               const tint::writer::ArrayLengthFromUniformOptions& arrayLengthFromUniform) {
+    output << "(ArrayLengthFromUniformOptions";
+    output << " ubo_binding=";
+    Serialize(output, arrayLengthFromUniform.ubo_binding);
+    output << " bindpoint_to_size_index=";
+    Serialize(output, arrayLengthFromUniform.bindpoint_to_size_index);
+    output << ")";
+}
+
+// 32 bit float has 7 decimal digits of precision so setting n to 8 should be enough
+std::string FloatToStringWithPrecision(float v, std::streamsize n = 8) {
+    std::ostringstream out;
+    out.precision(n);
+    out << std::fixed << v;
+    return out.str();
+}
+
+std::string GetHLSLValueString(EntryPointMetadata::OverridableConstant::Type dawnType,
+                               const OverridableConstantScalar* entry,
+                               double value = 0) {
+    switch (dawnType) {
+        case EntryPointMetadata::OverridableConstant::Type::Boolean:
+            return std::to_string(entry ? entry->b : static_cast<int32_t>(value));
+        case EntryPointMetadata::OverridableConstant::Type::Float32:
+            return FloatToStringWithPrecision(entry ? entry->f32 : static_cast<float>(value));
+        case EntryPointMetadata::OverridableConstant::Type::Int32:
+            return std::to_string(entry ? entry->i32 : static_cast<int32_t>(value));
+        case EntryPointMetadata::OverridableConstant::Type::Uint32:
+            return std::to_string(entry ? entry->u32 : static_cast<uint32_t>(value));
+        default:
+            UNREACHABLE();
+    }
+}
+
+constexpr char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
+
+void GetOverridableConstantsDefines(
+    std::vector<std::pair<std::string, std::string>>* defineStrings,
+    const PipelineConstantEntries* pipelineConstantEntries,
+    const EntryPointMetadata::OverridableConstantsMap* shaderEntryPointConstants) {
+    std::unordered_set<std::string> overriddenConstants;
+
+    // Set pipeline overridden values
+    for (const auto& [name, value] : *pipelineConstantEntries) {
+        overriddenConstants.insert(name);
+
+        // This is already validated so `name` must exist
+        const auto& moduleConstant = shaderEntryPointConstants->at(name);
+
+        defineStrings->emplace_back(
+            kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
+            GetHLSLValueString(moduleConstant.type, nullptr, value));
+    }
+
+    // Set shader initialized default values
+    for (const auto& iter : *shaderEntryPointConstants) {
+        const std::string& name = iter.first;
+        if (overriddenConstants.count(name) != 0) {
+            // This constant already has overridden value
+            continue;
         }
 
-        uint64_t GetD3DCompilerVersion() {
-            return D3D_COMPILER_VERSION;
+        const auto& moduleConstant = shaderEntryPointConstants->at(name);
+
+        // Uninitialized default values are okay since they ar only defined to pass
+        // compilation but not used
+        defineStrings->emplace_back(
+            kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
+            GetHLSLValueString(moduleConstant.type, &moduleConstant.defaultValue));
+    }
+}
+
+// The inputs to a shader compilation. These have been intentionally isolated from the
+// device to help ensure that the pipeline cache key contains all inputs for compilation.
+struct ShaderCompilationRequest {
+    enum Compiler { FXC, DXC };
+
+    // Common inputs
+    Compiler compiler;
+    const tint::Program* program;
+    const char* entryPointName;
+    SingleShaderStage stage;
+    uint32_t compileFlags;
+    bool disableSymbolRenaming;
+    tint::transform::BindingRemapper::BindingPoints remappedBindingPoints;
+    tint::transform::BindingRemapper::AccessControls remappedAccessControls;
+    bool isRobustnessEnabled;
+    bool usesNumWorkgroups;
+    uint32_t numWorkgroupsRegisterSpace;
+    uint32_t numWorkgroupsShaderRegister;
+    tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
+    std::vector<std::pair<std::string, std::string>> defineStrings;
+
+    // FXC/DXC common inputs
+    bool disableWorkgroupInit;
+
+    // FXC inputs
+    uint64_t fxcVersion;
+
+    // DXC inputs
+    uint64_t dxcVersion;
+    const D3D12DeviceInfo* deviceInfo;
+    bool hasShaderFloat16Feature;
+
+    static ResultOrError<ShaderCompilationRequest> Create(
+        const char* entryPointName,
+        SingleShaderStage stage,
+        const PipelineLayout* layout,
+        uint32_t compileFlags,
+        const Device* device,
+        const tint::Program* program,
+        const EntryPointMetadata& entryPoint,
+        const ProgrammableStage& programmableStage) {
+        Compiler compiler;
+        uint64_t dxcVersion = 0;
+        if (device->IsToggleEnabled(Toggle::UseDXC)) {
+            compiler = Compiler::DXC;
+            DAWN_TRY_ASSIGN(dxcVersion, GetDXCompilerVersion(device->GetDxcValidator()));
+        } else {
+            compiler = Compiler::FXC;
         }
 
-        struct CompareBindingPoint {
-            constexpr bool operator()(const tint::transform::BindingPoint& lhs,
-                                      const tint::transform::BindingPoint& rhs) const {
-                if (lhs.group != rhs.group) {
-                    return lhs.group < rhs.group;
-                } else {
-                    return lhs.binding < rhs.binding;
-                }
-            }
-        };
+        using tint::transform::BindingPoint;
+        using tint::transform::BindingRemapper;
 
-        void Serialize(std::stringstream& output, const tint::ast::Access& access) {
-            output << access;
-        }
+        BindingRemapper::BindingPoints remappedBindingPoints;
+        BindingRemapper::AccessControls remappedAccessControls;
 
-        void Serialize(std::stringstream& output,
-                       const tint::transform::BindingPoint& binding_point) {
-            output << "(BindingPoint";
-            output << " group=" << binding_point.group;
-            output << " binding=" << binding_point.binding;
-            output << ")";
-        }
+        tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
+        arrayLengthFromUniform.ubo_binding = {
+            layout->GetDynamicStorageBufferLengthsRegisterSpace(),
+            layout->GetDynamicStorageBufferLengthsShaderRegister()};
 
-        template <typename T,
-                  typename = typename std::enable_if<std::is_fundamental<T>::value>::type>
-        void Serialize(std::stringstream& output, const T& val) {
-            output << val;
-        }
+        const BindingInfoArray& moduleBindingInfo = entryPoint.bindings;
+        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+            const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+            const auto& groupBindingInfo = moduleBindingInfo[group];
 
-        template <typename T>
-        void Serialize(std::stringstream& output,
-                       const std::unordered_map<tint::transform::BindingPoint, T>& map) {
-            output << "(map";
-
-            std::map<tint::transform::BindingPoint, T, CompareBindingPoint> sorted(map.begin(),
-                                                                                   map.end());
-            for (auto& [bindingPoint, value] : sorted) {
-                output << " ";
-                Serialize(output, bindingPoint);
-                output << "=";
-                Serialize(output, value);
-            }
-            output << ")";
-        }
-
-        void Serialize(std::stringstream& output,
-                       const tint::writer::ArrayLengthFromUniformOptions& arrayLengthFromUniform) {
-            output << "(ArrayLengthFromUniformOptions";
-            output << " ubo_binding=";
-            Serialize(output, arrayLengthFromUniform.ubo_binding);
-            output << " bindpoint_to_size_index=";
-            Serialize(output, arrayLengthFromUniform.bindpoint_to_size_index);
-            output << ")";
-        }
-
-        // 32 bit float has 7 decimal digits of precision so setting n to 8 should be enough
-        std::string FloatToStringWithPrecision(float v, std::streamsize n = 8) {
-            std::ostringstream out;
-            out.precision(n);
-            out << std::fixed << v;
-            return out.str();
-        }
-
-        std::string GetHLSLValueString(EntryPointMetadata::OverridableConstant::Type dawnType,
-                                       const OverridableConstantScalar* entry,
-                                       double value = 0) {
-            switch (dawnType) {
-                case EntryPointMetadata::OverridableConstant::Type::Boolean:
-                    return std::to_string(entry ? entry->b : static_cast<int32_t>(value));
-                case EntryPointMetadata::OverridableConstant::Type::Float32:
-                    return FloatToStringWithPrecision(entry ? entry->f32
-                                                            : static_cast<float>(value));
-                case EntryPointMetadata::OverridableConstant::Type::Int32:
-                    return std::to_string(entry ? entry->i32 : static_cast<int32_t>(value));
-                case EntryPointMetadata::OverridableConstant::Type::Uint32:
-                    return std::to_string(entry ? entry->u32 : static_cast<uint32_t>(value));
-                default:
-                    UNREACHABLE();
-            }
-        }
-
-        constexpr char kSpecConstantPrefix[] = "WGSL_SPEC_CONSTANT_";
-
-        void GetOverridableConstantsDefines(
-            std::vector<std::pair<std::string, std::string>>* defineStrings,
-            const PipelineConstantEntries* pipelineConstantEntries,
-            const EntryPointMetadata::OverridableConstantsMap* shaderEntryPointConstants) {
-            std::unordered_set<std::string> overriddenConstants;
-
-            // Set pipeline overridden values
-            for (const auto& [name, value] : *pipelineConstantEntries) {
-                overriddenConstants.insert(name);
-
-                // This is already validated so `name` must exist
-                const auto& moduleConstant = shaderEntryPointConstants->at(name);
-
-                defineStrings->emplace_back(
-                    kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
-                    GetHLSLValueString(moduleConstant.type, nullptr, value));
-            }
-
-            // Set shader initialized default values
-            for (const auto& iter : *shaderEntryPointConstants) {
-                const std::string& name = iter.first;
-                if (overriddenConstants.count(name) != 0) {
-                    // This constant already has overridden value
-                    continue;
+            // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify
+            // the Tint AST to make the "bindings" decoration match the offset chosen by
+            // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
+            // assigned to each interface variable.
+            for (const auto& [binding, bindingInfo] : groupBindingInfo) {
+                BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                             static_cast<uint32_t>(binding)};
+                BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+                                             bgl->GetShaderRegister(bindingIndex)};
+                if (srcBindingPoint != dstBindingPoint) {
+                    remappedBindingPoints.emplace(srcBindingPoint, dstBindingPoint);
                 }
 
-                const auto& moduleConstant = shaderEntryPointConstants->at(name);
-
-                // Uninitialized default values are okay since they ar only defined to pass
-                // compilation but not used
-                defineStrings->emplace_back(
-                    kSpecConstantPrefix + std::to_string(static_cast<int32_t>(moduleConstant.id)),
-                    GetHLSLValueString(moduleConstant.type, &moduleConstant.defaultValue));
-            }
-        }
-
-        // The inputs to a shader compilation. These have been intentionally isolated from the
-        // device to help ensure that the pipeline cache key contains all inputs for compilation.
-        struct ShaderCompilationRequest {
-            enum Compiler { FXC, DXC };
-
-            // Common inputs
-            Compiler compiler;
-            const tint::Program* program;
-            const char* entryPointName;
-            SingleShaderStage stage;
-            uint32_t compileFlags;
-            bool disableSymbolRenaming;
-            tint::transform::BindingRemapper::BindingPoints remappedBindingPoints;
-            tint::transform::BindingRemapper::AccessControls remappedAccessControls;
-            bool isRobustnessEnabled;
-            bool usesNumWorkgroups;
-            uint32_t numWorkgroupsRegisterSpace;
-            uint32_t numWorkgroupsShaderRegister;
-            tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
-            std::vector<std::pair<std::string, std::string>> defineStrings;
-
-            // FXC/DXC common inputs
-            bool disableWorkgroupInit;
-
-            // FXC inputs
-            uint64_t fxcVersion;
-
-            // DXC inputs
-            uint64_t dxcVersion;
-            const D3D12DeviceInfo* deviceInfo;
-            bool hasShaderFloat16Feature;
-
-            static ResultOrError<ShaderCompilationRequest> Create(
-                const char* entryPointName,
-                SingleShaderStage stage,
-                const PipelineLayout* layout,
-                uint32_t compileFlags,
-                const Device* device,
-                const tint::Program* program,
-                const EntryPointMetadata& entryPoint,
-                const ProgrammableStage& programmableStage) {
-                Compiler compiler;
-                uint64_t dxcVersion = 0;
-                if (device->IsToggleEnabled(Toggle::UseDXC)) {
-                    compiler = Compiler::DXC;
-                    DAWN_TRY_ASSIGN(dxcVersion, GetDXCompilerVersion(device->GetDxcValidator()));
-                } else {
-                    compiler = Compiler::FXC;
-                }
-
-                using tint::transform::BindingPoint;
-                using tint::transform::BindingRemapper;
-
-                BindingRemapper::BindingPoints remappedBindingPoints;
-                BindingRemapper::AccessControls remappedAccessControls;
-
-                tint::writer::ArrayLengthFromUniformOptions arrayLengthFromUniform;
-                arrayLengthFromUniform.ubo_binding = {
-                    layout->GetDynamicStorageBufferLengthsRegisterSpace(),
-                    layout->GetDynamicStorageBufferLengthsShaderRegister()};
-
-                const BindingInfoArray& moduleBindingInfo = entryPoint.bindings;
-                for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-                    const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
-                    const auto& groupBindingInfo = moduleBindingInfo[group];
-
-                    // d3d12::BindGroupLayout packs the bindings per HLSL register-space. We modify
-                    // the Tint AST to make the "bindings" decoration match the offset chosen by
-                    // d3d12::BindGroupLayout so that Tint produces HLSL with the correct registers
-                    // assigned to each interface variable.
-                    for (const auto& [binding, bindingInfo] : groupBindingInfo) {
-                        BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
-                        BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
-                                                     static_cast<uint32_t>(binding)};
-                        BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
-                                                     bgl->GetShaderRegister(bindingIndex)};
-                        if (srcBindingPoint != dstBindingPoint) {
-                            remappedBindingPoints.emplace(srcBindingPoint, dstBindingPoint);
-                        }
-
-                        // Declaring a read-only storage buffer in HLSL but specifying a storage
-                        // buffer in the BGL produces the wrong output. Force read-only storage
-                        // buffer bindings to be treated as UAV instead of SRV. Internal storage
-                        // buffer is a storage buffer used in the internal pipeline.
-                        const bool forceStorageBufferAsUAV =
-                            (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
-                             (bgl->GetBindingInfo(bindingIndex).buffer.type ==
-                                  wgpu::BufferBindingType::Storage ||
-                              bgl->GetBindingInfo(bindingIndex).buffer.type ==
-                                  kInternalStorageBufferBinding));
-                        if (forceStorageBufferAsUAV) {
-                            remappedAccessControls.emplace(srcBindingPoint,
-                                                           tint::ast::Access::kReadWrite);
-                        }
-                    }
-
-                    // Add arrayLengthFromUniform options
-                    {
-                        for (const auto& bindingAndRegisterOffset :
-                             layout->GetDynamicStorageBufferLengthInfo()[group]
-                                 .bindingAndRegisterOffsets) {
-                            BindingNumber binding = bindingAndRegisterOffset.binding;
-                            uint32_t registerOffset = bindingAndRegisterOffset.registerOffset;
-
-                            BindingPoint bindingPoint{static_cast<uint32_t>(group),
-                                                      static_cast<uint32_t>(binding)};
-                            // Get the renamed binding point if it was remapped.
-                            auto it = remappedBindingPoints.find(bindingPoint);
-                            if (it != remappedBindingPoints.end()) {
-                                bindingPoint = it->second;
-                            }
-
-                            arrayLengthFromUniform.bindpoint_to_size_index.emplace(bindingPoint,
-                                                                                   registerOffset);
-                        }
-                    }
-                }
-
-                ShaderCompilationRequest request;
-                request.compiler = compiler;
-                request.program = program;
-                request.entryPointName = entryPointName;
-                request.stage = stage;
-                request.compileFlags = compileFlags;
-                request.disableSymbolRenaming =
-                    device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
-                request.remappedBindingPoints = std::move(remappedBindingPoints);
-                request.remappedAccessControls = std::move(remappedAccessControls);
-                request.isRobustnessEnabled = device->IsRobustnessEnabled();
-                request.disableWorkgroupInit =
-                    device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
-                request.usesNumWorkgroups = entryPoint.usesNumWorkgroups;
-                request.numWorkgroupsShaderRegister = layout->GetNumWorkgroupsShaderRegister();
-                request.numWorkgroupsRegisterSpace = layout->GetNumWorkgroupsRegisterSpace();
-                request.arrayLengthFromUniform = std::move(arrayLengthFromUniform);
-                request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
-                request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
-                request.deviceInfo = &device->GetDeviceInfo();
-                request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);
-
-                GetOverridableConstantsDefines(
-                    &request.defineStrings, &programmableStage.constants,
-                    &programmableStage.module->GetEntryPoint(programmableStage.entryPoint)
-                         .overridableConstants);
-
-                return std::move(request);
-            }
-
-            // TODO(dawn:1341): Move to use CacheKey instead of the vector.
-            ResultOrError<std::vector<uint8_t>> CreateCacheKey() const {
-                // Generate the WGSL from the Tint program so it's normalized.
-                // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
-                // compact representation.
-                auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
-                if (!result.success) {
-                    std::ostringstream errorStream;
-                    errorStream << "Tint WGSL failure:" << std::endl;
-                    errorStream << "Generator: " << result.error << std::endl;
-                    return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
-                }
-
-                std::stringstream stream;
-
-                // Prefix the key with the type to avoid collisions from another type that could
-                // have the same key.
-                stream << static_cast<uint32_t>(CacheKey::Type::Shader);
-                stream << "\n";
-
-                stream << result.wgsl.length();
-                stream << "\n";
-
-                stream << result.wgsl;
-                stream << "\n";
-
-                stream << "(ShaderCompilationRequest";
-                stream << " compiler=" << compiler;
-                stream << " entryPointName=" << entryPointName;
-                stream << " stage=" << uint32_t(stage);
-                stream << " compileFlags=" << compileFlags;
-                stream << " disableSymbolRenaming=" << disableSymbolRenaming;
-
-                stream << " remappedBindingPoints=";
-                Serialize(stream, remappedBindingPoints);
-
-                stream << " remappedAccessControls=";
-                Serialize(stream, remappedAccessControls);
-
-                stream << " useNumWorkgroups=" << usesNumWorkgroups;
-                stream << " numWorkgroupsRegisterSpace=" << numWorkgroupsRegisterSpace;
-                stream << " numWorkgroupsShaderRegister=" << numWorkgroupsShaderRegister;
-
-                stream << " arrayLengthFromUniform=";
-                Serialize(stream, arrayLengthFromUniform);
-
-                stream << " shaderModel=" << deviceInfo->shaderModel;
-                stream << " disableWorkgroupInit=" << disableWorkgroupInit;
-                stream << " isRobustnessEnabled=" << isRobustnessEnabled;
-                stream << " fxcVersion=" << fxcVersion;
-                stream << " dxcVersion=" << dxcVersion;
-                stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;
-
-                stream << " defines={";
-                for (const auto& [name, value] : defineStrings) {
-                    stream << " <" << name << "," << value << ">";
-                }
-                stream << " }";
-
-                stream << ")";
-                stream << "\n";
-
-                return std::vector<uint8_t>(std::istreambuf_iterator<char>{stream},
-                                            std::istreambuf_iterator<char>{});
-            }
-        };
-
-        std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
-            std::vector<const wchar_t*> arguments;
-            if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
-                arguments.push_back(L"/Gec");
-            }
-            if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
-                arguments.push_back(L"/Gis");
-            }
-            constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
-            if (compileFlags & d3dCompileFlagsBits) {
-                switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
-                    case D3DCOMPILE_OPTIMIZATION_LEVEL0:
-                        arguments.push_back(L"/O0");
-                        break;
-                    case D3DCOMPILE_OPTIMIZATION_LEVEL2:
-                        arguments.push_back(L"/O2");
-                        break;
-                    case D3DCOMPILE_OPTIMIZATION_LEVEL3:
-                        arguments.push_back(L"/O3");
-                        break;
-                }
-            }
-            if (compileFlags & D3DCOMPILE_DEBUG) {
-                arguments.push_back(L"/Zi");
-            }
-            if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
-                arguments.push_back(L"/Zpr");
-            }
-            if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
-                arguments.push_back(L"/Zpc");
-            }
-            if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
-                arguments.push_back(L"/Gfa");
-            }
-            if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
-                arguments.push_back(L"/Gfp");
-            }
-            if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
-                arguments.push_back(L"/res_may_alias");
-            }
-
-            if (enable16BitTypes) {
-                // enable-16bit-types are only allowed in -HV 2018 (default)
-                arguments.push_back(L"/enable-16bit-types");
-            }
-
-            arguments.push_back(L"-HV");
-            arguments.push_back(L"2018");
-
-            return arguments;
-        }
-
-        ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
-                                                         IDxcCompiler* dxcCompiler,
-                                                         const ShaderCompilationRequest& request,
-                                                         const std::string& hlslSource) {
-            ComPtr<IDxcBlobEncoding> sourceBlob;
-            DAWN_TRY(
-                CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
-                                 hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
-                             "DXC create blob"));
-
-            std::wstring entryPointW;
-            DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));
-
-            std::vector<const wchar_t*> arguments =
-                GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);
-
-            // Build defines for overridable constants
-            std::vector<std::pair<std::wstring, std::wstring>> defineStrings;
-            defineStrings.reserve(request.defineStrings.size());
-            for (const auto& [name, value] : request.defineStrings) {
-                defineStrings.emplace_back(UTF8ToWStr(name.c_str()), UTF8ToWStr(value.c_str()));
-            }
-
-            std::vector<DxcDefine> dxcDefines;
-            dxcDefines.reserve(defineStrings.size());
-            for (const auto& [name, value] : defineStrings) {
-                dxcDefines.push_back({name.c_str(), value.c_str()});
-            }
-
-            ComPtr<IDxcOperationResult> result;
-            DAWN_TRY(CheckHRESULT(
-                dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
-                                     request.deviceInfo->shaderProfiles[request.stage].c_str(),
-                                     arguments.data(), arguments.size(), dxcDefines.data(),
-                                     dxcDefines.size(), nullptr, &result),
-                "DXC compile"));
-
-            HRESULT hr;
-            DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
-
-            if (FAILED(hr)) {
-                ComPtr<IDxcBlobEncoding> errors;
-                DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
-
-                return DAWN_FORMAT_VALIDATION_ERROR("DXC compile failed with: %s",
-                                                    static_cast<char*>(errors->GetBufferPointer()));
-            }
-
-            ComPtr<IDxcBlob> compiledShader;
-            DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
-            return std::move(compiledShader);
-        }
-
-        std::string CompileFlagsToStringFXC(uint32_t compileFlags) {
-            struct Flag {
-                uint32_t value;
-                const char* name;
-            };
-            constexpr Flag flags[] = {
-            // Populated from d3dcompiler.h
-#define F(f) Flag{f, #f}
-                F(D3DCOMPILE_DEBUG),
-                F(D3DCOMPILE_SKIP_VALIDATION),
-                F(D3DCOMPILE_SKIP_OPTIMIZATION),
-                F(D3DCOMPILE_PACK_MATRIX_ROW_MAJOR),
-                F(D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR),
-                F(D3DCOMPILE_PARTIAL_PRECISION),
-                F(D3DCOMPILE_FORCE_VS_SOFTWARE_NO_OPT),
-                F(D3DCOMPILE_FORCE_PS_SOFTWARE_NO_OPT),
-                F(D3DCOMPILE_NO_PRESHADER),
-                F(D3DCOMPILE_AVOID_FLOW_CONTROL),
-                F(D3DCOMPILE_PREFER_FLOW_CONTROL),
-                F(D3DCOMPILE_ENABLE_STRICTNESS),
-                F(D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY),
-                F(D3DCOMPILE_IEEE_STRICTNESS),
-                F(D3DCOMPILE_RESERVED16),
-                F(D3DCOMPILE_RESERVED17),
-                F(D3DCOMPILE_WARNINGS_ARE_ERRORS),
-                F(D3DCOMPILE_RESOURCES_MAY_ALIAS),
-                F(D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES),
-                F(D3DCOMPILE_ALL_RESOURCES_BOUND),
-                F(D3DCOMPILE_DEBUG_NAME_FOR_SOURCE),
-                F(D3DCOMPILE_DEBUG_NAME_FOR_BINARY),
-#undef F
-            };
-
-            std::string result;
-            for (const Flag& f : flags) {
-                if ((compileFlags & f.value) != 0) {
-                    result += f.name + std::string("\n");
+                // Declaring a read-only storage buffer in HLSL but specifying a storage
+                // buffer in the BGL produces the wrong output. Force read-only storage
+                // buffer bindings to be treated as UAV instead of SRV. Internal storage
+                // buffer is a storage buffer used in the internal pipeline.
+                const bool forceStorageBufferAsUAV =
+                    (bindingInfo.buffer.type == wgpu::BufferBindingType::ReadOnlyStorage &&
+                     (bgl->GetBindingInfo(bindingIndex).buffer.type ==
+                          wgpu::BufferBindingType::Storage ||
+                      bgl->GetBindingInfo(bindingIndex).buffer.type ==
+                          kInternalStorageBufferBinding));
+                if (forceStorageBufferAsUAV) {
+                    remappedAccessControls.emplace(srcBindingPoint, tint::ast::Access::kReadWrite);
                 }
             }
 
-            // Optimization level must be handled separately as two bits are used, and the values
-            // don't map neatly to 0-3.
-            constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
-            switch (compileFlags & d3dCompileFlagsBits) {
-                case D3DCOMPILE_OPTIMIZATION_LEVEL0:
-                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL0";
-                    break;
-                case D3DCOMPILE_OPTIMIZATION_LEVEL1:
-                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL1";
-                    break;
-                case D3DCOMPILE_OPTIMIZATION_LEVEL2:
-                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL2";
-                    break;
-                case D3DCOMPILE_OPTIMIZATION_LEVEL3:
-                    result += "D3DCOMPILE_OPTIMIZATION_LEVEL3";
-                    break;
-            }
-            result += std::string("\n");
-
-            return result;
-        }
-
-        ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
-                                                         const ShaderCompilationRequest& request,
-                                                         const std::string& hlslSource) {
-            const char* targetProfile = nullptr;
-            switch (request.stage) {
-                case SingleShaderStage::Vertex:
-                    targetProfile = "vs_5_1";
-                    break;
-                case SingleShaderStage::Fragment:
-                    targetProfile = "ps_5_1";
-                    break;
-                case SingleShaderStage::Compute:
-                    targetProfile = "cs_5_1";
-                    break;
-            }
-
-            ComPtr<ID3DBlob> compiledShader;
-            ComPtr<ID3DBlob> errors;
-
-            // Build defines for overridable constants
-            const D3D_SHADER_MACRO* pDefines = nullptr;
-            std::vector<D3D_SHADER_MACRO> fxcDefines;
-            if (request.defineStrings.size() > 0) {
-                fxcDefines.reserve(request.defineStrings.size() + 1);
-                for (const auto& [name, value] : request.defineStrings) {
-                    fxcDefines.push_back({name.c_str(), value.c_str()});
-                }
-                // d3dCompile D3D_SHADER_MACRO* pDefines is a nullptr terminated array
-                fxcDefines.push_back({nullptr, nullptr});
-                pDefines = fxcDefines.data();
-            }
-
-            DAWN_INVALID_IF(FAILED(functions->d3dCompile(
-                                hlslSource.c_str(), hlslSource.length(), nullptr, pDefines, nullptr,
-                                request.entryPointName, targetProfile, request.compileFlags, 0,
-                                &compiledShader, &errors)),
-                            "D3D compile failed with: %s",
-                            static_cast<char*>(errors->GetBufferPointer()));
-
-            return std::move(compiledShader);
-        }
-
-        ResultOrError<std::string> TranslateToHLSL(dawn::platform::Platform* platform,
-                                                   const ShaderCompilationRequest& request,
-                                                   std::string* remappedEntryPointName) {
-            std::ostringstream errorStream;
-            errorStream << "Tint HLSL failure:" << std::endl;
-
-            tint::transform::Manager transformManager;
-            tint::transform::DataMap transformInputs;
-
-            if (request.isRobustnessEnabled) {
-                transformManager.Add<tint::transform::Robustness>();
-            }
-
-            transformManager.Add<tint::transform::BindingRemapper>();
-
-            transformManager.Add<tint::transform::SingleEntryPoint>();
-            transformInputs.Add<tint::transform::SingleEntryPoint::Config>(request.entryPointName);
-
-            transformManager.Add<tint::transform::Renamer>();
-
-            if (request.disableSymbolRenaming) {
-                // We still need to rename HLSL reserved keywords
-                transformInputs.Add<tint::transform::Renamer::Config>(
-                    tint::transform::Renamer::Target::kHlslKeywords);
-            }
-
-            // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
-            // the remapping but should not be considered a collision because they have
-            // different types.
-            const bool mayCollide = true;
-            transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
-                std::move(request.remappedBindingPoints), std::move(request.remappedAccessControls),
-                mayCollide);
-
-            tint::Program transformedProgram;
-            tint::transform::DataMap transformOutputs;
+            // Add arrayLengthFromUniform options
             {
-                TRACE_EVENT0(platform, General, "RunTransforms");
-                DAWN_TRY_ASSIGN(transformedProgram,
-                                RunTransforms(&transformManager, request.program, transformInputs,
-                                              &transformOutputs, nullptr));
-            }
+                for (const auto& bindingAndRegisterOffset :
+                     layout->GetDynamicStorageBufferLengthInfo()[group].bindingAndRegisterOffsets) {
+                    BindingNumber binding = bindingAndRegisterOffset.binding;
+                    uint32_t registerOffset = bindingAndRegisterOffset.registerOffset;
 
-            if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
-                auto it = data->remappings.find(request.entryPointName);
-                if (it != data->remappings.end()) {
-                    *remappedEntryPointName = it->second;
-                } else {
-                    DAWN_INVALID_IF(!request.disableSymbolRenaming,
-                                    "Could not find remapped name for entry point.");
+                    BindingPoint bindingPoint{static_cast<uint32_t>(group),
+                                              static_cast<uint32_t>(binding)};
+                    // Get the renamed binding point if it was remapped.
+                    auto it = remappedBindingPoints.find(bindingPoint);
+                    if (it != remappedBindingPoints.end()) {
+                        bindingPoint = it->second;
+                    }
 
-                    *remappedEntryPointName = request.entryPointName;
+                    arrayLengthFromUniform.bindpoint_to_size_index.emplace(bindingPoint,
+                                                                           registerOffset);
                 }
-            } else {
-                return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
-            }
-
-            tint::writer::hlsl::Options options;
-            options.disable_workgroup_init = request.disableWorkgroupInit;
-            if (request.usesNumWorkgroups) {
-                options.root_constant_binding_point.group = request.numWorkgroupsRegisterSpace;
-                options.root_constant_binding_point.binding = request.numWorkgroupsShaderRegister;
-            }
-            // TODO(dawn:549): HLSL generation outputs the indices into the
-            // array_length_from_uniform buffer that were actually used. When the blob cache can
-            // store more than compiled shaders, we should reflect these used indices and store
-            // them as well. This would allow us to only upload root constants that are actually
-            // read by the shader.
-            options.array_length_from_uniform = request.arrayLengthFromUniform;
-            TRACE_EVENT0(platform, General, "tint::writer::hlsl::Generate");
-            auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
-            DAWN_INVALID_IF(!result.success, "An error occured while generating HLSL: %s",
-                            result.error);
-
-            return std::move(result.hlsl);
-        }
-
-        template <typename F>
-        MaybeError CompileShader(dawn::platform::Platform* platform,
-                                 const PlatformFunctions* functions,
-                                 IDxcLibrary* dxcLibrary,
-                                 IDxcCompiler* dxcCompiler,
-                                 ShaderCompilationRequest&& request,
-                                 bool dumpShaders,
-                                 F&& DumpShadersEmitLog,
-                                 CompiledShader* compiledShader) {
-            // Compile the source shader to HLSL.
-            std::string hlslSource;
-            std::string remappedEntryPoint;
-            DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(platform, request, &remappedEntryPoint));
-            if (dumpShaders) {
-                std::ostringstream dumpedMsg;
-                dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
-                DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
-            }
-            request.entryPointName = remappedEntryPoint.c_str();
-            switch (request.compiler) {
-                case ShaderCompilationRequest::Compiler::DXC: {
-                    TRACE_EVENT0(platform, General, "CompileShaderDXC");
-                    DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
-                                    CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
-                    break;
-                }
-                case ShaderCompilationRequest::Compiler::FXC: {
-                    TRACE_EVENT0(platform, General, "CompileShaderFXC");
-                    DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
-                                    CompileShaderFXC(functions, request, hlslSource));
-                    break;
-                }
-            }
-
-            if (dumpShaders && request.compiler == ShaderCompilationRequest::Compiler::FXC) {
-                std::ostringstream dumpedMsg;
-                dumpedMsg << "/* FXC compile flags */ " << std::endl
-                          << CompileFlagsToStringFXC(request.compileFlags) << std::endl;
-
-                dumpedMsg << "/* Dumped disassembled DXBC */" << std::endl;
-
-                ComPtr<ID3DBlob> disassembly;
-                if (FAILED(functions->d3dDisassemble(
-                        compiledShader->compiledFXCShader->GetBufferPointer(),
-                        compiledShader->compiledFXCShader->GetBufferSize(), 0, nullptr,
-                        &disassembly))) {
-                    dumpedMsg << "D3D disassemble failed" << std::endl;
-                } else {
-                    dumpedMsg << reinterpret_cast<const char*>(disassembly->GetBufferPointer());
-                }
-                DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
-            }
-
-            return {};
-        }
-
-    }  // anonymous namespace
-
-    // static
-    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
-                                                          const ShaderModuleDescriptor* descriptor,
-                                                          ShaderModuleParseResult* parseResult) {
-        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
-        DAWN_TRY(module->Initialize(parseResult));
-        return module;
-    }
-
-    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
-        : ShaderModuleBase(device, descriptor) {
-    }
-
-    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
-        ScopedTintICEHandler scopedICEHandler(GetDevice());
-        return InitializeBase(parseResult);
-    }
-
-    ResultOrError<CompiledShader> ShaderModule::Compile(const ProgrammableStage& programmableStage,
-                                                        SingleShaderStage stage,
-                                                        const PipelineLayout* layout,
-                                                        uint32_t compileFlags) {
-        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleD3D12::Compile");
-        ASSERT(!IsError());
-
-        ScopedTintICEHandler scopedICEHandler(GetDevice());
-
-        Device* device = ToBackend(GetDevice());
-
-        CompiledShader compiledShader = {};
-
-        tint::transform::Manager transformManager;
-        tint::transform::DataMap transformInputs;
-
-        const tint::Program* program = GetTintProgram();
-        tint::Program programAsValue;
-
-        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
-
-        if (stage == SingleShaderStage::Vertex) {
-            transformManager.Add<tint::transform::FirstIndexOffset>();
-            transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
-                layout->GetFirstIndexOffsetShaderRegister(),
-                layout->GetFirstIndexOffsetRegisterSpace());
-        }
-
-        tint::transform::DataMap transformOutputs;
-        DAWN_TRY_ASSIGN(programAsValue, RunTransforms(&transformManager, program, transformInputs,
-                                                      &transformOutputs, nullptr));
-        program = &programAsValue;
-
-        if (stage == SingleShaderStage::Vertex) {
-            if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
-                // TODO(dawn:549): Consider adding this information to the pipeline cache once we
-                // can store more than the shader blob in it.
-                compiledShader.usesVertexOrInstanceIndex = data->has_vertex_or_instance_index;
             }
         }
 
         ShaderCompilationRequest request;
-        DAWN_TRY_ASSIGN(
-            request, ShaderCompilationRequest::Create(
-                         programmableStage.entryPoint.c_str(), stage, layout, compileFlags, device,
-                         program, GetEntryPoint(programmableStage.entryPoint), programmableStage));
+        request.compiler = compiler;
+        request.program = program;
+        request.entryPointName = entryPointName;
+        request.stage = stage;
+        request.compileFlags = compileFlags;
+        request.disableSymbolRenaming = device->IsToggleEnabled(Toggle::DisableSymbolRenaming);
+        request.remappedBindingPoints = std::move(remappedBindingPoints);
+        request.remappedAccessControls = std::move(remappedAccessControls);
+        request.isRobustnessEnabled = device->IsRobustnessEnabled();
+        request.disableWorkgroupInit = device->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+        request.usesNumWorkgroups = entryPoint.usesNumWorkgroups;
+        request.numWorkgroupsShaderRegister = layout->GetNumWorkgroupsShaderRegister();
+        request.numWorkgroupsRegisterSpace = layout->GetNumWorkgroupsRegisterSpace();
+        request.arrayLengthFromUniform = std::move(arrayLengthFromUniform);
+        request.fxcVersion = compiler == Compiler::FXC ? GetD3DCompilerVersion() : 0;
+        request.dxcVersion = compiler == Compiler::DXC ? dxcVersion : 0;
+        request.deviceInfo = &device->GetDeviceInfo();
+        request.hasShaderFloat16Feature = device->IsFeatureEnabled(Feature::ShaderFloat16);
 
-        // TODO(dawn:1341): Add shader cache key generation and caching for the compiled shader.
-        DAWN_TRY(CompileShader(
-            device->GetPlatform(), device->GetFunctions(),
-            device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get() : nullptr,
-            device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get() : nullptr,
-            std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
-            [&](WGPULoggingType loggingType, const char* message) {
-                GetDevice()->EmitLog(loggingType, message);
-            },
-            &compiledShader));
-        return std::move(compiledShader);
+        GetOverridableConstantsDefines(
+            &request.defineStrings, &programmableStage.constants,
+            &programmableStage.module->GetEntryPoint(programmableStage.entryPoint)
+                 .overridableConstants);
+
+        return std::move(request);
     }
 
-    D3D12_SHADER_BYTECODE CompiledShader::GetD3D12ShaderBytecode() const {
-        if (compiledFXCShader != nullptr) {
-            return {compiledFXCShader->GetBufferPointer(), compiledFXCShader->GetBufferSize()};
-        } else if (compiledDXCShader != nullptr) {
-            return {compiledDXCShader->GetBufferPointer(), compiledDXCShader->GetBufferSize()};
+    // TODO(dawn:1341): Move to use CacheKey instead of the vector.
+    ResultOrError<std::vector<uint8_t>> CreateCacheKey() const {
+        // Generate the WGSL from the Tint program so it's normalized.
+        // TODO(tint:1180): Consider using a binary serialization of the tint AST for a more
+        // compact representation.
+        auto result = tint::writer::wgsl::Generate(program, tint::writer::wgsl::Options{});
+        if (!result.success) {
+            std::ostringstream errorStream;
+            errorStream << "Tint WGSL failure:" << std::endl;
+            errorStream << "Generator: " << result.error << std::endl;
+            return DAWN_INTERNAL_ERROR(errorStream.str().c_str());
         }
-        UNREACHABLE();
-        return {};
+
+        std::stringstream stream;
+
+        // Prefix the key with the type to avoid collisions from another type that could
+        // have the same key.
+        stream << static_cast<uint32_t>(CacheKey::Type::Shader);
+        stream << "\n";
+
+        stream << result.wgsl.length();
+        stream << "\n";
+
+        stream << result.wgsl;
+        stream << "\n";
+
+        stream << "(ShaderCompilationRequest";
+        stream << " compiler=" << compiler;
+        stream << " entryPointName=" << entryPointName;
+        stream << " stage=" << uint32_t(stage);
+        stream << " compileFlags=" << compileFlags;
+        stream << " disableSymbolRenaming=" << disableSymbolRenaming;
+
+        stream << " remappedBindingPoints=";
+        Serialize(stream, remappedBindingPoints);
+
+        stream << " remappedAccessControls=";
+        Serialize(stream, remappedAccessControls);
+
+        stream << " useNumWorkgroups=" << usesNumWorkgroups;
+        stream << " numWorkgroupsRegisterSpace=" << numWorkgroupsRegisterSpace;
+        stream << " numWorkgroupsShaderRegister=" << numWorkgroupsShaderRegister;
+
+        stream << " arrayLengthFromUniform=";
+        Serialize(stream, arrayLengthFromUniform);
+
+        stream << " shaderModel=" << deviceInfo->shaderModel;
+        stream << " disableWorkgroupInit=" << disableWorkgroupInit;
+        stream << " isRobustnessEnabled=" << isRobustnessEnabled;
+        stream << " fxcVersion=" << fxcVersion;
+        stream << " dxcVersion=" << dxcVersion;
+        stream << " hasShaderFloat16Feature=" << hasShaderFloat16Feature;
+
+        stream << " defines={";
+        for (const auto& [name, value] : defineStrings) {
+            stream << " <" << name << "," << value << ">";
+        }
+        stream << " }";
+
+        stream << ")";
+        stream << "\n";
+
+        return std::vector<uint8_t>(std::istreambuf_iterator<char>{stream},
+                                    std::istreambuf_iterator<char>{});
     }
+};
+
+std::vector<const wchar_t*> GetDXCArguments(uint32_t compileFlags, bool enable16BitTypes) {
+    std::vector<const wchar_t*> arguments;
+    if (compileFlags & D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY) {
+        arguments.push_back(L"/Gec");
+    }
+    if (compileFlags & D3DCOMPILE_IEEE_STRICTNESS) {
+        arguments.push_back(L"/Gis");
+    }
+    constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
+    if (compileFlags & d3dCompileFlagsBits) {
+        switch (compileFlags & D3DCOMPILE_OPTIMIZATION_LEVEL2) {
+            case D3DCOMPILE_OPTIMIZATION_LEVEL0:
+                arguments.push_back(L"/O0");
+                break;
+            case D3DCOMPILE_OPTIMIZATION_LEVEL2:
+                arguments.push_back(L"/O2");
+                break;
+            case D3DCOMPILE_OPTIMIZATION_LEVEL3:
+                arguments.push_back(L"/O3");
+                break;
+        }
+    }
+    if (compileFlags & D3DCOMPILE_DEBUG) {
+        arguments.push_back(L"/Zi");
+    }
+    if (compileFlags & D3DCOMPILE_PACK_MATRIX_ROW_MAJOR) {
+        arguments.push_back(L"/Zpr");
+    }
+    if (compileFlags & D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR) {
+        arguments.push_back(L"/Zpc");
+    }
+    if (compileFlags & D3DCOMPILE_AVOID_FLOW_CONTROL) {
+        arguments.push_back(L"/Gfa");
+    }
+    if (compileFlags & D3DCOMPILE_PREFER_FLOW_CONTROL) {
+        arguments.push_back(L"/Gfp");
+    }
+    if (compileFlags & D3DCOMPILE_RESOURCES_MAY_ALIAS) {
+        arguments.push_back(L"/res_may_alias");
+    }
+
+    if (enable16BitTypes) {
+        // enable-16bit-types are only allowed in -HV 2018 (default)
+        arguments.push_back(L"/enable-16bit-types");
+    }
+
+    arguments.push_back(L"-HV");
+    arguments.push_back(L"2018");
+
+    return arguments;
+}
+
+ResultOrError<ComPtr<IDxcBlob>> CompileShaderDXC(IDxcLibrary* dxcLibrary,
+                                                 IDxcCompiler* dxcCompiler,
+                                                 const ShaderCompilationRequest& request,
+                                                 const std::string& hlslSource) {
+    ComPtr<IDxcBlobEncoding> sourceBlob;
+    DAWN_TRY(CheckHRESULT(dxcLibrary->CreateBlobWithEncodingOnHeapCopy(
+                              hlslSource.c_str(), hlslSource.length(), CP_UTF8, &sourceBlob),
+                          "DXC create blob"));
+
+    std::wstring entryPointW;
+    DAWN_TRY_ASSIGN(entryPointW, ConvertStringToWstring(request.entryPointName));
+
+    std::vector<const wchar_t*> arguments =
+        GetDXCArguments(request.compileFlags, request.hasShaderFloat16Feature);
+
+    // Build defines for overridable constants
+    std::vector<std::pair<std::wstring, std::wstring>> defineStrings;
+    defineStrings.reserve(request.defineStrings.size());
+    for (const auto& [name, value] : request.defineStrings) {
+        defineStrings.emplace_back(UTF8ToWStr(name.c_str()), UTF8ToWStr(value.c_str()));
+    }
+
+    std::vector<DxcDefine> dxcDefines;
+    dxcDefines.reserve(defineStrings.size());
+    for (const auto& [name, value] : defineStrings) {
+        dxcDefines.push_back({name.c_str(), value.c_str()});
+    }
+
+    ComPtr<IDxcOperationResult> result;
+    DAWN_TRY(
+        CheckHRESULT(dxcCompiler->Compile(sourceBlob.Get(), nullptr, entryPointW.c_str(),
+                                          request.deviceInfo->shaderProfiles[request.stage].c_str(),
+                                          arguments.data(), arguments.size(), dxcDefines.data(),
+                                          dxcDefines.size(), nullptr, &result),
+                     "DXC compile"));
+
+    HRESULT hr;
+    DAWN_TRY(CheckHRESULT(result->GetStatus(&hr), "DXC get status"));
+
+    if (FAILED(hr)) {
+        ComPtr<IDxcBlobEncoding> errors;
+        DAWN_TRY(CheckHRESULT(result->GetErrorBuffer(&errors), "DXC get error buffer"));
+
+        return DAWN_FORMAT_VALIDATION_ERROR("DXC compile failed with: %s",
+                                            static_cast<char*>(errors->GetBufferPointer()));
+    }
+
+    ComPtr<IDxcBlob> compiledShader;
+    DAWN_TRY(CheckHRESULT(result->GetResult(&compiledShader), "DXC get result"));
+    return std::move(compiledShader);
+}
+
+std::string CompileFlagsToStringFXC(uint32_t compileFlags) {
+    struct Flag {
+        uint32_t value;
+        const char* name;
+    };
+    constexpr Flag flags[] = {
+    // Populated from d3dcompiler.h
+#define F(f) Flag{f, #f}
+        F(D3DCOMPILE_DEBUG),
+        F(D3DCOMPILE_SKIP_VALIDATION),
+        F(D3DCOMPILE_SKIP_OPTIMIZATION),
+        F(D3DCOMPILE_PACK_MATRIX_ROW_MAJOR),
+        F(D3DCOMPILE_PACK_MATRIX_COLUMN_MAJOR),
+        F(D3DCOMPILE_PARTIAL_PRECISION),
+        F(D3DCOMPILE_FORCE_VS_SOFTWARE_NO_OPT),
+        F(D3DCOMPILE_FORCE_PS_SOFTWARE_NO_OPT),
+        F(D3DCOMPILE_NO_PRESHADER),
+        F(D3DCOMPILE_AVOID_FLOW_CONTROL),
+        F(D3DCOMPILE_PREFER_FLOW_CONTROL),
+        F(D3DCOMPILE_ENABLE_STRICTNESS),
+        F(D3DCOMPILE_ENABLE_BACKWARDS_COMPATIBILITY),
+        F(D3DCOMPILE_IEEE_STRICTNESS),
+        F(D3DCOMPILE_RESERVED16),
+        F(D3DCOMPILE_RESERVED17),
+        F(D3DCOMPILE_WARNINGS_ARE_ERRORS),
+        F(D3DCOMPILE_RESOURCES_MAY_ALIAS),
+        F(D3DCOMPILE_ENABLE_UNBOUNDED_DESCRIPTOR_TABLES),
+        F(D3DCOMPILE_ALL_RESOURCES_BOUND),
+        F(D3DCOMPILE_DEBUG_NAME_FOR_SOURCE),
+        F(D3DCOMPILE_DEBUG_NAME_FOR_BINARY),
+#undef F
+    };
+
+    std::string result;
+    for (const Flag& f : flags) {
+        if ((compileFlags & f.value) != 0) {
+            result += f.name + std::string("\n");
+        }
+    }
+
+    // Optimization level must be handled separately as two bits are used, and the values
+    // don't map neatly to 0-3.
+    constexpr uint32_t d3dCompileFlagsBits = D3DCOMPILE_OPTIMIZATION_LEVEL2;
+    switch (compileFlags & d3dCompileFlagsBits) {
+        case D3DCOMPILE_OPTIMIZATION_LEVEL0:
+            result += "D3DCOMPILE_OPTIMIZATION_LEVEL0";
+            break;
+        case D3DCOMPILE_OPTIMIZATION_LEVEL1:
+            result += "D3DCOMPILE_OPTIMIZATION_LEVEL1";
+            break;
+        case D3DCOMPILE_OPTIMIZATION_LEVEL2:
+            result += "D3DCOMPILE_OPTIMIZATION_LEVEL2";
+            break;
+        case D3DCOMPILE_OPTIMIZATION_LEVEL3:
+            result += "D3DCOMPILE_OPTIMIZATION_LEVEL3";
+            break;
+    }
+    result += std::string("\n");
+
+    return result;
+}
+
+ResultOrError<ComPtr<ID3DBlob>> CompileShaderFXC(const PlatformFunctions* functions,
+                                                 const ShaderCompilationRequest& request,
+                                                 const std::string& hlslSource) {
+    const char* targetProfile = nullptr;
+    switch (request.stage) {
+        case SingleShaderStage::Vertex:
+            targetProfile = "vs_5_1";
+            break;
+        case SingleShaderStage::Fragment:
+            targetProfile = "ps_5_1";
+            break;
+        case SingleShaderStage::Compute:
+            targetProfile = "cs_5_1";
+            break;
+    }
+
+    ComPtr<ID3DBlob> compiledShader;
+    ComPtr<ID3DBlob> errors;
+
+    // Build defines for overridable constants
+    const D3D_SHADER_MACRO* pDefines = nullptr;
+    std::vector<D3D_SHADER_MACRO> fxcDefines;
+    if (request.defineStrings.size() > 0) {
+        fxcDefines.reserve(request.defineStrings.size() + 1);
+        for (const auto& [name, value] : request.defineStrings) {
+            fxcDefines.push_back({name.c_str(), value.c_str()});
+        }
+        // d3dCompile D3D_SHADER_MACRO* pDefines is a nullptr terminated array
+        fxcDefines.push_back({nullptr, nullptr});
+        pDefines = fxcDefines.data();
+    }
+
+    DAWN_INVALID_IF(
+        FAILED(functions->d3dCompile(hlslSource.c_str(), hlslSource.length(), nullptr, pDefines,
+                                     nullptr, request.entryPointName, targetProfile,
+                                     request.compileFlags, 0, &compiledShader, &errors)),
+        "D3D compile failed with: %s", static_cast<char*>(errors->GetBufferPointer()));
+
+    return std::move(compiledShader);
+}
+
+ResultOrError<std::string> TranslateToHLSL(dawn::platform::Platform* platform,
+                                           const ShaderCompilationRequest& request,
+                                           std::string* remappedEntryPointName) {
+    std::ostringstream errorStream;
+    errorStream << "Tint HLSL failure:" << std::endl;
+
+    tint::transform::Manager transformManager;
+    tint::transform::DataMap transformInputs;
+
+    if (request.isRobustnessEnabled) {
+        transformManager.Add<tint::transform::Robustness>();
+    }
+
+    transformManager.Add<tint::transform::BindingRemapper>();
+
+    transformManager.Add<tint::transform::SingleEntryPoint>();
+    transformInputs.Add<tint::transform::SingleEntryPoint::Config>(request.entryPointName);
+
+    transformManager.Add<tint::transform::Renamer>();
+
+    if (request.disableSymbolRenaming) {
+        // We still need to rename HLSL reserved keywords
+        transformInputs.Add<tint::transform::Renamer::Config>(
+            tint::transform::Renamer::Target::kHlslKeywords);
+    }
+
+    // D3D12 registers like `t3` and `c3` have the same bindingOffset number in
+    // the remapping but should not be considered a collision because they have
+    // different types.
+    const bool mayCollide = true;
+    transformInputs.Add<tint::transform::BindingRemapper::Remappings>(
+        std::move(request.remappedBindingPoints), std::move(request.remappedAccessControls),
+        mayCollide);
+
+    tint::Program transformedProgram;
+    tint::transform::DataMap transformOutputs;
+    {
+        TRACE_EVENT0(platform, General, "RunTransforms");
+        DAWN_TRY_ASSIGN(transformedProgram,
+                        RunTransforms(&transformManager, request.program, transformInputs,
+                                      &transformOutputs, nullptr));
+    }
+
+    if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+        auto it = data->remappings.find(request.entryPointName);
+        if (it != data->remappings.end()) {
+            *remappedEntryPointName = it->second;
+        } else {
+            DAWN_INVALID_IF(!request.disableSymbolRenaming,
+                            "Could not find remapped name for entry point.");
+
+            *remappedEntryPointName = request.entryPointName;
+        }
+    } else {
+        return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
+    }
+
+    tint::writer::hlsl::Options options;
+    options.disable_workgroup_init = request.disableWorkgroupInit;
+    if (request.usesNumWorkgroups) {
+        options.root_constant_binding_point.group = request.numWorkgroupsRegisterSpace;
+        options.root_constant_binding_point.binding = request.numWorkgroupsShaderRegister;
+    }
+    // TODO(dawn:549): HLSL generation outputs the indices into the
+    // array_length_from_uniform buffer that were actually used. When the blob cache can
+    // store more than compiled shaders, we should reflect these used indices and store
+    // them as well. This would allow us to only upload root constants that are actually
+    // read by the shader.
+    options.array_length_from_uniform = request.arrayLengthFromUniform;
+    TRACE_EVENT0(platform, General, "tint::writer::hlsl::Generate");
+    auto result = tint::writer::hlsl::Generate(&transformedProgram, options);
+    DAWN_INVALID_IF(!result.success, "An error occured while generating HLSL: %s", result.error);
+
+    return std::move(result.hlsl);
+}
+
+template <typename F>
+MaybeError CompileShader(dawn::platform::Platform* platform,
+                         const PlatformFunctions* functions,
+                         IDxcLibrary* dxcLibrary,
+                         IDxcCompiler* dxcCompiler,
+                         ShaderCompilationRequest&& request,
+                         bool dumpShaders,
+                         F&& DumpShadersEmitLog,
+                         CompiledShader* compiledShader) {
+    // Compile the source shader to HLSL.
+    std::string hlslSource;
+    std::string remappedEntryPoint;
+    DAWN_TRY_ASSIGN(hlslSource, TranslateToHLSL(platform, request, &remappedEntryPoint));
+    if (dumpShaders) {
+        std::ostringstream dumpedMsg;
+        dumpedMsg << "/* Dumped generated HLSL */" << std::endl << hlslSource;
+        DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+    }
+    request.entryPointName = remappedEntryPoint.c_str();
+    switch (request.compiler) {
+        case ShaderCompilationRequest::Compiler::DXC: {
+            TRACE_EVENT0(platform, General, "CompileShaderDXC");
+            DAWN_TRY_ASSIGN(compiledShader->compiledDXCShader,
+                            CompileShaderDXC(dxcLibrary, dxcCompiler, request, hlslSource));
+            break;
+        }
+        case ShaderCompilationRequest::Compiler::FXC: {
+            TRACE_EVENT0(platform, General, "CompileShaderFXC");
+            DAWN_TRY_ASSIGN(compiledShader->compiledFXCShader,
+                            CompileShaderFXC(functions, request, hlslSource));
+            break;
+        }
+    }
+
+    if (dumpShaders && request.compiler == ShaderCompilationRequest::Compiler::FXC) {
+        std::ostringstream dumpedMsg;
+        dumpedMsg << "/* FXC compile flags */ " << std::endl
+                  << CompileFlagsToStringFXC(request.compileFlags) << std::endl;
+
+        dumpedMsg << "/* Dumped disassembled DXBC */" << std::endl;
+
+        ComPtr<ID3DBlob> disassembly;
+        if (FAILED(functions->d3dDisassemble(compiledShader->compiledFXCShader->GetBufferPointer(),
+                                             compiledShader->compiledFXCShader->GetBufferSize(), 0,
+                                             nullptr, &disassembly))) {
+            dumpedMsg << "D3D disassemble failed" << std::endl;
+        } else {
+            dumpedMsg << reinterpret_cast<const char*>(disassembly->GetBufferPointer());
+        }
+        DumpShadersEmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+    }
+
+    return {};
+}
+
+}  // anonymous namespace
+
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                      const ShaderModuleDescriptor* descriptor,
+                                                      ShaderModuleParseResult* parseResult) {
+    Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+    DAWN_TRY(module->Initialize(parseResult));
+    return module;
+}
+
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+    : ShaderModuleBase(device, descriptor) {}
+
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+    ScopedTintICEHandler scopedICEHandler(GetDevice());
+    return InitializeBase(parseResult);
+}
+
+ResultOrError<CompiledShader> ShaderModule::Compile(const ProgrammableStage& programmableStage,
+                                                    SingleShaderStage stage,
+                                                    const PipelineLayout* layout,
+                                                    uint32_t compileFlags) {
+    TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleD3D12::Compile");
+    ASSERT(!IsError());
+
+    ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+    Device* device = ToBackend(GetDevice());
+
+    CompiledShader compiledShader = {};
+
+    tint::transform::Manager transformManager;
+    tint::transform::DataMap transformInputs;
+
+    const tint::Program* program = GetTintProgram();
+    tint::Program programAsValue;
+
+    AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+    if (stage == SingleShaderStage::Vertex) {
+        transformManager.Add<tint::transform::FirstIndexOffset>();
+        transformInputs.Add<tint::transform::FirstIndexOffset::BindingPoint>(
+            layout->GetFirstIndexOffsetShaderRegister(),
+            layout->GetFirstIndexOffsetRegisterSpace());
+    }
+
+    tint::transform::DataMap transformOutputs;
+    DAWN_TRY_ASSIGN(programAsValue, RunTransforms(&transformManager, program, transformInputs,
+                                                  &transformOutputs, nullptr));
+    program = &programAsValue;
+
+    if (stage == SingleShaderStage::Vertex) {
+        if (auto* data = transformOutputs.Get<tint::transform::FirstIndexOffset::Data>()) {
+            // TODO(dawn:549): Consider adding this information to the pipeline cache once we
+            // can store more than the shader blob in it.
+            compiledShader.usesVertexOrInstanceIndex = data->has_vertex_or_instance_index;
+        }
+    }
+
+    ShaderCompilationRequest request;
+    DAWN_TRY_ASSIGN(request,
+                    ShaderCompilationRequest::Create(
+                        programmableStage.entryPoint.c_str(), stage, layout, compileFlags, device,
+                        program, GetEntryPoint(programmableStage.entryPoint), programmableStage));
+
+    // TODO(dawn:1341): Add shader cache key generation and caching for the compiled shader.
+    DAWN_TRY(CompileShader(
+        device->GetPlatform(), device->GetFunctions(),
+        device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcLibrary().Get() : nullptr,
+        device->IsToggleEnabled(Toggle::UseDXC) ? device->GetDxcCompiler().Get() : nullptr,
+        std::move(request), device->IsToggleEnabled(Toggle::DumpShaders),
+        [&](WGPULoggingType loggingType, const char* message) {
+            GetDevice()->EmitLog(loggingType, message);
+        },
+        &compiledShader));
+    return std::move(compiledShader);
+}
+
+D3D12_SHADER_BYTECODE CompiledShader::GetD3D12ShaderBytecode() const {
+    if (compiledFXCShader != nullptr) {
+        return {compiledFXCShader->GetBufferPointer(), compiledFXCShader->GetBufferSize()};
+    } else if (compiledDXCShader != nullptr) {
+        return {compiledDXCShader->GetBufferPointer(), compiledDXCShader->GetBufferSize()};
+    }
+    UNREACHABLE();
+    return {};
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ShaderModuleD3D12.h b/src/dawn/native/d3d12/ShaderModuleD3D12.h
index 4a3a30f..a7323ef 100644
--- a/src/dawn/native/d3d12/ShaderModuleD3D12.h
+++ b/src/dawn/native/d3d12/ShaderModuleD3D12.h
@@ -20,40 +20,40 @@
 #include "dawn/native/d3d12/d3d12_platform.h"
 
 namespace dawn::native {
-    struct ProgrammableStage;
+struct ProgrammableStage;
 }  // namespace dawn::native
 
 namespace dawn::native::d3d12 {
 
-    class Device;
-    class PipelineLayout;
+class Device;
+class PipelineLayout;
 
-    // Manages a ref to one of the various representations of shader blobs and information used to
-    // emulate vertex/instance index starts
-    struct CompiledShader {
-        ComPtr<ID3DBlob> compiledFXCShader;
-        ComPtr<IDxcBlob> compiledDXCShader;
-        D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
+// Manages a ref to one of the various representations of shader blobs and information used to
+// emulate vertex/instance index starts
+struct CompiledShader {
+    ComPtr<ID3DBlob> compiledFXCShader;
+    ComPtr<IDxcBlob> compiledDXCShader;
+    D3D12_SHADER_BYTECODE GetD3D12ShaderBytecode() const;
 
-        bool usesVertexOrInstanceIndex;
-    };
+    bool usesVertexOrInstanceIndex;
+};
 
-    class ShaderModule final : public ShaderModuleBase {
-      public:
-        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
-                                                       const ShaderModuleDescriptor* descriptor,
-                                                       ShaderModuleParseResult* parseResult);
+class ShaderModule final : public ShaderModuleBase {
+  public:
+    static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                   const ShaderModuleDescriptor* descriptor,
+                                                   ShaderModuleParseResult* parseResult);
 
-        ResultOrError<CompiledShader> Compile(const ProgrammableStage& programmableStage,
-                                              SingleShaderStage stage,
-                                              const PipelineLayout* layout,
-                                              uint32_t compileFlags);
+    ResultOrError<CompiledShader> Compile(const ProgrammableStage& programmableStage,
+                                          SingleShaderStage stage,
+                                          const PipelineLayout* layout,
+                                          uint32_t compileFlags);
 
-      private:
-        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
-        ~ShaderModule() override = default;
-        MaybeError Initialize(ShaderModuleParseResult* parseResult);
-    };
+  private:
+    ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+    ~ShaderModule() override = default;
+    MaybeError Initialize(ShaderModuleParseResult* parseResult);
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
index 5eb1cf1..fe99a63 100644
--- a/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
+++ b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.cpp
@@ -25,235 +25,231 @@
 
 namespace dawn::native::d3d12 {
 
-    // Limits the min/max heap size to always be some known value for testing.
-    // Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
-    // We change the value from {1024, 512} to {32, 16} because we use blending
-    // for D3D12DescriptorHeapTests.EncodeManyUBO and R16Float has limited range
-    // and low precision at big integer.
-    static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {32, 16};
+// Limits the min/max heap size to always be some known value for testing.
+// Thresholds should be adjusted (lower == faster) to avoid tests taking too long to complete.
+// We change the value from {1024, 512} to {32, 16} because we use blending
+// for D3D12DescriptorHeapTests.EncodeManyUBO and R16Float has limited range
+// and low precision at big integer.
+static constexpr const uint32_t kShaderVisibleSmallHeapSizes[] = {32, 16};
 
-    uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
-                                              bool useSmallSize) {
-        if (useSmallSize) {
-            return kShaderVisibleSmallHeapSizes[heapType];
-        }
-
-        // Minimum heap size must be large enough to satisfy the largest descriptor allocation
-        // request and to amortize the cost of sub-allocation. But small enough to avoid wasting
-        // memory should only a tiny fraction ever be used.
-        // TODO(dawn:155): Figure out these values.
-        switch (heapType) {
-            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
-                return 4096;
-            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
-                return 256;
-            default:
-                UNREACHABLE();
-        }
+uint32_t GetD3D12ShaderVisibleHeapMinSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType, bool useSmallSize) {
+    if (useSmallSize) {
+        return kShaderVisibleSmallHeapSizes[heapType];
     }
 
-    uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType,
-                                              bool useSmallSize) {
-        if (useSmallSize) {
-            return kShaderVisibleSmallHeapSizes[heapType];
-        }
+    // Minimum heap size must be large enough to satisfy the largest descriptor allocation
+    // request and to amortize the cost of sub-allocation. But small enough to avoid wasting
+    // memory should only a tiny fraction ever be used.
+    // TODO(dawn:155): Figure out these values.
+    switch (heapType) {
+        case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+            return 4096;
+        case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+            return 256;
+        default:
+            UNREACHABLE();
+    }
+}
 
-        switch (heapType) {
-            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
-                return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
-            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
-                return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
-            default:
-                UNREACHABLE();
-        }
+uint32_t GetD3D12ShaderVisibleHeapMaxSize(D3D12_DESCRIPTOR_HEAP_TYPE heapType, bool useSmallSize) {
+    if (useSmallSize) {
+        return kShaderVisibleSmallHeapSizes[heapType];
     }
 
-    D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
-        switch (heapType) {
-            case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
-            case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
-                return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
-            default:
-                UNREACHABLE();
-        }
+    switch (heapType) {
+        case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+            return D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1;
+        case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+            return D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE;
+        default:
+            UNREACHABLE();
+    }
+}
+
+D3D12_DESCRIPTOR_HEAP_FLAGS GetD3D12HeapFlags(D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+    switch (heapType) {
+        case D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV:
+        case D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER:
+            return D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
+        default:
+            UNREACHABLE();
+    }
+}
+
+// static
+ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>>
+ShaderVisibleDescriptorAllocator::Create(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
+    std::unique_ptr<ShaderVisibleDescriptorAllocator> allocator =
+        std::make_unique<ShaderVisibleDescriptorAllocator>(device, heapType);
+    DAWN_TRY(allocator->AllocateAndSwitchShaderVisibleHeap());
+    return std::move(allocator);
+}
+
+ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(
+    Device* device,
+    D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+    : mHeapType(heapType),
+      mDevice(device),
+      mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+      mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
+          heapType,
+          mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
+    ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
+           heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
+}
+
+bool ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(
+    uint32_t descriptorCount,
+    ExecutionSerial pendingSerial,
+    D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+    GPUDescriptorHeapAllocation* allocation) {
+    ASSERT(mHeap != nullptr);
+    const uint64_t startOffset = mAllocator.Allocate(descriptorCount, pendingSerial);
+    if (startOffset == RingBufferAllocator::kInvalidOffset) {
+        return false;
     }
 
-    // static
-    ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>>
-    ShaderVisibleDescriptorAllocator::Create(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType) {
-        std::unique_ptr<ShaderVisibleDescriptorAllocator> allocator =
-            std::make_unique<ShaderVisibleDescriptorAllocator>(device, heapType);
-        DAWN_TRY(allocator->AllocateAndSwitchShaderVisibleHeap());
-        return std::move(allocator);
-    }
+    ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
 
-    ShaderVisibleDescriptorAllocator::ShaderVisibleDescriptorAllocator(
-        Device* device,
-        D3D12_DESCRIPTOR_HEAP_TYPE heapType)
-        : mHeapType(heapType),
-          mDevice(device),
-          mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
-          mDescriptorCount(GetD3D12ShaderVisibleHeapMinSize(
-              heapType,
-              mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting))) {
-        ASSERT(heapType == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV ||
-               heapType == D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
-    }
+    const uint64_t heapOffset = mSizeIncrement * startOffset;
 
-    bool ShaderVisibleDescriptorAllocator::AllocateGPUDescriptors(
-        uint32_t descriptorCount,
-        ExecutionSerial pendingSerial,
-        D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
-        GPUDescriptorHeapAllocation* allocation) {
-        ASSERT(mHeap != nullptr);
-        const uint64_t startOffset = mAllocator.Allocate(descriptorCount, pendingSerial);
-        if (startOffset == RingBufferAllocator::kInvalidOffset) {
-            return false;
-        }
+    // Check for 32-bit overflow since CPU heap start handle uses size_t.
+    const size_t cpuHeapStartPtr = descriptorHeap->GetCPUDescriptorHandleForHeapStart().ptr;
 
-        ID3D12DescriptorHeap* descriptorHeap = mHeap->GetD3D12DescriptorHeap();
+    ASSERT(heapOffset <= std::numeric_limits<size_t>::max() - cpuHeapStartPtr);
 
-        const uint64_t heapOffset = mSizeIncrement * startOffset;
+    *baseCPUDescriptor = {cpuHeapStartPtr + static_cast<size_t>(heapOffset)};
 
-        // Check for 32-bit overflow since CPU heap start handle uses size_t.
-        const size_t cpuHeapStartPtr = descriptorHeap->GetCPUDescriptorHandleForHeapStart().ptr;
+    const D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor = {
+        descriptorHeap->GetGPUDescriptorHandleForHeapStart().ptr + heapOffset};
 
-        ASSERT(heapOffset <= std::numeric_limits<size_t>::max() - cpuHeapStartPtr);
+    // Record both the device and heap serials to determine later if the allocations are
+    // still valid.
+    *allocation = GPUDescriptorHeapAllocation{baseGPUDescriptor, pendingSerial, mHeapSerial};
 
-        *baseCPUDescriptor = {cpuHeapStartPtr + static_cast<size_t>(heapOffset)};
+    return true;
+}
 
-        const D3D12_GPU_DESCRIPTOR_HANDLE baseGPUDescriptor = {
-            descriptorHeap->GetGPUDescriptorHandleForHeapStart().ptr + heapOffset};
+ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
+    return mHeap->GetD3D12DescriptorHeap();
+}
 
-        // Record both the device and heap serials to determine later if the allocations are
-        // still valid.
-        *allocation = GPUDescriptorHeapAllocation{baseGPUDescriptor, pendingSerial, mHeapSerial};
+void ShaderVisibleDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+    mAllocator.Deallocate(completedSerial);
+}
 
-        return true;
-    }
+ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
+ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
+    // The size in bytes of a descriptor heap is best calculated by the increment size
+    // multiplied by the number of descriptors. In practice, this is only an estimate and
+    // the actual size may vary depending on the driver.
+    const uint64_t kSize = mSizeIncrement * descriptorCount;
 
-    ID3D12DescriptorHeap* ShaderVisibleDescriptorAllocator::GetShaderVisibleHeap() const {
-        return mHeap->GetD3D12DescriptorHeap();
-    }
+    DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
 
-    void ShaderVisibleDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
-        mAllocator.Deallocate(completedSerial);
-    }
+    ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
+    D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+    heapDescriptor.Type = mHeapType;
+    heapDescriptor.NumDescriptors = descriptorCount;
+    heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
+    heapDescriptor.NodeMask = 0;
+    DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
+                                         &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
+                                     "ID3D12Device::CreateDescriptorHeap"));
 
-    ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>>
-    ShaderVisibleDescriptorAllocator::AllocateHeap(uint32_t descriptorCount) const {
-        // The size in bytes of a descriptor heap is best calculated by the increment size
-        // multiplied by the number of descriptors. In practice, this is only an estimate and
-        // the actual size may vary depending on the driver.
-        const uint64_t kSize = mSizeIncrement * descriptorCount;
+    std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
+        std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
 
-        DAWN_TRY(mDevice->GetResidencyManager()->EnsureCanAllocate(kSize, MemorySegment::Local));
+    // We must track the allocation in the LRU when it is created, otherwise the residency
+    // manager will see the allocation as non-resident in the later call to LockAllocation.
+    mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
 
-        ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap;
-        D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
-        heapDescriptor.Type = mHeapType;
-        heapDescriptor.NumDescriptors = descriptorCount;
-        heapDescriptor.Flags = GetD3D12HeapFlags(mHeapType);
-        heapDescriptor.NodeMask = 0;
-        DAWN_TRY(CheckOutOfMemoryHRESULT(mDevice->GetD3D12Device()->CreateDescriptorHeap(
-                                             &heapDescriptor, IID_PPV_ARGS(&d3d12DescriptorHeap)),
-                                         "ID3D12Device::CreateDescriptorHeap"));
+    return std::move(descriptorHeap);
+}
 
-        std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap =
-            std::make_unique<ShaderVisibleDescriptorHeap>(std::move(d3d12DescriptorHeap), kSize);
+// Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
+MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
+    std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
+    // Dynamically allocate using a two-phase allocation strategy.
+    // The first phase increasingly grows a small heap in binary sizes for light users while the
+    // second phase pool-allocates largest sized heaps for heavy users.
+    if (mHeap != nullptr) {
+        mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
 
-        // We must track the allocation in the LRU when it is created, otherwise the residency
-        // manager will see the allocation as non-resident in the later call to LockAllocation.
-        mDevice->GetResidencyManager()->TrackResidentAllocation(descriptorHeap.get());
-
-        return std::move(descriptorHeap);
-    }
-
-    // Creates a GPU descriptor heap that manages descriptors in a FIFO queue.
-    MaybeError ShaderVisibleDescriptorAllocator::AllocateAndSwitchShaderVisibleHeap() {
-        std::unique_ptr<ShaderVisibleDescriptorHeap> descriptorHeap;
-        // Dynamically allocate using a two-phase allocation strategy.
-        // The first phase increasingly grows a small heap in binary sizes for light users while the
-        // second phase pool-allocates largest sized heaps for heavy users.
-        if (mHeap != nullptr) {
-            mDevice->GetResidencyManager()->UnlockAllocation(mHeap.get());
-
-            const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
-                mHeapType,
-                mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-            if (mDescriptorCount < maxDescriptorCount) {
-                // Phase #1. Grow the heaps in powers-of-two.
-                mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
-                mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
-            } else {
-                // Phase #2. Pool-allocate heaps.
-                // Return the switched out heap to the pool and retrieve the oldest heap that is no
-                // longer used by GPU. This maintains a heap buffer to avoid frequently re-creating
-                // heaps for heavy users.
-                // TODO(dawn:256): Consider periodically triming to avoid OOM.
-                mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
-                if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
-                    descriptorHeap = std::move(mPool.front().heap);
-                    mPool.pop_front();
-                }
+        const uint32_t maxDescriptorCount = GetD3D12ShaderVisibleHeapMaxSize(
+            mHeapType, mDevice->IsToggleEnabled(Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+        if (mDescriptorCount < maxDescriptorCount) {
+            // Phase #1. Grow the heaps in powers-of-two.
+            mDevice->ReferenceUntilUnused(mHeap->GetD3D12DescriptorHeap());
+            mDescriptorCount = std::min(mDescriptorCount * 2, maxDescriptorCount);
+        } else {
+            // Phase #2. Pool-allocate heaps.
+            // Return the switched out heap to the pool and retrieve the oldest heap that is no
+            // longer used by GPU. This maintains a heap buffer to avoid frequently re-creating
+            // heaps for heavy users.
+            // TODO(dawn:256): Consider periodically trimming to avoid OOM.
+            mPool.push_back({mDevice->GetPendingCommandSerial(), std::move(mHeap)});
+            if (mPool.front().heapSerial <= mDevice->GetCompletedCommandSerial()) {
+                descriptorHeap = std::move(mPool.front().heap);
+                mPool.pop_front();
             }
         }
-
-        if (descriptorHeap == nullptr) {
-            DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
-        }
-
-        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
-
-        // Create a FIFO buffer from the recently created heap.
-        mHeap = std::move(descriptorHeap);
-        mAllocator = RingBufferAllocator(mDescriptorCount);
-
-        // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
-        // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
-        // heap serial.
-        mHeapSerial++;
-
-        return {};
     }
 
-    HeapVersionID ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSerialForTesting() const {
-        return mHeapSerial;
+    if (descriptorHeap == nullptr) {
+        DAWN_TRY_ASSIGN(descriptorHeap, AllocateHeap(mDescriptorCount));
     }
 
-    uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting() const {
-        return mAllocator.GetSize();
-    }
+    DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(descriptorHeap.get()));
 
-    uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting() const {
-        return mPool.size();
-    }
+    // Create a FIFO buffer from the recently created heap.
+    mHeap = std::move(descriptorHeap);
+    mAllocator = RingBufferAllocator(mDescriptorCount);
 
-    bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
-        return mHeap->IsResidencyLocked();
-    }
+    // Invalidate all bindgroup allocations on previously bound heaps by incrementing the heap
+    // serial. When a bindgroup attempts to re-populate, it will compare with its recorded
+    // heap serial.
+    mHeapSerial++;
 
-    bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
-        ASSERT(!mPool.empty());
-        return mPool.back().heap->IsInResidencyLRUCache();
-    }
+    return {};
+}
 
-    bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
-        const GPUDescriptorHeapAllocation& allocation) const {
-        // Consider valid if allocated for the pending submit and the shader visible heaps
-        // have not switched over.
-        return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
-                allocation.GetHeapSerial() == mHeapSerial);
-    }
+HeapVersionID ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSerialForTesting() const {
+    return mHeapSerial;
+}
 
-    ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
-        ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
-        uint64_t size)
-        : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
-          mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {
-    }
+uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisibleHeapSizeForTesting() const {
+    return mAllocator.GetSize();
+}
 
-    ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
-        return mD3d12DescriptorHeap.Get();
-    }
+uint64_t ShaderVisibleDescriptorAllocator::GetShaderVisiblePoolSizeForTesting() const {
+    return mPool.size();
+}
+
+bool ShaderVisibleDescriptorAllocator::IsShaderVisibleHeapLockedResidentForTesting() const {
+    return mHeap->IsResidencyLocked();
+}
+
+bool ShaderVisibleDescriptorAllocator::IsLastShaderVisibleHeapInLRUForTesting() const {
+    ASSERT(!mPool.empty());
+    return mPool.back().heap->IsInResidencyLRUCache();
+}
+
+bool ShaderVisibleDescriptorAllocator::IsAllocationStillValid(
+    const GPUDescriptorHeapAllocation& allocation) const {
+    // Consider valid if allocated for the pending submit and the shader visible heaps
+    // have not switched over.
+    return (allocation.GetLastUsageSerial() > mDevice->GetCompletedCommandSerial() &&
+            allocation.GetHeapSerial() == mHeapSerial);
+}
+
+ShaderVisibleDescriptorHeap::ShaderVisibleDescriptorHeap(
+    ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
+    uint64_t size)
+    : Pageable(d3d12DescriptorHeap, MemorySegment::Local, size),
+      mD3d12DescriptorHeap(std::move(d3d12DescriptorHeap)) {}
+
+ID3D12DescriptorHeap* ShaderVisibleDescriptorHeap::GetD3D12DescriptorHeap() const {
+    return mD3d12DescriptorHeap.Get();
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
index 96cda8a..cf09f9d 100644
--- a/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
+++ b/src/dawn/native/d3d12/ShaderVisibleDescriptorAllocatorD3D12.h
@@ -32,75 +32,74 @@
 // is returned to the pool.
 namespace dawn::native::d3d12 {
 
-    class Device;
-    class GPUDescriptorHeapAllocation;
+class Device;
+class GPUDescriptorHeapAllocation;
 
-    class ShaderVisibleDescriptorHeap : public Pageable {
-      public:
-        ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap,
-                                    uint64_t size);
-        ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
+class ShaderVisibleDescriptorHeap : public Pageable {
+  public:
+    ShaderVisibleDescriptorHeap(ComPtr<ID3D12DescriptorHeap> d3d12DescriptorHeap, uint64_t size);
+    ID3D12DescriptorHeap* GetD3D12DescriptorHeap() const;
 
-      private:
-        ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
+  private:
+    ComPtr<ID3D12DescriptorHeap> mD3d12DescriptorHeap;
+};
+
+class ShaderVisibleDescriptorAllocator {
+  public:
+    static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
+        Device* device,
+        D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+
+    ShaderVisibleDescriptorAllocator(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+
+    // Returns true if the allocation was successful, when false is returned the current heap is
+    // full and AllocateAndSwitchShaderVisibleHeap() must be called.
+    bool AllocateGPUDescriptors(uint32_t descriptorCount,
+                                ExecutionSerial pendingSerial,
+                                D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
+                                GPUDescriptorHeapAllocation* allocation);
+
+    void Tick(ExecutionSerial completedSerial);
+
+    ID3D12DescriptorHeap* GetShaderVisibleHeap() const;
+    MaybeError AllocateAndSwitchShaderVisibleHeap();
+
+    // For testing purposes only.
+    HeapVersionID GetShaderVisibleHeapSerialForTesting() const;
+    uint64_t GetShaderVisibleHeapSizeForTesting() const;
+    uint64_t GetShaderVisiblePoolSizeForTesting() const;
+    bool IsShaderVisibleHeapLockedResidentForTesting() const;
+    bool IsLastShaderVisibleHeapInLRUForTesting() const;
+
+    bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
+
+  private:
+    struct SerialDescriptorHeap {
+        ExecutionSerial heapSerial;
+        std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
     };
 
-    class ShaderVisibleDescriptorAllocator {
-      public:
-        static ResultOrError<std::unique_ptr<ShaderVisibleDescriptorAllocator>> Create(
-            Device* device,
-            D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+    ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
+        uint32_t descriptorCount) const;
 
-        ShaderVisibleDescriptorAllocator(Device* device, D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+    std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
+    RingBufferAllocator mAllocator;
+    std::list<SerialDescriptorHeap> mPool;
+    D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
 
-        // Returns true if the allocation was successful, when false is returned the current heap is
-        // full and AllocateAndSwitchShaderVisibleHeap() must be called.
-        bool AllocateGPUDescriptors(uint32_t descriptorCount,
-                                    ExecutionSerial pendingSerial,
-                                    D3D12_CPU_DESCRIPTOR_HANDLE* baseCPUDescriptor,
-                                    GPUDescriptorHeapAllocation* allocation);
+    Device* mDevice;
 
-        void Tick(ExecutionSerial completedSerial);
+    // The serial value of 0 means the shader-visible heaps have not been allocated.
+    // This value is never returned in the GPUDescriptorHeapAllocation after
+    // AllocateGPUDescriptors() is called.
+    HeapVersionID mHeapSerial = HeapVersionID(0);
 
-        ID3D12DescriptorHeap* GetShaderVisibleHeap() const;
-        MaybeError AllocateAndSwitchShaderVisibleHeap();
+    uint32_t mSizeIncrement;
 
-        // For testing purposes only.
-        HeapVersionID GetShaderVisibleHeapSerialForTesting() const;
-        uint64_t GetShaderVisibleHeapSizeForTesting() const;
-        uint64_t GetShaderVisiblePoolSizeForTesting() const;
-        bool IsShaderVisibleHeapLockedResidentForTesting() const;
-        bool IsLastShaderVisibleHeapInLRUForTesting() const;
-
-        bool IsAllocationStillValid(const GPUDescriptorHeapAllocation& allocation) const;
-
-      private:
-        struct SerialDescriptorHeap {
-            ExecutionSerial heapSerial;
-            std::unique_ptr<ShaderVisibleDescriptorHeap> heap;
-        };
-
-        ResultOrError<std::unique_ptr<ShaderVisibleDescriptorHeap>> AllocateHeap(
-            uint32_t descriptorCount) const;
-
-        std::unique_ptr<ShaderVisibleDescriptorHeap> mHeap;
-        RingBufferAllocator mAllocator;
-        std::list<SerialDescriptorHeap> mPool;
-        D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
-
-        Device* mDevice;
-
-        // The serial value of 0 means the shader-visible heaps have not been allocated.
-        // This value is never returned in the GPUDescriptorHeapAllocation after
-        // AllocateGPUDescriptors() is called.
-        HeapVersionID mHeapSerial = HeapVersionID(0);
-
-        uint32_t mSizeIncrement;
-
-        // The descriptor count is the current size of the heap in number of descriptors.
-        // This is stored on the allocator to avoid extra conversions.
-        uint32_t mDescriptorCount = 0;
-    };
+    // The descriptor count is the current size of the heap in number of descriptors.
+    // This is stored on the allocator to avoid extra conversions.
+    uint32_t mDescriptorCount = 0;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_SHADERVISIBLEDESCRIPTORALLOCATORD3D12_H_
diff --git a/src/dawn/native/d3d12/StagingBufferD3D12.cpp b/src/dawn/native/d3d12/StagingBufferD3D12.cpp
index e608a14..edaa2cf 100644
--- a/src/dawn/native/d3d12/StagingBufferD3D12.cpp
+++ b/src/dawn/native/d3d12/StagingBufferD3D12.cpp
@@ -21,57 +21,55 @@
 
 namespace dawn::native::d3d12 {
 
-    StagingBuffer::StagingBuffer(size_t size, Device* device)
-        : StagingBufferBase(size), mDevice(device) {
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+    : StagingBufferBase(size), mDevice(device) {}
+
+MaybeError StagingBuffer::Initialize() {
+    D3D12_RESOURCE_DESC resourceDescriptor;
+    resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
+    resourceDescriptor.Alignment = 0;
+    resourceDescriptor.Width = GetSize();
+    resourceDescriptor.Height = 1;
+    resourceDescriptor.DepthOrArraySize = 1;
+    resourceDescriptor.MipLevels = 1;
+    resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
+    resourceDescriptor.SampleDesc.Count = 1;
+    resourceDescriptor.SampleDesc.Quality = 0;
+    resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
+    resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
+
+    DAWN_TRY_ASSIGN(mUploadHeap, mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
+                                                         D3D12_RESOURCE_STATE_GENERIC_READ));
+
+    // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
+    // evicted. This buffer should already have been made resident when it was created.
+    DAWN_TRY(
+        mDevice->GetResidencyManager()->LockAllocation(ToBackend(mUploadHeap.GetResourceHeap())));
+
+    SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
+
+    return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
+}
+
+StagingBuffer::~StagingBuffer() {
+    // Always check if the allocation is valid before Unmap.
+    // The resource would not exist had it failed to allocate.
+    if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
+        return;
     }
 
-    MaybeError StagingBuffer::Initialize() {
-        D3D12_RESOURCE_DESC resourceDescriptor;
-        resourceDescriptor.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
-        resourceDescriptor.Alignment = 0;
-        resourceDescriptor.Width = GetSize();
-        resourceDescriptor.Height = 1;
-        resourceDescriptor.DepthOrArraySize = 1;
-        resourceDescriptor.MipLevels = 1;
-        resourceDescriptor.Format = DXGI_FORMAT_UNKNOWN;
-        resourceDescriptor.SampleDesc.Count = 1;
-        resourceDescriptor.SampleDesc.Quality = 0;
-        resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
-        resourceDescriptor.Flags = D3D12_RESOURCE_FLAG_NONE;
+    // The underlying heap was locked in residency upon creation. We must unlock it when this
+    // buffer becomes unmapped.
+    mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
 
-        DAWN_TRY_ASSIGN(mUploadHeap,
-                        mDevice->AllocateMemory(D3D12_HEAP_TYPE_UPLOAD, resourceDescriptor,
-                                                D3D12_RESOURCE_STATE_GENERIC_READ));
+    // Invalidate the CPU virtual address & flush cache (if needed).
+    GetResource()->Unmap(0, nullptr);
+    mMappedPointer = nullptr;
 
-        // The mapped buffer can be accessed at any time, so it must be locked to ensure it is never
-        // evicted. This buffer should already have been made resident when it was created.
-        DAWN_TRY(mDevice->GetResidencyManager()->LockAllocation(
-            ToBackend(mUploadHeap.GetResourceHeap())));
+    mDevice->DeallocateMemory(mUploadHeap);
+}
 
-        SetDebugName(mDevice, GetResource(), "Dawn_StagingBuffer");
-
-        return CheckHRESULT(GetResource()->Map(0, nullptr, &mMappedPointer), "ID3D12Resource::Map");
-    }
-
-    StagingBuffer::~StagingBuffer() {
-        // Always check if the allocation is valid before Unmap.
-        // The resource would not exist had it failed to allocate.
-        if (mUploadHeap.GetInfo().mMethod == AllocationMethod::kInvalid) {
-            return;
-        }
-
-        // The underlying heap was locked in residency upon creation. We must unlock it when this
-        // buffer becomes unmapped.
-        mDevice->GetResidencyManager()->UnlockAllocation(ToBackend(mUploadHeap.GetResourceHeap()));
-
-        // Invalidate the CPU virtual address & flush cache (if needed).
-        GetResource()->Unmap(0, nullptr);
-        mMappedPointer = nullptr;
-
-        mDevice->DeallocateMemory(mUploadHeap);
-    }
-
-    ID3D12Resource* StagingBuffer::GetResource() const {
-        return mUploadHeap.GetD3D12Resource();
-    }
+ID3D12Resource* StagingBuffer::GetResource() const {
+    return mUploadHeap.GetD3D12Resource();
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/StagingBufferD3D12.h b/src/dawn/native/d3d12/StagingBufferD3D12.h
index 6e67a1a..dcbe7df 100644
--- a/src/dawn/native/d3d12/StagingBufferD3D12.h
+++ b/src/dawn/native/d3d12/StagingBufferD3D12.h
@@ -21,21 +21,21 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class StagingBuffer : public StagingBufferBase {
-      public:
-        StagingBuffer(size_t size, Device* device);
-        ~StagingBuffer() override;
+class StagingBuffer : public StagingBufferBase {
+  public:
+    StagingBuffer(size_t size, Device* device);
+    ~StagingBuffer() override;
 
-        ID3D12Resource* GetResource() const;
+    ID3D12Resource* GetResource() const;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-      private:
-        Device* mDevice;
-        ResourceHeapAllocation mUploadHeap;
-    };
+  private:
+    Device* mDevice;
+    ResourceHeapAllocation mUploadHeap;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_STAGINGBUFFERD3D12_H_
diff --git a/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
index ccf0621..f30016a 100644
--- a/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
+++ b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.cpp
@@ -22,133 +22,132 @@
 
 namespace dawn::native::d3d12 {
 
-    StagingDescriptorAllocator::StagingDescriptorAllocator(Device* device,
-                                                           uint32_t descriptorCount,
-                                                           uint32_t heapSize,
-                                                           D3D12_DESCRIPTOR_HEAP_TYPE heapType)
-        : mDevice(device),
-          mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
-          mBlockSize(descriptorCount * mSizeIncrement),
-          mHeapSize(RoundUp(heapSize, descriptorCount)),
-          mHeapType(heapType) {
-        ASSERT(descriptorCount <= heapSize);
+StagingDescriptorAllocator::StagingDescriptorAllocator(Device* device,
+                                                       uint32_t descriptorCount,
+                                                       uint32_t heapSize,
+                                                       D3D12_DESCRIPTOR_HEAP_TYPE heapType)
+    : mDevice(device),
+      mSizeIncrement(device->GetD3D12Device()->GetDescriptorHandleIncrementSize(heapType)),
+      mBlockSize(descriptorCount * mSizeIncrement),
+      mHeapSize(RoundUp(heapSize, descriptorCount)),
+      mHeapType(heapType) {
+    ASSERT(descriptorCount <= heapSize);
+}
+
+StagingDescriptorAllocator::~StagingDescriptorAllocator() {
+    const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
+    for (auto& buffer : mPool) {
+        ASSERT(buffer.freeBlockIndices.size() == freeBlockIndicesSize);
+    }
+    ASSERT(mAvailableHeaps.size() == mPool.size());
+}
+
+ResultOrError<CPUDescriptorHeapAllocation> StagingDescriptorAllocator::AllocateCPUDescriptors() {
+    if (mAvailableHeaps.empty()) {
+        DAWN_TRY(AllocateCPUHeap());
     }
 
-    StagingDescriptorAllocator::~StagingDescriptorAllocator() {
-        const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
-        for (auto& buffer : mPool) {
-            ASSERT(buffer.freeBlockIndices.size() == freeBlockIndicesSize);
-        }
-        ASSERT(mAvailableHeaps.size() == mPool.size());
+    ASSERT(!mAvailableHeaps.empty());
+
+    const uint32_t heapIndex = mAvailableHeaps.back();
+    NonShaderVisibleBuffer& buffer = mPool[heapIndex];
+
+    ASSERT(!buffer.freeBlockIndices.empty());
+
+    const Index blockIndex = buffer.freeBlockIndices.back();
+
+    buffer.freeBlockIndices.pop_back();
+
+    if (buffer.freeBlockIndices.empty()) {
+        mAvailableHeaps.pop_back();
     }
 
-    ResultOrError<CPUDescriptorHeapAllocation>
-    StagingDescriptorAllocator::AllocateCPUDescriptors() {
-        if (mAvailableHeaps.empty()) {
-            DAWN_TRY(AllocateCPUHeap());
-        }
+    const D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor = {
+        buffer.heap->GetCPUDescriptorHandleForHeapStart().ptr + (blockIndex * mBlockSize)};
 
-        ASSERT(!mAvailableHeaps.empty());
+    return CPUDescriptorHeapAllocation{baseCPUDescriptor, heapIndex};
+}
 
-        const uint32_t heapIndex = mAvailableHeaps.back();
-        NonShaderVisibleBuffer& buffer = mPool[heapIndex];
+MaybeError StagingDescriptorAllocator::AllocateCPUHeap() {
+    D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
+    heapDescriptor.Type = mHeapType;
+    heapDescriptor.NumDescriptors = mHeapSize;
+    heapDescriptor.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
+    heapDescriptor.NodeMask = 0;
 
-        ASSERT(!buffer.freeBlockIndices.empty());
+    ComPtr<ID3D12DescriptorHeap> heap;
+    DAWN_TRY(CheckHRESULT(
+        mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
+        "ID3D12Device::CreateDescriptorHeap"));
 
-        const Index blockIndex = buffer.freeBlockIndices.back();
+    NonShaderVisibleBuffer newBuffer;
+    newBuffer.heap = std::move(heap);
 
-        buffer.freeBlockIndices.pop_back();
+    const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
+    newBuffer.freeBlockIndices.reserve(freeBlockIndicesSize);
 
-        if (buffer.freeBlockIndices.empty()) {
-            mAvailableHeaps.pop_back();
-        }
-
-        const D3D12_CPU_DESCRIPTOR_HANDLE baseCPUDescriptor = {
-            buffer.heap->GetCPUDescriptorHandleForHeapStart().ptr + (blockIndex * mBlockSize)};
-
-        return CPUDescriptorHeapAllocation{baseCPUDescriptor, heapIndex};
+    for (Index blockIndex = 0; blockIndex < freeBlockIndicesSize; blockIndex++) {
+        newBuffer.freeBlockIndices.push_back(blockIndex);
     }
 
-    MaybeError StagingDescriptorAllocator::AllocateCPUHeap() {
-        D3D12_DESCRIPTOR_HEAP_DESC heapDescriptor;
-        heapDescriptor.Type = mHeapType;
-        heapDescriptor.NumDescriptors = mHeapSize;
-        heapDescriptor.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
-        heapDescriptor.NodeMask = 0;
+    mAvailableHeaps.push_back(mPool.size());
+    mPool.emplace_back(std::move(newBuffer));
 
-        ComPtr<ID3D12DescriptorHeap> heap;
-        DAWN_TRY(CheckHRESULT(
-            mDevice->GetD3D12Device()->CreateDescriptorHeap(&heapDescriptor, IID_PPV_ARGS(&heap)),
-            "ID3D12Device::CreateDescriptorHeap"));
+    return {};
+}
 
-        NonShaderVisibleBuffer newBuffer;
-        newBuffer.heap = std::move(heap);
+void StagingDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) {
+    ASSERT(allocation->IsValid());
 
-        const Index freeBlockIndicesSize = GetFreeBlockIndicesSize();
-        newBuffer.freeBlockIndices.reserve(freeBlockIndicesSize);
+    const uint32_t heapIndex = allocation->GetHeapIndex();
 
-        for (Index blockIndex = 0; blockIndex < freeBlockIndicesSize; blockIndex++) {
-            newBuffer.freeBlockIndices.push_back(blockIndex);
-        }
+    ASSERT(heapIndex < mPool.size());
 
-        mAvailableHeaps.push_back(mPool.size());
-        mPool.emplace_back(std::move(newBuffer));
-
-        return {};
+    // Insert the deallocated block back into the free-list. Order does not matter. However,
+    // having blocks be non-contiguous could slow down future allocations due to poor cache
+    // locality.
+    // TODO(dawn:155): Consider more optimization.
+    std::vector<Index>& freeBlockIndices = mPool[heapIndex].freeBlockIndices;
+    if (freeBlockIndices.empty()) {
+        mAvailableHeaps.emplace_back(heapIndex);
     }
 
-    void StagingDescriptorAllocator::Deallocate(CPUDescriptorHeapAllocation* allocation) {
-        ASSERT(allocation->IsValid());
+    const D3D12_CPU_DESCRIPTOR_HANDLE heapStart =
+        mPool[heapIndex].heap->GetCPUDescriptorHandleForHeapStart();
 
-        const uint32_t heapIndex = allocation->GetHeapIndex();
+    const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = allocation->OffsetFrom(0, 0);
 
-        ASSERT(heapIndex < mPool.size());
+    const Index blockIndex = (baseDescriptor.ptr - heapStart.ptr) / mBlockSize;
 
-        // Insert the deallocated block back into the free-list. Order does not matter. However,
-        // having blocks be non-contigious could slow down future allocations due to poor cache
-        // locality.
-        // TODO(dawn:155): Consider more optimization.
-        std::vector<Index>& freeBlockIndices = mPool[heapIndex].freeBlockIndices;
-        if (freeBlockIndices.empty()) {
-            mAvailableHeaps.emplace_back(heapIndex);
-        }
+    freeBlockIndices.emplace_back(blockIndex);
 
-        const D3D12_CPU_DESCRIPTOR_HANDLE heapStart =
-            mPool[heapIndex].heap->GetCPUDescriptorHandleForHeapStart();
+    // Invalidate the handle in case the developer accidentally uses it again.
+    allocation->Invalidate();
+}
 
-        const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = allocation->OffsetFrom(0, 0);
+uint32_t StagingDescriptorAllocator::GetSizeIncrement() const {
+    return mSizeIncrement;
+}
 
-        const Index blockIndex = (baseDescriptor.ptr - heapStart.ptr) / mBlockSize;
+StagingDescriptorAllocator::Index StagingDescriptorAllocator::GetFreeBlockIndicesSize() const {
+    return ((mHeapSize * mSizeIncrement) / mBlockSize);
+}
 
-        freeBlockIndices.emplace_back(blockIndex);
+ResultOrError<CPUDescriptorHeapAllocation>
+StagingDescriptorAllocator::AllocateTransientCPUDescriptors() {
+    CPUDescriptorHeapAllocation allocation;
+    DAWN_TRY_ASSIGN(allocation, AllocateCPUDescriptors());
+    mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
+    return allocation;
+}
 
-        // Invalidate the handle in case the developer accidentally uses it again.
-        allocation->Invalidate();
+void StagingDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
+    for (CPUDescriptorHeapAllocation& allocation :
+         mAllocationsToDelete.IterateUpTo(completedSerial)) {
+        Deallocate(&allocation);
     }
 
-    uint32_t StagingDescriptorAllocator::GetSizeIncrement() const {
-        return mSizeIncrement;
-    }
-
-    StagingDescriptorAllocator::Index StagingDescriptorAllocator::GetFreeBlockIndicesSize() const {
-        return ((mHeapSize * mSizeIncrement) / mBlockSize);
-    }
-
-    ResultOrError<CPUDescriptorHeapAllocation>
-    StagingDescriptorAllocator::AllocateTransientCPUDescriptors() {
-        CPUDescriptorHeapAllocation allocation;
-        DAWN_TRY_ASSIGN(allocation, AllocateCPUDescriptors());
-        mAllocationsToDelete.Enqueue(allocation, mDevice->GetPendingCommandSerial());
-        return allocation;
-    }
-
-    void StagingDescriptorAllocator::Tick(ExecutionSerial completedSerial) {
-        for (CPUDescriptorHeapAllocation& allocation :
-             mAllocationsToDelete.IterateUpTo(completedSerial)) {
-            Deallocate(&allocation);
-        }
-
-        mAllocationsToDelete.ClearUpTo(completedSerial);
-    }
+    mAllocationsToDelete.ClearUpTo(completedSerial);
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
index 3b409e2..0940867 100644
--- a/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
+++ b/src/dawn/native/d3d12/StagingDescriptorAllocatorD3D12.h
@@ -33,54 +33,54 @@
 // offset is inserted back into the free-list.
 namespace dawn::native::d3d12 {
 
-    class Device;
+class Device;
 
-    class StagingDescriptorAllocator {
-      public:
-        StagingDescriptorAllocator() = default;
-        StagingDescriptorAllocator(Device* device,
-                                   uint32_t descriptorCount,
-                                   uint32_t heapSize,
-                                   D3D12_DESCRIPTOR_HEAP_TYPE heapType);
-        ~StagingDescriptorAllocator();
+class StagingDescriptorAllocator {
+  public:
+    StagingDescriptorAllocator() = default;
+    StagingDescriptorAllocator(Device* device,
+                               uint32_t descriptorCount,
+                               uint32_t heapSize,
+                               D3D12_DESCRIPTOR_HEAP_TYPE heapType);
+    ~StagingDescriptorAllocator();
 
-        ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
+    ResultOrError<CPUDescriptorHeapAllocation> AllocateCPUDescriptors();
 
-        // Will call Deallocate when the serial is passed.
-        ResultOrError<CPUDescriptorHeapAllocation> AllocateTransientCPUDescriptors();
+    // Will call Deallocate when the serial is passed.
+    ResultOrError<CPUDescriptorHeapAllocation> AllocateTransientCPUDescriptors();
 
-        void Deallocate(CPUDescriptorHeapAllocation* allocation);
+    void Deallocate(CPUDescriptorHeapAllocation* allocation);
 
-        uint32_t GetSizeIncrement() const;
+    uint32_t GetSizeIncrement() const;
 
-        void Tick(ExecutionSerial completedSerial);
+    void Tick(ExecutionSerial completedSerial);
 
-      private:
-        using Index = uint16_t;
+  private:
+    using Index = uint16_t;
 
-        struct NonShaderVisibleBuffer {
-            ComPtr<ID3D12DescriptorHeap> heap;
-            std::vector<Index> freeBlockIndices;
-        };
-
-        MaybeError AllocateCPUHeap();
-
-        Index GetFreeBlockIndicesSize() const;
-
-        std::vector<uint32_t> mAvailableHeaps;  // Indices into the pool.
-        std::vector<NonShaderVisibleBuffer> mPool;
-
-        Device* mDevice;
-
-        uint32_t mSizeIncrement;  // Size of the descriptor (in bytes).
-        uint32_t mBlockSize;      // Size of the block of descriptors (in bytes).
-        uint32_t mHeapSize;       // Size of the heap (in number of descriptors).
-
-        D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
-
-        SerialQueue<ExecutionSerial, CPUDescriptorHeapAllocation> mAllocationsToDelete;
+    struct NonShaderVisibleBuffer {
+        ComPtr<ID3D12DescriptorHeap> heap;
+        std::vector<Index> freeBlockIndices;
     };
 
+    MaybeError AllocateCPUHeap();
+
+    Index GetFreeBlockIndicesSize() const;
+
+    std::vector<uint32_t> mAvailableHeaps;  // Indices into the pool.
+    std::vector<NonShaderVisibleBuffer> mPool;
+
+    Device* mDevice;
+
+    uint32_t mSizeIncrement;  // Size of the descriptor (in bytes).
+    uint32_t mBlockSize;      // Size of the block of descriptors (in bytes).
+    uint32_t mHeapSize;       // Size of the heap (in number of descriptors).
+
+    D3D12_DESCRIPTOR_HEAP_TYPE mHeapType;
+
+    SerialQueue<ExecutionSerial, CPUDescriptorHeapAllocation> mAllocationsToDelete;
+};
+
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_STAGINGDESCRIPTORALLOCATORD3D12_H_
diff --git a/src/dawn/native/d3d12/SwapChainD3D12.cpp b/src/dawn/native/d3d12/SwapChainD3D12.cpp
index 38d47d0..32116c4 100644
--- a/src/dawn/native/d3d12/SwapChainD3D12.cpp
+++ b/src/dawn/native/d3d12/SwapChainD3D12.cpp
@@ -25,352 +25,350 @@
 #include "dawn/native/d3d12/TextureD3D12.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
+namespace {
 
-        uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
-            switch (mode) {
-                case wgpu::PresentMode::Immediate:
-                case wgpu::PresentMode::Fifo:
-                    return 2;
-                case wgpu::PresentMode::Mailbox:
-                    return 3;
-            }
-        }
+uint32_t PresentModeToBufferCount(wgpu::PresentMode mode) {
+    switch (mode) {
+        case wgpu::PresentMode::Immediate:
+        case wgpu::PresentMode::Fifo:
+            return 2;
+        case wgpu::PresentMode::Mailbox:
+            return 3;
+    }
+}
 
-        uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
-            switch (mode) {
-                case wgpu::PresentMode::Immediate:
-                case wgpu::PresentMode::Mailbox:
-                    return 0;
-                case wgpu::PresentMode::Fifo:
-                    return 1;
-            }
-        }
+uint32_t PresentModeToSwapInterval(wgpu::PresentMode mode) {
+    switch (mode) {
+        case wgpu::PresentMode::Immediate:
+        case wgpu::PresentMode::Mailbox:
+            return 0;
+        case wgpu::PresentMode::Fifo:
+            return 1;
+    }
+}
 
-        UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
-            UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
+UINT PresentModeToSwapChainFlags(wgpu::PresentMode mode) {
+    UINT flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
 
-            if (mode == wgpu::PresentMode::Immediate) {
-                flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
-            }
-
-            return flags;
-        }
-
-        DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
-            DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
-            if (usage & wgpu::TextureUsage::TextureBinding) {
-                dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
-            }
-            if (usage & wgpu::TextureUsage::StorageBinding) {
-                dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
-            }
-            if (usage & wgpu::TextureUsage::RenderAttachment) {
-                dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
-            }
-            return dxgiUsage;
-        }
-
-    }  // namespace
-
-    // OldSwapChain
-
-    // static
-    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
-        return AcquireRef(new OldSwapChain(device, descriptor));
+    if (mode == wgpu::PresentMode::Immediate) {
+        flags |= DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING;
     }
 
-    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
-        : OldSwapChainBase(device, descriptor) {
-        const auto& im = GetImplementation();
-        DawnWSIContextD3D12 wsiContext = {};
-        wsiContext.device = ToAPI(GetDevice());
-        im.Init(im.userData, &wsiContext);
+    return flags;
+}
 
-        ASSERT(im.textureUsage != WGPUTextureUsage_None);
-        mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+DXGI_USAGE ToDXGIUsage(wgpu::TextureUsage usage) {
+    DXGI_USAGE dxgiUsage = DXGI_CPU_ACCESS_NONE;
+    if (usage & wgpu::TextureUsage::TextureBinding) {
+        dxgiUsage |= DXGI_USAGE_SHADER_INPUT;
+    }
+    if (usage & wgpu::TextureUsage::StorageBinding) {
+        dxgiUsage |= DXGI_USAGE_UNORDERED_ACCESS;
+    }
+    if (usage & wgpu::TextureUsage::RenderAttachment) {
+        dxgiUsage |= DXGI_USAGE_RENDER_TARGET_OUTPUT;
+    }
+    return dxgiUsage;
+}
+
+}  // namespace
+
+// OldSwapChain
+
+// static
+Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+    return AcquireRef(new OldSwapChain(device, descriptor));
+}
+
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+    : OldSwapChainBase(device, descriptor) {
+    const auto& im = GetImplementation();
+    DawnWSIContextD3D12 wsiContext = {};
+    wsiContext.device = ToAPI(GetDevice());
+    im.Init(im.userData, &wsiContext);
+
+    ASSERT(im.textureUsage != WGPUTextureUsage_None);
+    mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+}
+
+OldSwapChain::~OldSwapChain() = default;
+
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+    DeviceBase* device = GetDevice();
+    const auto& im = GetImplementation();
+    DawnSwapChainNextTexture next = {};
+    DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+    if (error) {
+        device->HandleError(InternalErrorType::Internal, error);
+        return nullptr;
     }
 
-    OldSwapChain::~OldSwapChain() = default;
-
-    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
-        DeviceBase* device = GetDevice();
-        const auto& im = GetImplementation();
-        DawnSwapChainNextTexture next = {};
-        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
-        if (error) {
-            device->HandleError(InternalErrorType::Internal, error);
-            return nullptr;
-        }
-
-        ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
-        Ref<Texture> dawnTexture;
-        if (device->ConsumedError(
-                Texture::Create(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture)),
-                &dawnTexture)) {
-            return nullptr;
-        }
-
-        return dawnTexture.Detach();
+    ComPtr<ID3D12Resource> d3d12Texture = static_cast<ID3D12Resource*>(next.texture.ptr);
+    Ref<Texture> dawnTexture;
+    if (device->ConsumedError(
+            Texture::Create(ToBackend(GetDevice()), descriptor, std::move(d3d12Texture)),
+            &dawnTexture)) {
+        return nullptr;
     }
 
-    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
-        Device* device = ToBackend(GetDevice());
+    return dawnTexture.Detach();
+}
 
-        CommandRecordingContext* commandContext;
-        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+    Device* device = ToBackend(GetDevice());
 
-        // Perform the necessary transition for the texture to be presented.
-        ToBackend(view->GetTexture())
-            ->TrackUsageAndTransitionNow(commandContext, mTextureUsage,
-                                         view->GetSubresourceRange());
+    CommandRecordingContext* commandContext;
+    DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
 
-        DAWN_TRY(device->ExecutePendingCommandContext());
+    // Perform the necessary transition for the texture to be presented.
+    ToBackend(view->GetTexture())
+        ->TrackUsageAndTransitionNow(commandContext, mTextureUsage, view->GetSubresourceRange());
 
-        return {};
+    DAWN_TRY(device->ExecutePendingCommandContext());
+
+    return {};
+}
+
+// SwapChain
+
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor) {
+    Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+    DAWN_TRY(swapchain->Initialize(previousSwapChain));
+    return swapchain;
+}
+
+SwapChain::~SwapChain() = default;
+
+void SwapChain::DestroyImpl() {
+    SwapChainBase::DestroyImpl();
+    DetachFromSurface();
+}
+
+// Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
+// nullptr. If it is not nullptr it means that it is the swapchain previously in use on the
+// surface and that we have a chance to reuse it's underlying IDXGISwapChain and "buffers".
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+    ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
+
+    // Precompute the configuration parameters we want for the DXGI swapchain.
+    mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
+    mConfig.format = D3D12TextureFormat(GetFormat());
+    mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
+    mConfig.usage = ToDXGIUsage(GetUsage());
+
+    // There is no previous swapchain so we can create one directly and don't have anything else
+    // to do.
+    if (previousSwapChain == nullptr) {
+        return InitializeSwapChainFromScratch();
     }
 
-    // SwapChain
+    // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+    // multiple backends one after the other. It probably needs to block until the backend
+    // and GPU are completely finished with the previous swapchain.
+    DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12,
+                    "D3D12 SwapChain cannot switch backend types from %s to %s.",
+                    previousSwapChain->GetBackendType(), wgpu::BackendType::D3D12);
 
-    // static
-    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor) {
-        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
-        DAWN_TRY(swapchain->Initialize(previousSwapChain));
-        return swapchain;
-    }
+    // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+    SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
 
-    SwapChain::~SwapChain() = default;
+    // TODO(crbug.com/dawn/269): Figure out switching an HWND between devices, it might
+    // require just losing the reference to the swapchain, but might also need to wait for
+    // all previous operations to complete.
+    DAWN_INVALID_IF(GetDevice() != previousSwapChain->GetDevice(),
+                    "D3D12 SwapChain cannot switch between D3D Devices");
 
-    void SwapChain::DestroyImpl() {
-        SwapChainBase::DestroyImpl();
-        DetachFromSurface();
-    }
+    // The previous swapchain is on the same device so we want to reuse it but it is still not
+    // always possible. Because DXGI requires that a new swapchain be created if the
+    // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag is changed.
+    bool canReuseSwapChain =
+        ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
+         DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
 
-    // Initializes the swapchain on the surface. Note that `previousSwapChain` may or may not be
-    // nullptr. If it is not nullptr it means that it is the swapchain previously in use on the
-    // surface and that we have a chance to reuse it's underlying IDXGISwapChain and "buffers".
-    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
-        ASSERT(GetSurface()->GetType() == Surface::Type::WindowsHWND);
-
-        // Precompute the configuration parameters we want for the DXGI swapchain.
-        mConfig.bufferCount = PresentModeToBufferCount(GetPresentMode());
-        mConfig.format = D3D12TextureFormat(GetFormat());
-        mConfig.swapChainFlags = PresentModeToSwapChainFlags(GetPresentMode());
-        mConfig.usage = ToDXGIUsage(GetUsage());
-
-        // There is no previous swapchain so we can create one directly and don't have anything else
-        // to do.
-        if (previousSwapChain == nullptr) {
-            return InitializeSwapChainFromScratch();
-        }
-
-        // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
-        // multiple backends one after the other. It probably needs to block until the backend
-        // and GPU are completely finished with the previous swapchain.
-        DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::D3D12,
-                        "D3D12 SwapChain cannot switch backend types from %s to %s.",
-                        previousSwapChain->GetBackendType(), wgpu::BackendType::D3D12);
-
-        // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
-        SwapChain* previousD3D12SwapChain = static_cast<SwapChain*>(previousSwapChain);
-
-        // TODO(crbug.com/dawn/269): Figure out switching an HWND between devices, it might
-        // require just losing the reference to the swapchain, but might also need to wait for
-        // all previous operations to complete.
-        DAWN_INVALID_IF(GetDevice() != previousSwapChain->GetDevice(),
-                        "D3D12 SwapChain cannot switch between D3D Devices");
-
-        // The previous swapchain is on the same device so we want to reuse it but it is still not
-        // always possible. Because DXGI requires that a new swapchain be created if the
-        // DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING flag is changed.
-        bool canReuseSwapChain =
-            ((mConfig.swapChainFlags ^ previousD3D12SwapChain->mConfig.swapChainFlags) &
-             DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) == 0;
-
-        // We can't reuse the previous swapchain, so we destroy it and wait for all of its reference
-        // to be forgotten (otherwise DXGI complains that there are outstanding references).
-        if (!canReuseSwapChain) {
-            DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
-            return InitializeSwapChainFromScratch();
-        }
-
-        // After all this we know we can reuse the swapchain, see if it is possible to also reuse
-        // the buffers.
-        mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
-
-        bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
-                               GetHeight() == previousSwapChain->GetHeight() &&
-                               GetFormat() == previousSwapChain->GetFormat() &&
-                               GetPresentMode() == previousSwapChain->GetPresentMode();
-        if (canReuseBuffers) {
-            mBuffers = std::move(previousD3D12SwapChain->mBuffers);
-            mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
-            mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
-            return {};
-        }
-
-        // We can't reuse the buffers so we need to resize, IDXGSwapChain->ResizeBuffers requires
-        // that all references to buffers are lost before it is called. Contrary to D3D11, the
-        // application is responsible for keeping references to the buffers until the GPU is done
-        // using them so we have no choice but to synchrounously wait for all operations to complete
-        // on the previous swapchain and then lose references to its buffers.
+    // We can't reuse the previous swapchain, so we destroy it and wait for all of its reference
+    // to be forgotten (otherwise DXGI complains that there are outstanding references).
+    if (!canReuseSwapChain) {
         DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
-        DAWN_TRY(
-            CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
-                                                       mConfig.format, mConfig.swapChainFlags),
-                         "IDXGISwapChain::ResizeBuffer"));
-        return CollectSwapChainBuffers();
+        return InitializeSwapChainFromScratch();
     }
 
-    MaybeError SwapChain::InitializeSwapChainFromScratch() {
-        ASSERT(mDXGISwapChain == nullptr);
+    // After all this we know we can reuse the swapchain, see if it is possible to also reuse
+    // the buffers.
+    mDXGISwapChain = std::move(previousD3D12SwapChain->mDXGISwapChain);
 
-        Device* device = ToBackend(GetDevice());
-
-        DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
-        swapChainDesc.Width = GetWidth();
-        swapChainDesc.Height = GetHeight();
-        swapChainDesc.Format = mConfig.format;
-        swapChainDesc.Stereo = false;
-        swapChainDesc.SampleDesc.Count = 1;
-        swapChainDesc.SampleDesc.Quality = 0;
-        swapChainDesc.BufferUsage = mConfig.usage;
-        swapChainDesc.BufferCount = mConfig.bufferCount;
-        swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
-        swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
-        swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
-        swapChainDesc.Flags = mConfig.swapChainFlags;
-
-        ComPtr<IDXGIFactory2> factory2 = nullptr;
-        DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
-                              "Getting IDXGIFactory2"));
-
-        ComPtr<IDXGISwapChain1> swapChain1;
-        switch (GetSurface()->GetType()) {
-            case Surface::Type::WindowsHWND: {
-                DAWN_TRY(CheckHRESULT(
-                    factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
-                                                     static_cast<HWND>(GetSurface()->GetHWND()),
-                                                     &swapChainDesc, nullptr, nullptr, &swapChain1),
-                    "Creating the IDXGISwapChain1"));
-                break;
-            }
-            case Surface::Type::WindowsCoreWindow: {
-                DAWN_TRY(CheckHRESULT(
-                    factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
-                                                           GetSurface()->GetCoreWindow(),
-                                                           &swapChainDesc, nullptr, &swapChain1),
-                    "Creating the IDXGISwapChain1"));
-                break;
-            }
-            case Surface::Type::WindowsSwapChainPanel: {
-                DAWN_TRY(CheckHRESULT(
-                    factory2->CreateSwapChainForComposition(device->GetCommandQueue().Get(),
-                                                            &swapChainDesc, nullptr, &swapChain1),
-                    "Creating the IDXGISwapChain1"));
-                ComPtr<ISwapChainPanelNative> swapChainPanelNative;
-                DAWN_TRY(CheckHRESULT(GetSurface()->GetSwapChainPanel()->QueryInterface(
-                                          IID_PPV_ARGS(&swapChainPanelNative)),
-                                      "Getting ISwapChainPanelNative"));
-                DAWN_TRY(CheckHRESULT(swapChainPanelNative->SetSwapChain(swapChain1.Get()),
-                                      "Setting SwapChain"));
-                break;
-            }
-            default:
-                UNREACHABLE();
-        }
-
-        DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Gettting IDXGISwapChain1"));
-
-        return CollectSwapChainBuffers();
-    }
-
-    MaybeError SwapChain::CollectSwapChainBuffers() {
-        ASSERT(mDXGISwapChain != nullptr);
-        ASSERT(mBuffers.empty());
-
-        mBuffers.resize(mConfig.bufferCount);
-        for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
-            DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
-                                  "Getting IDXGISwapChain buffer"));
-        }
-
-        // Pretend all the buffers were last used at the beginning of time.
-        mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
+    bool canReuseBuffers = GetWidth() == previousSwapChain->GetWidth() &&
+                           GetHeight() == previousSwapChain->GetHeight() &&
+                           GetFormat() == previousSwapChain->GetFormat() &&
+                           GetPresentMode() == previousSwapChain->GetPresentMode();
+    if (canReuseBuffers) {
+        mBuffers = std::move(previousD3D12SwapChain->mBuffers);
+        mBufferLastUsedSerials = std::move(previousD3D12SwapChain->mBufferLastUsedSerials);
+        mCurrentBuffer = previousD3D12SwapChain->mCurrentBuffer;
         return {};
     }
 
-    MaybeError SwapChain::PresentImpl() {
-        Device* device = ToBackend(GetDevice());
+    // We can't reuse the buffers so we need to resize, IDXGSwapChain->ResizeBuffers requires
+    // that all references to buffers are lost before it is called. Contrary to D3D11, the
+    // application is responsible for keeping references to the buffers until the GPU is done
+    // using them so we have no choice but to synchrounously wait for all operations to complete
+    // on the previous swapchain and then lose references to its buffers.
+    DAWN_TRY(previousD3D12SwapChain->DetachAndWaitForDeallocation());
+    DAWN_TRY(
+        CheckHRESULT(mDXGISwapChain->ResizeBuffers(mConfig.bufferCount, GetWidth(), GetHeight(),
+                                                   mConfig.format, mConfig.swapChainFlags),
+                     "IDXGISwapChain::ResizeBuffer"));
+    return CollectSwapChainBuffers();
+}
 
-        // Transition the texture to the present state as required by IDXGISwapChain1::Present()
-        // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
-        // presentable texture to present at the end of submits that use them.
-        CommandRecordingContext* commandContext;
-        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
-        mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
-                                                mApiTexture->GetAllSubresources());
-        DAWN_TRY(device->ExecutePendingCommandContext());
+MaybeError SwapChain::InitializeSwapChainFromScratch() {
+    ASSERT(mDXGISwapChain == nullptr);
 
-        // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
-        // message to the application that it could stop rendering.
-        HRESULT presentResult =
-            mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
-        if (presentResult != DXGI_STATUS_OCCLUDED) {
-            DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
+    Device* device = ToBackend(GetDevice());
+
+    DXGI_SWAP_CHAIN_DESC1 swapChainDesc = {};
+    swapChainDesc.Width = GetWidth();
+    swapChainDesc.Height = GetHeight();
+    swapChainDesc.Format = mConfig.format;
+    swapChainDesc.Stereo = false;
+    swapChainDesc.SampleDesc.Count = 1;
+    swapChainDesc.SampleDesc.Quality = 0;
+    swapChainDesc.BufferUsage = mConfig.usage;
+    swapChainDesc.BufferCount = mConfig.bufferCount;
+    swapChainDesc.Scaling = DXGI_SCALING_STRETCH;
+    swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD;
+    swapChainDesc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
+    swapChainDesc.Flags = mConfig.swapChainFlags;
+
+    ComPtr<IDXGIFactory2> factory2 = nullptr;
+    DAWN_TRY(CheckHRESULT(device->GetFactory()->QueryInterface(IID_PPV_ARGS(&factory2)),
+                          "Getting IDXGIFactory2"));
+
+    ComPtr<IDXGISwapChain1> swapChain1;
+    switch (GetSurface()->GetType()) {
+        case Surface::Type::WindowsHWND: {
+            DAWN_TRY(CheckHRESULT(
+                factory2->CreateSwapChainForHwnd(device->GetCommandQueue().Get(),
+                                                 static_cast<HWND>(GetSurface()->GetHWND()),
+                                                 &swapChainDesc, nullptr, nullptr, &swapChain1),
+                "Creating the IDXGISwapChain1"));
+            break;
         }
+        case Surface::Type::WindowsCoreWindow: {
+            DAWN_TRY(CheckHRESULT(
+                factory2->CreateSwapChainForCoreWindow(device->GetCommandQueue().Get(),
+                                                       GetSurface()->GetCoreWindow(),
+                                                       &swapChainDesc, nullptr, &swapChain1),
+                "Creating the IDXGISwapChain1"));
+            break;
+        }
+        case Surface::Type::WindowsSwapChainPanel: {
+            DAWN_TRY(CheckHRESULT(
+                factory2->CreateSwapChainForComposition(device->GetCommandQueue().Get(),
+                                                        &swapChainDesc, nullptr, &swapChain1),
+                "Creating the IDXGISwapChain1"));
+            ComPtr<ISwapChainPanelNative> swapChainPanelNative;
+            DAWN_TRY(CheckHRESULT(GetSurface()->GetSwapChainPanel()->QueryInterface(
+                                      IID_PPV_ARGS(&swapChainPanelNative)),
+                                  "Getting ISwapChainPanelNative"));
+            DAWN_TRY(CheckHRESULT(swapChainPanelNative->SetSwapChain(swapChain1.Get()),
+                                  "Setting SwapChain"));
+            break;
+        }
+        default:
+            UNREACHABLE();
+    }
 
-        // Record that "new" is the last time the buffer has been used.
-        DAWN_TRY(device->NextSerial());
-        mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
+    DAWN_TRY(CheckHRESULT(swapChain1.As(&mDXGISwapChain), "Gettting IDXGISwapChain1"));
 
+    return CollectSwapChainBuffers();
+}
+
+MaybeError SwapChain::CollectSwapChainBuffers() {
+    ASSERT(mDXGISwapChain != nullptr);
+    ASSERT(mBuffers.empty());
+
+    mBuffers.resize(mConfig.bufferCount);
+    for (uint32_t i = 0; i < mConfig.bufferCount; i++) {
+        DAWN_TRY(CheckHRESULT(mDXGISwapChain->GetBuffer(i, IID_PPV_ARGS(&mBuffers[i])),
+                              "Getting IDXGISwapChain buffer"));
+    }
+
+    // Pretend all the buffers were last used at the beginning of time.
+    mBufferLastUsedSerials.resize(mConfig.bufferCount, ExecutionSerial(0));
+    return {};
+}
+
+MaybeError SwapChain::PresentImpl() {
+    Device* device = ToBackend(GetDevice());
+
+    // Transition the texture to the present state as required by IDXGISwapChain1::Present()
+    // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+    // presentable texture to present at the end of submits that use them.
+    CommandRecordingContext* commandContext;
+    DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+    mApiTexture->TrackUsageAndTransitionNow(commandContext, kPresentTextureUsage,
+                                            mApiTexture->GetAllSubresources());
+    DAWN_TRY(device->ExecutePendingCommandContext());
+
+    // Do the actual present. DXGI_STATUS_OCCLUDED is a valid return value that's just a
+    // message to the application that it could stop rendering.
+    HRESULT presentResult = mDXGISwapChain->Present(PresentModeToSwapInterval(GetPresentMode()), 0);
+    if (presentResult != DXGI_STATUS_OCCLUDED) {
+        DAWN_TRY(CheckHRESULT(presentResult, "IDXGISwapChain::Present"));
+    }
+
+    // Record that "new" is the last time the buffer has been used.
+    DAWN_TRY(device->NextSerial());
+    mBufferLastUsedSerials[mCurrentBuffer] = device->GetPendingCommandSerial();
+
+    mApiTexture->APIDestroy();
+    mApiTexture = nullptr;
+
+    return {};
+}
+
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+    Device* device = ToBackend(GetDevice());
+
+    // Synchronously wait until previous operations on the next swapchain buffer are finished.
+    // This is the logic that performs frame pacing.
+    // TODO(crbug.com/dawn/269): Consider whether this should  be lifted for Mailbox so that
+    // there is not frame pacing.
+    mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
+    DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
+
+    // Create the API side objects for this use of the swapchain's buffer.
+    TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
+    DAWN_TRY_ASSIGN(mApiTexture,
+                    Texture::Create(ToBackend(GetDevice()), &descriptor, mBuffers[mCurrentBuffer]));
+    return mApiTexture->CreateView();
+}
+
+MaybeError SwapChain::DetachAndWaitForDeallocation() {
+    DetachFromSurface();
+
+    // DetachFromSurface calls Texture->Destroy that enqueues the D3D12 resource in a
+    // SerialQueue with the current "pending serial" so that we don't destroy the texture
+    // before it is finished being used. Flush the commands and wait for that serial to be
+    // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
+    Device* device = ToBackend(GetDevice());
+    DAWN_TRY(device->NextSerial());
+    DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
+    return device->TickImpl();
+}
+
+void SwapChain::DetachFromSurfaceImpl() {
+    if (mApiTexture != nullptr) {
         mApiTexture->APIDestroy();
         mApiTexture = nullptr;
-
-        return {};
     }
 
-    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
-        Device* device = ToBackend(GetDevice());
-
-        // Synchronously wait until previous operations on the next swapchain buffer are finished.
-        // This is the logic that performs frame pacing.
-        // TODO(crbug.com/dawn/269): Consider whether this should  be lifted for Mailbox so that
-        // there is not frame pacing.
-        mCurrentBuffer = mDXGISwapChain->GetCurrentBackBufferIndex();
-        DAWN_TRY(device->WaitForSerial(mBufferLastUsedSerials[mCurrentBuffer]));
-
-        // Create the API side objects for this use of the swapchain's buffer.
-        TextureDescriptor descriptor = GetSwapChainBaseTextureDescriptor(this);
-        DAWN_TRY_ASSIGN(mApiTexture, Texture::Create(ToBackend(GetDevice()), &descriptor,
-                                                     mBuffers[mCurrentBuffer]));
-        return mApiTexture->CreateView();
-    }
-
-    MaybeError SwapChain::DetachAndWaitForDeallocation() {
-        DetachFromSurface();
-
-        // DetachFromSurface calls Texture->Destroy that enqueues the D3D12 resource in a
-        // SerialQueue with the current "pending serial" so that we don't destroy the texture
-        // before it is finished being used. Flush the commands and wait for that serial to be
-        // passed, then Tick the device to make sure the reference to the D3D12 texture is removed.
-        Device* device = ToBackend(GetDevice());
-        DAWN_TRY(device->NextSerial());
-        DAWN_TRY(device->WaitForSerial(device->GetLastSubmittedCommandSerial()));
-        return device->TickImpl();
-    }
-
-    void SwapChain::DetachFromSurfaceImpl() {
-        if (mApiTexture != nullptr) {
-            mApiTexture->APIDestroy();
-            mApiTexture = nullptr;
-        }
-
-        mDXGISwapChain = nullptr;
-        mBuffers.clear();
-    }
+    mDXGISwapChain = nullptr;
+    mBuffers.clear();
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/SwapChainD3D12.h b/src/dawn/native/d3d12/SwapChainD3D12.h
index dd59c18..53ad519 100644
--- a/src/dawn/native/d3d12/SwapChainD3D12.h
+++ b/src/dawn/native/d3d12/SwapChainD3D12.h
@@ -24,67 +24,67 @@
 
 namespace dawn::native::d3d12 {
 
-    class Device;
-    class Texture;
+class Device;
+class Texture;
 
-    class OldSwapChain final : public OldSwapChainBase {
-      public:
-        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+class OldSwapChain final : public OldSwapChainBase {
+  public:
+    static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
 
-      protected:
-        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
-        ~OldSwapChain() override;
-        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
-        MaybeError OnBeforePresent(TextureViewBase* view) override;
+  protected:
+    OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+    ~OldSwapChain() override;
+    TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+    MaybeError OnBeforePresent(TextureViewBase* view) override;
 
-        wgpu::TextureUsage mTextureUsage;
+    wgpu::TextureUsage mTextureUsage;
+};
+
+class SwapChain final : public NewSwapChainBase {
+  public:
+    static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor);
+
+  private:
+    ~SwapChain() override;
+
+    void DestroyImpl() override;
+
+    using NewSwapChainBase::NewSwapChainBase;
+    MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+
+    struct Config {
+        // Information that's passed to the D3D12 swapchain creation call.
+        UINT bufferCount;
+        UINT swapChainFlags;
+        DXGI_FORMAT format;
+        DXGI_USAGE usage;
     };
 
-    class SwapChain final : public NewSwapChainBase {
-      public:
-        static ResultOrError<Ref<SwapChain>> Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor);
+    // NewSwapChainBase implementation
+    MaybeError PresentImpl() override;
+    ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+    void DetachFromSurfaceImpl() override;
 
-      private:
-        ~SwapChain() override;
+    // Does the swapchain initialization steps assuming there is nothing we can reuse.
+    MaybeError InitializeSwapChainFromScratch();
+    // Does the swapchain initialization step of gathering the buffers.
+    MaybeError CollectSwapChainBuffers();
+    // Calls DetachFromSurface but also synchronously waits until all references to the
+    // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
+    MaybeError DetachAndWaitForDeallocation();
 
-        void DestroyImpl() override;
+    Config mConfig;
 
-        using NewSwapChainBase::NewSwapChainBase;
-        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+    ComPtr<IDXGISwapChain3> mDXGISwapChain;
+    std::vector<ComPtr<ID3D12Resource>> mBuffers;
+    std::vector<ExecutionSerial> mBufferLastUsedSerials;
+    uint32_t mCurrentBuffer = 0;
 
-        struct Config {
-            // Information that's passed to the D3D12 swapchain creation call.
-            UINT bufferCount;
-            UINT swapChainFlags;
-            DXGI_FORMAT format;
-            DXGI_USAGE usage;
-        };
-
-        // NewSwapChainBase implementation
-        MaybeError PresentImpl() override;
-        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
-        void DetachFromSurfaceImpl() override;
-
-        // Does the swapchain initialization steps assuming there is nothing we can reuse.
-        MaybeError InitializeSwapChainFromScratch();
-        // Does the swapchain initialization step of gathering the buffers.
-        MaybeError CollectSwapChainBuffers();
-        // Calls DetachFromSurface but also synchronously waits until all references to the
-        // swapchain and buffers are removed, as that's a constraint for some DXGI operations.
-        MaybeError DetachAndWaitForDeallocation();
-
-        Config mConfig;
-
-        ComPtr<IDXGISwapChain3> mDXGISwapChain;
-        std::vector<ComPtr<ID3D12Resource>> mBuffers;
-        std::vector<ExecutionSerial> mBufferLastUsedSerials;
-        uint32_t mCurrentBuffer = 0;
-
-        Ref<Texture> mApiTexture;
-    };
+    Ref<Texture> mApiTexture;
+};
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/d3d12/TextureCopySplitter.cpp b/src/dawn/native/d3d12/TextureCopySplitter.cpp
index 83e55fd..7dac662 100644
--- a/src/dawn/native/d3d12/TextureCopySplitter.cpp
+++ b/src/dawn/native/d3d12/TextureCopySplitter.cpp
@@ -20,520 +20,518 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
-        Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
-                                     uint32_t offset,
-                                     uint32_t bytesPerRow) {
-            ASSERT(bytesPerRow != 0);
-            uint32_t byteOffsetX = offset % bytesPerRow;
-            uint32_t byteOffsetY = offset - byteOffsetX;
+namespace {
+Origin3D ComputeTexelOffsets(const TexelBlockInfo& blockInfo,
+                             uint32_t offset,
+                             uint32_t bytesPerRow) {
+    ASSERT(bytesPerRow != 0);
+    uint32_t byteOffsetX = offset % bytesPerRow;
+    uint32_t byteOffsetY = offset - byteOffsetX;
 
-            return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
-                    byteOffsetY / bytesPerRow * blockInfo.height, 0};
-        }
+    return {byteOffsetX / blockInfo.byteSize * blockInfo.width,
+            byteOffsetY / bytesPerRow * blockInfo.height, 0};
+}
 
-        uint64_t OffsetToFirstCopiedTexel(const TexelBlockInfo& blockInfo,
-                                          uint32_t bytesPerRow,
-                                          uint64_t alignedOffset,
-                                          Origin3D bufferOffset) {
-            ASSERT(bufferOffset.z == 0);
-            return alignedOffset + bufferOffset.x * blockInfo.byteSize / blockInfo.width +
-                   bufferOffset.y * bytesPerRow / blockInfo.height;
-        }
+uint64_t OffsetToFirstCopiedTexel(const TexelBlockInfo& blockInfo,
+                                  uint32_t bytesPerRow,
+                                  uint64_t alignedOffset,
+                                  Origin3D bufferOffset) {
+    ASSERT(bufferOffset.z == 0);
+    return alignedOffset + bufferOffset.x * blockInfo.byteSize / blockInfo.width +
+           bufferOffset.y * bytesPerRow / blockInfo.height;
+}
 
-        uint64_t AlignDownForDataPlacement(uint32_t offset) {
-            return offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
-        }
-    }  // namespace
+uint64_t AlignDownForDataPlacement(uint32_t offset) {
+    return offset & ~static_cast<uint64_t>(D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT - 1);
+}
+}  // namespace
 
-    TextureCopySubresource::CopyInfo* TextureCopySubresource::AddCopy() {
-        ASSERT(this->count < kMaxTextureCopyRegions);
-        return &this->copies[this->count++];
-    }
+TextureCopySubresource::CopyInfo* TextureCopySubresource::AddCopy() {
+    ASSERT(this->count < kMaxTextureCopyRegions);
+    return &this->copies[this->count++];
+}
 
-    TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
-                                                           Extent3D copySize,
-                                                           const TexelBlockInfo& blockInfo,
-                                                           uint64_t offset,
-                                                           uint32_t bytesPerRow) {
-        TextureCopySubresource copy;
+TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
+                                                       Extent3D copySize,
+                                                       const TexelBlockInfo& blockInfo,
+                                                       uint64_t offset,
+                                                       uint32_t bytesPerRow) {
+    TextureCopySubresource copy;
 
-        ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+    ASSERT(bytesPerRow % blockInfo.byteSize == 0);
 
-        // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
-        // preceding our data.
-        uint64_t alignedOffset = AlignDownForDataPlacement(offset);
+    // The copies must be 512-aligned. To do this, we calculate the first 512-aligned address
+    // preceding our data.
+    uint64_t alignedOffset = AlignDownForDataPlacement(offset);
 
-        // If the provided offset to the data was already 512-aligned, we can simply copy the data
-        // without further translation.
-        if (offset == alignedOffset) {
-            copy.count = 1;
-
-            copy.copies[0].alignedOffset = alignedOffset;
-            copy.copies[0].textureOffset = origin;
-            copy.copies[0].copySize = copySize;
-            copy.copies[0].bufferOffset = {0, 0, 0};
-            copy.copies[0].bufferSize = copySize;
-
-            return copy;
-        }
-
-        ASSERT(alignedOffset < offset);
-        ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
-
-        // We must reinterpret our aligned offset into X and Y offsets with respect to the row
-        // pitch.
-        //
-        // You can visualize the data in the buffer like this:
-        // |-----------------------++++++++++++++++++++++++++++++++|
-        // ^ 512-aligned address   ^ Aligned offset               ^ End of copy data
-        //
-        // Now when you consider the row pitch, you can visualize the data like this:
-        // |~~~~~~~~~~~~~~~~|
-        // |~~~~~+++++++++++|
-        // |++++++++++++++++|
-        // |+++++~~~~~~~~~~~|
-        // |<---row pitch-->|
-        //
-        // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
-        // |YYYYYYYYYYYYYYYY|
-        // |XXXXXX++++++++++|
-        // |++++++++++++++++|
-        // |++++++~~~~~~~~~~|
-        // |<---row pitch-->|
-        Origin3D texelOffset = ComputeTexelOffsets(
-            blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);
-
-        ASSERT(texelOffset.y <= blockInfo.height);
-        ASSERT(texelOffset.z == 0);
-
-        uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
-        uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
-        if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
-            // The region's rows fit inside the bytes per row. In this case, extend the width of the
-            // PlacedFootprint and copy the buffer with an offset location
-            //  |<------------- bytes per row ------------->|
-            //
-            //  |-------------------------------------------|
-            //  |                                           |
-            //  |                 +++++++++++++++++~~~~~~~~~|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++         |
-            //  |-------------------------------------------|
-
-            // Copy 0:
-            //  |----------------------------------|
-            //  |                                  |
-            //  |                 +++++++++++++++++|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
-            //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
-            //  |----------------------------------|
-
-            copy.count = 1;
-
-            copy.copies[0].alignedOffset = alignedOffset;
-            copy.copies[0].textureOffset = origin;
-            copy.copies[0].copySize = copySize;
-            copy.copies[0].bufferOffset = texelOffset;
-
-            copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
-            copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
-            copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
-            return copy;
-        }
-
-        // The region's rows straddle the bytes per row. Split the copy into two copies
-        //  |<------------- bytes per row ------------->|
-        //
-        //  |-------------------------------------------|
-        //  |                                           |
-        //  |                                   ++++++++|
-        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |+++++++++                                  |
-        //  |-------------------------------------------|
-
-        //  Copy 0:
-        //  |-------------------------------------------|
-        //  |                                           |
-        //  |                                   ++++++++|
-        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
-        //  |-------------------------------------------|
-
-        //  Copy 1:
-        //  |---------|
-        //  |         |
-        //  |         |
-        //  |+++++++++|
-        //  |+++++++++|
-        //  |+++++++++|
-        //  |+++++++++|
-        //  |+++++++++|
-        //  |---------|
-
-        copy.count = 2;
+    // If the provided offset to the data was already 512-aligned, we can simply copy the data
+    // without further translation.
+    if (offset == alignedOffset) {
+        copy.count = 1;
 
         copy.copies[0].alignedOffset = alignedOffset;
         copy.copies[0].textureOffset = origin;
-
-        ASSERT(bytesPerRow > byteOffsetInRowPitch);
-        uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
-        copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
-        copy.copies[0].copySize.height = copySize.height;
-        copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
-        copy.copies[0].bufferOffset = texelOffset;
-        copy.copies[0].bufferSize.width = texelsPerRow;
-        copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
-        copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
-        uint64_t offsetForCopy1 =
-            offset + copy.copies[0].copySize.width / blockInfo.width * blockInfo.byteSize;
-        uint64_t alignedOffsetForCopy1 = AlignDownForDataPlacement(offsetForCopy1);
-        Origin3D texelOffsetForCopy1 = ComputeTexelOffsets(
-            blockInfo, static_cast<uint32_t>(offsetForCopy1 - alignedOffsetForCopy1), bytesPerRow);
-
-        ASSERT(texelOffsetForCopy1.y <= blockInfo.height);
-        ASSERT(texelOffsetForCopy1.z == 0);
-
-        copy.copies[1].alignedOffset = alignedOffsetForCopy1;
-        copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
-        copy.copies[1].textureOffset.y = origin.y;
-        copy.copies[1].textureOffset.z = origin.z;
-
-        ASSERT(copySize.width > copy.copies[0].copySize.width);
-        copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
-        copy.copies[1].copySize.height = copySize.height;
-        copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
-
-        copy.copies[1].bufferOffset = texelOffsetForCopy1;
-        copy.copies[1].bufferSize.width = copy.copies[1].copySize.width + texelOffsetForCopy1.x;
-        copy.copies[1].bufferSize.height = copySize.height + texelOffsetForCopy1.y;
-        copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+        copy.copies[0].copySize = copySize;
+        copy.copies[0].bufferOffset = {0, 0, 0};
+        copy.copies[0].bufferSize = copySize;
 
         return copy;
     }
 
-    TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
-                                                 Extent3D copySize,
-                                                 const TexelBlockInfo& blockInfo,
-                                                 uint64_t offset,
-                                                 uint32_t bytesPerRow,
-                                                 uint32_t rowsPerImage) {
-        TextureCopySplits copies;
+    ASSERT(alignedOffset < offset);
+    ASSERT(offset - alignedOffset < D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT);
 
-        const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+    // We must reinterpret our aligned offset into X and Y offsets with respect to the row
+    // pitch.
+    //
+    // You can visualize the data in the buffer like this:
+    // |-----------------------++++++++++++++++++++++++++++++++|
+    // ^ 512-aligned address   ^ Aligned offset               ^ End of copy data
+    //
+    // Now when you consider the row pitch, you can visualize the data like this:
+    // |~~~~~~~~~~~~~~~~|
+    // |~~~~~+++++++++++|
+    // |++++++++++++++++|
+    // |+++++~~~~~~~~~~~|
+    // |<---row pitch-->|
+    //
+    // The X and Y offsets calculated in ComputeTexelOffsets can be visualized like this:
+    // |YYYYYYYYYYYYYYYY|
+    // |XXXXXX++++++++++|
+    // |++++++++++++++++|
+    // |++++++~~~~~~~~~~|
+    // |<---row pitch-->|
+    Origin3D texelOffset =
+        ComputeTexelOffsets(blockInfo, static_cast<uint32_t>(offset - alignedOffset), bytesPerRow);
 
-        // The function Compute2DTextureCopySubresource() decides how to split the copy based on:
-        // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
-        // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
-        // Each layer of a 2D array might need to be split, but because of the WebGPU
-        // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) layers
-        // will be at an offset multiple of 512 of each other, which means they will all result in
-        // the same 2D split. Thus we can just compute the copy splits for the first and second
-        // layers, and reuse them for the remaining layers by adding the related offset of each
-        // layer. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
-        // share the same copy split, so in this situation we just need to compute copy split once
-        // and reuse it for all the layers.
-        Extent3D copyOneLayerSize = copySize;
-        Origin3D copyFirstLayerOrigin = origin;
-        copyOneLayerSize.depthOrArrayLayers = 1;
-        copyFirstLayerOrigin.z = 0;
+    ASSERT(texelOffset.y <= blockInfo.height);
+    ASSERT(texelOffset.z == 0);
 
-        copies.copySubresources[0] = Compute2DTextureCopySubresource(
-            copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow);
+    uint32_t copyBytesPerRowPitch = copySize.width / blockInfo.width * blockInfo.byteSize;
+    uint32_t byteOffsetInRowPitch = texelOffset.x / blockInfo.width * blockInfo.byteSize;
+    if (copyBytesPerRowPitch + byteOffsetInRowPitch <= bytesPerRow) {
+        // The region's rows fit inside the bytes per row. In this case, extend the width of the
+        // PlacedFootprint and copy the buffer with an offset location
+        //  |<------------- bytes per row ------------->|
+        //
+        //  |-------------------------------------------|
+        //  |                                           |
+        //  |                 +++++++++++++++++~~~~~~~~~|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++~~~~~~~~~|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++         |
+        //  |-------------------------------------------|
 
-        // When the copy only refers one texture 2D array layer,
-        // copies.copySubresources[1] will never be used so we can safely early return here.
-        if (copySize.depthOrArrayLayers == 1) {
-            return copies;
-        }
+        // Copy 0:
+        //  |----------------------------------|
+        //  |                                  |
+        //  |                 +++++++++++++++++|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+        //  |~~~~~~~~~~~~~~~~~+++++++++++++++++|
+        //  |----------------------------------|
 
-        if (bytesPerLayer % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
-            copies.copySubresources[1] = copies.copySubresources[0];
-            copies.copySubresources[1].copies[0].alignedOffset += bytesPerLayer;
-            copies.copySubresources[1].copies[1].alignedOffset += bytesPerLayer;
-        } else {
-            const uint64_t bufferOffsetNextLayer = offset + bytesPerLayer;
-            copies.copySubresources[1] =
-                Compute2DTextureCopySubresource(copyFirstLayerOrigin, copyOneLayerSize, blockInfo,
-                                                bufferOffsetNextLayer, bytesPerRow);
-        }
+        copy.count = 1;
 
+        copy.copies[0].alignedOffset = alignedOffset;
+        copy.copies[0].textureOffset = origin;
+        copy.copies[0].copySize = copySize;
+        copy.copies[0].bufferOffset = texelOffset;
+
+        copy.copies[0].bufferSize.width = copySize.width + texelOffset.x;
+        copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
+        copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+        return copy;
+    }
+
+    // The region's rows straddle the bytes per row. Split the copy into two copies
+    //  |<------------- bytes per row ------------->|
+    //
+    //  |-------------------------------------------|
+    //  |                                           |
+    //  |                                   ++++++++|
+    //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |+++++++++~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |+++++++++                                  |
+    //  |-------------------------------------------|
+
+    //  Copy 0:
+    //  |-------------------------------------------|
+    //  |                                           |
+    //  |                                   ++++++++|
+    //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~++++++++|
+    //  |-------------------------------------------|
+
+    //  Copy 1:
+    //  |---------|
+    //  |         |
+    //  |         |
+    //  |+++++++++|
+    //  |+++++++++|
+    //  |+++++++++|
+    //  |+++++++++|
+    //  |+++++++++|
+    //  |---------|
+
+    copy.count = 2;
+
+    copy.copies[0].alignedOffset = alignedOffset;
+    copy.copies[0].textureOffset = origin;
+
+    ASSERT(bytesPerRow > byteOffsetInRowPitch);
+    uint32_t texelsPerRow = bytesPerRow / blockInfo.byteSize * blockInfo.width;
+    copy.copies[0].copySize.width = texelsPerRow - texelOffset.x;
+    copy.copies[0].copySize.height = copySize.height;
+    copy.copies[0].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+    copy.copies[0].bufferOffset = texelOffset;
+    copy.copies[0].bufferSize.width = texelsPerRow;
+    copy.copies[0].bufferSize.height = copySize.height + texelOffset.y;
+    copy.copies[0].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+    uint64_t offsetForCopy1 =
+        offset + copy.copies[0].copySize.width / blockInfo.width * blockInfo.byteSize;
+    uint64_t alignedOffsetForCopy1 = AlignDownForDataPlacement(offsetForCopy1);
+    Origin3D texelOffsetForCopy1 = ComputeTexelOffsets(
+        blockInfo, static_cast<uint32_t>(offsetForCopy1 - alignedOffsetForCopy1), bytesPerRow);
+
+    ASSERT(texelOffsetForCopy1.y <= blockInfo.height);
+    ASSERT(texelOffsetForCopy1.z == 0);
+
+    copy.copies[1].alignedOffset = alignedOffsetForCopy1;
+    copy.copies[1].textureOffset.x = origin.x + copy.copies[0].copySize.width;
+    copy.copies[1].textureOffset.y = origin.y;
+    copy.copies[1].textureOffset.z = origin.z;
+
+    ASSERT(copySize.width > copy.copies[0].copySize.width);
+    copy.copies[1].copySize.width = copySize.width - copy.copies[0].copySize.width;
+    copy.copies[1].copySize.height = copySize.height;
+    copy.copies[1].copySize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+    copy.copies[1].bufferOffset = texelOffsetForCopy1;
+    copy.copies[1].bufferSize.width = copy.copies[1].copySize.width + texelOffsetForCopy1.x;
+    copy.copies[1].bufferSize.height = copySize.height + texelOffsetForCopy1.y;
+    copy.copies[1].bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers;
+
+    return copy;
+}
+
+TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+                                             Extent3D copySize,
+                                             const TexelBlockInfo& blockInfo,
+                                             uint64_t offset,
+                                             uint32_t bytesPerRow,
+                                             uint32_t rowsPerImage) {
+    TextureCopySplits copies;
+
+    const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+
+    // The function Compute2DTextureCopySubresource() decides how to split the copy based on:
+    // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+    // - the alignment of the buffer offset with D3D12_TEXTURE_DATA_PITCH_ALIGNMENT (256)
+    // Each layer of a 2D array might need to be split, but because of the WebGPU
+    // constraint that "bytesPerRow" must be a multiple of 256, all odd (resp. all even) layers
+    // will be at an offset multiple of 512 of each other, which means they will all result in
+    // the same 2D split. Thus we can just compute the copy splits for the first and second
+    // layers, and reuse them for the remaining layers by adding the related offset of each
+    // layer. Moreover, if "rowsPerImage" is even, both the first and second copy layers can
+    // share the same copy split, so in this situation we just need to compute copy split once
+    // and reuse it for all the layers.
+    Extent3D copyOneLayerSize = copySize;
+    Origin3D copyFirstLayerOrigin = origin;
+    copyOneLayerSize.depthOrArrayLayers = 1;
+    copyFirstLayerOrigin.z = 0;
+
+    copies.copySubresources[0] = Compute2DTextureCopySubresource(
+        copyFirstLayerOrigin, copyOneLayerSize, blockInfo, offset, bytesPerRow);
+
+    // When the copy only refers one texture 2D array layer,
+    // copies.copySubresources[1] will never be used so we can safely early return here.
+    if (copySize.depthOrArrayLayers == 1) {
         return copies;
     }
 
-    void Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
-        Origin3D origin,
-        Extent3D copySize,
-        const TexelBlockInfo& blockInfo,
-        uint32_t bytesPerRow,
-        uint32_t rowsPerImage,
-        TextureCopySubresource& copy,
-        uint32_t i) {
-        // Let's assign data and show why copy region generated by ComputeTextureCopySubresource
-        // is incorrect if there is an empty row at the beginning of the copy block.
-        // Assuming that bytesPerRow is 256 and we are doing a B2T copy, and copy size is {width: 2,
-        // height: 4, depthOrArrayLayers: 3}. Then the data layout in buffer is demonstrated
-        // as below:
-        //
-        //               |<----- bytes per row ------>|
-        //
-        //               |----------------------------|
-        //  row (N - 1)  |                            |
-        //  row N        |                 ++~~~~~~~~~|
-        //  row (N + 1)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 2)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 3)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 4)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 5)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 6)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 7)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 8)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 9)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 10) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
-        //  row (N + 11) |~~~~~~~~~~~~~~~~~++         |
-        //               |----------------------------|
-
-        // The copy we mean to do is the following:
-        //
-        //   - image 0: row N to row (N + 3),
-        //   - image 1: row (N + 4) to row (N + 7),
-        //   - image 2: row (N + 8) to row (N + 11).
-        //
-        // Note that alignedOffset is at the beginning of row (N - 1), while buffer offset makes
-        // the copy start at row N. Row (N - 1) is the empty row between alignedOffset and offset.
-        //
-        // The 2D copy region of image 0 we received from Compute2DTextureCopySubresource() is
-        // the following:
-        //
-        //              |-------------------|
-        //  row (N - 1) |                   |
-        //  row N       |                 ++|
-        //  row (N + 1) |~~~~~~~~~~~~~~~~~++|
-        //  row (N + 2) |~~~~~~~~~~~~~~~~~++|
-        //  row (N + 3) |~~~~~~~~~~~~~~~~~++|
-        //              |-------------------|
-        //
-        // However, if we simply expand the copy region of image 0 to all depth ranges of a 3D
-        // texture, we will copy 5 rows every time, and every first row of each slice will be
-        // skipped. As a result, the copied data will be:
-        //
-        //   - image 0: row N to row (N + 3), which is correct. Row (N - 1) is skipped.
-        //   - image 1: row (N + 5) to row (N + 8) because row (N + 4) is skipped. It is incorrect.
-        //
-        // Likewise, all other image followed will be incorrect because we wrongly keep skipping
-        // one row for each depth slice.
-        //
-        // Solution: split the copy region to two copies: copy 3 (rowsPerImage - 1) rows in and
-        // expand to all depth slices in the first copy. 3 rows + one skipped rows = 4 rows, which
-        // equals to rowsPerImage. Then copy the last row in the second copy. However, the copy
-        // block of the last row of the last image may out-of-bound (see the details below), so
-        // we need an extra copy for the very last row.
-
-        // Copy 0: copy 3 rows, not 4 rows.
-        //                _____________________
-        //               /                    /|
-        //              /                    / |
-        //              |-------------------|  |
-        //  row (N - 1) |                   |  |
-        //  row N       |                 ++|  |
-        //  row (N + 1) |~~~~~~~~~~~~~~~~~++| /
-        //  row (N + 2) |~~~~~~~~~~~~~~~~~++|/
-        //              |-------------------|
-
-        // Copy 1: move down two rows and copy the last row on image 0, and expand to
-        // copySize.depthOrArrayLayers - 1 depth slices. Note that if we expand it to all depth
-        // slices, the last copy block will be row (N + 9) to row (N + 12). Row (N + 11) might
-        // be the last row of the entire buffer. Then row (N + 12) will be out-of-bound.
-        //                _____________________
-        //               /                    /|
-        //              /                    / |
-        //              |-------------------|  |
-        //  row (N + 1) |                   |  |
-        //  row (N + 2) |                   |  |
-        //  row (N + 3) |                 ++| /
-        //  row (N + 4) |~~~~~~~~~~~~~~~~~~~|/
-        //              |-------------------|
-        //
-        //  copy 2: copy the last row of the last image.
-        //              |-------------------|
-        //  row (N + 11)|                 ++|
-        //              |-------------------|
-
-        // Copy 0: copy copySize.height - 1 rows
-        TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
-        copy0.copySize.height = copySize.height - blockInfo.height;
-        copy0.bufferSize.height = rowsPerImage * blockInfo.height;  // rowsPerImageInTexels
-
-        // Copy 1: move down 2 rows and copy the last row on image 0, and expand to all depth slices
-        // but the last one.
-        TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
-        *copy1 = copy0;
-        copy1->alignedOffset += 2 * bytesPerRow;
-        copy1->textureOffset.y += copySize.height - blockInfo.height;
-        // Offset two rows from the copy height for the bufferOffset (See the figure above):
-        //   - one for the row we advanced in the buffer: row (N + 4).
-        //   - one for the last row we want to copy: row (N + 3) itself.
-        copy1->bufferOffset.y = copySize.height - 2 * blockInfo.height;
-        copy1->copySize.height = blockInfo.height;
-        copy1->copySize.depthOrArrayLayers--;
-        copy1->bufferSize.depthOrArrayLayers--;
-
-        // Copy 2: copy the last row of the last image.
-        uint64_t offsetForCopy0 = OffsetToFirstCopiedTexel(blockInfo, bytesPerRow,
-                                                           copy0.alignedOffset, copy0.bufferOffset);
-        uint64_t offsetForLastRowOfLastImage =
-            offsetForCopy0 + bytesPerRow * (copy0.copySize.height +
-                                            rowsPerImage * (copySize.depthOrArrayLayers - 1));
-        uint64_t alignedOffsetForLastRowOfLastImage =
-            AlignDownForDataPlacement(offsetForLastRowOfLastImage);
-        Origin3D texelOffsetForLastRowOfLastImage = ComputeTexelOffsets(
-            blockInfo,
-            static_cast<uint32_t>(offsetForLastRowOfLastImage - alignedOffsetForLastRowOfLastImage),
-            bytesPerRow);
-
-        TextureCopySubresource::CopyInfo* copy2 = copy.AddCopy();
-        copy2->alignedOffset = alignedOffsetForLastRowOfLastImage;
-        copy2->textureOffset = copy1->textureOffset;
-        copy2->textureOffset.z = origin.z + copySize.depthOrArrayLayers - 1;
-        copy2->copySize = copy1->copySize;
-        copy2->copySize.depthOrArrayLayers = 1;
-        copy2->bufferOffset = texelOffsetForLastRowOfLastImage;
-        copy2->bufferSize.width = copy1->bufferSize.width;
-        ASSERT(copy2->copySize.height == 1);
-        copy2->bufferSize.height = copy2->bufferOffset.y + copy2->copySize.height;
-        copy2->bufferSize.depthOrArrayLayers = 1;
+    if (bytesPerLayer % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0) {
+        copies.copySubresources[1] = copies.copySubresources[0];
+        copies.copySubresources[1].copies[0].alignedOffset += bytesPerLayer;
+        copies.copySubresources[1].copies[1].alignedOffset += bytesPerLayer;
+    } else {
+        const uint64_t bufferOffsetNextLayer = offset + bytesPerLayer;
+        copies.copySubresources[1] = Compute2DTextureCopySubresource(
+            copyFirstLayerOrigin, copyOneLayerSize, blockInfo, bufferOffsetNextLayer, bytesPerRow);
     }
 
-    void Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(Extent3D copySize,
-                                                                       uint32_t bytesPerRow,
-                                                                       TextureCopySubresource& copy,
-                                                                       uint32_t i) {
-        // Read the comments of Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight() for
-        // the reason why it is incorrect if we simply extend the copy region to all depth slices
-        // when there is an empty first row at the copy region.
-        //
-        // If the copy height is odd, we can use two copies to make it correct:
-        //   - copy 0: only copy the first depth slice. Keep other arguments the same.
-        //   - copy 1: copy all rest depth slices because it will start without an empty row if
-        //     copy height is odd. Odd height + one (empty row) is even. An even row number times
-        //     bytesPerRow (256) will be aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+    return copies;
+}
 
-        // Copy 0: copy the first depth slice (image 0)
-        TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
-        copy0.copySize.depthOrArrayLayers = 1;
-        copy0.bufferSize.depthOrArrayLayers = 1;
+void Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(Origin3D origin,
+                                                                    Extent3D copySize,
+                                                                    const TexelBlockInfo& blockInfo,
+                                                                    uint32_t bytesPerRow,
+                                                                    uint32_t rowsPerImage,
+                                                                    TextureCopySubresource& copy,
+                                                                    uint32_t i) {
+    // Let's assign data and show why copy region generated by ComputeTextureCopySubresource
+    // is incorrect if there is an empty row at the beginning of the copy block.
+    // Assuming that bytesPerRow is 256 and we are doing a B2T copy, and copy size is {width: 2,
+    // height: 4, depthOrArrayLayers: 3}. Then the data layout in buffer is demonstrated
+    // as below:
+    //
+    //               |<----- bytes per row ------>|
+    //
+    //               |----------------------------|
+    //  row (N - 1)  |                            |
+    //  row N        |                 ++~~~~~~~~~|
+    //  row (N + 1)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 2)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 3)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 4)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 5)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 6)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 7)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 8)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 9)  |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 10) |~~~~~~~~~~~~~~~~~++~~~~~~~~~|
+    //  row (N + 11) |~~~~~~~~~~~~~~~~~++         |
+    //               |----------------------------|
 
-        // Copy 1: copy the rest depth slices in one shot
-        TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
-        *copy1 = copy0;
-        ASSERT(copySize.height % 2 == 1);
-        copy1->alignedOffset += (copySize.height + 1) * bytesPerRow;
-        ASSERT(copy1->alignedOffset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0);
-        // textureOffset.z should add one because the first slice has already been copied in copy0.
-        copy1->textureOffset.z++;
-        // bufferOffset.y should be 0 because we skipped the first depth slice and there is no empty
-        // row in this copy region.
-        copy1->bufferOffset.y = 0;
-        copy1->copySize.height = copySize.height;
-        copy1->copySize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
-        copy1->bufferSize.height = copySize.height;
-        copy1->bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
-    }
+    // The copy we mean to do is the following:
+    //
+    //   - image 0: row N to row (N + 3),
+    //   - image 1: row (N + 4) to row (N + 7),
+    //   - image 2: row (N + 8) to row (N + 11).
+    //
+    // Note that alignedOffset is at the beginning of row (N - 1), while buffer offset makes
+    // the copy start at row N. Row (N - 1) is the empty row between alignedOffset and offset.
+    //
+    // The 2D copy region of image 0 we received from Compute2DTextureCopySubresource() is
+    // the following:
+    //
+    //              |-------------------|
+    //  row (N - 1) |                   |
+    //  row N       |                 ++|
+    //  row (N + 1) |~~~~~~~~~~~~~~~~~++|
+    //  row (N + 2) |~~~~~~~~~~~~~~~~~++|
+    //  row (N + 3) |~~~~~~~~~~~~~~~~~++|
+    //              |-------------------|
+    //
+    // However, if we simply expand the copy region of image 0 to all depth ranges of a 3D
+    // texture, we will copy 5 rows every time, and every first row of each slice will be
+    // skipped. As a result, the copied data will be:
+    //
+    //   - image 0: row N to row (N + 3), which is correct. Row (N - 1) is skipped.
+    //   - image 1: row (N + 5) to row (N + 8) because row (N + 4) is skipped. It is incorrect.
+    //
+    // Likewise, all other images that follow will be incorrect because we wrongly keep skipping
+    // one row for each depth slice.
+    //
+    // Solution: split the copy region to two copies: copy 3 (rowsPerImage - 1) rows in and
+    // expand to all depth slices in the first copy. 3 rows + one skipped row = 4 rows, which
+    // equals to rowsPerImage. Then copy the last row in the second copy. However, the copy
+    // block of the last row of the last image may go out-of-bound (see the details below), so
+    // we need an extra copy for the very last row.
 
-    TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
-                                                      Extent3D copySize,
-                                                      const TexelBlockInfo& blockInfo,
-                                                      uint64_t offset,
-                                                      uint32_t bytesPerRow,
-                                                      uint32_t rowsPerImage) {
-        // To compute the copy region(s) for 3D textures, we call Compute2DTextureCopySubresource
-        // and get copy region(s) for the first slice of the copy, then extend to all depth slices
-        // and become a 3D copy. However, this doesn't work as easily as that due to some corner
-        // cases.
-        //
-        // For example, if bufferSize.height is greater than rowsPerImage in the generated copy
-        // region and we simply extend the 2D copy region to all copied depth slices, copied data
-        // will be incorrectly offset for each depth slice except the first one.
-        //
-        // For these special cases, we need to recompute the copy regions for 3D textures via
-        // split the incorrect copy region to a couple more copy regions.
+    // Copy 0: copy 3 rows, not 4 rows.
+    //                _____________________
+    //               /                    /|
+    //              /                    / |
+    //              |-------------------|  |
+    //  row (N - 1) |                   |  |
+    //  row N       |                 ++|  |
+    //  row (N + 1) |~~~~~~~~~~~~~~~~~++| /
+    //  row (N + 2) |~~~~~~~~~~~~~~~~~++|/
+    //              |-------------------|
 
-        // Call Compute2DTextureCopySubresource and get copy regions. This function has already
-        // forwarded "copySize.depthOrArrayLayers" to all depth slices.
-        TextureCopySubresource copySubresource =
-            Compute2DTextureCopySubresource(origin, copySize, blockInfo, offset, bytesPerRow);
+    // Copy 1: move down two rows and copy the last row on image 0, and expand to
+    // copySize.depthOrArrayLayers - 1 depth slices. Note that if we expand it to all depth
+    // slices, the last copy block will be row (N + 9) to row (N + 12). Row (N + 11) might
+    // be the last row of the entire buffer. Then row (N + 12) will be out-of-bound.
+    //                _____________________
+    //               /                    /|
+    //              /                    / |
+    //              |-------------------|  |
+    //  row (N + 1) |                   |  |
+    //  row (N + 2) |                   |  |
+    //  row (N + 3) |                 ++| /
+    //  row (N + 4) |~~~~~~~~~~~~~~~~~~~|/
+    //              |-------------------|
+    //
+    //  copy 2: copy the last row of the last image.
+    //              |-------------------|
+    //  row (N + 11)|                 ++|
+    //              |-------------------|
 
-        ASSERT(copySubresource.count <= 2);
-        // If copySize.depth is 1, we can return copySubresource. Because we don't need to extend
-        // the copy region(s) to other depth slice(s).
-        if (copySize.depthOrArrayLayers == 1) {
-            return copySubresource;
-        }
+    // Copy 0: copy copySize.height - 1 rows
+    TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
+    copy0.copySize.height = copySize.height - blockInfo.height;
+    copy0.bufferSize.height = rowsPerImage * blockInfo.height;  // rowsPerImageInTexels
 
-        uint32_t rowsPerImageInTexels = rowsPerImage * blockInfo.height;
-        // The copy region(s) generated by Compute2DTextureCopySubresource might be incorrect.
-        // However, we may append a couple more copy regions in the for loop below. We don't need
-        // to revise these new added copy regions.
-        uint32_t originalCopyCount = copySubresource.count;
-        for (uint32_t i = 0; i < originalCopyCount; ++i) {
-            // There can be one empty row at most in a copy region.
-            ASSERT(copySubresource.copies[i].bufferSize.height <=
-                   rowsPerImageInTexels + blockInfo.height);
-            Extent3D& bufferSize = copySubresource.copies[i].bufferSize;
+    // Copy 1: move down 2 rows and copy the last row on image 0, and expand to all depth slices
+    // but the last one.
+    TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
+    *copy1 = copy0;
+    copy1->alignedOffset += 2 * bytesPerRow;
+    copy1->textureOffset.y += copySize.height - blockInfo.height;
+    // Offset two rows from the copy height for the bufferOffset (See the figure above):
+    //   - one for the row we advanced in the buffer: row (N + 4).
+    //   - one for the last row we want to copy: row (N + 3) itself.
+    copy1->bufferOffset.y = copySize.height - 2 * blockInfo.height;
+    copy1->copySize.height = blockInfo.height;
+    copy1->copySize.depthOrArrayLayers--;
+    copy1->bufferSize.depthOrArrayLayers--;
 
-            if (bufferSize.height == rowsPerImageInTexels) {
-                // If the copy region's bufferSize.height equals to rowsPerImageInTexels, we can use
-                // this copy region without any modification.
-                continue;
-            }
+    // Copy 2: copy the last row of the last image.
+    uint64_t offsetForCopy0 =
+        OffsetToFirstCopiedTexel(blockInfo, bytesPerRow, copy0.alignedOffset, copy0.bufferOffset);
+    uint64_t offsetForLastRowOfLastImage =
+        offsetForCopy0 +
+        bytesPerRow * (copy0.copySize.height + rowsPerImage * (copySize.depthOrArrayLayers - 1));
+    uint64_t alignedOffsetForLastRowOfLastImage =
+        AlignDownForDataPlacement(offsetForLastRowOfLastImage);
+    Origin3D texelOffsetForLastRowOfLastImage = ComputeTexelOffsets(
+        blockInfo,
+        static_cast<uint32_t>(offsetForLastRowOfLastImage - alignedOffsetForLastRowOfLastImage),
+        bytesPerRow);
 
-            if (bufferSize.height < rowsPerImageInTexels) {
-                // If we are copying multiple depth slices, we should skip rowsPerImageInTexels rows
-                // for each slice even though we only copy partial rows in each slice sometimes.
-                bufferSize.height = rowsPerImageInTexels;
-            } else {
-                // bufferSize.height > rowsPerImageInTexels. There is an empty row in this copy
-                // region due to alignment adjustment.
+    TextureCopySubresource::CopyInfo* copy2 = copy.AddCopy();
+    copy2->alignedOffset = alignedOffsetForLastRowOfLastImage;
+    copy2->textureOffset = copy1->textureOffset;
+    copy2->textureOffset.z = origin.z + copySize.depthOrArrayLayers - 1;
+    copy2->copySize = copy1->copySize;
+    copy2->copySize.depthOrArrayLayers = 1;
+    copy2->bufferOffset = texelOffsetForLastRowOfLastImage;
+    copy2->bufferSize.width = copy1->bufferSize.width;
+    ASSERT(copy2->copySize.height == 1);
+    copy2->bufferSize.height = copy2->bufferOffset.y + copy2->copySize.height;
+    copy2->bufferSize.depthOrArrayLayers = 1;
+}
 
-                // bytesPerRow is definitely 256, and it is definitely a full copy on height.
-                // Otherwise, bufferSize.height wount be greater than rowsPerImageInTexels and
-                // there won't be an empty row at the beginning of this copy region.
-                ASSERT(bytesPerRow == D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
-                ASSERT(copySize.height == rowsPerImageInTexels);
+void Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(Extent3D copySize,
+                                                                   uint32_t bytesPerRow,
+                                                                   TextureCopySubresource& copy,
+                                                                   uint32_t i) {
+    // Read the comments of Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight() for
+    // the reason why it is incorrect if we simply extend the copy region to all depth slices
+    // when there is an empty first row at the copy region.
+    //
+    // If the copy height is odd, we can use two copies to make it correct:
+    //   - copy 0: only copy the first depth slice. Keep other arguments the same.
+    //   - copy 1: copy all rest depth slices because it will start without an empty row if
+    //     copy height is odd. Odd height + one (empty row) is even. An even row number times
+    //     bytesPerRow (256) will be aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
 
-                if (copySize.height % 2 == 0) {
-                    // If copySize.height is even and there is an empty row at the beginning of the
-                    // first slice of the copy region, the offset of all depth slices will never be
-                    // aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) and there is always
-                    // an empty row at each depth slice. We need a totally different approach to
-                    // split the copy region.
-                    Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
-                        origin, copySize, blockInfo, bytesPerRow, rowsPerImage, copySubresource, i);
-                } else {
-                    // If copySize.height is odd and there is an empty row at the beginning of the
-                    // first slice of the copy region, we can split the copy region into two copies:
-                    // copy0 to copy the first slice, copy1 to copy the rest slices because the
-                    // offset of slice 1 is aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
-                    // without an empty row. This is an easier case relative to cases with even copy
-                    // height.
-                    Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(
-                        copySize, bytesPerRow, copySubresource, i);
-                }
-            }
-        }
+    // Copy 0: copy the first depth slice (image 0)
+    TextureCopySubresource::CopyInfo& copy0 = copy.copies[i];
+    copy0.copySize.depthOrArrayLayers = 1;
+    copy0.bufferSize.depthOrArrayLayers = 1;
 
+    // Copy 1: copy the rest depth slices in one shot
+    TextureCopySubresource::CopyInfo* copy1 = copy.AddCopy();
+    *copy1 = copy0;
+    ASSERT(copySize.height % 2 == 1);
+    copy1->alignedOffset += (copySize.height + 1) * bytesPerRow;
+    ASSERT(copy1->alignedOffset % D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT == 0);
+    // textureOffset.z should add one because the first slice has already been copied in copy0.
+    copy1->textureOffset.z++;
+    // bufferOffset.y should be 0 because we skipped the first depth slice and there is no empty
+    // row in this copy region.
+    copy1->bufferOffset.y = 0;
+    copy1->copySize.height = copySize.height;
+    copy1->copySize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+    copy1->bufferSize.height = copySize.height;
+    copy1->bufferSize.depthOrArrayLayers = copySize.depthOrArrayLayers - 1;
+}
+
+TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
+                                                  Extent3D copySize,
+                                                  const TexelBlockInfo& blockInfo,
+                                                  uint64_t offset,
+                                                  uint32_t bytesPerRow,
+                                                  uint32_t rowsPerImage) {
+    // To compute the copy region(s) for 3D textures, we call Compute2DTextureCopySubresource
+    // and get copy region(s) for the first slice of the copy, then extend to all depth slices
+    // and become a 3D copy. However, this doesn't work as easily as that due to some corner
+    // cases.
+    //
+    // For example, if bufferSize.height is greater than rowsPerImage in the generated copy
+    // region and we simply extend the 2D copy region to all copied depth slices, copied data
+    // will be incorrectly offset for each depth slice except the first one.
+    //
+    // For these special cases, we need to recompute the copy regions for 3D textures by
+    // splitting the incorrect copy region into a couple more copy regions.
+
+    // Call Compute2DTextureCopySubresource and get copy regions. This function has already
+    // forwarded "copySize.depthOrArrayLayers" to all depth slices.
+    TextureCopySubresource copySubresource =
+        Compute2DTextureCopySubresource(origin, copySize, blockInfo, offset, bytesPerRow);
+
+    ASSERT(copySubresource.count <= 2);
+    // If copySize.depth is 1, we can return copySubresource. Because we don't need to extend
+    // the copy region(s) to other depth slice(s).
+    if (copySize.depthOrArrayLayers == 1) {
         return copySubresource;
     }
+
+    uint32_t rowsPerImageInTexels = rowsPerImage * blockInfo.height;
+    // The copy region(s) generated by Compute2DTextureCopySubresource might be incorrect.
+    // However, we may append a couple more copy regions in the for loop below. We don't need
+    // to revise these new added copy regions.
+    uint32_t originalCopyCount = copySubresource.count;
+    for (uint32_t i = 0; i < originalCopyCount; ++i) {
+        // There can be one empty row at most in a copy region.
+        ASSERT(copySubresource.copies[i].bufferSize.height <=
+               rowsPerImageInTexels + blockInfo.height);
+        Extent3D& bufferSize = copySubresource.copies[i].bufferSize;
+
+        if (bufferSize.height == rowsPerImageInTexels) {
+            // If the copy region's bufferSize.height equals to rowsPerImageInTexels, we can use
+            // this copy region without any modification.
+            continue;
+        }
+
+        if (bufferSize.height < rowsPerImageInTexels) {
+            // If we are copying multiple depth slices, we should skip rowsPerImageInTexels rows
+            // for each slice even though we only copy partial rows in each slice sometimes.
+            bufferSize.height = rowsPerImageInTexels;
+        } else {
+            // bufferSize.height > rowsPerImageInTexels. There is an empty row in this copy
+            // region due to alignment adjustment.
+
+            // bytesPerRow is definitely 256, and it is definitely a full copy on height.
+            // Otherwise, bufferSize.height won't be greater than rowsPerImageInTexels and
+            // there won't be an empty row at the beginning of this copy region.
+            ASSERT(bytesPerRow == D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
+            ASSERT(copySize.height == rowsPerImageInTexels);
+
+            if (copySize.height % 2 == 0) {
+                // If copySize.height is even and there is an empty row at the beginning of the
+                // first slice of the copy region, the offset of all depth slices will never be
+                // aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512) and there is always
+                // an empty row at each depth slice. We need a totally different approach to
+                // split the copy region.
+                Recompute3DTextureCopyRegionWithEmptyFirstRowAndEvenCopyHeight(
+                    origin, copySize, blockInfo, bytesPerRow, rowsPerImage, copySubresource, i);
+            } else {
+                // If copySize.height is odd and there is an empty row at the beginning of the
+                // first slice of the copy region, we can split the copy region into two copies:
+                // copy0 to copy the first slice, copy1 to copy the rest slices because the
+                // offset of slice 1 is aligned to D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT (512)
+                // without an empty row. This is an easier case relative to cases with even copy
+                // height.
+                Recompute3DTextureCopyRegionWithEmptyFirstRowAndOddCopyHeight(copySize, bytesPerRow,
+                                                                              copySubresource, i);
+            }
+        }
+    }
+
+    return copySubresource;
+}
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/TextureCopySplitter.h b/src/dawn/native/d3d12/TextureCopySplitter.h
index 19b02de..4e60396 100644
--- a/src/dawn/native/d3d12/TextureCopySplitter.h
+++ b/src/dawn/native/d3d12/TextureCopySplitter.h
@@ -21,77 +21,77 @@
 
 namespace dawn::native {
 
-    struct TexelBlockInfo;
+struct TexelBlockInfo;
 
 }  // namespace dawn::native
 
 namespace dawn::native::d3d12 {
 
-    struct TextureCopySubresource {
-        static constexpr unsigned int kMaxTextureCopyRegions = 4;
+struct TextureCopySubresource {
+    static constexpr unsigned int kMaxTextureCopyRegions = 4;
 
-        struct CopyInfo {
-            uint64_t alignedOffset = 0;
-            Origin3D textureOffset;
-            Origin3D bufferOffset;
-            Extent3D bufferSize;
+    struct CopyInfo {
+        uint64_t alignedOffset = 0;
+        Origin3D textureOffset;
+        Origin3D bufferOffset;
+        Extent3D bufferSize;
 
-            Extent3D copySize;
-        };
-
-        CopyInfo* AddCopy();
-
-        uint32_t count = 0;
-        std::array<CopyInfo, kMaxTextureCopyRegions> copies;
+        Extent3D copySize;
     };
 
-    struct TextureCopySplits {
-        static constexpr uint32_t kMaxTextureCopySubresources = 2;
+    CopyInfo* AddCopy();
 
-        std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
-    };
+    uint32_t count = 0;
+    std::array<CopyInfo, kMaxTextureCopyRegions> copies;
+};
 
-    // This function is shared by 2D and 3D texture copy splitter. But it only knows how to handle
-    // 2D non-arrayed textures correctly, and just forwards "copySize.depthOrArrayLayers". See
-    // details in Compute{2D|3D}TextureCopySplits about how we generate copy regions for 2D array
-    // and 3D textures based on this function.
-    // The resulting copies triggered by API like CopyTextureRegion are equivalent to the copy
-    // regions defines by the arguments of TextureCopySubresource returned by this function and its
-    // counterparts. These arguments should strictly conform to particular invariants. Otherwise,
-    // D3D12 driver may report validation errors when we call CopyTextureRegion. Some important
-    // invariants are listed below. For more details
-    // of these invariants, see src/dawn/tests/unittests/d3d12/CopySplitTests.cpp.
-    //   - Inside each copy region: 1) its buffer offset plus copy size should be less than its
-    //     buffer size, 2) its buffer offset on y-axis should be less than copy format's
-    //     blockInfo.height, 3) its buffer offset on z-axis should be 0.
-    //   - Each copy region has an offset (aka alignedOffset) aligned to
-    //     D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
-    //   - The buffer footprint of each copy region should be entirely within the copied buffer,
-    //     which means that the last "texel" of the buffer footprint doesn't go past the end of
-    //     the buffer even though the last "texel" might not be copied.
-    //   - If there are multiple copy regions, each copy region should not overlap with the others.
-    //   - Copy region(s) combined should exactly be equivalent to the texture region to be copied.
-    //   - Every pixel accessed by every copy region should not be out of the bound of the copied
-    //     texture and buffer.
-    TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
-                                                           Extent3D copySize,
-                                                           const TexelBlockInfo& blockInfo,
-                                                           uint64_t offset,
-                                                           uint32_t bytesPerRow);
+struct TextureCopySplits {
+    static constexpr uint32_t kMaxTextureCopySubresources = 2;
 
-    TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
-                                                 Extent3D copySize,
-                                                 const TexelBlockInfo& blockInfo,
-                                                 uint64_t offset,
-                                                 uint32_t bytesPerRow,
-                                                 uint32_t rowsPerImage);
+    std::array<TextureCopySubresource, kMaxTextureCopySubresources> copySubresources;
+};
 
-    TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
-                                                      Extent3D copySize,
-                                                      const TexelBlockInfo& blockInfo,
-                                                      uint64_t offset,
-                                                      uint32_t bytesPerRow,
-                                                      uint32_t rowsPerImage);
+// This function is shared by 2D and 3D texture copy splitter. But it only knows how to handle
+// 2D non-arrayed textures correctly, and just forwards "copySize.depthOrArrayLayers". See
+// details in Compute{2D|3D}TextureCopySplits about how we generate copy regions for 2D array
+// and 3D textures based on this function.
+// The resulting copies triggered by API like CopyTextureRegion are equivalent to the copy
+// regions defined by the arguments of TextureCopySubresource returned by this function and its
+// counterparts. These arguments should strictly conform to particular invariants. Otherwise,
+// D3D12 driver may report validation errors when we call CopyTextureRegion. Some important
+// invariants are listed below. For more details
+// of these invariants, see src/dawn/tests/unittests/d3d12/CopySplitTests.cpp.
+//   - Inside each copy region: 1) its buffer offset plus copy size should be less than its
+//     buffer size, 2) its buffer offset on y-axis should be less than copy format's
+//     blockInfo.height, 3) its buffer offset on z-axis should be 0.
+//   - Each copy region has an offset (aka alignedOffset) aligned to
+//     D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT
+//   - The buffer footprint of each copy region should be entirely within the copied buffer,
+//     which means that the last "texel" of the buffer footprint doesn't go past the end of
+//     the buffer even though the last "texel" might not be copied.
+//   - If there are multiple copy regions, each copy region should not overlap with the others.
+//   - Copy region(s) combined should exactly be equivalent to the texture region to be copied.
+//   - Every pixel accessed by every copy region should not be out of the bound of the copied
+//     texture and buffer.
+TextureCopySubresource Compute2DTextureCopySubresource(Origin3D origin,
+                                                       Extent3D copySize,
+                                                       const TexelBlockInfo& blockInfo,
+                                                       uint64_t offset,
+                                                       uint32_t bytesPerRow);
+
+TextureCopySplits Compute2DTextureCopySplits(Origin3D origin,
+                                             Extent3D copySize,
+                                             const TexelBlockInfo& blockInfo,
+                                             uint64_t offset,
+                                             uint32_t bytesPerRow,
+                                             uint32_t rowsPerImage);
+
+TextureCopySubresource Compute3DTextureCopySplits(Origin3D origin,
+                                                  Extent3D copySize,
+                                                  const TexelBlockInfo& blockInfo,
+                                                  uint64_t offset,
+                                                  uint32_t bytesPerRow,
+                                                  uint32_t rowsPerImage);
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_TEXTURECOPYSPLITTER_H_
diff --git a/src/dawn/native/d3d12/TextureD3D12.cpp b/src/dawn/native/d3d12/TextureD3D12.cpp
index 77edd7f..59e27a5 100644
--- a/src/dawn/native/d3d12/TextureD3D12.cpp
+++ b/src/dawn/native/d3d12/TextureD3D12.cpp
@@ -36,886 +36,872 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
+namespace {
 
-        D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
-            D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
+D3D12_RESOURCE_STATES D3D12TextureUsage(wgpu::TextureUsage usage, const Format& format) {
+    D3D12_RESOURCE_STATES resourceState = D3D12_RESOURCE_STATE_COMMON;
 
-            if (usage & kPresentTextureUsage) {
-                // The present usage is only used internally by the swapchain and is never used in
-                // combination with other usages.
-                ASSERT(usage == kPresentTextureUsage);
-                return D3D12_RESOURCE_STATE_PRESENT;
-            }
+    if (usage & kPresentTextureUsage) {
+        // The present usage is only used internally by the swapchain and is never used in
+        // combination with other usages.
+        ASSERT(usage == kPresentTextureUsage);
+        return D3D12_RESOURCE_STATE_PRESENT;
+    }
 
-            if (usage & wgpu::TextureUsage::CopySrc) {
-                resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
-            }
-            if (usage & wgpu::TextureUsage::CopyDst) {
-                resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
-            }
-            if (usage & (wgpu::TextureUsage::TextureBinding)) {
-                resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
-                                  D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
-            }
-            if (usage & wgpu::TextureUsage::StorageBinding) {
-                resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
-            }
-            if (usage & wgpu::TextureUsage::RenderAttachment) {
-                if (format.HasDepthOrStencil()) {
-                    resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
-                } else {
-                    resourceState |= D3D12_RESOURCE_STATE_RENDER_TARGET;
-                }
-            }
-
-            if (usage & kReadOnlyRenderAttachment) {
-                // There is no STENCIL_READ state. Readonly for stencil is bundled with DEPTH_READ.
-                resourceState |= D3D12_RESOURCE_STATE_DEPTH_READ;
-            }
-
-            return resourceState;
+    if (usage & wgpu::TextureUsage::CopySrc) {
+        resourceState |= D3D12_RESOURCE_STATE_COPY_SOURCE;
+    }
+    if (usage & wgpu::TextureUsage::CopyDst) {
+        resourceState |= D3D12_RESOURCE_STATE_COPY_DEST;
+    }
+    if (usage & (wgpu::TextureUsage::TextureBinding)) {
+        resourceState |= (D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+                          D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE);
+    }
+    if (usage & wgpu::TextureUsage::StorageBinding) {
+        resourceState |= D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
+    }
+    if (usage & wgpu::TextureUsage::RenderAttachment) {
+        if (format.HasDepthOrStencil()) {
+            resourceState |= D3D12_RESOURCE_STATE_DEPTH_WRITE;
+        } else {
+            resourceState |= D3D12_RESOURCE_STATE_RENDER_TARGET;
         }
+    }
 
-        D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage,
-                                                const Format& format,
-                                                bool isMultisampledTexture) {
-            D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
+    if (usage & kReadOnlyRenderAttachment) {
+        // There is no STENCIL_READ state. Readonly for stencil is bundled with DEPTH_READ.
+        resourceState |= D3D12_RESOURCE_STATE_DEPTH_READ;
+    }
 
-            if (usage & wgpu::TextureUsage::StorageBinding) {
-                flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
-            }
+    return resourceState;
+}
 
-            // A multisampled resource must have either D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET or
-            // D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL set in D3D12_RESOURCE_DESC::Flags.
-            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_resource_desc
-            if ((usage & wgpu::TextureUsage::RenderAttachment) != 0 || isMultisampledTexture) {
-                if (format.HasDepthOrStencil()) {
-                    flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
-                } else {
-                    flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
-                }
-            }
+D3D12_RESOURCE_FLAGS D3D12ResourceFlags(wgpu::TextureUsage usage,
+                                        const Format& format,
+                                        bool isMultisampledTexture) {
+    D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE;
 
-            ASSERT(!(flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
-                   flags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
-            return flags;
+    if (usage & wgpu::TextureUsage::StorageBinding) {
+        flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
+    }
+
+    // A multisampled resource must have either D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET or
+    // D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL set in D3D12_RESOURCE_DESC::Flags.
+    // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_resource_desc
+    if ((usage & wgpu::TextureUsage::RenderAttachment) != 0 || isMultisampledTexture) {
+        if (format.HasDepthOrStencil()) {
+            flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
+        } else {
+            flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET;
         }
+    }
 
-        D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
-            switch (dimension) {
-                case wgpu::TextureDimension::e1D:
-                    return D3D12_RESOURCE_DIMENSION_TEXTURE1D;
-                case wgpu::TextureDimension::e2D:
-                    return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
-                case wgpu::TextureDimension::e3D:
-                    return D3D12_RESOURCE_DIMENSION_TEXTURE3D;
+    ASSERT(!(flags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) ||
+           flags == D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
+    return flags;
+}
+
+D3D12_RESOURCE_DIMENSION D3D12TextureDimension(wgpu::TextureDimension dimension) {
+    switch (dimension) {
+        case wgpu::TextureDimension::e1D:
+            return D3D12_RESOURCE_DIMENSION_TEXTURE1D;
+        case wgpu::TextureDimension::e2D:
+            return D3D12_RESOURCE_DIMENSION_TEXTURE2D;
+        case wgpu::TextureDimension::e3D:
+            return D3D12_RESOURCE_DIMENSION_TEXTURE3D;
+    }
+}
+
+DXGI_FORMAT D3D12TypelessTextureFormat(wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R8Snorm:
+        case wgpu::TextureFormat::R8Uint:
+        case wgpu::TextureFormat::R8Sint:
+            return DXGI_FORMAT_R8_TYPELESS;
+
+        case wgpu::TextureFormat::R16Uint:
+        case wgpu::TextureFormat::R16Sint:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::Depth16Unorm:
+            return DXGI_FORMAT_R16_TYPELESS;
+
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG8Snorm:
+        case wgpu::TextureFormat::RG8Uint:
+        case wgpu::TextureFormat::RG8Sint:
+            return DXGI_FORMAT_R8G8_TYPELESS;
+
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::R32Float:
+            return DXGI_FORMAT_R32_TYPELESS;
+
+        case wgpu::TextureFormat::RG16Uint:
+        case wgpu::TextureFormat::RG16Sint:
+        case wgpu::TextureFormat::RG16Float:
+            return DXGI_FORMAT_R16G16_TYPELESS;
+
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::RGBA8Snorm:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RGBA8Sint:
+            return DXGI_FORMAT_R8G8B8A8_TYPELESS;
+
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+            return DXGI_FORMAT_B8G8R8A8_TYPELESS;
+
+        case wgpu::TextureFormat::RGB10A2Unorm:
+            return DXGI_FORMAT_R10G10B10A2_TYPELESS;
+
+        case wgpu::TextureFormat::RG11B10Ufloat:
+            return DXGI_FORMAT_R11G11B10_FLOAT;
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+            return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RG32Float:
+            return DXGI_FORMAT_R32G32_TYPELESS;
+
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA16Float:
+            return DXGI_FORMAT_R16G16B16A16_TYPELESS;
+
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+        case wgpu::TextureFormat::RGBA32Float:
+            return DXGI_FORMAT_R32G32B32A32_TYPELESS;
+
+        case wgpu::TextureFormat::Depth32Float:
+        case wgpu::TextureFormat::Depth24Plus:
+            return DXGI_FORMAT_R32_TYPELESS;
+
+        // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
+        case wgpu::TextureFormat::Stencil8:
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+            return DXGI_FORMAT_R24G8_TYPELESS;
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+            return DXGI_FORMAT_R32G8X24_TYPELESS;
+
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            return DXGI_FORMAT_BC1_TYPELESS;
+
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            return DXGI_FORMAT_BC2_TYPELESS;
+
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            return DXGI_FORMAT_BC3_TYPELESS;
+
+        case wgpu::TextureFormat::BC4RSnorm:
+        case wgpu::TextureFormat::BC4RUnorm:
+            return DXGI_FORMAT_BC4_TYPELESS;
+
+        case wgpu::TextureFormat::BC5RGSnorm:
+        case wgpu::TextureFormat::BC5RGUnorm:
+            return DXGI_FORMAT_BC5_TYPELESS;
+
+        case wgpu::TextureFormat::BC6HRGBFloat:
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+            return DXGI_FORMAT_BC6H_TYPELESS;
+
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            return DXGI_FORMAT_BC7_TYPELESS;
+
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+        case wgpu::TextureFormat::EACR11Unorm:
+        case wgpu::TextureFormat::EACR11Snorm:
+        case wgpu::TextureFormat::EACRG11Unorm:
+        case wgpu::TextureFormat::EACRG11Snorm:
+
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+        case wgpu::TextureFormat::Undefined:
+            UNREACHABLE();
+    }
+}
+
+}  // namespace
+
+DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::R8Unorm:
+            return DXGI_FORMAT_R8_UNORM;
+        case wgpu::TextureFormat::R8Snorm:
+            return DXGI_FORMAT_R8_SNORM;
+        case wgpu::TextureFormat::R8Uint:
+            return DXGI_FORMAT_R8_UINT;
+        case wgpu::TextureFormat::R8Sint:
+            return DXGI_FORMAT_R8_SINT;
+
+        case wgpu::TextureFormat::R16Uint:
+            return DXGI_FORMAT_R16_UINT;
+        case wgpu::TextureFormat::R16Sint:
+            return DXGI_FORMAT_R16_SINT;
+        case wgpu::TextureFormat::R16Float:
+            return DXGI_FORMAT_R16_FLOAT;
+        case wgpu::TextureFormat::RG8Unorm:
+            return DXGI_FORMAT_R8G8_UNORM;
+        case wgpu::TextureFormat::RG8Snorm:
+            return DXGI_FORMAT_R8G8_SNORM;
+        case wgpu::TextureFormat::RG8Uint:
+            return DXGI_FORMAT_R8G8_UINT;
+        case wgpu::TextureFormat::RG8Sint:
+            return DXGI_FORMAT_R8G8_SINT;
+
+        case wgpu::TextureFormat::R32Uint:
+            return DXGI_FORMAT_R32_UINT;
+        case wgpu::TextureFormat::R32Sint:
+            return DXGI_FORMAT_R32_SINT;
+        case wgpu::TextureFormat::R32Float:
+            return DXGI_FORMAT_R32_FLOAT;
+        case wgpu::TextureFormat::RG16Uint:
+            return DXGI_FORMAT_R16G16_UINT;
+        case wgpu::TextureFormat::RG16Sint:
+            return DXGI_FORMAT_R16G16_SINT;
+        case wgpu::TextureFormat::RG16Float:
+            return DXGI_FORMAT_R16G16_FLOAT;
+        case wgpu::TextureFormat::RGBA8Unorm:
+            return DXGI_FORMAT_R8G8B8A8_UNORM;
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+            return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
+        case wgpu::TextureFormat::RGBA8Snorm:
+            return DXGI_FORMAT_R8G8B8A8_SNORM;
+        case wgpu::TextureFormat::RGBA8Uint:
+            return DXGI_FORMAT_R8G8B8A8_UINT;
+        case wgpu::TextureFormat::RGBA8Sint:
+            return DXGI_FORMAT_R8G8B8A8_SINT;
+        case wgpu::TextureFormat::BGRA8Unorm:
+            return DXGI_FORMAT_B8G8R8A8_UNORM;
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+            return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
+        case wgpu::TextureFormat::RGB10A2Unorm:
+            return DXGI_FORMAT_R10G10B10A2_UNORM;
+        case wgpu::TextureFormat::RG11B10Ufloat:
+            return DXGI_FORMAT_R11G11B10_FLOAT;
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+            return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+
+        case wgpu::TextureFormat::RG32Uint:
+            return DXGI_FORMAT_R32G32_UINT;
+        case wgpu::TextureFormat::RG32Sint:
+            return DXGI_FORMAT_R32G32_SINT;
+        case wgpu::TextureFormat::RG32Float:
+            return DXGI_FORMAT_R32G32_FLOAT;
+        case wgpu::TextureFormat::RGBA16Uint:
+            return DXGI_FORMAT_R16G16B16A16_UINT;
+        case wgpu::TextureFormat::RGBA16Sint:
+            return DXGI_FORMAT_R16G16B16A16_SINT;
+        case wgpu::TextureFormat::RGBA16Float:
+            return DXGI_FORMAT_R16G16B16A16_FLOAT;
+
+        case wgpu::TextureFormat::RGBA32Uint:
+            return DXGI_FORMAT_R32G32B32A32_UINT;
+        case wgpu::TextureFormat::RGBA32Sint:
+            return DXGI_FORMAT_R32G32B32A32_SINT;
+        case wgpu::TextureFormat::RGBA32Float:
+            return DXGI_FORMAT_R32G32B32A32_FLOAT;
+
+        case wgpu::TextureFormat::Depth16Unorm:
+            return DXGI_FORMAT_D16_UNORM;
+        case wgpu::TextureFormat::Depth32Float:
+        case wgpu::TextureFormat::Depth24Plus:
+            return DXGI_FORMAT_D32_FLOAT;
+        // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
+        case wgpu::TextureFormat::Stencil8:
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+            return DXGI_FORMAT_D24_UNORM_S8_UINT;
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+            return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
+
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+            return DXGI_FORMAT_BC1_UNORM;
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            return DXGI_FORMAT_BC1_UNORM_SRGB;
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+            return DXGI_FORMAT_BC2_UNORM;
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            return DXGI_FORMAT_BC2_UNORM_SRGB;
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+            return DXGI_FORMAT_BC3_UNORM;
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            return DXGI_FORMAT_BC3_UNORM_SRGB;
+        case wgpu::TextureFormat::BC4RSnorm:
+            return DXGI_FORMAT_BC4_SNORM;
+        case wgpu::TextureFormat::BC4RUnorm:
+            return DXGI_FORMAT_BC4_UNORM;
+        case wgpu::TextureFormat::BC5RGSnorm:
+            return DXGI_FORMAT_BC5_SNORM;
+        case wgpu::TextureFormat::BC5RGUnorm:
+            return DXGI_FORMAT_BC5_UNORM;
+        case wgpu::TextureFormat::BC6HRGBFloat:
+            return DXGI_FORMAT_BC6H_SF16;
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+            return DXGI_FORMAT_BC6H_UF16;
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+            return DXGI_FORMAT_BC7_UNORM;
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            return DXGI_FORMAT_BC7_UNORM_SRGB;
+
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+            return DXGI_FORMAT_NV12;
+
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+        case wgpu::TextureFormat::EACR11Unorm:
+        case wgpu::TextureFormat::EACR11Snorm:
+        case wgpu::TextureFormat::EACRG11Unorm:
+        case wgpu::TextureFormat::EACRG11Snorm:
+
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+
+        case wgpu::TextureFormat::Undefined:
+            UNREACHABLE();
+    }
+}
+
+MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                    "Texture dimension (%s) is not %s.", descriptor->dimension,
+                    wgpu::TextureDimension::e2D);
+
+    DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                    descriptor->mipLevelCount);
+
+    DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+                    descriptor->size.depthOrArrayLayers);
+
+    DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                    descriptor->sampleCount);
+
+    return {};
+}
+
+MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
+                                            const TextureDescriptor* dawnDescriptor) {
+    const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
+    DAWN_INVALID_IF(
+        (dawnDescriptor->size.width != d3dDescriptor.Width) ||
+            (dawnDescriptor->size.height != d3dDescriptor.Height) ||
+            (dawnDescriptor->size.depthOrArrayLayers != 1),
+        "D3D12 texture size (Width: %u, Height: %u, DepthOrArraySize: 1) doesn't match Dawn "
+        "descriptor size (width: %u, height: %u, depthOrArrayLayers: %u).",
+        d3dDescriptor.Width, d3dDescriptor.Height, dawnDescriptor->size.width,
+        dawnDescriptor->size.height, dawnDescriptor->size.depthOrArrayLayers);
+
+    const DXGI_FORMAT dxgiFormatFromDescriptor = D3D12TextureFormat(dawnDescriptor->format);
+    DAWN_INVALID_IF(dxgiFormatFromDescriptor != d3dDescriptor.Format,
+                    "D3D12 texture format (%x) is not compatible with Dawn descriptor format (%s).",
+                    d3dDescriptor.Format, dawnDescriptor->format);
+
+    DAWN_INVALID_IF(d3dDescriptor.MipLevels != 1,
+                    "D3D12 texture number of miplevels (%u) is not 1.", d3dDescriptor.MipLevels);
+
+    DAWN_INVALID_IF(d3dDescriptor.DepthOrArraySize != 1, "D3D12 texture array size (%u) is not 1.",
+                    d3dDescriptor.DepthOrArraySize);
+
+    // Shared textures cannot be multi-sample so no need to check those.
+    ASSERT(d3dDescriptor.SampleDesc.Count == 1);
+    ASSERT(d3dDescriptor.SampleDesc.Quality == 0);
+
+    return {};
+}
+
+// https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_shared_resource_compatibility_tier
+MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat) {
+    const bool supportsSharedResourceCapabilityTier1 =
+        device->GetDeviceInfo().supportsSharedResourceCapabilityTier1;
+    switch (textureFormat) {
+        // MSDN docs are not correct, NV12 requires at-least tier 1.
+        case DXGI_FORMAT_NV12:
+            if (supportsSharedResourceCapabilityTier1) {
+                return {};
             }
+            break;
+        default:
+            break;
+    }
+
+    return DAWN_FORMAT_VALIDATION_ERROR("DXGI format does not support cross-API sharing.");
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+    Ref<Texture> dawnTexture =
+        AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+
+    DAWN_INVALID_IF(dawnTexture->GetFormat().IsMultiPlanar(),
+                    "Cannot create a multi-planar formatted texture directly");
+
+    DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
+    return std::move(dawnTexture);
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
+    Device* device,
+    const TextureDescriptor* descriptor,
+    ComPtr<ID3D12Resource> d3d12Texture,
+    Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+    bool isSwapChainTexture,
+    bool isInitialized) {
+    Ref<Texture> dawnTexture =
+        AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+    DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
+        descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), isSwapChainTexture));
+
+    // Importing a multi-planar format must be initialized. This is required because
+    // a shared multi-planar format cannot be initialized by Dawn.
+    DAWN_INVALID_IF(
+        !isInitialized && dawnTexture->GetFormat().IsMultiPlanar(),
+        "Cannot create a texture with a multi-planar format (%s) with uninitialized data.",
+        dawnTexture->GetFormat().format);
+
+    dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
+                                                    dawnTexture->GetAllSubresources());
+    return std::move(dawnTexture);
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+                                            const TextureDescriptor* descriptor,
+                                            ComPtr<ID3D12Resource> d3d12Texture) {
+    Ref<Texture> dawnTexture =
+        AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+    DAWN_TRY(dawnTexture->InitializeAsSwapChainTexture(std::move(d3d12Texture)));
+    return std::move(dawnTexture);
+}
+
+MaybeError Texture::InitializeAsExternalTexture(const TextureDescriptor* descriptor,
+                                                ComPtr<ID3D12Resource> d3d12Texture,
+                                                Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+                                                bool isSwapChainTexture) {
+    mD3D11on12Resource = std::move(d3d11on12Resource);
+    mSwapChainTexture = isSwapChainTexture;
+
+    D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
+    mD3D12ResourceFlags = desc.Flags;
+
+    AllocationInfo info;
+    info.mMethod = AllocationMethod::kExternal;
+    // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+    // texture is owned externally. The texture's owning entity must remain responsible for
+    // memory management.
+    mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+    SetLabelHelper("Dawn_ExternalTexture");
+
+    return {};
+}
+
+MaybeError Texture::InitializeAsInternalTexture() {
+    D3D12_RESOURCE_DESC resourceDescriptor;
+    resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
+    resourceDescriptor.Alignment = 0;
+
+    const Extent3D& size = GetSize();
+    resourceDescriptor.Width = size.width;
+    resourceDescriptor.Height = size.height;
+    resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
+
+    // This will need to be much more nuanced when WebGPU has
+    // texture view compatibility rules.
+    const bool needsTypelessFormat = GetFormat().HasDepthOrStencil() &&
+                                     (GetInternalUsage() & wgpu::TextureUsage::TextureBinding) != 0;
+
+    DXGI_FORMAT dxgiFormat = needsTypelessFormat ? D3D12TypelessTextureFormat(GetFormat().format)
+                                                 : D3D12TextureFormat(GetFormat().format);
+
+    resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
+    resourceDescriptor.Format = dxgiFormat;
+    resourceDescriptor.SampleDesc.Count = GetSampleCount();
+    resourceDescriptor.SampleDesc.Quality = 0;
+    resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
+    resourceDescriptor.Flags =
+        D3D12ResourceFlags(GetInternalUsage(), GetFormat(), IsMultisampledTexture());
+    mD3D12ResourceFlags = resourceDescriptor.Flags;
+
+    DAWN_TRY_ASSIGN(mResourceAllocation,
+                    ToBackend(GetDevice())
+                        ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
+                                         D3D12_RESOURCE_STATE_COMMON));
+
+    SetLabelImpl();
+
+    Device* device = ToBackend(GetDevice());
+
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        CommandRecordingContext* commandContext;
+        DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
+
+        DAWN_TRY(
+            ClearTexture(commandContext, GetAllSubresources(), TextureBase::ClearValue::NonZero));
+    }
+
+    return {};
+}
+
+MaybeError Texture::InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture) {
+    AllocationInfo info;
+    info.mMethod = AllocationMethod::kExternal;
+    // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
+    // texture is owned externally. The texture's owning entity must remain responsible for
+    // memory management.
+    mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
+
+    SetLabelHelper("Dawn_SwapChainTexture");
+
+    return {};
+}
+
+Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+    : TextureBase(device, descriptor, state),
+      mSubresourceStateAndDecay(
+          GetFormat().aspects,
+          GetArrayLayers(),
+          GetNumMipLevels(),
+          {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, kMaxExecutionSerial, false}) {}
+
+Texture::~Texture() {}
+
+void Texture::DestroyImpl() {
+    TextureBase::DestroyImpl();
+
+    Device* device = ToBackend(GetDevice());
+
+    // In PIX's D3D12-only mode, there is no way to determine frame boundaries
+    // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
+    // PIX will wait forever for a present that never happens.
+    // If we know we're dealing with a swapbuffer texture, inform PIX we've
+    // "presented" the texture so it can determine frame boundaries and use its
+    // contents for the UI.
+    if (mSwapChainTexture) {
+        ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
+        if (d3dSharingContract != nullptr) {
+            d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
         }
+    }
 
-        DXGI_FORMAT D3D12TypelessTextureFormat(wgpu::TextureFormat format) {
-            switch (format) {
-                case wgpu::TextureFormat::R8Unorm:
-                case wgpu::TextureFormat::R8Snorm:
-                case wgpu::TextureFormat::R8Uint:
-                case wgpu::TextureFormat::R8Sint:
-                    return DXGI_FORMAT_R8_TYPELESS;
+    device->DeallocateMemory(mResourceAllocation);
 
-                case wgpu::TextureFormat::R16Uint:
-                case wgpu::TextureFormat::R16Sint:
-                case wgpu::TextureFormat::R16Float:
-                case wgpu::TextureFormat::Depth16Unorm:
-                    return DXGI_FORMAT_R16_TYPELESS;
+    // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
+    // We can set mSwapChainTexture to false to avoid passing a nullptr to
+    // ID3D12SharingContract::Present.
+    mSwapChainTexture = false;
 
-                case wgpu::TextureFormat::RG8Unorm:
-                case wgpu::TextureFormat::RG8Snorm:
-                case wgpu::TextureFormat::RG8Uint:
-                case wgpu::TextureFormat::RG8Sint:
-                    return DXGI_FORMAT_R8G8_TYPELESS;
+    // Now that the texture has been destroyed. It should release the refptr
+    // of the d3d11on12 resource.
+    mD3D11on12Resource = nullptr;
+}
 
-                case wgpu::TextureFormat::R32Uint:
-                case wgpu::TextureFormat::R32Sint:
-                case wgpu::TextureFormat::R32Float:
-                    return DXGI_FORMAT_R32_TYPELESS;
+DXGI_FORMAT Texture::GetD3D12Format() const {
+    return D3D12TextureFormat(GetFormat().format);
+}
 
-                case wgpu::TextureFormat::RG16Uint:
-                case wgpu::TextureFormat::RG16Sint:
-                case wgpu::TextureFormat::RG16Float:
-                    return DXGI_FORMAT_R16G16_TYPELESS;
+ID3D12Resource* Texture::GetD3D12Resource() const {
+    return mResourceAllocation.GetD3D12Resource();
+}
 
-                case wgpu::TextureFormat::RGBA8Unorm:
-                case wgpu::TextureFormat::RGBA8UnormSrgb:
-                case wgpu::TextureFormat::RGBA8Snorm:
-                case wgpu::TextureFormat::RGBA8Uint:
-                case wgpu::TextureFormat::RGBA8Sint:
-                    return DXGI_FORMAT_R8G8B8A8_TYPELESS;
+DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
+    ASSERT(GetFormat().aspects & aspect);
 
-                case wgpu::TextureFormat::BGRA8Unorm:
-                case wgpu::TextureFormat::BGRA8UnormSrgb:
-                    return DXGI_FORMAT_B8G8R8A8_TYPELESS;
-
-                case wgpu::TextureFormat::RGB10A2Unorm:
-                    return DXGI_FORMAT_R10G10B10A2_TYPELESS;
-
-                case wgpu::TextureFormat::RG11B10Ufloat:
-                    return DXGI_FORMAT_R11G11B10_FLOAT;
-                case wgpu::TextureFormat::RGB9E5Ufloat:
-                    return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
-
-                case wgpu::TextureFormat::RG32Uint:
-                case wgpu::TextureFormat::RG32Sint:
-                case wgpu::TextureFormat::RG32Float:
-                    return DXGI_FORMAT_R32G32_TYPELESS;
-
-                case wgpu::TextureFormat::RGBA16Uint:
-                case wgpu::TextureFormat::RGBA16Sint:
-                case wgpu::TextureFormat::RGBA16Float:
-                    return DXGI_FORMAT_R16G16B16A16_TYPELESS;
-
-                case wgpu::TextureFormat::RGBA32Uint:
-                case wgpu::TextureFormat::RGBA32Sint:
-                case wgpu::TextureFormat::RGBA32Float:
-                    return DXGI_FORMAT_R32G32B32A32_TYPELESS;
-
-                case wgpu::TextureFormat::Depth32Float:
-                case wgpu::TextureFormat::Depth24Plus:
-                    return DXGI_FORMAT_R32_TYPELESS;
-
-                // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
-                case wgpu::TextureFormat::Stencil8:
-                case wgpu::TextureFormat::Depth24UnormStencil8:
-                    return DXGI_FORMAT_R24G8_TYPELESS;
-                case wgpu::TextureFormat::Depth24PlusStencil8:
-                case wgpu::TextureFormat::Depth32FloatStencil8:
-                    return DXGI_FORMAT_R32G8X24_TYPELESS;
-
-                case wgpu::TextureFormat::BC1RGBAUnorm:
-                case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-                    return DXGI_FORMAT_BC1_TYPELESS;
-
-                case wgpu::TextureFormat::BC2RGBAUnorm:
-                case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-                    return DXGI_FORMAT_BC2_TYPELESS;
-
-                case wgpu::TextureFormat::BC3RGBAUnorm:
-                case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-                    return DXGI_FORMAT_BC3_TYPELESS;
-
-                case wgpu::TextureFormat::BC4RSnorm:
-                case wgpu::TextureFormat::BC4RUnorm:
-                    return DXGI_FORMAT_BC4_TYPELESS;
-
-                case wgpu::TextureFormat::BC5RGSnorm:
-                case wgpu::TextureFormat::BC5RGUnorm:
-                    return DXGI_FORMAT_BC5_TYPELESS;
-
-                case wgpu::TextureFormat::BC6HRGBFloat:
-                case wgpu::TextureFormat::BC6HRGBUfloat:
-                    return DXGI_FORMAT_BC6H_TYPELESS;
-
-                case wgpu::TextureFormat::BC7RGBAUnorm:
-                case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-                    return DXGI_FORMAT_BC7_TYPELESS;
-
-                case wgpu::TextureFormat::ETC2RGB8Unorm:
-                case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-                case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-                case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-                case wgpu::TextureFormat::ETC2RGBA8Unorm:
-                case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-                case wgpu::TextureFormat::EACR11Unorm:
-                case wgpu::TextureFormat::EACR11Snorm:
-                case wgpu::TextureFormat::EACRG11Unorm:
-                case wgpu::TextureFormat::EACRG11Snorm:
-
-                case wgpu::TextureFormat::ASTC4x4Unorm:
-                case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-                case wgpu::TextureFormat::ASTC5x4Unorm:
-                case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-                case wgpu::TextureFormat::ASTC5x5Unorm:
-                case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-                case wgpu::TextureFormat::ASTC6x5Unorm:
-                case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-                case wgpu::TextureFormat::ASTC6x6Unorm:
-                case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-                case wgpu::TextureFormat::ASTC8x5Unorm:
-                case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-                case wgpu::TextureFormat::ASTC8x6Unorm:
-                case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-                case wgpu::TextureFormat::ASTC8x8Unorm:
-                case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-                case wgpu::TextureFormat::ASTC10x5Unorm:
-                case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-                case wgpu::TextureFormat::ASTC10x6Unorm:
-                case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-                case wgpu::TextureFormat::ASTC10x8Unorm:
-                case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-                case wgpu::TextureFormat::ASTC10x10Unorm:
-                case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-                case wgpu::TextureFormat::ASTC12x10Unorm:
-                case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-                case wgpu::TextureFormat::ASTC12x12Unorm:
-                case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-
-                case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-                case wgpu::TextureFormat::Undefined:
+    switch (GetFormat().format) {
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+        case wgpu::TextureFormat::Stencil8:
+            switch (aspect) {
+                case Aspect::Depth:
+                    return DXGI_FORMAT_R32_FLOAT;
+                case Aspect::Stencil:
+                    return DXGI_FORMAT_R8_UINT;
+                default:
                     UNREACHABLE();
             }
-        }
+        default:
+            ASSERT(HasOneBit(GetFormat().aspects));
+            return GetD3D12Format();
+    }
+}
 
-    }  // namespace
+MaybeError Texture::AcquireKeyedMutex() {
+    ASSERT(mD3D11on12Resource != nullptr);
+    return mD3D11on12Resource->AcquireKeyedMutex();
+}
 
-    DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format) {
-        switch (format) {
-            case wgpu::TextureFormat::R8Unorm:
-                return DXGI_FORMAT_R8_UNORM;
-            case wgpu::TextureFormat::R8Snorm:
-                return DXGI_FORMAT_R8_SNORM;
-            case wgpu::TextureFormat::R8Uint:
-                return DXGI_FORMAT_R8_UINT;
-            case wgpu::TextureFormat::R8Sint:
-                return DXGI_FORMAT_R8_SINT;
+void Texture::ReleaseKeyedMutex() {
+    ASSERT(mD3D11on12Resource != nullptr);
+    mD3D11on12Resource->ReleaseKeyedMutex();
+}
 
-            case wgpu::TextureFormat::R16Uint:
-                return DXGI_FORMAT_R16_UINT;
-            case wgpu::TextureFormat::R16Sint:
-                return DXGI_FORMAT_R16_SINT;
-            case wgpu::TextureFormat::R16Float:
-                return DXGI_FORMAT_R16_FLOAT;
-            case wgpu::TextureFormat::RG8Unorm:
-                return DXGI_FORMAT_R8G8_UNORM;
-            case wgpu::TextureFormat::RG8Snorm:
-                return DXGI_FORMAT_R8G8_SNORM;
-            case wgpu::TextureFormat::RG8Uint:
-                return DXGI_FORMAT_R8G8_UINT;
-            case wgpu::TextureFormat::RG8Sint:
-                return DXGI_FORMAT_R8G8_SINT;
+void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                         wgpu::TextureUsage usage,
+                                         const SubresourceRange& range) {
+    TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
+}
 
-            case wgpu::TextureFormat::R32Uint:
-                return DXGI_FORMAT_R32_UINT;
-            case wgpu::TextureFormat::R32Sint:
-                return DXGI_FORMAT_R32_SINT;
-            case wgpu::TextureFormat::R32Float:
-                return DXGI_FORMAT_R32_FLOAT;
-            case wgpu::TextureFormat::RG16Uint:
-                return DXGI_FORMAT_R16G16_UINT;
-            case wgpu::TextureFormat::RG16Sint:
-                return DXGI_FORMAT_R16G16_SINT;
-            case wgpu::TextureFormat::RG16Float:
-                return DXGI_FORMAT_R16G16_FLOAT;
-            case wgpu::TextureFormat::RGBA8Unorm:
-                return DXGI_FORMAT_R8G8B8A8_UNORM;
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-                return DXGI_FORMAT_R8G8B8A8_UNORM_SRGB;
-            case wgpu::TextureFormat::RGBA8Snorm:
-                return DXGI_FORMAT_R8G8B8A8_SNORM;
-            case wgpu::TextureFormat::RGBA8Uint:
-                return DXGI_FORMAT_R8G8B8A8_UINT;
-            case wgpu::TextureFormat::RGBA8Sint:
-                return DXGI_FORMAT_R8G8B8A8_SINT;
-            case wgpu::TextureFormat::BGRA8Unorm:
-                return DXGI_FORMAT_B8G8R8A8_UNORM;
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-                return DXGI_FORMAT_B8G8R8A8_UNORM_SRGB;
-            case wgpu::TextureFormat::RGB10A2Unorm:
-                return DXGI_FORMAT_R10G10B10A2_UNORM;
-            case wgpu::TextureFormat::RG11B10Ufloat:
-                return DXGI_FORMAT_R11G11B10_FLOAT;
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-                return DXGI_FORMAT_R9G9B9E5_SHAREDEXP;
+void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                            wgpu::TextureUsage usage) {
+    TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
+                               GetAllSubresources());
+}
 
-            case wgpu::TextureFormat::RG32Uint:
-                return DXGI_FORMAT_R32G32_UINT;
-            case wgpu::TextureFormat::RG32Sint:
-                return DXGI_FORMAT_R32G32_SINT;
-            case wgpu::TextureFormat::RG32Float:
-                return DXGI_FORMAT_R32G32_FLOAT;
-            case wgpu::TextureFormat::RGBA16Uint:
-                return DXGI_FORMAT_R16G16B16A16_UINT;
-            case wgpu::TextureFormat::RGBA16Sint:
-                return DXGI_FORMAT_R16G16B16A16_SINT;
-            case wgpu::TextureFormat::RGBA16Float:
-                return DXGI_FORMAT_R16G16B16A16_FLOAT;
+void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                            D3D12_RESOURCE_STATES newState) {
+    TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
+}
 
-            case wgpu::TextureFormat::RGBA32Uint:
-                return DXGI_FORMAT_R32G32B32A32_UINT;
-            case wgpu::TextureFormat::RGBA32Sint:
-                return DXGI_FORMAT_R32G32B32A32_SINT;
-            case wgpu::TextureFormat::RGBA32Float:
-                return DXGI_FORMAT_R32G32B32A32_FLOAT;
-
-            case wgpu::TextureFormat::Depth16Unorm:
-                return DXGI_FORMAT_D16_UNORM;
-            case wgpu::TextureFormat::Depth32Float:
-            case wgpu::TextureFormat::Depth24Plus:
-                return DXGI_FORMAT_D32_FLOAT;
-            // Depth24UnormStencil8 is the smallest format supported on D3D12 that has stencil.
-            case wgpu::TextureFormat::Stencil8:
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-                return DXGI_FORMAT_D24_UNORM_S8_UINT;
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-            case wgpu::TextureFormat::Depth32FloatStencil8:
-                return DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
-
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-                return DXGI_FORMAT_BC1_UNORM;
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-                return DXGI_FORMAT_BC1_UNORM_SRGB;
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-                return DXGI_FORMAT_BC2_UNORM;
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-                return DXGI_FORMAT_BC2_UNORM_SRGB;
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-                return DXGI_FORMAT_BC3_UNORM;
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-                return DXGI_FORMAT_BC3_UNORM_SRGB;
-            case wgpu::TextureFormat::BC4RSnorm:
-                return DXGI_FORMAT_BC4_SNORM;
-            case wgpu::TextureFormat::BC4RUnorm:
-                return DXGI_FORMAT_BC4_UNORM;
-            case wgpu::TextureFormat::BC5RGSnorm:
-                return DXGI_FORMAT_BC5_SNORM;
-            case wgpu::TextureFormat::BC5RGUnorm:
-                return DXGI_FORMAT_BC5_UNORM;
-            case wgpu::TextureFormat::BC6HRGBFloat:
-                return DXGI_FORMAT_BC6H_SF16;
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-                return DXGI_FORMAT_BC6H_UF16;
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-                return DXGI_FORMAT_BC7_UNORM;
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-                return DXGI_FORMAT_BC7_UNORM_SRGB;
-
-            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-                return DXGI_FORMAT_NV12;
-
-            case wgpu::TextureFormat::ETC2RGB8Unorm:
-            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGBA8Unorm:
-            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-            case wgpu::TextureFormat::EACR11Unorm:
-            case wgpu::TextureFormat::EACR11Snorm:
-            case wgpu::TextureFormat::EACRG11Unorm:
-            case wgpu::TextureFormat::EACRG11Snorm:
-
-            case wgpu::TextureFormat::ASTC4x4Unorm:
-            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x4Unorm:
-            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x5Unorm:
-            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x5Unorm:
-            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x6Unorm:
-            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x5Unorm:
-            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x6Unorm:
-            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x8Unorm:
-            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x5Unorm:
-            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x6Unorm:
-            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x8Unorm:
-            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x10Unorm:
-            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x10Unorm:
-            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x12Unorm:
-            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-
-            case wgpu::TextureFormat::Undefined:
-                UNREACHABLE();
-        }
+void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                         D3D12_RESOURCE_STATES newState,
+                                         const SubresourceRange& range) {
+    if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+        // Track the underlying heap to ensure residency.
+        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+        commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
     }
 
-    MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
-                        "Texture dimension (%s) is not %s.", descriptor->dimension,
-                        wgpu::TextureDimension::e2D);
+    std::vector<D3D12_RESOURCE_BARRIER> barriers;
 
-        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
-                        descriptor->mipLevelCount);
-
-        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
-                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
-        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
-                        descriptor->sampleCount);
-
-        return {};
+    // TODO(enga): Consider adding a Count helper.
+    uint32_t aspectCount = 0;
+    for (Aspect aspect : IterateEnumMask(range.aspects)) {
+        aspectCount++;
+        DAWN_UNUSED(aspect);
     }
 
-    MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
-                                                const TextureDescriptor* dawnDescriptor) {
-        const D3D12_RESOURCE_DESC d3dDescriptor = d3d12Resource->GetDesc();
-        DAWN_INVALID_IF(
-            (dawnDescriptor->size.width != d3dDescriptor.Width) ||
-                (dawnDescriptor->size.height != d3dDescriptor.Height) ||
-                (dawnDescriptor->size.depthOrArrayLayers != 1),
-            "D3D12 texture size (Width: %u, Height: %u, DepthOrArraySize: 1) doesn't match Dawn "
-            "descriptor size (width: %u, height: %u, depthOrArrayLayers: %u).",
-            d3dDescriptor.Width, d3dDescriptor.Height, dawnDescriptor->size.width,
-            dawnDescriptor->size.height, dawnDescriptor->size.depthOrArrayLayers);
+    barriers.reserve(range.levelCount * range.layerCount * aspectCount);
 
-        const DXGI_FORMAT dxgiFormatFromDescriptor = D3D12TextureFormat(dawnDescriptor->format);
-        DAWN_INVALID_IF(
-            dxgiFormatFromDescriptor != d3dDescriptor.Format,
-            "D3D12 texture format (%x) is not compatible with Dawn descriptor format (%s).",
-            d3dDescriptor.Format, dawnDescriptor->format);
-
-        DAWN_INVALID_IF(d3dDescriptor.MipLevels != 1,
-                        "D3D12 texture number of miplevels (%u) is not 1.",
-                        d3dDescriptor.MipLevels);
-
-        DAWN_INVALID_IF(d3dDescriptor.DepthOrArraySize != 1,
-                        "D3D12 texture array size (%u) is not 1.", d3dDescriptor.DepthOrArraySize);
-
-        // Shared textures cannot be multi-sample so no need to check those.
-        ASSERT(d3dDescriptor.SampleDesc.Count == 1);
-        ASSERT(d3dDescriptor.SampleDesc.Quality == 0);
-
-        return {};
+    TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
+    if (barriers.size()) {
+        commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
     }
+}
 
-    // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/ne-d3d12-d3d12_shared_resource_compatibility_tier
-    MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat) {
-        const bool supportsSharedResourceCapabilityTier1 =
-            device->GetDeviceInfo().supportsSharedResourceCapabilityTier1;
-        switch (textureFormat) {
-            // MSDN docs are not correct, NV12 requires at-least tier 1.
-            case DXGI_FORMAT_NV12:
-                if (supportsSharedResourceCapabilityTier1) {
-                    return {};
-                }
-                break;
-            default:
-                break;
-        }
+void Texture::TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+                                         const SubresourceRange& range,
+                                         StateAndDecay* state,
+                                         D3D12_RESOURCE_STATES newState,
+                                         ExecutionSerial pendingCommandSerial) const {
+    D3D12_RESOURCE_STATES lastState = state->lastState;
 
-        return DAWN_FORMAT_VALIDATION_ERROR("DXGI format does not support cross-API sharing.");
-    }
+    // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
+    // If one of the usages isn't UAV, then other barriers are used.
+    bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
+                           newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
 
-    // static
-    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
-                                                const TextureDescriptor* descriptor) {
-        Ref<Texture> dawnTexture =
-            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
-
-        DAWN_INVALID_IF(dawnTexture->GetFormat().IsMultiPlanar(),
-                        "Cannot create a multi-planar formatted texture directly");
-
-        DAWN_TRY(dawnTexture->InitializeAsInternalTexture());
-        return std::move(dawnTexture);
-    }
-
-    // static
-    ResultOrError<Ref<Texture>> Texture::CreateExternalImage(
-        Device* device,
-        const TextureDescriptor* descriptor,
-        ComPtr<ID3D12Resource> d3d12Texture,
-        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
-        bool isSwapChainTexture,
-        bool isInitialized) {
-        Ref<Texture> dawnTexture =
-            AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
-        DAWN_TRY(dawnTexture->InitializeAsExternalTexture(
-            descriptor, std::move(d3d12Texture), std::move(d3d11on12Resource), isSwapChainTexture));
-
-        // Importing a multi-planar format must be initialized. This is required because
-        // a shared multi-planar format cannot be initialized by Dawn.
-        DAWN_INVALID_IF(
-            !isInitialized && dawnTexture->GetFormat().IsMultiPlanar(),
-            "Cannot create a texture with a multi-planar format (%s) with uninitialized data.",
-            dawnTexture->GetFormat().format);
-
-        dawnTexture->SetIsSubresourceContentInitialized(isInitialized,
-                                                        dawnTexture->GetAllSubresources());
-        return std::move(dawnTexture);
-    }
-
-    // static
-    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
-                                                const TextureDescriptor* descriptor,
-                                                ComPtr<ID3D12Resource> d3d12Texture) {
-        Ref<Texture> dawnTexture =
-            AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
-        DAWN_TRY(dawnTexture->InitializeAsSwapChainTexture(std::move(d3d12Texture)));
-        return std::move(dawnTexture);
-    }
-
-    MaybeError Texture::InitializeAsExternalTexture(
-        const TextureDescriptor* descriptor,
-        ComPtr<ID3D12Resource> d3d12Texture,
-        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
-        bool isSwapChainTexture) {
-        mD3D11on12Resource = std::move(d3d11on12Resource);
-        mSwapChainTexture = isSwapChainTexture;
-
-        D3D12_RESOURCE_DESC desc = d3d12Texture->GetDesc();
-        mD3D12ResourceFlags = desc.Flags;
-
-        AllocationInfo info;
-        info.mMethod = AllocationMethod::kExternal;
-        // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
-        // texture is owned externally. The texture's owning entity must remain responsible for
-        // memory management.
-        mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
-
-        SetLabelHelper("Dawn_ExternalTexture");
-
-        return {};
-    }
-
-    MaybeError Texture::InitializeAsInternalTexture() {
-        D3D12_RESOURCE_DESC resourceDescriptor;
-        resourceDescriptor.Dimension = D3D12TextureDimension(GetDimension());
-        resourceDescriptor.Alignment = 0;
-
-        const Extent3D& size = GetSize();
-        resourceDescriptor.Width = size.width;
-        resourceDescriptor.Height = size.height;
-        resourceDescriptor.DepthOrArraySize = size.depthOrArrayLayers;
-
-        // This will need to be much more nuanced when WebGPU has
-        // texture view compatibility rules.
-        const bool needsTypelessFormat =
-            GetFormat().HasDepthOrStencil() &&
-            (GetInternalUsage() & wgpu::TextureUsage::TextureBinding) != 0;
-
-        DXGI_FORMAT dxgiFormat = needsTypelessFormat
-                                     ? D3D12TypelessTextureFormat(GetFormat().format)
-                                     : D3D12TextureFormat(GetFormat().format);
-
-        resourceDescriptor.MipLevels = static_cast<UINT16>(GetNumMipLevels());
-        resourceDescriptor.Format = dxgiFormat;
-        resourceDescriptor.SampleDesc.Count = GetSampleCount();
-        resourceDescriptor.SampleDesc.Quality = 0;
-        resourceDescriptor.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
-        resourceDescriptor.Flags =
-            D3D12ResourceFlags(GetInternalUsage(), GetFormat(), IsMultisampledTexture());
-        mD3D12ResourceFlags = resourceDescriptor.Flags;
-
-        DAWN_TRY_ASSIGN(mResourceAllocation,
-                        ToBackend(GetDevice())
-                            ->AllocateMemory(D3D12_HEAP_TYPE_DEFAULT, resourceDescriptor,
-                                             D3D12_RESOURCE_STATE_COMMON));
-
-        SetLabelImpl();
-
-        Device* device = ToBackend(GetDevice());
-
-        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-            CommandRecordingContext* commandContext;
-            DAWN_TRY_ASSIGN(commandContext, device->GetPendingCommandContext());
-
-            DAWN_TRY(ClearTexture(commandContext, GetAllSubresources(),
-                                  TextureBase::ClearValue::NonZero));
-        }
-
-        return {};
-    }
-
-    MaybeError Texture::InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture) {
-        AllocationInfo info;
-        info.mMethod = AllocationMethod::kExternal;
-        // When creating the ResourceHeapAllocation, the resource heap is set to nullptr because the
-        // texture is owned externally. The texture's owning entity must remain responsible for
-        // memory management.
-        mResourceAllocation = {info, 0, std::move(d3d12Texture), nullptr};
-
-        SetLabelHelper("Dawn_SwapChainTexture");
-
-        return {};
-    }
-
-    Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
-        : TextureBase(device, descriptor, state),
-          mSubresourceStateAndDecay(
-              GetFormat().aspects,
-              GetArrayLayers(),
-              GetNumMipLevels(),
-              {D3D12_RESOURCE_STATES::D3D12_RESOURCE_STATE_COMMON, kMaxExecutionSerial, false}) {
-    }
-
-    Texture::~Texture() {
-    }
-
-    void Texture::DestroyImpl() {
-        TextureBase::DestroyImpl();
-
-        Device* device = ToBackend(GetDevice());
-
-        // In PIX's D3D12-only mode, there is no way to determine frame boundaries
-        // for WebGPU since Dawn does not manage DXGI swap chains. Without assistance,
-        // PIX will wait forever for a present that never happens.
-        // If we know we're dealing with a swapbuffer texture, inform PIX we've
-        // "presented" the texture so it can determine frame boundaries and use its
-        // contents for the UI.
-        if (mSwapChainTexture) {
-            ID3D12SharingContract* d3dSharingContract = device->GetSharingContract();
-            if (d3dSharingContract != nullptr) {
-                d3dSharingContract->Present(mResourceAllocation.GetD3D12Resource(), 0, 0);
-            }
-        }
-
-        device->DeallocateMemory(mResourceAllocation);
-
-        // Now that we've deallocated the memory, the texture is no longer a swap chain texture.
-        // We can set mSwapChainTexture to false to avoid passing a nullptr to
-        // ID3D12SharingContract::Present.
-        mSwapChainTexture = false;
-
-        // Now that the texture has been destroyed. It should release the refptr
-        // of the d3d11on12 resource.
-        mD3D11on12Resource = nullptr;
-    }
-
-    DXGI_FORMAT Texture::GetD3D12Format() const {
-        return D3D12TextureFormat(GetFormat().format);
-    }
-
-    ID3D12Resource* Texture::GetD3D12Resource() const {
-        return mResourceAllocation.GetD3D12Resource();
-    }
-
-    DXGI_FORMAT Texture::GetD3D12CopyableSubresourceFormat(Aspect aspect) const {
-        ASSERT(GetFormat().aspects & aspect);
-
-        switch (GetFormat().format) {
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-            case wgpu::TextureFormat::Depth32FloatStencil8:
-            case wgpu::TextureFormat::Stencil8:
-                switch (aspect) {
-                    case Aspect::Depth:
-                        return DXGI_FORMAT_R32_FLOAT;
-                    case Aspect::Stencil:
-                        return DXGI_FORMAT_R8_UINT;
-                    default:
-                        UNREACHABLE();
-                }
-            default:
-                ASSERT(HasOneBit(GetFormat().aspects));
-                return GetD3D12Format();
-        }
-    }
-
-    MaybeError Texture::AcquireKeyedMutex() {
-        ASSERT(mD3D11on12Resource != nullptr);
-        return mD3D11on12Resource->AcquireKeyedMutex();
-    }
-
-    void Texture::ReleaseKeyedMutex() {
-        ASSERT(mD3D11on12Resource != nullptr);
-        mD3D11on12Resource->ReleaseKeyedMutex();
-    }
-
-    void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                             wgpu::TextureUsage usage,
-                                             const SubresourceRange& range) {
-        TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()), range);
-    }
-
-    void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                                wgpu::TextureUsage usage) {
-        TrackUsageAndTransitionNow(commandContext, D3D12TextureUsage(usage, GetFormat()),
-                                   GetAllSubresources());
-    }
-
-    void Texture::TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                                D3D12_RESOURCE_STATES newState) {
-        TrackUsageAndTransitionNow(commandContext, newState, GetAllSubresources());
-    }
-
-    void Texture::TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                             D3D12_RESOURCE_STATES newState,
-                                             const SubresourceRange& range) {
-        if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
-            // Track the underlying heap to ensure residency.
-            Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
-            commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
-        }
-
-        std::vector<D3D12_RESOURCE_BARRIER> barriers;
-
-        // TODO(enga): Consider adding a Count helper.
-        uint32_t aspectCount = 0;
-        for (Aspect aspect : IterateEnumMask(range.aspects)) {
-            aspectCount++;
-            DAWN_UNUSED(aspect);
-        }
-
-        barriers.reserve(range.levelCount * range.layerCount * aspectCount);
-
-        TransitionUsageAndGetResourceBarrier(commandContext, &barriers, newState, range);
-        if (barriers.size()) {
-            commandContext->GetCommandList()->ResourceBarrier(barriers.size(), barriers.data());
-        }
-    }
-
-    void Texture::TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
-                                             const SubresourceRange& range,
-                                             StateAndDecay* state,
-                                             D3D12_RESOURCE_STATES newState,
-                                             ExecutionSerial pendingCommandSerial) const {
-        D3D12_RESOURCE_STATES lastState = state->lastState;
-
-        // If the transition is from-UAV-to-UAV, then a UAV barrier is needed.
-        // If one of the usages isn't UAV, then other barriers are used.
-        bool needsUAVBarrier = lastState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS &&
-                               newState == D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
-
-        if (needsUAVBarrier) {
-            D3D12_RESOURCE_BARRIER barrier;
-            barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
-            barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
-            barrier.UAV.pResource = GetD3D12Resource();
-            barriers->push_back(barrier);
-            return;
-        }
-
-        // Reuse the subresource(s) directly and avoid transition when it isn't needed, and
-        // return false.
-        if (lastState == newState) {
-            return;
-        }
-
-        // The COMMON state represents a state where no write operations can be pending, and
-        // where all pixels are uncompressed. This makes it possible to transition to and
-        // from some states without synchronization (i.e. without an explicit
-        // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
-        // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
-        // state when all of the following are true: 1) the texture is accessed on a command
-        // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
-        // 3) the texture was promoted implicitly to a read-only state and is still in that
-        // state.
-        // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
-
-        // To track implicit decays, we must record the pending serial on which that
-        // transition will occur. When that texture is used again, the previously recorded
-        // serial must be compared to the last completed serial to determine if the texture
-        // has implicity decayed to the common state.
-        if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
-            lastState = D3D12_RESOURCE_STATE_COMMON;
-        }
-
-        // Update the tracked state.
-        state->lastState = newState;
-
-        // Destination states that qualify for an implicit promotion for a
-        // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
-        // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
-        {
-            static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
-                D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
-                D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
-
-            if (lastState == D3D12_RESOURCE_STATE_COMMON) {
-                if (IsSubset(newState, kD3D12PromotableReadOnlyStates)) {
-                    // Implicit texture state decays can only occur when the texture was implicitly
-                    // transitioned to a read-only state. isValidToDecay is needed to differentiate
-                    // between resources that were implictly or explicitly transitioned to a
-                    // read-only state.
-                    state->isValidToDecay = true;
-                    state->lastDecaySerial = pendingCommandSerial;
-                    return;
-                } else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
-                    state->isValidToDecay = false;
-                    return;
-                }
-            }
-        }
-
+    if (needsUAVBarrier) {
         D3D12_RESOURCE_BARRIER barrier;
-        barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+        barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_UAV;
         barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
-        barrier.Transition.pResource = GetD3D12Resource();
-        barrier.Transition.StateBefore = lastState;
-        barrier.Transition.StateAfter = newState;
+        barrier.UAV.pResource = GetD3D12Resource();
+        barriers->push_back(barrier);
+        return;
+    }
 
-        bool isFullRange = range.baseArrayLayer == 0 && range.baseMipLevel == 0 &&
-                           range.layerCount == GetArrayLayers() &&
-                           range.levelCount == GetNumMipLevels() &&
-                           range.aspects == GetFormat().aspects;
+    // Reuse the subresource(s) directly and avoid transition when it isn't needed, and
+    // return false.
+    if (lastState == newState) {
+        return;
+    }
 
-        // Use a single transition for all subresources if possible.
-        if (isFullRange) {
-            barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
-            barriers->push_back(barrier);
-        } else {
-            for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
-                    for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
-                        barrier.Transition.Subresource =
-                            GetSubresourceIndex(range.baseMipLevel + mipLevel,
-                                                range.baseArrayLayer + arrayLayer, aspect);
-                        barriers->push_back(barrier);
-                    }
+    // The COMMON state represents a state where no write operations can be pending, and
+    // where all pixels are uncompressed. This makes it possible to transition to and
+    // from some states without synchronization (i.e. without an explicit
+    // ResourceBarrier call). Textures can be implicitly promoted to 1) a single write
+    // state, or 2) multiple read states. Textures will implicitly decay to the COMMON
+    // state when all of the following are true: 1) the texture is accessed on a command
+    // list, 2) the ExecuteCommandLists call that uses that command list has ended, and
+    // 3) the texture was promoted implicitly to a read-only state and is still in that
+    // state.
+    // https://docs.microsoft.com/en-us/windows/desktop/direct3d12/using-resource-barriers-to-synchronize-resource-states-in-direct3d-12#implicit-state-transitions
+
+    // To track implicit decays, we must record the pending serial on which that
+    // transition will occur. When that texture is used again, the previously recorded
+    // serial must be compared to the last completed serial to determine if the texture
+    // has implicity decayed to the common state.
+    if (state->isValidToDecay && pendingCommandSerial > state->lastDecaySerial) {
+        lastState = D3D12_RESOURCE_STATE_COMMON;
+    }
+
+    // Update the tracked state.
+    state->lastState = newState;
+
+    // Destination states that qualify for an implicit promotion for a
+    // non-simultaneous-access texture: NON_PIXEL_SHADER_RESOURCE,
+    // PIXEL_SHADER_RESOURCE, COPY_SRC, COPY_DEST.
+    {
+        static constexpr D3D12_RESOURCE_STATES kD3D12PromotableReadOnlyStates =
+            D3D12_RESOURCE_STATE_COPY_SOURCE | D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE |
+            D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE;
+
+        if (lastState == D3D12_RESOURCE_STATE_COMMON) {
+            if (IsSubset(newState, kD3D12PromotableReadOnlyStates)) {
+                // Implicit texture state decays can only occur when the texture was implicitly
+                // transitioned to a read-only state. isValidToDecay is needed to differentiate
+                // between resources that were implictly or explicitly transitioned to a
+                // read-only state.
+                state->isValidToDecay = true;
+                state->lastDecaySerial = pendingCommandSerial;
+                return;
+            } else if (newState == D3D12_RESOURCE_STATE_COPY_DEST) {
+                state->isValidToDecay = false;
+                return;
+            }
+        }
+    }
+
+    D3D12_RESOURCE_BARRIER barrier;
+    barrier.Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
+    barrier.Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
+    barrier.Transition.pResource = GetD3D12Resource();
+    barrier.Transition.StateBefore = lastState;
+    barrier.Transition.StateAfter = newState;
+
+    bool isFullRange = range.baseArrayLayer == 0 && range.baseMipLevel == 0 &&
+                       range.layerCount == GetArrayLayers() &&
+                       range.levelCount == GetNumMipLevels() &&
+                       range.aspects == GetFormat().aspects;
+
+    // Use a single transition for all subresources if possible.
+    if (isFullRange) {
+        barrier.Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
+        barriers->push_back(barrier);
+    } else {
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t arrayLayer = 0; arrayLayer < range.layerCount; ++arrayLayer) {
+                for (uint32_t mipLevel = 0; mipLevel < range.levelCount; ++mipLevel) {
+                    barrier.Transition.Subresource = GetSubresourceIndex(
+                        range.baseMipLevel + mipLevel, range.baseArrayLayer + arrayLayer, aspect);
+                    barriers->push_back(barrier);
                 }
             }
         }
-
-        state->isValidToDecay = false;
     }
 
-    void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
-        // Textures with keyed mutexes can be written from other graphics queues. Hence, they
-        // must be acquired before command list submission to ensure work from the other queues
-        // has finished. See Device::ExecuteCommandContext.
-        if (mD3D11on12Resource != nullptr) {
-            commandContext->AddToSharedTextureList(this);
-        }
+    state->isValidToDecay = false;
+}
+
+void Texture::HandleTransitionSpecialCases(CommandRecordingContext* commandContext) {
+    // Textures with keyed mutexes can be written from other graphics queues. Hence, they
+    // must be acquired before command list submission to ensure work from the other queues
+    // has finished. See Device::ExecuteCommandContext.
+    if (mD3D11on12Resource != nullptr) {
+        commandContext->AddToSharedTextureList(this);
+    }
+}
+
+void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                   std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                                   wgpu::TextureUsage usage,
+                                                   const SubresourceRange& range) {
+    TransitionUsageAndGetResourceBarrier(commandContext, barrier,
+                                         D3D12TextureUsage(usage, GetFormat()), range);
+}
+
+void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                                   std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+                                                   D3D12_RESOURCE_STATES newState,
+                                                   const SubresourceRange& range) {
+    HandleTransitionSpecialCases(commandContext);
+
+    const ExecutionSerial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
+
+    mSubresourceStateAndDecay.Update(range, [&](const SubresourceRange& updateRange,
+                                                StateAndDecay* state) {
+        TransitionSubresourceRange(barriers, updateRange, state, newState, pendingCommandSerial);
+    });
+}
+
+void Texture::TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
+                                                     std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+                                                     const TextureSubresourceUsage& textureUsages) {
+    if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
+        // Track the underlying heap to ensure residency.
+        Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
+        commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
     }
 
-    void Texture::TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
-                                                       std::vector<D3D12_RESOURCE_BARRIER>* barrier,
-                                                       wgpu::TextureUsage usage,
-                                                       const SubresourceRange& range) {
-        TransitionUsageAndGetResourceBarrier(commandContext, barrier,
-                                             D3D12TextureUsage(usage, GetFormat()), range);
-    }
+    HandleTransitionSpecialCases(commandContext);
 
-    void Texture::TransitionUsageAndGetResourceBarrier(
-        CommandRecordingContext* commandContext,
-        std::vector<D3D12_RESOURCE_BARRIER>* barriers,
-        D3D12_RESOURCE_STATES newState,
-        const SubresourceRange& range) {
-        HandleTransitionSpecialCases(commandContext);
+    const ExecutionSerial pendingCommandSerial = ToBackend(GetDevice())->GetPendingCommandSerial();
 
-        const ExecutionSerial pendingCommandSerial =
-            ToBackend(GetDevice())->GetPendingCommandSerial();
-
-        mSubresourceStateAndDecay.Update(
-            range, [&](const SubresourceRange& updateRange, StateAndDecay* state) {
-                TransitionSubresourceRange(barriers, updateRange, state, newState,
-                                           pendingCommandSerial);
-            });
-    }
-
-    void Texture::TrackUsageAndGetResourceBarrierForPass(
-        CommandRecordingContext* commandContext,
-        std::vector<D3D12_RESOURCE_BARRIER>* barriers,
-        const TextureSubresourceUsage& textureUsages) {
-        if (mResourceAllocation.GetInfo().mMethod != AllocationMethod::kExternal) {
-            // Track the underlying heap to ensure residency.
-            Heap* heap = ToBackend(mResourceAllocation.GetResourceHeap());
-            commandContext->TrackHeapUsage(heap, GetDevice()->GetPendingCommandSerial());
-        }
-
-        HandleTransitionSpecialCases(commandContext);
-
-        const ExecutionSerial pendingCommandSerial =
-            ToBackend(GetDevice())->GetPendingCommandSerial();
-
-        mSubresourceStateAndDecay.Merge(textureUsages, [&](const SubresourceRange& mergeRange,
-                                                           StateAndDecay* state,
-                                                           wgpu::TextureUsage usage) {
+    mSubresourceStateAndDecay.Merge(
+        textureUsages,
+        [&](const SubresourceRange& mergeRange, StateAndDecay* state, wgpu::TextureUsage usage) {
             // Skip if this subresource is not used during the current pass
             if (usage == wgpu::TextureUsage::None) {
                 return;
@@ -924,484 +910,479 @@
             D3D12_RESOURCE_STATES newState = D3D12TextureUsage(usage, GetFormat());
             TransitionSubresourceRange(barriers, mergeRange, state, newState, pendingCommandSerial);
         });
-    }
+}
 
-    D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(const Format& format,
-                                                            uint32_t mipLevel,
-                                                            uint32_t baseSlice,
-                                                            uint32_t sliceCount) const {
-        D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
-        rtvDesc.Format = D3D12TextureFormat(format.format);
-        if (IsMultisampledTexture()) {
-            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
-            ASSERT(GetNumMipLevels() == 1);
-            ASSERT(sliceCount == 1);
-            ASSERT(baseSlice == 0);
-            ASSERT(mipLevel == 0);
-            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
-            return rtvDesc;
-        }
-        switch (GetDimension()) {
-            case wgpu::TextureDimension::e2D:
-                // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
-                // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
-                // them as 1-layer 2D array textures. (Just like how we treat SRVs)
-                // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
-                // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
-                // _rtv
-                rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
-                rtvDesc.Texture2DArray.FirstArraySlice = baseSlice;
-                rtvDesc.Texture2DArray.ArraySize = sliceCount;
-                rtvDesc.Texture2DArray.MipSlice = mipLevel;
-                rtvDesc.Texture2DArray.PlaneSlice = 0;
-                break;
-            case wgpu::TextureDimension::e3D:
-                rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
-                rtvDesc.Texture3D.MipSlice = mipLevel;
-                rtvDesc.Texture3D.FirstWSlice = baseSlice;
-                rtvDesc.Texture3D.WSize = sliceCount;
-                break;
-            case wgpu::TextureDimension::e1D:
-                UNREACHABLE();
-                break;
-        }
+D3D12_RENDER_TARGET_VIEW_DESC Texture::GetRTVDescriptor(const Format& format,
+                                                        uint32_t mipLevel,
+                                                        uint32_t baseSlice,
+                                                        uint32_t sliceCount) const {
+    D3D12_RENDER_TARGET_VIEW_DESC rtvDesc;
+    rtvDesc.Format = D3D12TextureFormat(format.format);
+    if (IsMultisampledTexture()) {
+        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+        ASSERT(GetNumMipLevels() == 1);
+        ASSERT(sliceCount == 1);
+        ASSERT(baseSlice == 0);
+        ASSERT(mipLevel == 0);
+        rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DMS;
         return rtvDesc;
     }
+    switch (GetDimension()) {
+        case wgpu::TextureDimension::e2D:
+            // Currently we always use D3D12_TEX2D_ARRAY_RTV because we cannot specify base
+            // array layer and layer count in D3D12_TEX2D_RTV. For 2D texture views, we treat
+            // them as 1-layer 2D array textures. (Just like how we treat SRVs)
+            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_rtv
+            // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array
+            // _rtv
+            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE2DARRAY;
+            rtvDesc.Texture2DArray.FirstArraySlice = baseSlice;
+            rtvDesc.Texture2DArray.ArraySize = sliceCount;
+            rtvDesc.Texture2DArray.MipSlice = mipLevel;
+            rtvDesc.Texture2DArray.PlaneSlice = 0;
+            break;
+        case wgpu::TextureDimension::e3D:
+            rtvDesc.ViewDimension = D3D12_RTV_DIMENSION_TEXTURE3D;
+            rtvDesc.Texture3D.MipSlice = mipLevel;
+            rtvDesc.Texture3D.FirstWSlice = baseSlice;
+            rtvDesc.Texture3D.WSize = sliceCount;
+            break;
+        case wgpu::TextureDimension::e1D:
+            UNREACHABLE();
+            break;
+    }
+    return rtvDesc;
+}
 
-    D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
-                                                            uint32_t baseArrayLayer,
-                                                            uint32_t layerCount,
-                                                            Aspect aspects,
-                                                            bool depthReadOnly,
-                                                            bool stencilReadOnly) const {
-        D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
-        dsvDesc.Format = GetD3D12Format();
-        dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
-        if (depthReadOnly && aspects & Aspect::Depth) {
-            dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_DEPTH;
-        }
-        if (stencilReadOnly && aspects & Aspect::Stencil) {
-            dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_STENCIL;
-        }
-
-        if (IsMultisampledTexture()) {
-            ASSERT(GetNumMipLevels() == 1);
-            ASSERT(layerCount == 1);
-            ASSERT(baseArrayLayer == 0);
-            ASSERT(mipLevel == 0);
-            dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
-        } else {
-            dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
-            dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
-            dsvDesc.Texture2DArray.ArraySize = layerCount;
-            dsvDesc.Texture2DArray.MipSlice = mipLevel;
-        }
-
-        return dsvDesc;
+D3D12_DEPTH_STENCIL_VIEW_DESC Texture::GetDSVDescriptor(uint32_t mipLevel,
+                                                        uint32_t baseArrayLayer,
+                                                        uint32_t layerCount,
+                                                        Aspect aspects,
+                                                        bool depthReadOnly,
+                                                        bool stencilReadOnly) const {
+    D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc;
+    dsvDesc.Format = GetD3D12Format();
+    dsvDesc.Flags = D3D12_DSV_FLAG_NONE;
+    if (depthReadOnly && aspects & Aspect::Depth) {
+        dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_DEPTH;
+    }
+    if (stencilReadOnly && aspects & Aspect::Stencil) {
+        dsvDesc.Flags |= D3D12_DSV_FLAG_READ_ONLY_STENCIL;
     }
 
-    MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
-                                     const SubresourceRange& range,
-                                     TextureBase::ClearValue clearValue) {
-        ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
+    if (IsMultisampledTexture()) {
+        ASSERT(GetNumMipLevels() == 1);
+        ASSERT(layerCount == 1);
+        ASSERT(baseArrayLayer == 0);
+        ASSERT(mipLevel == 0);
+        dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DMS;
+    } else {
+        dsvDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2DARRAY;
+        dsvDesc.Texture2DArray.FirstArraySlice = baseArrayLayer;
+        dsvDesc.Texture2DArray.ArraySize = layerCount;
+        dsvDesc.Texture2DArray.MipSlice = mipLevel;
+    }
 
-        Device* device = ToBackend(GetDevice());
+    return dsvDesc;
+}
 
-        uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
-        float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+                                 const SubresourceRange& range,
+                                 TextureBase::ClearValue clearValue) {
+    ID3D12GraphicsCommandList* commandList = commandContext->GetCommandList();
 
-        if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) != 0) {
-            TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
+    Device* device = ToBackend(GetDevice());
 
-            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
-                 ++level) {
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
-                    // Iterate the aspects individually to determine which clear flags to use.
-                    D3D12_CLEAR_FLAGS clearFlags = {};
-                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                        if (clearValue == TextureBase::ClearValue::Zero &&
-                            IsSubresourceContentInitialized(
-                                SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
-                            // Skip lazy clears if already initialized.
-                            continue;
-                        }
+    uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+    float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
 
-                        switch (aspect) {
-                            case Aspect::Depth:
-                                clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
-                                break;
-                            case Aspect::Stencil:
-                                clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
-                                break;
-                            default:
-                                UNREACHABLE();
-                        }
-                    }
+    if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL) != 0) {
+        TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_DEPTH_WRITE, range);
 
-                    if (clearFlags == 0) {
-                        continue;
-                    }
-
-                    CPUDescriptorHeapAllocation dsvHandle;
-                    DAWN_TRY_ASSIGN(
-                        dsvHandle,
-                        device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
-                    const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor =
-                        dsvHandle.GetBaseDescriptor();
-                    D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc =
-                        GetDSVDescriptor(level, layer, 1, range.aspects, false, false);
-                    device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
-                                                                     baseDescriptor);
-
-                    commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
-                                                       clearColor, 0, nullptr);
-                }
-            }
-        } else if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) != 0) {
-            TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET, range);
-
-            const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor, fClearColor};
-
-            ASSERT(range.aspects == Aspect::Color);
-            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
-                 ++level) {
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+             ++level) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                // Iterate the aspects individually to determine which clear flags to use.
+                D3D12_CLEAR_FLAGS clearFlags = {};
+                for (Aspect aspect : IterateEnumMask(range.aspects)) {
                     if (clearValue == TextureBase::ClearValue::Zero &&
                         IsSubresourceContentInitialized(
-                            SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+                            SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
                         // Skip lazy clears if already initialized.
                         continue;
                     }
 
-                    CPUDescriptorHeapAllocation rtvHeap;
-                    DAWN_TRY_ASSIGN(
-                        rtvHeap,
-                        device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
-                    const D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetBaseDescriptor();
-
-                    uint32_t baseSlice = layer;
-                    uint32_t sliceCount = 1;
-                    if (GetDimension() == wgpu::TextureDimension::e3D) {
-                        baseSlice = 0;
-                        sliceCount = std::max(GetDepth() >> level, 1u);
-                    }
-                    D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
-                        GetRTVDescriptor(GetFormat(), level, baseSlice, sliceCount);
-                    device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
-                                                                     rtvHandle);
-                    commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
-                }
-            }
-        } else {
-            // create temp buffer with clear color to copy to the texture image
-            TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
-
-            for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
-
-                Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
-
-                uint32_t bytesPerRow =
-                    Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
-                          kTextureBytesPerRowAlignment);
-                uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
-                                      largestMipSize.depthOrArrayLayers;
-                DynamicUploader* uploader = device->GetDynamicUploader();
-                UploadHandle uploadHandle;
-                DAWN_TRY_ASSIGN(uploadHandle,
-                                uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
-                                                   blockInfo.byteSize));
-                memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
-
-                for (uint32_t level = range.baseMipLevel;
-                     level < range.baseMipLevel + range.levelCount; ++level) {
-                    // compute d3d12 texture copy locations for texture and buffer
-                    Extent3D copySize = GetMipLevelPhysicalSize(level);
-
-                    for (uint32_t layer = range.baseArrayLayer;
-                         layer < range.baseArrayLayer + range.layerCount; ++layer) {
-                        if (clearValue == TextureBase::ClearValue::Zero &&
-                            IsSubresourceContentInitialized(
-                                SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
-                            // Skip lazy clears if already initialized.
-                            continue;
-                        }
-
-                        TextureCopy textureCopy;
-                        textureCopy.texture = this;
-                        textureCopy.origin = {0, 0, layer};
-                        textureCopy.mipLevel = level;
-                        textureCopy.aspect = aspect;
-                        RecordBufferTextureCopyWithBufferHandle(
-                            BufferTextureCopyDirection::B2T, commandList,
-                            ToBackend(uploadHandle.stagingBuffer)->GetResource(),
-                            uploadHandle.startOffset, bytesPerRow, GetHeight(), textureCopy,
-                            copySize);
-                    }
-                }
-            }
-        }
-        if (clearValue == TextureBase::ClearValue::Zero) {
-            SetIsSubresourceContentInitialized(true, range);
-            GetDevice()->IncrementLazyClearCountForTesting();
-        }
-        return {};
-    }
-
-    void Texture::SetLabelHelper(const char* prefix) {
-        SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
-                     GetLabel());
-    }
-
-    void Texture::SetLabelImpl() {
-        SetLabelHelper("Dawn_InternalTexture");
-    }
-
-    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                                      const SubresourceRange& range) {
-        if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-            return;
-        }
-        if (!IsSubresourceContentInitialized(range)) {
-            // If subresource has not been initialized, clear it to black as it could contain
-            // dirty bits from recycled memory
-            GetDevice()->ConsumedError(
-                ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
-        }
-    }
-
-    bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
-        return lastState == other.lastState && lastDecaySerial == other.lastDecaySerial &&
-               isValidToDecay == other.isValidToDecay;
-    }
-
-    // static
-    Ref<TextureView> TextureView::Create(TextureBase* texture,
-                                         const TextureViewDescriptor* descriptor) {
-        return AcquireRef(new TextureView(texture, descriptor));
-    }
-
-    TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
-        : TextureViewBase(texture, descriptor) {
-        mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
-        mSrvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
-
-        UINT planeSlice = 0;
-        const Format& textureFormat = texture->GetFormat();
-        if (textureFormat.HasDepthOrStencil()) {
-            // Configure the SRV descriptor to reinterpret the texture allocated as
-            // TYPELESS as a single-plane shader-accessible view.
-            switch (textureFormat.format) {
-                case wgpu::TextureFormat::Depth32Float:
-                case wgpu::TextureFormat::Depth24Plus:
-                    mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT;
-                    break;
-                case wgpu::TextureFormat::Depth16Unorm:
-                    mSrvDesc.Format = DXGI_FORMAT_R16_UNORM;
-                    break;
-                case wgpu::TextureFormat::Stencil8:
-                case wgpu::TextureFormat::Depth24UnormStencil8: {
-                    Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
-                    ASSERT(aspects != Aspect::None);
-                    if (!HasZeroOrOneBits(aspects)) {
-                        // A single aspect is not selected. The texture view must not be
-                        // sampled.
-                        mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
-                        break;
-                    }
-                    switch (aspects) {
+                    switch (aspect) {
                         case Aspect::Depth:
-                            planeSlice = 0;
-                            mSrvDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
+                            clearFlags |= D3D12_CLEAR_FLAG_DEPTH;
                             break;
                         case Aspect::Stencil:
-                            planeSlice = 1;
-                            mSrvDesc.Format = DXGI_FORMAT_X24_TYPELESS_G8_UINT;
-                            // Stencil is accessed using the .g component in the shader.
-                            // Map it to the zeroth component to match other APIs.
-                            mSrvDesc.Shader4ComponentMapping =
-                                D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
-                                    D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
-                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
-                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
-                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
+                            clearFlags |= D3D12_CLEAR_FLAG_STENCIL;
                             break;
                         default:
                             UNREACHABLE();
-                            break;
                     }
-                    break;
                 }
-                case wgpu::TextureFormat::Depth24PlusStencil8:
-                case wgpu::TextureFormat::Depth32FloatStencil8: {
-                    Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
-                    ASSERT(aspects != Aspect::None);
-                    if (!HasZeroOrOneBits(aspects)) {
-                        // A single aspect is not selected. The texture view must not be
-                        // sampled.
-                        mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
-                        break;
-                    }
-                    switch (aspects) {
-                        case Aspect::Depth:
-                            planeSlice = 0;
-                            mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
-                            break;
-                        case Aspect::Stencil:
-                            planeSlice = 1;
-                            mSrvDesc.Format = DXGI_FORMAT_X32_TYPELESS_G8X24_UINT;
-                            // Stencil is accessed using the .g component in the shader.
-                            // Map it to the zeroth component to match other APIs.
-                            mSrvDesc.Shader4ComponentMapping =
-                                D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
-                                    D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
-                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
-                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
-                                    D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
-                            break;
-                        default:
-                            UNREACHABLE();
-                            break;
-                    }
-                    break;
+
+                if (clearFlags == 0) {
+                    continue;
                 }
-                default:
-                    UNREACHABLE();
-                    break;
+
+                CPUDescriptorHeapAllocation dsvHandle;
+                DAWN_TRY_ASSIGN(
+                    dsvHandle,
+                    device->GetDepthStencilViewAllocator()->AllocateTransientCPUDescriptors());
+                const D3D12_CPU_DESCRIPTOR_HANDLE baseDescriptor = dsvHandle.GetBaseDescriptor();
+                D3D12_DEPTH_STENCIL_VIEW_DESC dsvDesc =
+                    GetDSVDescriptor(level, layer, 1, range.aspects, false, false);
+                device->GetD3D12Device()->CreateDepthStencilView(GetD3D12Resource(), &dsvDesc,
+                                                                 baseDescriptor);
+
+                commandList->ClearDepthStencilView(baseDescriptor, clearFlags, fClearColor,
+                                                   clearColor, 0, nullptr);
             }
         }
+    } else if ((mD3D12ResourceFlags & D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET) != 0) {
+        TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_RENDER_TARGET, range);
 
-        // Per plane view formats must have the plane slice number be the index of the plane in the
-        // array of textures.
-        if (texture->GetFormat().IsMultiPlanar()) {
-            const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
-            planeSlice = GetAspectIndex(planeAspect);
-            mSrvDesc.Format =
-                D3D12TextureFormat(texture->GetFormat().GetAspectInfo(planeAspect).format);
-        }
+        const float clearColorRGBA[4] = {fClearColor, fClearColor, fClearColor, fClearColor};
 
-        // Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
-        // and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
-        // array textures.
-        // Multisampled textures may only be one array layer, so we use
-        // D3D12_SRV_DIMENSION_TEXTURE2DMS.
-        // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
-        // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
-        if (GetTexture()->IsMultisampledTexture()) {
-            switch (descriptor->dimension) {
-                case wgpu::TextureViewDimension::e2DArray:
-                    ASSERT(texture->GetArrayLayers() == 1);
-                    [[fallthrough]];
-                case wgpu::TextureViewDimension::e2D:
-                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
-                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
-                    break;
+        ASSERT(range.aspects == Aspect::Color);
+        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+             ++level) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                if (clearValue == TextureBase::ClearValue::Zero &&
+                    IsSubresourceContentInitialized(
+                        SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+                    // Skip lazy clears if already initialized.
+                    continue;
+                }
 
-                default:
-                    UNREACHABLE();
+                CPUDescriptorHeapAllocation rtvHeap;
+                DAWN_TRY_ASSIGN(
+                    rtvHeap,
+                    device->GetRenderTargetViewAllocator()->AllocateTransientCPUDescriptors());
+                const D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = rtvHeap.GetBaseDescriptor();
+
+                uint32_t baseSlice = layer;
+                uint32_t sliceCount = 1;
+                if (GetDimension() == wgpu::TextureDimension::e3D) {
+                    baseSlice = 0;
+                    sliceCount = std::max(GetDepth() >> level, 1u);
+                }
+                D3D12_RENDER_TARGET_VIEW_DESC rtvDesc =
+                    GetRTVDescriptor(GetFormat(), level, baseSlice, sliceCount);
+                device->GetD3D12Device()->CreateRenderTargetView(GetD3D12Resource(), &rtvDesc,
+                                                                 rtvHandle);
+                commandList->ClearRenderTargetView(rtvHandle, clearColorRGBA, 0, nullptr);
             }
-        } else {
-            switch (descriptor->dimension) {
-                case wgpu::TextureViewDimension::e1D:
-                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE1D;
-                    mSrvDesc.Texture1D.MipLevels = descriptor->mipLevelCount;
-                    mSrvDesc.Texture1D.MostDetailedMip = descriptor->baseMipLevel;
-                    mSrvDesc.Texture1D.ResourceMinLODClamp = 0;
-                    break;
+        }
+    } else {
+        // create temp buffer with clear color to copy to the texture image
+        TrackUsageAndTransitionNow(commandContext, D3D12_RESOURCE_STATE_COPY_DEST, range);
 
-                case wgpu::TextureViewDimension::e2D:
-                case wgpu::TextureViewDimension::e2DArray:
-                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
-                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
-                    mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
-                    mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
-                    mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
-                    mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
-                    mSrvDesc.Texture2DArray.PlaneSlice = planeSlice;
-                    mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
-                    break;
-                case wgpu::TextureViewDimension::Cube:
-                case wgpu::TextureViewDimension::CubeArray:
-                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
-                    ASSERT(descriptor->arrayLayerCount % 6 == 0);
-                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
-                    mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
-                    mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
-                    mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
-                    mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
-                    mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
-                    break;
-                case wgpu::TextureViewDimension::e3D:
-                    ASSERT(texture->GetDimension() == wgpu::TextureDimension::e3D);
-                    mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE3D;
-                    mSrvDesc.Texture3D.MostDetailedMip = descriptor->baseMipLevel;
-                    mSrvDesc.Texture3D.MipLevels = descriptor->mipLevelCount;
-                    mSrvDesc.Texture3D.ResourceMinLODClamp = 0;
-                    break;
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
 
-                case wgpu::TextureViewDimension::Undefined:
-                    UNREACHABLE();
+            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+
+            uint32_t bytesPerRow =
+                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+                      kTextureBytesPerRowAlignment);
+            uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+                                  largestMipSize.depthOrArrayLayers;
+            DynamicUploader* uploader = device->GetDynamicUploader();
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle,
+                            uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+                                               blockInfo.byteSize));
+            memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                // compute d3d12 texture copy locations for texture and buffer
+                Extent3D copySize = GetMipLevelPhysicalSize(level);
+
+                for (uint32_t layer = range.baseArrayLayer;
+                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(
+                            SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+
+                    TextureCopy textureCopy;
+                    textureCopy.texture = this;
+                    textureCopy.origin = {0, 0, layer};
+                    textureCopy.mipLevel = level;
+                    textureCopy.aspect = aspect;
+                    RecordBufferTextureCopyWithBufferHandle(
+                        BufferTextureCopyDirection::B2T, commandList,
+                        ToBackend(uploadHandle.stagingBuffer)->GetResource(),
+                        uploadHandle.startOffset, bytesPerRow, GetHeight(), textureCopy, copySize);
+                }
             }
         }
     }
-
-    DXGI_FORMAT TextureView::GetD3D12Format() const {
-        return D3D12TextureFormat(GetFormat().format);
+    if (clearValue == TextureBase::ClearValue::Zero) {
+        SetIsSubresourceContentInitialized(true, range);
+        GetDevice()->IncrementLazyClearCountForTesting();
     }
+    return {};
+}
 
-    const D3D12_SHADER_RESOURCE_VIEW_DESC& TextureView::GetSRVDescriptor() const {
-        ASSERT(mSrvDesc.Format != DXGI_FORMAT_UNKNOWN);
-        return mSrvDesc;
+void Texture::SetLabelHelper(const char* prefix) {
+    SetDebugName(ToBackend(GetDevice()), mResourceAllocation.GetD3D12Resource(), prefix,
+                 GetLabel());
+}
+
+void Texture::SetLabelImpl() {
+    SetLabelHelper("Dawn_InternalTexture");
+}
+
+void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                  const SubresourceRange& range) {
+    if (!ToBackend(GetDevice())->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+        return;
     }
-
-    D3D12_RENDER_TARGET_VIEW_DESC TextureView::GetRTVDescriptor() const {
-        return ToBackend(GetTexture())
-            ->GetRTVDescriptor(GetFormat(), GetBaseMipLevel(), GetBaseArrayLayer(),
-                               GetLayerCount());
+    if (!IsSubresourceContentInitialized(range)) {
+        // If subresource has not been initialized, clear it to black as it could contain
+        // dirty bits from recycled memory
+        GetDevice()->ConsumedError(
+            ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
     }
+}
 
-    D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor(bool depthReadOnly,
-                                                                bool stencilReadOnly) const {
-        ASSERT(GetLevelCount() == 1);
-        return ToBackend(GetTexture())
-            ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount(),
-                               GetAspects(), depthReadOnly, stencilReadOnly);
-    }
+bool Texture::StateAndDecay::operator==(const Texture::StateAndDecay& other) const {
+    return lastState == other.lastState && lastDecaySerial == other.lastDecaySerial &&
+           isValidToDecay == other.isValidToDecay;
+}
 
-    D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
-        D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc;
-        uavDesc.Format = GetD3D12Format();
+// static
+Ref<TextureView> TextureView::Create(TextureBase* texture,
+                                     const TextureViewDescriptor* descriptor) {
+    return AcquireRef(new TextureView(texture, descriptor));
+}
 
-        ASSERT(!GetTexture()->IsMultisampledTexture());
-        switch (GetDimension()) {
-            case wgpu::TextureViewDimension::e1D:
-                uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE1D;
-                uavDesc.Texture1D.MipSlice = GetBaseMipLevel();
+TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
+    : TextureViewBase(texture, descriptor) {
+    mSrvDesc.Format = D3D12TextureFormat(descriptor->format);
+    mSrvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
+
+    UINT planeSlice = 0;
+    const Format& textureFormat = texture->GetFormat();
+    if (textureFormat.HasDepthOrStencil()) {
+        // Configure the SRV descriptor to reinterpret the texture allocated as
+        // TYPELESS as a single-plane shader-accessible view.
+        switch (textureFormat.format) {
+            case wgpu::TextureFormat::Depth32Float:
+            case wgpu::TextureFormat::Depth24Plus:
+                mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT;
                 break;
+            case wgpu::TextureFormat::Depth16Unorm:
+                mSrvDesc.Format = DXGI_FORMAT_R16_UNORM;
+                break;
+            case wgpu::TextureFormat::Stencil8:
+            case wgpu::TextureFormat::Depth24UnormStencil8: {
+                Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
+                ASSERT(aspects != Aspect::None);
+                if (!HasZeroOrOneBits(aspects)) {
+                    // A single aspect is not selected. The texture view must not be
+                    // sampled.
+                    mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
+                    break;
+                }
+                switch (aspects) {
+                    case Aspect::Depth:
+                        planeSlice = 0;
+                        mSrvDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
+                        break;
+                    case Aspect::Stencil:
+                        planeSlice = 1;
+                        mSrvDesc.Format = DXGI_FORMAT_X24_TYPELESS_G8_UINT;
+                        // Stencil is accessed using the .g component in the shader.
+                        // Map it to the zeroth component to match other APIs.
+                        mSrvDesc.Shader4ComponentMapping = D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
+                            D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
+                            D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                            D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                            D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
+                        break;
+                    default:
+                        UNREACHABLE();
+                        break;
+                }
+                break;
+            }
+            case wgpu::TextureFormat::Depth24PlusStencil8:
+            case wgpu::TextureFormat::Depth32FloatStencil8: {
+                Aspect aspects = SelectFormatAspects(textureFormat, descriptor->aspect);
+                ASSERT(aspects != Aspect::None);
+                if (!HasZeroOrOneBits(aspects)) {
+                    // A single aspect is not selected. The texture view must not be
+                    // sampled.
+                    mSrvDesc.Format = DXGI_FORMAT_UNKNOWN;
+                    break;
+                }
+                switch (aspects) {
+                    case Aspect::Depth:
+                        planeSlice = 0;
+                        mSrvDesc.Format = DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
+                        break;
+                    case Aspect::Stencil:
+                        planeSlice = 1;
+                        mSrvDesc.Format = DXGI_FORMAT_X32_TYPELESS_G8X24_UINT;
+                        // Stencil is accessed using the .g component in the shader.
+                        // Map it to the zeroth component to match other APIs.
+                        mSrvDesc.Shader4ComponentMapping = D3D12_ENCODE_SHADER_4_COMPONENT_MAPPING(
+                            D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1,
+                            D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                            D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0,
+                            D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1);
+                        break;
+                    default:
+                        UNREACHABLE();
+                        break;
+                }
+                break;
+            }
+            default:
+                UNREACHABLE();
+                break;
+        }
+    }
+
+    // Per plane view formats must have the plane slice number be the index of the plane in the
+    // array of textures.
+    if (texture->GetFormat().IsMultiPlanar()) {
+        const Aspect planeAspect = ConvertViewAspect(GetFormat(), descriptor->aspect);
+        planeSlice = GetAspectIndex(planeAspect);
+        mSrvDesc.Format =
+            D3D12TextureFormat(texture->GetFormat().GetAspectInfo(planeAspect).format);
+    }
+
+    // Currently we always use D3D12_TEX2D_ARRAY_SRV because we cannot specify base array layer
+    // and layer count in D3D12_TEX2D_SRV. For 2D texture views, we treat them as 1-layer 2D
+    // array textures.
+    // Multisampled textures may only be one array layer, so we use
+    // D3D12_SRV_DIMENSION_TEXTURE2DMS.
+    // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_srv
+    // https://docs.microsoft.com/en-us/windows/desktop/api/d3d12/ns-d3d12-d3d12_tex2d_array_srv
+    if (GetTexture()->IsMultisampledTexture()) {
+        switch (descriptor->dimension) {
+            case wgpu::TextureViewDimension::e2DArray:
+                ASSERT(texture->GetArrayLayers() == 1);
+                [[fallthrough]];
+            case wgpu::TextureViewDimension::e2D:
+                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DMS;
+                break;
+
+            default:
+                UNREACHABLE();
+        }
+    } else {
+        switch (descriptor->dimension) {
+            case wgpu::TextureViewDimension::e1D:
+                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE1D;
+                mSrvDesc.Texture1D.MipLevels = descriptor->mipLevelCount;
+                mSrvDesc.Texture1D.MostDetailedMip = descriptor->baseMipLevel;
+                mSrvDesc.Texture1D.ResourceMinLODClamp = 0;
+                break;
+
             case wgpu::TextureViewDimension::e2D:
             case wgpu::TextureViewDimension::e2DArray:
-                uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
-                uavDesc.Texture2DArray.FirstArraySlice = GetBaseArrayLayer();
-                uavDesc.Texture2DArray.ArraySize = GetLayerCount();
-                uavDesc.Texture2DArray.MipSlice = GetBaseMipLevel();
-                uavDesc.Texture2DArray.PlaneSlice = 0;
+                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2DARRAY;
+                mSrvDesc.Texture2DArray.ArraySize = descriptor->arrayLayerCount;
+                mSrvDesc.Texture2DArray.FirstArraySlice = descriptor->baseArrayLayer;
+                mSrvDesc.Texture2DArray.MipLevels = descriptor->mipLevelCount;
+                mSrvDesc.Texture2DArray.MostDetailedMip = descriptor->baseMipLevel;
+                mSrvDesc.Texture2DArray.PlaneSlice = planeSlice;
+                mSrvDesc.Texture2DArray.ResourceMinLODClamp = 0;
                 break;
-            case wgpu::TextureViewDimension::e3D:
-                uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE3D;
-                uavDesc.Texture3D.FirstWSlice = 0;
-                uavDesc.Texture3D.WSize = GetTexture()->GetDepth() >> GetBaseMipLevel();
-                uavDesc.Texture3D.MipSlice = GetBaseMipLevel();
-                break;
-            // Cube and Cubemap can't be used as storage texture. So there is no need to create UAV
-            // descriptor for them.
             case wgpu::TextureViewDimension::Cube:
             case wgpu::TextureViewDimension::CubeArray:
+                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e2D);
+                ASSERT(descriptor->arrayLayerCount % 6 == 0);
+                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURECUBEARRAY;
+                mSrvDesc.TextureCubeArray.First2DArrayFace = descriptor->baseArrayLayer;
+                mSrvDesc.TextureCubeArray.NumCubes = descriptor->arrayLayerCount / 6;
+                mSrvDesc.TextureCubeArray.MostDetailedMip = descriptor->baseMipLevel;
+                mSrvDesc.TextureCubeArray.MipLevels = descriptor->mipLevelCount;
+                mSrvDesc.TextureCubeArray.ResourceMinLODClamp = 0;
+                break;
+            case wgpu::TextureViewDimension::e3D:
+                ASSERT(texture->GetDimension() == wgpu::TextureDimension::e3D);
+                mSrvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE3D;
+                mSrvDesc.Texture3D.MostDetailedMip = descriptor->baseMipLevel;
+                mSrvDesc.Texture3D.MipLevels = descriptor->mipLevelCount;
+                mSrvDesc.Texture3D.ResourceMinLODClamp = 0;
+                break;
+
             case wgpu::TextureViewDimension::Undefined:
                 UNREACHABLE();
         }
-        return uavDesc;
     }
+}
+
+DXGI_FORMAT TextureView::GetD3D12Format() const {
+    return D3D12TextureFormat(GetFormat().format);
+}
+
+const D3D12_SHADER_RESOURCE_VIEW_DESC& TextureView::GetSRVDescriptor() const {
+    ASSERT(mSrvDesc.Format != DXGI_FORMAT_UNKNOWN);
+    return mSrvDesc;
+}
+
+D3D12_RENDER_TARGET_VIEW_DESC TextureView::GetRTVDescriptor() const {
+    return ToBackend(GetTexture())
+        ->GetRTVDescriptor(GetFormat(), GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount());
+}
+
+D3D12_DEPTH_STENCIL_VIEW_DESC TextureView::GetDSVDescriptor(bool depthReadOnly,
+                                                            bool stencilReadOnly) const {
+    ASSERT(GetLevelCount() == 1);
+    return ToBackend(GetTexture())
+        ->GetDSVDescriptor(GetBaseMipLevel(), GetBaseArrayLayer(), GetLayerCount(), GetAspects(),
+                           depthReadOnly, stencilReadOnly);
+}
+
+D3D12_UNORDERED_ACCESS_VIEW_DESC TextureView::GetUAVDescriptor() const {
+    D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc;
+    uavDesc.Format = GetD3D12Format();
+
+    ASSERT(!GetTexture()->IsMultisampledTexture());
+    switch (GetDimension()) {
+        case wgpu::TextureViewDimension::e1D:
+            uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE1D;
+            uavDesc.Texture1D.MipSlice = GetBaseMipLevel();
+            break;
+        case wgpu::TextureViewDimension::e2D:
+        case wgpu::TextureViewDimension::e2DArray:
+            uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE2DARRAY;
+            uavDesc.Texture2DArray.FirstArraySlice = GetBaseArrayLayer();
+            uavDesc.Texture2DArray.ArraySize = GetLayerCount();
+            uavDesc.Texture2DArray.MipSlice = GetBaseMipLevel();
+            uavDesc.Texture2DArray.PlaneSlice = 0;
+            break;
+        case wgpu::TextureViewDimension::e3D:
+            uavDesc.ViewDimension = D3D12_UAV_DIMENSION_TEXTURE3D;
+            uavDesc.Texture3D.FirstWSlice = 0;
+            uavDesc.Texture3D.WSize = GetTexture()->GetDepth() >> GetBaseMipLevel();
+            uavDesc.Texture3D.MipSlice = GetBaseMipLevel();
+            break;
+        // Cube and Cubemap can't be used as storage texture. So there is no need to create UAV
+        // descriptor for them.
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+        case wgpu::TextureViewDimension::Undefined:
+            UNREACHABLE();
+    }
+    return uavDesc;
+}
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/TextureD3D12.h b/src/dawn/native/d3d12/TextureD3D12.h
index ba010eb..05b80db 100644
--- a/src/dawn/native/d3d12/TextureD3D12.h
+++ b/src/dawn/native/d3d12/TextureD3D12.h
@@ -28,138 +28,135 @@
 
 namespace dawn::native::d3d12 {
 
-    class CommandRecordingContext;
-    class Device;
-    class D3D11on12ResourceCacheEntry;
+class CommandRecordingContext;
+class Device;
+class D3D11on12ResourceCacheEntry;
 
-    DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
-    MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
-                                                const TextureDescriptor* descriptor);
-    MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
-    MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
+DXGI_FORMAT D3D12TextureFormat(wgpu::TextureFormat format);
+MaybeError ValidateD3D12TextureCanBeWrapped(ID3D12Resource* d3d12Resource,
+                                            const TextureDescriptor* descriptor);
+MaybeError ValidateTextureDescriptorCanBeWrapped(const TextureDescriptor* descriptor);
+MaybeError ValidateD3D12VideoTextureCanBeShared(Device* device, DXGI_FORMAT textureFormat);
 
-    class Texture final : public TextureBase {
-      public:
-        static ResultOrError<Ref<Texture>> Create(Device* device,
-                                                  const TextureDescriptor* descriptor);
-        static ResultOrError<Ref<Texture>> CreateExternalImage(
-            Device* device,
-            const TextureDescriptor* descriptor,
-            ComPtr<ID3D12Resource> d3d12Texture,
-            Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
-            bool isSwapChainTexture,
-            bool isInitialized);
-        static ResultOrError<Ref<Texture>> Create(Device* device,
-                                                  const TextureDescriptor* descriptor,
-                                                  ComPtr<ID3D12Resource> d3d12Texture);
+class Texture final : public TextureBase {
+  public:
+    static ResultOrError<Ref<Texture>> Create(Device* device, const TextureDescriptor* descriptor);
+    static ResultOrError<Ref<Texture>> CreateExternalImage(
+        Device* device,
+        const TextureDescriptor* descriptor,
+        ComPtr<ID3D12Resource> d3d12Texture,
+        Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+        bool isSwapChainTexture,
+        bool isInitialized);
+    static ResultOrError<Ref<Texture>> Create(Device* device,
+                                              const TextureDescriptor* descriptor,
+                                              ComPtr<ID3D12Resource> d3d12Texture);
 
-        DXGI_FORMAT GetD3D12Format() const;
-        ID3D12Resource* GetD3D12Resource() const;
-        DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;
+    DXGI_FORMAT GetD3D12Format() const;
+    ID3D12Resource* GetD3D12Resource() const;
+    DXGI_FORMAT GetD3D12CopyableSubresourceFormat(Aspect aspect) const;
 
-        D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(const Format& format,
-                                                       uint32_t mipLevel,
-                                                       uint32_t baseSlice,
-                                                       uint32_t sliceCount) const;
-        D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
-                                                       uint32_t baseArrayLayer,
-                                                       uint32_t layerCount,
-                                                       Aspect aspects,
-                                                       bool depthReadOnly,
-                                                       bool stencilReadOnly) const;
+    D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor(const Format& format,
+                                                   uint32_t mipLevel,
+                                                   uint32_t baseSlice,
+                                                   uint32_t sliceCount) const;
+    D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(uint32_t mipLevel,
+                                                   uint32_t baseArrayLayer,
+                                                   uint32_t layerCount,
+                                                   Aspect aspects,
+                                                   bool depthReadOnly,
+                                                   bool stencilReadOnly) const;
 
-        void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                                 const SubresourceRange& range);
+    void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                             const SubresourceRange& range);
 
-        MaybeError AcquireKeyedMutex();
-        void ReleaseKeyedMutex();
+    MaybeError AcquireKeyedMutex();
+    void ReleaseKeyedMutex();
 
-        void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
-                                                    std::vector<D3D12_RESOURCE_BARRIER>* barrier,
-                                                    const TextureSubresourceUsage& textureUsages);
-        void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
-                                                  std::vector<D3D12_RESOURCE_BARRIER>* barrier,
-                                                  wgpu::TextureUsage usage,
-                                                  const SubresourceRange& range);
-        void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                        wgpu::TextureUsage usage,
-                                        const SubresourceRange& range);
-        void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                        D3D12_RESOURCE_STATES newState,
-                                        const SubresourceRange& range);
-        void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                           wgpu::TextureUsage usage);
-        void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
-                                           D3D12_RESOURCE_STATES newState);
+    void TrackUsageAndGetResourceBarrierForPass(CommandRecordingContext* commandContext,
+                                                std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                                const TextureSubresourceUsage& textureUsages);
+    void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                              std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                              wgpu::TextureUsage usage,
+                                              const SubresourceRange& range);
+    void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                    wgpu::TextureUsage usage,
+                                    const SubresourceRange& range);
+    void TrackUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                    D3D12_RESOURCE_STATES newState,
+                                    const SubresourceRange& range);
+    void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                       wgpu::TextureUsage usage);
+    void TrackAllUsageAndTransitionNow(CommandRecordingContext* commandContext,
+                                       D3D12_RESOURCE_STATES newState);
 
-      private:
-        Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
-        ~Texture() override;
-        using TextureBase::TextureBase;
+  private:
+    Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
+    ~Texture() override;
+    using TextureBase::TextureBase;
 
-        MaybeError InitializeAsInternalTexture();
-        MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
-                                               ComPtr<ID3D12Resource> d3d12Texture,
-                                               Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
-                                               bool isSwapChainTexture);
-        MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
+    MaybeError InitializeAsInternalTexture();
+    MaybeError InitializeAsExternalTexture(const TextureDescriptor* descriptor,
+                                           ComPtr<ID3D12Resource> d3d12Texture,
+                                           Ref<D3D11on12ResourceCacheEntry> d3d11on12Resource,
+                                           bool isSwapChainTexture);
+    MaybeError InitializeAsSwapChainTexture(ComPtr<ID3D12Resource> d3d12Texture);
 
-        void SetLabelHelper(const char* prefix);
+    void SetLabelHelper(const char* prefix);
 
-        // Dawn API
-        void SetLabelImpl() override;
-        void DestroyImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
+    void DestroyImpl() override;
 
-        MaybeError ClearTexture(CommandRecordingContext* commandContext,
-                                const SubresourceRange& range,
-                                TextureBase::ClearValue clearValue);
+    MaybeError ClearTexture(CommandRecordingContext* commandContext,
+                            const SubresourceRange& range,
+                            TextureBase::ClearValue clearValue);
 
-        // Barriers implementation details.
-        struct StateAndDecay {
-            D3D12_RESOURCE_STATES lastState;
-            ExecutionSerial lastDecaySerial;
-            bool isValidToDecay;
+    // Barriers implementation details.
+    struct StateAndDecay {
+        D3D12_RESOURCE_STATES lastState;
+        ExecutionSerial lastDecaySerial;
+        bool isValidToDecay;
 
-            bool operator==(const StateAndDecay& other) const;
-        };
-        void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
-                                                  std::vector<D3D12_RESOURCE_BARRIER>* barrier,
-                                                  D3D12_RESOURCE_STATES newState,
-                                                  const SubresourceRange& range);
-        void TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
-                                        const SubresourceRange& range,
-                                        StateAndDecay* state,
-                                        D3D12_RESOURCE_STATES subresourceNewState,
-                                        ExecutionSerial pendingCommandSerial) const;
-        void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);
-
-        SubresourceStorage<StateAndDecay> mSubresourceStateAndDecay;
-
-        ResourceHeapAllocation mResourceAllocation;
-        bool mSwapChainTexture = false;
-        D3D12_RESOURCE_FLAGS mD3D12ResourceFlags;
-
-        Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
+        bool operator==(const StateAndDecay& other) const;
     };
+    void TransitionUsageAndGetResourceBarrier(CommandRecordingContext* commandContext,
+                                              std::vector<D3D12_RESOURCE_BARRIER>* barrier,
+                                              D3D12_RESOURCE_STATES newState,
+                                              const SubresourceRange& range);
+    void TransitionSubresourceRange(std::vector<D3D12_RESOURCE_BARRIER>* barriers,
+                                    const SubresourceRange& range,
+                                    StateAndDecay* state,
+                                    D3D12_RESOURCE_STATES subresourceNewState,
+                                    ExecutionSerial pendingCommandSerial) const;
+    void HandleTransitionSpecialCases(CommandRecordingContext* commandContext);
 
-    class TextureView final : public TextureViewBase {
-      public:
-        static Ref<TextureView> Create(TextureBase* texture,
-                                       const TextureViewDescriptor* descriptor);
+    SubresourceStorage<StateAndDecay> mSubresourceStateAndDecay;
 
-        DXGI_FORMAT GetD3D12Format() const;
+    ResourceHeapAllocation mResourceAllocation;
+    bool mSwapChainTexture = false;
+    D3D12_RESOURCE_FLAGS mD3D12ResourceFlags;
 
-        const D3D12_SHADER_RESOURCE_VIEW_DESC& GetSRVDescriptor() const;
-        D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor() const;
-        D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(bool depthReadOnly,
-                                                       bool stencilReadOnly) const;
-        D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;
+    Ref<D3D11on12ResourceCacheEntry> mD3D11on12Resource;
+};
 
-      private:
-        TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+class TextureView final : public TextureViewBase {
+  public:
+    static Ref<TextureView> Create(TextureBase* texture, const TextureViewDescriptor* descriptor);
 
-        D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
-    };
+    DXGI_FORMAT GetD3D12Format() const;
+
+    const D3D12_SHADER_RESOURCE_VIEW_DESC& GetSRVDescriptor() const;
+    D3D12_RENDER_TARGET_VIEW_DESC GetRTVDescriptor() const;
+    D3D12_DEPTH_STENCIL_VIEW_DESC GetDSVDescriptor(bool depthReadOnly, bool stencilReadOnly) const;
+    D3D12_UNORDERED_ACCESS_VIEW_DESC GetUAVDescriptor() const;
+
+  private:
+    TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+
+    D3D12_SHADER_RESOURCE_VIEW_DESC mSrvDesc;
+};
 }  // namespace dawn::native::d3d12
 
 #endif  // SRC_DAWN_NATIVE_D3D12_TEXTURED3D12_H_
diff --git a/src/dawn/native/d3d12/UtilsD3D12.cpp b/src/dawn/native/d3d12/UtilsD3D12.cpp
index e559f37..0833e01 100644
--- a/src/dawn/native/d3d12/UtilsD3D12.cpp
+++ b/src/dawn/native/d3d12/UtilsD3D12.cpp
@@ -28,369 +28,362 @@
 
 namespace dawn::native::d3d12 {
 
-    namespace {
+namespace {
 
-        uint64_t RequiredCopySizeByD3D12(const uint32_t bytesPerRow,
-                                         const uint32_t rowsPerImage,
-                                         const Extent3D& copySize,
-                                         const TexelBlockInfo& blockInfo) {
-            uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
+uint64_t RequiredCopySizeByD3D12(const uint32_t bytesPerRow,
+                                 const uint32_t rowsPerImage,
+                                 const Extent3D& copySize,
+                                 const TexelBlockInfo& blockInfo) {
+    uint64_t bytesPerImage = Safe32x32(bytesPerRow, rowsPerImage);
 
-            // Required copy size for B2T/T2B copy on D3D12 is smaller than (but very close to)
-            // depth * bytesPerImage. The latter is already checked at ComputeRequiredBytesInCopy()
-            // in CommandValidation.cpp.
-            uint64_t requiredCopySizeByD3D12 = bytesPerImage * (copySize.depthOrArrayLayers - 1);
+    // Required copy size for B2T/T2B copy on D3D12 is smaller than (but very close to)
+    // depth * bytesPerImage. The latter is already checked at ComputeRequiredBytesInCopy()
+    // in CommandValidation.cpp.
+    uint64_t requiredCopySizeByD3D12 = bytesPerImage * (copySize.depthOrArrayLayers - 1);
 
-            // When calculating the required copy size for B2T/T2B copy, D3D12 doesn't respect
-            // rowsPerImage paddings on the last image for 3D texture, but it does respect
-            // bytesPerRow paddings on the last row.
-            ASSERT(blockInfo.width == 1);
-            ASSERT(blockInfo.height == 1);
-            uint64_t lastRowBytes = Safe32x32(blockInfo.byteSize, copySize.width);
-            ASSERT(rowsPerImage > copySize.height);
-            uint64_t lastImageBytesByD3D12 =
-                Safe32x32(bytesPerRow, rowsPerImage - 1) + lastRowBytes;
+    // When calculating the required copy size for B2T/T2B copy, D3D12 doesn't respect
+    // rowsPerImage paddings on the last image for 3D texture, but it does respect
+    // bytesPerRow paddings on the last row.
+    ASSERT(blockInfo.width == 1);
+    ASSERT(blockInfo.height == 1);
+    uint64_t lastRowBytes = Safe32x32(blockInfo.byteSize, copySize.width);
+    ASSERT(rowsPerImage > copySize.height);
+    uint64_t lastImageBytesByD3D12 = Safe32x32(bytesPerRow, rowsPerImage - 1) + lastRowBytes;
 
-            requiredCopySizeByD3D12 += lastImageBytesByD3D12;
-            return requiredCopySizeByD3D12;
-        }
+    requiredCopySizeByD3D12 += lastImageBytesByD3D12;
+    return requiredCopySizeByD3D12;
+}
 
-        // This function is used to access whether we need a workaround for D3D12's wrong algorithm
-        // of calculating required buffer size for B2T/T2B copy. The workaround is needed only when
-        //   - The corresponding toggle is enabled.
-        //   - It is a 3D texture (so the format is uncompressed).
-        //   - There are multiple depth images to be copied (copySize.depthOrArrayLayers > 1).
-        //   - It has rowsPerImage paddings (rowsPerImage > copySize.height).
-        //   - The buffer size doesn't meet D3D12's requirement.
-        bool NeedBufferSizeWorkaroundForBufferTextureCopyOnD3D12(const BufferCopy& bufferCopy,
-                                                                 const TextureCopy& textureCopy,
-                                                                 const Extent3D& copySize) {
-            TextureBase* texture = textureCopy.texture.Get();
-            Device* device = ToBackend(texture->GetDevice());
+// This function is used to assess whether we need a workaround for D3D12's wrong algorithm
+// of calculating required buffer size for B2T/T2B copy. The workaround is needed only when
+//   - The corresponding toggle is enabled.
+//   - It is a 3D texture (so the format is uncompressed).
+//   - There are multiple depth images to be copied (copySize.depthOrArrayLayers > 1).
+//   - It has rowsPerImage paddings (rowsPerImage > copySize.height).
+//   - The buffer size doesn't meet D3D12's requirement.
+bool NeedBufferSizeWorkaroundForBufferTextureCopyOnD3D12(const BufferCopy& bufferCopy,
+                                                         const TextureCopy& textureCopy,
+                                                         const Extent3D& copySize) {
+    TextureBase* texture = textureCopy.texture.Get();
+    Device* device = ToBackend(texture->GetDevice());
 
-            if (!device->IsToggleEnabled(
-                    Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings) ||
-                texture->GetDimension() != wgpu::TextureDimension::e3D ||
-                copySize.depthOrArrayLayers <= 1 || bufferCopy.rowsPerImage <= copySize.height) {
-                return false;
-            }
-
-            const TexelBlockInfo& blockInfo =
-                texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
-            uint64_t requiredCopySizeByD3D12 = RequiredCopySizeByD3D12(
-                bufferCopy.bytesPerRow, bufferCopy.rowsPerImage, copySize, blockInfo);
-            return bufferCopy.buffer->GetAllocatedSize() - bufferCopy.offset <
-                   requiredCopySizeByD3D12;
-        }
-
-    }  // anonymous namespace
-
-    ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
-        size_t len = strlen(str);
-        if (len == 0) {
-            return std::wstring();
-        }
-        int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
-        if (numChars == 0) {
-            return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
-        }
-        std::wstring result;
-        result.resize(numChars);
-        int numConvertedChars =
-            MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
-        if (numConvertedChars != numChars) {
-            return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
-        }
-        return std::move(result);
+    if (!device->IsToggleEnabled(Toggle::D3D12SplitBufferTextureCopyForRowsPerImagePaddings) ||
+        texture->GetDimension() != wgpu::TextureDimension::e3D ||
+        copySize.depthOrArrayLayers <= 1 || bufferCopy.rowsPerImage <= copySize.height) {
+        return false;
     }
 
-    D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
-        switch (func) {
-            case wgpu::CompareFunction::Never:
-                return D3D12_COMPARISON_FUNC_NEVER;
-            case wgpu::CompareFunction::Less:
-                return D3D12_COMPARISON_FUNC_LESS;
-            case wgpu::CompareFunction::LessEqual:
-                return D3D12_COMPARISON_FUNC_LESS_EQUAL;
-            case wgpu::CompareFunction::Greater:
-                return D3D12_COMPARISON_FUNC_GREATER;
-            case wgpu::CompareFunction::GreaterEqual:
-                return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
-            case wgpu::CompareFunction::Equal:
-                return D3D12_COMPARISON_FUNC_EQUAL;
-            case wgpu::CompareFunction::NotEqual:
-                return D3D12_COMPARISON_FUNC_NOT_EQUAL;
-            case wgpu::CompareFunction::Always:
-                return D3D12_COMPARISON_FUNC_ALWAYS;
+    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+    uint64_t requiredCopySizeByD3D12 = RequiredCopySizeByD3D12(
+        bufferCopy.bytesPerRow, bufferCopy.rowsPerImage, copySize, blockInfo);
+    return bufferCopy.buffer->GetAllocatedSize() - bufferCopy.offset < requiredCopySizeByD3D12;
+}
 
-            case wgpu::CompareFunction::Undefined:
-                UNREACHABLE();
+}  // anonymous namespace
+
+ResultOrError<std::wstring> ConvertStringToWstring(const char* str) {
+    size_t len = strlen(str);
+    if (len == 0) {
+        return std::wstring();
+    }
+    int numChars = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, nullptr, 0);
+    if (numChars == 0) {
+        return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
+    }
+    std::wstring result;
+    result.resize(numChars);
+    int numConvertedChars =
+        MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, str, len, &result[0], numChars);
+    if (numConvertedChars != numChars) {
+        return DAWN_INTERNAL_ERROR("Failed to convert string to wide string");
+    }
+    return std::move(result);
+}
+
+D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func) {
+    switch (func) {
+        case wgpu::CompareFunction::Never:
+            return D3D12_COMPARISON_FUNC_NEVER;
+        case wgpu::CompareFunction::Less:
+            return D3D12_COMPARISON_FUNC_LESS;
+        case wgpu::CompareFunction::LessEqual:
+            return D3D12_COMPARISON_FUNC_LESS_EQUAL;
+        case wgpu::CompareFunction::Greater:
+            return D3D12_COMPARISON_FUNC_GREATER;
+        case wgpu::CompareFunction::GreaterEqual:
+            return D3D12_COMPARISON_FUNC_GREATER_EQUAL;
+        case wgpu::CompareFunction::Equal:
+            return D3D12_COMPARISON_FUNC_EQUAL;
+        case wgpu::CompareFunction::NotEqual:
+            return D3D12_COMPARISON_FUNC_NOT_EQUAL;
+        case wgpu::CompareFunction::Always:
+            return D3D12_COMPARISON_FUNC_ALWAYS;
+
+        case wgpu::CompareFunction::Undefined:
+            UNREACHABLE();
+    }
+}
+
+D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+                                                                 uint32_t level,
+                                                                 uint32_t layer,
+                                                                 Aspect aspect) {
+    D3D12_TEXTURE_COPY_LOCATION copyLocation;
+    copyLocation.pResource = texture->GetD3D12Resource();
+    copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
+    copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
+
+    return copyLocation;
+}
+
+D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+    const Texture* texture,
+    ID3D12Resource* bufferResource,
+    const Extent3D& bufferSize,
+    const uint64_t offset,
+    const uint32_t rowPitch,
+    Aspect aspect) {
+    D3D12_TEXTURE_COPY_LOCATION bufferLocation;
+    bufferLocation.pResource = bufferResource;
+    bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
+    bufferLocation.PlacedFootprint.Offset = offset;
+    bufferLocation.PlacedFootprint.Footprint.Format =
+        texture->GetD3D12CopyableSubresourceFormat(aspect);
+    bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
+    bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
+    bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
+    bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
+    return bufferLocation;
+}
+
+D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize) {
+    D3D12_BOX sourceRegion;
+    sourceRegion.left = offset.x;
+    sourceRegion.top = offset.y;
+    sourceRegion.front = offset.z;
+    sourceRegion.right = offset.x + copySize.width;
+    sourceRegion.bottom = offset.y + copySize.height;
+    sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
+    return sourceRegion;
+}
+
+bool IsTypeless(DXGI_FORMAT format) {
+    // List generated from <dxgiformat.h>
+    switch (format) {
+        case DXGI_FORMAT_R32G32B32A32_TYPELESS:
+        case DXGI_FORMAT_R32G32B32_TYPELESS:
+        case DXGI_FORMAT_R16G16B16A16_TYPELESS:
+        case DXGI_FORMAT_R32G32_TYPELESS:
+        case DXGI_FORMAT_R32G8X24_TYPELESS:
+        case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
+        case DXGI_FORMAT_R10G10B10A2_TYPELESS:
+        case DXGI_FORMAT_R8G8B8A8_TYPELESS:
+        case DXGI_FORMAT_R16G16_TYPELESS:
+        case DXGI_FORMAT_R32_TYPELESS:
+        case DXGI_FORMAT_R24G8_TYPELESS:
+        case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
+        case DXGI_FORMAT_R8G8_TYPELESS:
+        case DXGI_FORMAT_R16_TYPELESS:
+        case DXGI_FORMAT_R8_TYPELESS:
+        case DXGI_FORMAT_BC1_TYPELESS:
+        case DXGI_FORMAT_BC2_TYPELESS:
+        case DXGI_FORMAT_BC3_TYPELESS:
+        case DXGI_FORMAT_BC4_TYPELESS:
+        case DXGI_FORMAT_BC5_TYPELESS:
+        case DXGI_FORMAT_B8G8R8A8_TYPELESS:
+        case DXGI_FORMAT_B8G8R8X8_TYPELESS:
+        case DXGI_FORMAT_BC6H_TYPELESS:
+        case DXGI_FORMAT_BC7_TYPELESS:
+            return true;
+        default:
+            return false;
+    }
+}
+
+void RecordBufferTextureCopyFromSplits(BufferTextureCopyDirection direction,
+                                       ID3D12GraphicsCommandList* commandList,
+                                       const TextureCopySubresource& baseCopySplit,
+                                       ID3D12Resource* bufferResource,
+                                       uint64_t baseOffset,
+                                       uint64_t bufferBytesPerRow,
+                                       TextureBase* textureBase,
+                                       uint32_t textureMiplevel,
+                                       uint32_t textureLayer,
+                                       Aspect aspect) {
+    Texture* texture = ToBackend(textureBase);
+    const D3D12_TEXTURE_COPY_LOCATION textureLocation =
+        ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
+
+    for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
+        const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
+
+        // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
+        // members in TextureCopySubresource::CopyInfo.
+        const uint64_t offsetBytes = info.alignedOffset + baseOffset;
+        const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
+            ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
+                                                      offsetBytes, bufferBytesPerRow, aspect);
+        if (direction == BufferTextureCopyDirection::B2T) {
+            const D3D12_BOX sourceRegion =
+                ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
+
+            commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
+                                           info.textureOffset.y, info.textureOffset.z,
+                                           &bufferLocation, &sourceRegion);
+        } else {
+            ASSERT(direction == BufferTextureCopyDirection::T2B);
+            const D3D12_BOX sourceRegion =
+                ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
+
+            commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
+                                           info.bufferOffset.y, info.bufferOffset.z,
+                                           &textureLocation, &sourceRegion);
         }
     }
+}
 
-    D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
-                                                                     uint32_t level,
-                                                                     uint32_t layer,
-                                                                     Aspect aspect) {
-        D3D12_TEXTURE_COPY_LOCATION copyLocation;
-        copyLocation.pResource = texture->GetD3D12Resource();
-        copyLocation.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
-        copyLocation.SubresourceIndex = texture->GetSubresourceIndex(level, layer, aspect);
+void Record2DBufferTextureCopyWithSplit(BufferTextureCopyDirection direction,
+                                        ID3D12GraphicsCommandList* commandList,
+                                        ID3D12Resource* bufferResource,
+                                        const uint64_t offset,
+                                        const uint32_t bytesPerRow,
+                                        const uint32_t rowsPerImage,
+                                        const TextureCopy& textureCopy,
+                                        const TexelBlockInfo& blockInfo,
+                                        const Extent3D& copySize) {
+    // See comments in Compute2DTextureCopySplits() for more details.
+    const TextureCopySplits copySplits = Compute2DTextureCopySplits(
+        textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
 
-        return copyLocation;
+    const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
+
+    // copySplits.copySubresources[1] is always calculated for the second copy layer with
+    // extra "bytesPerLayer" copy offset compared with the first copy layer. So
+    // here we use an array bufferOffsetsForNextLayer to record the extra offsets
+    // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
+    // the next copy layer that uses copySplits.copySubresources[0], and
+    // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
+    // that uses copySplits.copySubresources[1].
+    std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources> bufferOffsetsForNextLayer =
+        {{0u, 0u}};
+
+    for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
+        const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
+
+        const TextureCopySubresource& copySplitPerLayerBase =
+            copySplits.copySubresources[splitIndex];
+        const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
+        const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
+
+        RecordBufferTextureCopyFromSplits(direction, commandList, copySplitPerLayerBase,
+                                          bufferResource, bufferOffsetForNextLayer, bytesPerRow,
+                                          textureCopy.texture.Get(), textureCopy.mipLevel,
+                                          copyTextureLayer, textureCopy.aspect);
+
+        bufferOffsetsForNextLayer[splitIndex] += bytesPerLayer * copySplits.copySubresources.size();
     }
+}
 
-    D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
-        const Texture* texture,
-        ID3D12Resource* bufferResource,
-        const Extent3D& bufferSize,
-        const uint64_t offset,
-        const uint32_t rowPitch,
-        Aspect aspect) {
-        D3D12_TEXTURE_COPY_LOCATION bufferLocation;
-        bufferLocation.pResource = bufferResource;
-        bufferLocation.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
-        bufferLocation.PlacedFootprint.Offset = offset;
-        bufferLocation.PlacedFootprint.Footprint.Format =
-            texture->GetD3D12CopyableSubresourceFormat(aspect);
-        bufferLocation.PlacedFootprint.Footprint.Width = bufferSize.width;
-        bufferLocation.PlacedFootprint.Footprint.Height = bufferSize.height;
-        bufferLocation.PlacedFootprint.Footprint.Depth = bufferSize.depthOrArrayLayers;
-        bufferLocation.PlacedFootprint.Footprint.RowPitch = rowPitch;
-        return bufferLocation;
-    }
+void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+                                             ID3D12GraphicsCommandList* commandList,
+                                             ID3D12Resource* bufferResource,
+                                             const uint64_t offset,
+                                             const uint32_t bytesPerRow,
+                                             const uint32_t rowsPerImage,
+                                             const TextureCopy& textureCopy,
+                                             const Extent3D& copySize) {
+    ASSERT(HasOneBit(textureCopy.aspect));
 
-    D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize) {
-        D3D12_BOX sourceRegion;
-        sourceRegion.left = offset.x;
-        sourceRegion.top = offset.y;
-        sourceRegion.front = offset.z;
-        sourceRegion.right = offset.x + copySize.width;
-        sourceRegion.bottom = offset.y + copySize.height;
-        sourceRegion.back = offset.z + copySize.depthOrArrayLayers;
-        return sourceRegion;
-    }
+    TextureBase* texture = textureCopy.texture.Get();
+    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
 
-    bool IsTypeless(DXGI_FORMAT format) {
-        // List generated from <dxgiformat.h>
-        switch (format) {
-            case DXGI_FORMAT_R32G32B32A32_TYPELESS:
-            case DXGI_FORMAT_R32G32B32_TYPELESS:
-            case DXGI_FORMAT_R16G16B16A16_TYPELESS:
-            case DXGI_FORMAT_R32G32_TYPELESS:
-            case DXGI_FORMAT_R32G8X24_TYPELESS:
-            case DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS:
-            case DXGI_FORMAT_R10G10B10A2_TYPELESS:
-            case DXGI_FORMAT_R8G8B8A8_TYPELESS:
-            case DXGI_FORMAT_R16G16_TYPELESS:
-            case DXGI_FORMAT_R32_TYPELESS:
-            case DXGI_FORMAT_R24G8_TYPELESS:
-            case DXGI_FORMAT_R24_UNORM_X8_TYPELESS:
-            case DXGI_FORMAT_R8G8_TYPELESS:
-            case DXGI_FORMAT_R16_TYPELESS:
-            case DXGI_FORMAT_R8_TYPELESS:
-            case DXGI_FORMAT_BC1_TYPELESS:
-            case DXGI_FORMAT_BC2_TYPELESS:
-            case DXGI_FORMAT_BC3_TYPELESS:
-            case DXGI_FORMAT_BC4_TYPELESS:
-            case DXGI_FORMAT_BC5_TYPELESS:
-            case DXGI_FORMAT_B8G8R8A8_TYPELESS:
-            case DXGI_FORMAT_B8G8R8X8_TYPELESS:
-            case DXGI_FORMAT_BC6H_TYPELESS:
-            case DXGI_FORMAT_BC7_TYPELESS:
-                return true;
-            default:
-                return false;
-        }
-    }
+    switch (texture->GetDimension()) {
+        case wgpu::TextureDimension::e1D: {
+            // 1D textures copy splits are a subset of the single-layer 2D texture copy splits,
+            // at least while 1D textures can only have a single array layer.
+            ASSERT(texture->GetArrayLayers() == 1);
 
-    void RecordBufferTextureCopyFromSplits(BufferTextureCopyDirection direction,
-                                           ID3D12GraphicsCommandList* commandList,
-                                           const TextureCopySubresource& baseCopySplit,
-                                           ID3D12Resource* bufferResource,
-                                           uint64_t baseOffset,
-                                           uint64_t bufferBytesPerRow,
-                                           TextureBase* textureBase,
-                                           uint32_t textureMiplevel,
-                                           uint32_t textureLayer,
-                                           Aspect aspect) {
-        Texture* texture = ToBackend(textureBase);
-        const D3D12_TEXTURE_COPY_LOCATION textureLocation =
-            ComputeTextureCopyLocationForTexture(texture, textureMiplevel, textureLayer, aspect);
-
-        for (uint32_t i = 0; i < baseCopySplit.count; ++i) {
-            const TextureCopySubresource::CopyInfo& info = baseCopySplit.copies[i];
-
-            // TODO(jiawei.shao@intel.com): pre-compute bufferLocation and sourceRegion as
-            // members in TextureCopySubresource::CopyInfo.
-            const uint64_t offsetBytes = info.alignedOffset + baseOffset;
-            const D3D12_TEXTURE_COPY_LOCATION bufferLocation =
-                ComputeBufferLocationForCopyTextureRegion(texture, bufferResource, info.bufferSize,
-                                                          offsetBytes, bufferBytesPerRow, aspect);
-            if (direction == BufferTextureCopyDirection::B2T) {
-                const D3D12_BOX sourceRegion =
-                    ComputeD3D12BoxFromOffsetAndSize(info.bufferOffset, info.copySize);
-
-                commandList->CopyTextureRegion(&textureLocation, info.textureOffset.x,
-                                               info.textureOffset.y, info.textureOffset.z,
-                                               &bufferLocation, &sourceRegion);
-            } else {
-                ASSERT(direction == BufferTextureCopyDirection::T2B);
-                const D3D12_BOX sourceRegion =
-                    ComputeD3D12BoxFromOffsetAndSize(info.textureOffset, info.copySize);
-
-                commandList->CopyTextureRegion(&bufferLocation, info.bufferOffset.x,
-                                               info.bufferOffset.y, info.bufferOffset.z,
-                                               &textureLocation, &sourceRegion);
-            }
-        }
-    }
-
-    void Record2DBufferTextureCopyWithSplit(BufferTextureCopyDirection direction,
-                                            ID3D12GraphicsCommandList* commandList,
-                                            ID3D12Resource* bufferResource,
-                                            const uint64_t offset,
-                                            const uint32_t bytesPerRow,
-                                            const uint32_t rowsPerImage,
-                                            const TextureCopy& textureCopy,
-                                            const TexelBlockInfo& blockInfo,
-                                            const Extent3D& copySize) {
-        // See comments in Compute2DTextureCopySplits() for more details.
-        const TextureCopySplits copySplits = Compute2DTextureCopySplits(
-            textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
-
-        const uint64_t bytesPerLayer = bytesPerRow * rowsPerImage;
-
-        // copySplits.copySubresources[1] is always calculated for the second copy layer with
-        // extra "bytesPerLayer" copy offset compared with the first copy layer. So
-        // here we use an array bufferOffsetsForNextLayer to record the extra offsets
-        // for each copy layer: bufferOffsetsForNextLayer[0] is the extra offset for
-        // the next copy layer that uses copySplits.copySubresources[0], and
-        // bufferOffsetsForNextLayer[1] is the extra offset for the next copy layer
-        // that uses copySplits.copySubresources[1].
-        std::array<uint64_t, TextureCopySplits::kMaxTextureCopySubresources>
-            bufferOffsetsForNextLayer = {{0u, 0u}};
-
-        for (uint32_t copyLayer = 0; copyLayer < copySize.depthOrArrayLayers; ++copyLayer) {
-            const uint32_t splitIndex = copyLayer % copySplits.copySubresources.size();
-
-            const TextureCopySubresource& copySplitPerLayerBase =
-                copySplits.copySubresources[splitIndex];
-            const uint64_t bufferOffsetForNextLayer = bufferOffsetsForNextLayer[splitIndex];
-            const uint32_t copyTextureLayer = copyLayer + textureCopy.origin.z;
-
-            RecordBufferTextureCopyFromSplits(direction, commandList, copySplitPerLayerBase,
-                                              bufferResource, bufferOffsetForNextLayer, bytesPerRow,
-                                              textureCopy.texture.Get(), textureCopy.mipLevel,
-                                              copyTextureLayer, textureCopy.aspect);
-
-            bufferOffsetsForNextLayer[splitIndex] +=
-                bytesPerLayer * copySplits.copySubresources.size();
-        }
-    }
-
-    void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
-                                                 ID3D12GraphicsCommandList* commandList,
-                                                 ID3D12Resource* bufferResource,
-                                                 const uint64_t offset,
-                                                 const uint32_t bytesPerRow,
-                                                 const uint32_t rowsPerImage,
-                                                 const TextureCopy& textureCopy,
-                                                 const Extent3D& copySize) {
-        ASSERT(HasOneBit(textureCopy.aspect));
-
-        TextureBase* texture = textureCopy.texture.Get();
-        const TexelBlockInfo& blockInfo =
-            texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
-
-        switch (texture->GetDimension()) {
-            case wgpu::TextureDimension::e1D: {
-                // 1D textures copy splits are a subset of the single-layer 2D texture copy splits,
-                // at least while 1D textures can only have a single array layer.
-                ASSERT(texture->GetArrayLayers() == 1);
-
-                TextureCopySubresource copyRegions = Compute2DTextureCopySubresource(
-                    textureCopy.origin, copySize, blockInfo, offset, bytesPerRow);
-                RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
-                                                  bufferResource, 0, bytesPerRow, texture,
-                                                  textureCopy.mipLevel, 0, textureCopy.aspect);
-                break;
-            }
-
-            // Record the CopyTextureRegion commands for 2D textures, with special handling of array
-            // layers since each require their own set of copies.
-            case wgpu::TextureDimension::e2D:
-                Record2DBufferTextureCopyWithSplit(direction, commandList, bufferResource, offset,
-                                                   bytesPerRow, rowsPerImage, textureCopy,
-                                                   blockInfo, copySize);
-                break;
-
-            case wgpu::TextureDimension::e3D: {
-                // See comments in Compute3DTextureCopySplits() for more details.
-                TextureCopySubresource copyRegions = Compute3DTextureCopySplits(
-                    textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
-
-                RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions,
-                                                  bufferResource, 0, bytesPerRow, texture,
-                                                  textureCopy.mipLevel, 0, textureCopy.aspect);
-                break;
-            }
-        }
-    }
-
-    void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
-                                 ID3D12GraphicsCommandList* commandList,
-                                 const BufferCopy& bufferCopy,
-                                 const TextureCopy& textureCopy,
-                                 const Extent3D& copySize) {
-        ID3D12Resource* bufferResource = ToBackend(bufferCopy.buffer)->GetD3D12Resource();
-
-        if (NeedBufferSizeWorkaroundForBufferTextureCopyOnD3D12(bufferCopy, textureCopy,
-                                                                copySize)) {
-            // Split the copy into two copies if the size of bufferCopy.buffer doesn't meet D3D12's
-            // requirement and a workaround is needed:
-            //   - The first copy will copy all depth images but the last depth image,
-            //   - The second copy will copy the last depth image.
-            Extent3D extentForAllButTheLastImage = copySize;
-            extentForAllButTheLastImage.depthOrArrayLayers -= 1;
-            RecordBufferTextureCopyWithBufferHandle(
-                direction, commandList, bufferResource, bufferCopy.offset, bufferCopy.bytesPerRow,
-                bufferCopy.rowsPerImage, textureCopy, extentForAllButTheLastImage);
-
-            Extent3D extentForTheLastImage = copySize;
-            extentForTheLastImage.depthOrArrayLayers = 1;
-
-            TextureCopy textureCopyForTheLastImage = textureCopy;
-            textureCopyForTheLastImage.origin.z += copySize.depthOrArrayLayers - 1;
-
-            uint64_t copiedBytes = bufferCopy.bytesPerRow * bufferCopy.rowsPerImage *
-                                   (copySize.depthOrArrayLayers - 1);
-            RecordBufferTextureCopyWithBufferHandle(
-                direction, commandList, bufferResource, bufferCopy.offset + copiedBytes,
-                bufferCopy.bytesPerRow, bufferCopy.rowsPerImage, textureCopyForTheLastImage,
-                extentForTheLastImage);
-            return;
+            TextureCopySubresource copyRegions = Compute2DTextureCopySubresource(
+                textureCopy.origin, copySize, blockInfo, offset, bytesPerRow);
+            RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions, bufferResource,
+                                              0, bytesPerRow, texture, textureCopy.mipLevel, 0,
+                                              textureCopy.aspect);
+            break;
         }
 
+        // Record the CopyTextureRegion commands for 2D textures, with special handling of array
+        // layers since each require their own set of copies.
+        case wgpu::TextureDimension::e2D:
+            Record2DBufferTextureCopyWithSplit(direction, commandList, bufferResource, offset,
+                                               bytesPerRow, rowsPerImage, textureCopy, blockInfo,
+                                               copySize);
+            break;
+
+        case wgpu::TextureDimension::e3D: {
+            // See comments in Compute3DTextureCopySplits() for more details.
+            TextureCopySubresource copyRegions = Compute3DTextureCopySplits(
+                textureCopy.origin, copySize, blockInfo, offset, bytesPerRow, rowsPerImage);
+
+            RecordBufferTextureCopyFromSplits(direction, commandList, copyRegions, bufferResource,
+                                              0, bytesPerRow, texture, textureCopy.mipLevel, 0,
+                                              textureCopy.aspect);
+            break;
+        }
+    }
+}
+
+void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+                             ID3D12GraphicsCommandList* commandList,
+                             const BufferCopy& bufferCopy,
+                             const TextureCopy& textureCopy,
+                             const Extent3D& copySize) {
+    ID3D12Resource* bufferResource = ToBackend(bufferCopy.buffer)->GetD3D12Resource();
+
+    if (NeedBufferSizeWorkaroundForBufferTextureCopyOnD3D12(bufferCopy, textureCopy, copySize)) {
+        // Split the copy into two copies if the size of bufferCopy.buffer doesn't meet D3D12's
+        // requirement and a workaround is needed:
+        //   - The first copy will copy all depth images but the last depth image,
+        //   - The second copy will copy the last depth image.
+        Extent3D extentForAllButTheLastImage = copySize;
+        extentForAllButTheLastImage.depthOrArrayLayers -= 1;
+        RecordBufferTextureCopyWithBufferHandle(
+            direction, commandList, bufferResource, bufferCopy.offset, bufferCopy.bytesPerRow,
+            bufferCopy.rowsPerImage, textureCopy, extentForAllButTheLastImage);
+
+        Extent3D extentForTheLastImage = copySize;
+        extentForTheLastImage.depthOrArrayLayers = 1;
+
+        TextureCopy textureCopyForTheLastImage = textureCopy;
+        textureCopyForTheLastImage.origin.z += copySize.depthOrArrayLayers - 1;
+
+        uint64_t copiedBytes =
+            bufferCopy.bytesPerRow * bufferCopy.rowsPerImage * (copySize.depthOrArrayLayers - 1);
         RecordBufferTextureCopyWithBufferHandle(direction, commandList, bufferResource,
-                                                bufferCopy.offset, bufferCopy.bytesPerRow,
-                                                bufferCopy.rowsPerImage, textureCopy, copySize);
+                                                bufferCopy.offset + copiedBytes,
+                                                bufferCopy.bytesPerRow, bufferCopy.rowsPerImage,
+                                                textureCopyForTheLastImage, extentForTheLastImage);
+        return;
     }
 
-    void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
-        if (!object) {
-            return;
-        }
+    RecordBufferTextureCopyWithBufferHandle(direction, commandList, bufferResource,
+                                            bufferCopy.offset, bufferCopy.bytesPerRow,
+                                            bufferCopy.rowsPerImage, textureCopy, copySize);
+}
 
-        if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
-            object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
-            return;
-        }
-
-        std::string objectName = prefix;
-        objectName += "_";
-        objectName += label;
-        object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
+void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label) {
+    if (!object) {
+        return;
     }
 
+    if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+        object->SetPrivateData(WKPDID_D3DDebugObjectName, strlen(prefix), prefix);
+        return;
+    }
+
+    std::string objectName = prefix;
+    objectName += "_";
+    objectName += label;
+    object->SetPrivateData(WKPDID_D3DDebugObjectName, objectName.length(), objectName.c_str());
+}
+
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/native/d3d12/UtilsD3D12.h b/src/dawn/native/d3d12/UtilsD3D12.h
index 6bb908d..0bc5afb 100644
--- a/src/dawn/native/d3d12/UtilsD3D12.h
+++ b/src/dawn/native/d3d12/UtilsD3D12.h
@@ -26,50 +26,47 @@
 
 namespace dawn::native::d3d12 {
 
-    ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
+ResultOrError<std::wstring> ConvertStringToWstring(const char* str);
 
-    D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
+D3D12_COMPARISON_FUNC ToD3D12ComparisonFunc(wgpu::CompareFunction func);
 
-    D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
-                                                                     uint32_t level,
-                                                                     uint32_t layer,
-                                                                     Aspect aspect);
+D3D12_TEXTURE_COPY_LOCATION ComputeTextureCopyLocationForTexture(const Texture* texture,
+                                                                 uint32_t level,
+                                                                 uint32_t layer,
+                                                                 Aspect aspect);
 
-    D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
-        const Texture* texture,
-        ID3D12Resource* bufferResource,
-        const Extent3D& bufferSize,
-        const uint64_t offset,
-        const uint32_t rowPitch,
-        Aspect aspect);
-    D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
+D3D12_TEXTURE_COPY_LOCATION ComputeBufferLocationForCopyTextureRegion(
+    const Texture* texture,
+    ID3D12Resource* bufferResource,
+    const Extent3D& bufferSize,
+    const uint64_t offset,
+    const uint32_t rowPitch,
+    Aspect aspect);
+D3D12_BOX ComputeD3D12BoxFromOffsetAndSize(const Origin3D& offset, const Extent3D& copySize);
 
-    bool IsTypeless(DXGI_FORMAT format);
+bool IsTypeless(DXGI_FORMAT format);
 
-    enum class BufferTextureCopyDirection {
-        B2T,
-        T2B,
-    };
+enum class BufferTextureCopyDirection {
+    B2T,
+    T2B,
+};
 
-    void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
-                                                 ID3D12GraphicsCommandList* commandList,
-                                                 ID3D12Resource* bufferResource,
-                                                 const uint64_t offset,
-                                                 const uint32_t bytesPerRow,
-                                                 const uint32_t rowsPerImage,
-                                                 const TextureCopy& textureCopy,
-                                                 const Extent3D& copySize);
+void RecordBufferTextureCopyWithBufferHandle(BufferTextureCopyDirection direction,
+                                             ID3D12GraphicsCommandList* commandList,
+                                             ID3D12Resource* bufferResource,
+                                             const uint64_t offset,
+                                             const uint32_t bytesPerRow,
+                                             const uint32_t rowsPerImage,
+                                             const TextureCopy& textureCopy,
+                                             const Extent3D& copySize);
 
-    void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
-                                 ID3D12GraphicsCommandList* commandList,
-                                 const BufferCopy& bufferCopy,
-                                 const TextureCopy& textureCopy,
-                                 const Extent3D& copySize);
+void RecordBufferTextureCopy(BufferTextureCopyDirection direction,
+                             ID3D12GraphicsCommandList* commandList,
+                             const BufferCopy& bufferCopy,
+                             const TextureCopy& textureCopy,
+                             const Extent3D& copySize);
 
-    void SetDebugName(Device* device,
-                      ID3D12Object* object,
-                      const char* prefix,
-                      std::string label = "");
+void SetDebugName(Device* device, ID3D12Object* object, const char* prefix, std::string label = "");
 
 }  // namespace dawn::native::d3d12
 
diff --git a/src/dawn/native/dawn_platform.h b/src/dawn/native/dawn_platform.h
index e4dc42a..23388b5 100644
--- a/src/dawn/native/dawn_platform.h
+++ b/src/dawn/native/dawn_platform.h
@@ -22,44 +22,44 @@
 
 namespace dawn::native {
 
-    // kEnumCount is a constant specifying the number of enums in a WebGPU enum type,
-    // if the enums are contiguous, making it suitable for iteration.
-    // It is defined in dawn_platform_autogen.h
-    template <typename T>
-    constexpr uint32_t kEnumCount = EnumCount<T>::value;
+// kEnumCount is a constant specifying the number of enums in a WebGPU enum type,
+// if the enums are contiguous, making it suitable for iteration.
+// It is defined in dawn_platform_autogen.h
+template <typename T>
+constexpr uint32_t kEnumCount = EnumCount<T>::value;
 
-    // Extra buffer usages
-    // Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
-    // usage as storage buffer in the internal pipeline.
-    static constexpr wgpu::BufferUsage kInternalStorageBuffer =
-        static_cast<wgpu::BufferUsage>(0x40000000);
+// Extra buffer usages
+// Add an extra buffer usage and an extra binding type for binding the buffers with QueryResolve
+// usage as storage buffer in the internal pipeline.
+static constexpr wgpu::BufferUsage kInternalStorageBuffer =
+    static_cast<wgpu::BufferUsage>(0x40000000);
 
-    // Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
-    static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
-        static_cast<wgpu::BufferUsage>(0x80000000);
+// Add an extra buffer usage (readonly storage buffer usage) for render pass resource tracking
+static constexpr wgpu::BufferUsage kReadOnlyStorageBuffer =
+    static_cast<wgpu::BufferUsage>(0x80000000);
 
-    static constexpr wgpu::BufferUsage kAllInternalBufferUsages =
-        kInternalStorageBuffer | kReadOnlyStorageBuffer;
+static constexpr wgpu::BufferUsage kAllInternalBufferUsages =
+    kInternalStorageBuffer | kReadOnlyStorageBuffer;
 
-    // Extra texture usages
-    // Add an extra texture usage (readonly render attachment usage) for render pass resource
-    // tracking
-    static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
-        static_cast<wgpu::TextureUsage>(0x40000000);
+// Extra texture usages
+// Add an extra texture usage (readonly render attachment usage) for render pass resource
+// tracking
+static constexpr wgpu::TextureUsage kReadOnlyRenderAttachment =
+    static_cast<wgpu::TextureUsage>(0x40000000);
 
-    // Internal usage to help tracking when a subresource is used as render attachment usage
-    // more than once in a render pass.
-    static constexpr wgpu::TextureUsage kAgainAsRenderAttachment =
-        static_cast<wgpu::TextureUsage>(0x80000001);
+// Internal usage to help tracking when a subresource is used as render attachment usage
+// more than once in a render pass.
+static constexpr wgpu::TextureUsage kAgainAsRenderAttachment =
+    static_cast<wgpu::TextureUsage>(0x80000001);
 
-    // Add an extra texture usage for textures that will be presented, for use in backends
-    // that needs to transition to present usage.
-    // This currently aliases wgpu::TextureUsage::Present, we would assign it
-    // some bit when wgpu::TextureUsage::Present is removed.
-    static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
+// Add an extra texture usage for textures that will be presented, for use in backends
+// that needs to transition to present usage.
+// This currently aliases wgpu::TextureUsage::Present, we would assign it
+// some bit when wgpu::TextureUsage::Present is removed.
+static constexpr wgpu::TextureUsage kPresentTextureUsage = wgpu::TextureUsage::Present;
 
-    static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
-        static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
+static constexpr wgpu::BufferBindingType kInternalStorageBufferBinding =
+    static_cast<wgpu::BufferBindingType>(0xFFFFFFFF);
 }  // namespace dawn::native
 
 #endif  // SRC_DAWN_NATIVE_DAWN_PLATFORM_H_
diff --git a/src/dawn/native/metal/BackendMTL.h b/src/dawn/native/metal/BackendMTL.h
index 7f160db..fe6908f 100644
--- a/src/dawn/native/metal/BackendMTL.h
+++ b/src/dawn/native/metal/BackendMTL.h
@@ -21,14 +21,14 @@
 
 namespace dawn::native::metal {
 
-    class Backend : public BackendConnection {
-      public:
-        explicit Backend(InstanceBase* instance);
+class Backend : public BackendConnection {
+  public:
+    explicit Backend(InstanceBase* instance);
 
-        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
-        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
-            const AdapterDiscoveryOptionsBase* optionsBase) override;
-    };
+    std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+    ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* optionsBase) override;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/BackendMTL.mm b/src/dawn/native/metal/BackendMTL.mm
index 6f4751f..2545e11 100644
--- a/src/dawn/native/metal/BackendMTL.mm
+++ b/src/dawn/native/metal/BackendMTL.mm
@@ -26,479 +26,472 @@
 #include "dawn/native/metal/DeviceMTL.h"
 
 #if defined(DAWN_PLATFORM_MACOS)
-#    import <IOKit/IOKitLib.h>
-#    include "dawn/common/IOKitRef.h"
+#import <IOKit/IOKitLib.h>
+#include "dawn/common/IOKitRef.h"
 #endif
 
 #include <vector>
 
 namespace dawn::native::metal {
 
-    namespace {
+namespace {
 
-        struct PCIIDs {
-            uint32_t vendorId;
-            uint32_t deviceId;
-        };
+struct PCIIDs {
+    uint32_t vendorId;
+    uint32_t deviceId;
+};
 
-        struct Vendor {
-            const char* trademark;
-            uint32_t vendorId;
-        };
+struct Vendor {
+    const char* trademark;
+    uint32_t vendorId;
+};
 
 #if defined(DAWN_PLATFORM_MACOS)
-        const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
-                                   {"Radeon", gpu_info::kVendorID_AMD},
-                                   {"Intel", gpu_info::kVendorID_Intel},
-                                   {"Geforce", gpu_info::kVendorID_Nvidia},
-                                   {"Quadro", gpu_info::kVendorID_Nvidia}};
+const Vendor kVendors[] = {{"AMD", gpu_info::kVendorID_AMD},
+                           {"Radeon", gpu_info::kVendorID_AMD},
+                           {"Intel", gpu_info::kVendorID_Intel},
+                           {"Geforce", gpu_info::kVendorID_Nvidia},
+                           {"Quadro", gpu_info::kVendorID_Nvidia}};
 
-        // Find vendor ID from MTLDevice name.
-        MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
-            uint32_t vendorId = 0;
-            const char* deviceName = [device.name UTF8String];
-            for (const auto& it : kVendors) {
-                if (strstr(deviceName, it.trademark) != nullptr) {
-                    vendorId = it.vendorId;
-                    break;
-                }
-            }
-
-            if (vendorId == 0) {
-                return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
-            }
-
-            // Set vendor id with 0
-            *ids = PCIIDs{vendorId, 0};
-            return {};
+// Find vendor ID from MTLDevice name.
+MaybeError GetVendorIdFromVendors(id<MTLDevice> device, PCIIDs* ids) {
+    uint32_t vendorId = 0;
+    const char* deviceName = [device.name UTF8String];
+    for (const auto& it : kVendors) {
+        if (strstr(deviceName, it.trademark) != nullptr) {
+            vendorId = it.vendorId;
+            break;
         }
+    }
 
-        // Extracts an integer property from a registry entry.
-        uint32_t GetEntryProperty(io_registry_entry_t entry, CFStringRef name) {
-            uint32_t value = 0;
+    if (vendorId == 0) {
+        return DAWN_INTERNAL_ERROR("Failed to find vendor id with the device");
+    }
 
-            // Recursively search registry entry and its parents for property name
-            // The data should release with CFRelease
-            CFRef<CFDataRef> data =
-                AcquireCFRef(static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
-                    entry, kIOServicePlane, name, kCFAllocatorDefault,
-                    kIORegistryIterateRecursively | kIORegistryIterateParents)));
+    // Set the device id to 0 since it cannot be determined here
+    *ids = PCIIDs{vendorId, 0};
+    return {};
+}
 
-            if (data == nullptr) {
-                return value;
-            }
+// Extracts an integer property from a registry entry.
+uint32_t GetEntryProperty(io_registry_entry_t entry, CFStringRef name) {
+    uint32_t value = 0;
 
-            // CFDataGetBytePtr() is guaranteed to return a read-only pointer
-            value = *reinterpret_cast<const uint32_t*>(CFDataGetBytePtr(data.Get()));
-            return value;
-        }
+    // Recursively search registry entry and its parents for property name
+    // The data should release with CFRelease
+    CFRef<CFDataRef> data = AcquireCFRef(static_cast<CFDataRef>(IORegistryEntrySearchCFProperty(
+        entry, kIOServicePlane, name, kCFAllocatorDefault,
+        kIORegistryIterateRecursively | kIORegistryIterateParents)));
 
-        // Queries the IO Registry to find the PCI device and vendor IDs of the MTLDevice.
-        // The registry entry correponding to [device registryID] doesn't contain the exact PCI ids
-        // because it corresponds to a driver. However its parent entry corresponds to the device
-        // itself and has uint32_t "device-id" and "registry-id" keys. For example on a dual-GPU
-        // MacBook Pro 2017 the IORegistry explorer shows the following tree (simplified here):
-        //
-        //  - PCI0@0
-        //  | - AppleACPIPCI
-        //  | | - IGPU@2 (type IOPCIDevice)
-        //  | | | - IntelAccelerator (type IOGraphicsAccelerator2)
-        //  | | - PEG0@1
-        //  | | | - IOPP
-        //  | | | | - GFX0@0 (type IOPCIDevice)
-        //  | | | | | - AMDRadeonX4000_AMDBaffinGraphicsAccelerator (type IOGraphicsAccelerator2)
-        //
-        // [device registryID] is the ID for one of the IOGraphicsAccelerator2 and we can see that
-        // their parent always is an IOPCIDevice that has properties for the device and vendor IDs.
-        MaybeError API_AVAILABLE(macos(10.13))
-            GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
-            // Get a matching dictionary for the IOGraphicsAccelerator2
-            CFRef<CFMutableDictionaryRef> matchingDict =
-                AcquireCFRef(IORegistryEntryIDMatching([device registryID]));
-            if (matchingDict == nullptr) {
-                return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
-            }
+    if (data == nullptr) {
+        return value;
+    }
 
-            // IOServiceGetMatchingService will consume the reference on the matching dictionary,
-            // so we don't need to release the dictionary.
-            IORef<io_registry_entry_t> acceleratorEntry = AcquireIORef(
-                IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict.Detach()));
-            if (acceleratorEntry == IO_OBJECT_NULL) {
-                return DAWN_INTERNAL_ERROR(
-                    "Failed to get the IO registry entry for the accelerator");
-            }
+    // CFDataGetBytePtr() is guaranteed to return a read-only pointer
+    value = *reinterpret_cast<const uint32_t*>(CFDataGetBytePtr(data.Get()));
+    return value;
+}
 
-            // Get the parent entry that will be the IOPCIDevice
-            IORef<io_registry_entry_t> deviceEntry;
-            if (IORegistryEntryGetParentEntry(acceleratorEntry.Get(), kIOServicePlane,
-                                              deviceEntry.InitializeInto()) != kIOReturnSuccess) {
-                return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
-            }
+// Queries the IO Registry to find the PCI device and vendor IDs of the MTLDevice.
+// The registry entry corresponding to [device registryID] doesn't contain the exact PCI ids
+// because it corresponds to a driver. However its parent entry corresponds to the device
+// itself and has uint32_t "device-id" and "registry-id" keys. For example on a dual-GPU
+// MacBook Pro 2017 the IORegistry explorer shows the following tree (simplified here):
+//
+//  - PCI0@0
+//  | - AppleACPIPCI
+//  | | - IGPU@2 (type IOPCIDevice)
+//  | | | - IntelAccelerator (type IOGraphicsAccelerator2)
+//  | | - PEG0@1
+//  | | | - IOPP
+//  | | | | - GFX0@0 (type IOPCIDevice)
+//  | | | | | - AMDRadeonX4000_AMDBaffinGraphicsAccelerator (type IOGraphicsAccelerator2)
+//
+// [device registryID] is the ID for one of the IOGraphicsAccelerator2 and we can see that
+// their parent always is an IOPCIDevice that has properties for the device and vendor IDs.
+MaybeError API_AVAILABLE(macos(10.13))
+    GetDeviceIORegistryPCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+    // Get a matching dictionary for the IOGraphicsAccelerator2
+    CFRef<CFMutableDictionaryRef> matchingDict =
+        AcquireCFRef(IORegistryEntryIDMatching([device registryID]));
+    if (matchingDict == nullptr) {
+        return DAWN_INTERNAL_ERROR("Failed to create the matching dict for the device");
+    }
 
-            ASSERT(deviceEntry != IO_OBJECT_NULL);
+    // IOServiceGetMatchingService will consume the reference on the matching dictionary,
+    // so we don't need to release the dictionary.
+    IORef<io_registry_entry_t> acceleratorEntry =
+        AcquireIORef(IOServiceGetMatchingService(kIOMasterPortDefault, matchingDict.Detach()));
+    if (acceleratorEntry == IO_OBJECT_NULL) {
+        return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the accelerator");
+    }
 
-            uint32_t vendorId = GetEntryProperty(deviceEntry.Get(), CFSTR("vendor-id"));
-            uint32_t deviceId = GetEntryProperty(deviceEntry.Get(), CFSTR("device-id"));
+    // Get the parent entry that will be the IOPCIDevice
+    IORef<io_registry_entry_t> deviceEntry;
+    if (IORegistryEntryGetParentEntry(acceleratorEntry.Get(), kIOServicePlane,
+                                      deviceEntry.InitializeInto()) != kIOReturnSuccess) {
+        return DAWN_INTERNAL_ERROR("Failed to get the IO registry entry for the device");
+    }
 
-            *ids = PCIIDs{vendorId, deviceId};
+    ASSERT(deviceEntry != IO_OBJECT_NULL);
 
-            return {};
-        }
+    uint32_t vendorId = GetEntryProperty(deviceEntry.Get(), CFSTR("vendor-id"));
+    uint32_t deviceId = GetEntryProperty(deviceEntry.Get(), CFSTR("device-id"));
 
-        MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
-            // [device registryID] is introduced on macOS 10.13+, otherwise workaround to get vendor
-            // id by vendor name on old macOS
-            if (@available(macos 10.13, *)) {
-                return GetDeviceIORegistryPCIInfo(device, ids);
-            } else {
-                return GetVendorIdFromVendors(device, ids);
-            }
-        }
+    *ids = PCIIDs{vendorId, deviceId};
 
-        bool IsMetalSupported() {
-            // Metal was first introduced in macOS 10.11
-            // WebGPU is targeted at macOS 10.12+
-            // TODO(dawn:1181): Dawn native should allow non-conformant WebGPU on macOS 10.11
-            return IsMacOSVersionAtLeast(10, 12);
-        }
+    return {};
+}
+
+MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+    // [device registryID] is introduced on macOS 10.13+, otherwise workaround to get vendor
+    // id by vendor name on old macOS
+    if (@available(macos 10.13, *)) {
+        return GetDeviceIORegistryPCIInfo(device, ids);
+    } else {
+        return GetVendorIdFromVendors(device, ids);
+    }
+}
+
+bool IsMetalSupported() {
+    // Metal was first introduced in macOS 10.11
+    // WebGPU is targeted at macOS 10.12+
+    // TODO(dawn:1181): Dawn native should allow non-conformant WebGPU on macOS 10.11
+    return IsMacOSVersionAtLeast(10, 12);
+}
 #elif defined(DAWN_PLATFORM_IOS)
-        MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
-            DAWN_UNUSED(device);
-            *ids = PCIIDs{0, 0};
-            return {};
-        }
+MaybeError GetDevicePCIInfo(id<MTLDevice> device, PCIIDs* ids) {
+    DAWN_UNUSED(device);
+    *ids = PCIIDs{0, 0};
+    return {};
+}
 
-        bool IsMetalSupported() {
-            return true;
-        }
+bool IsMetalSupported() {
+    return true;
+}
 #else
-#    error "Unsupported Apple platform."
+#error "Unsupported Apple platform."
 #endif
 
-        DAWN_NOINLINE bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
-            API_AVAILABLE(macos(11.0), ios(14.0)) {
-            bool isBlitBoundarySupported =
-                [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
-            bool isDispatchBoundarySupported =
-                [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
-            bool isDrawBoundarySupported =
-                [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
+DAWN_NOINLINE bool IsCounterSamplingBoundarySupport(id<MTLDevice> device)
+    API_AVAILABLE(macos(11.0), ios(14.0)) {
+    bool isBlitBoundarySupported =
+        [device supportsCounterSampling:MTLCounterSamplingPointAtBlitBoundary];
+    bool isDispatchBoundarySupported =
+        [device supportsCounterSampling:MTLCounterSamplingPointAtDispatchBoundary];
+    bool isDrawBoundarySupported =
+        [device supportsCounterSampling:MTLCounterSamplingPointAtDrawBoundary];
 
-            return isBlitBoundarySupported && isDispatchBoundarySupported &&
-                   isDrawBoundarySupported;
+    return isBlitBoundarySupported && isDispatchBoundarySupported && isDrawBoundarySupported;
+}
+
+// This method has seen hard-to-debug crashes. See crbug.com/dawn/1102.
+// For now, it is written defensively, with many potentially unnecessary guards until
+// we narrow down the cause of the problem.
+DAWN_NOINLINE bool IsGPUCounterSupported(id<MTLDevice> device,
+                                         MTLCommonCounterSet counterSetName,
+                                         std::vector<MTLCommonCounter> counterNames)
+    API_AVAILABLE(macos(10.15), ios(14.0)) {
+    NSPRef<id<MTLCounterSet>> counterSet = nil;
+    if (![device respondsToSelector:@selector(counterSets)]) {
+        dawn::ErrorLog() << "MTLDevice does not respond to selector: counterSets.";
+        return false;
+    }
+    NSArray<id<MTLCounterSet>>* counterSets = device.counterSets;
+    if (counterSets == nil) {
+        // On some systems, [device counterSets] may be null and not an empty array.
+        return false;
+    }
+    // MTLDevice’s counterSets property declares which counter sets it supports. Check
+    // whether it's available on the device before requesting a counter set.
+    // Note: Don't do for..in loop to avoid potentially crashy interaction with
+    // NSFastEnumeration.
+    for (NSUInteger i = 0; i < counterSets.count; ++i) {
+        id<MTLCounterSet> set = [counterSets objectAtIndex:i];
+        if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
+            counterSet = set;
+            break;
         }
+    }
 
-        // This method has seen hard-to-debug crashes. See crbug.com/dawn/1102.
-        // For now, it is written defensively, with many potentially unnecessary guards until
-        // we narrow down the cause of the problem.
-        DAWN_NOINLINE bool IsGPUCounterSupported(id<MTLDevice> device,
-                                                 MTLCommonCounterSet counterSetName,
-                                                 std::vector<MTLCommonCounter> counterNames)
-            API_AVAILABLE(macos(10.15), ios(14.0)) {
-            NSPRef<id<MTLCounterSet>> counterSet = nil;
-            if (![device respondsToSelector:@selector(counterSets)]) {
-                dawn::ErrorLog() << "MTLDevice does not respond to selector: counterSets.";
-                return false;
-            }
-            NSArray<id<MTLCounterSet>>* counterSets = device.counterSets;
-            if (counterSets == nil) {
-                // On some systems, [device counterSets] may be null and not an empty array.
-                return false;
-            }
-            // MTLDevice’s counterSets property declares which counter sets it supports. Check
-            // whether it's available on the device before requesting a counter set.
-            // Note: Don't do for..in loop to avoid potentially crashy interaction with
-            // NSFastEnumeration.
-            for (NSUInteger i = 0; i < counterSets.count; ++i) {
-                id<MTLCounterSet> set = [counterSets objectAtIndex:i];
-                if ([set.name caseInsensitiveCompare:counterSetName] == NSOrderedSame) {
-                    counterSet = set;
-                    break;
-                }
-            }
+    // The counter set is not supported.
+    if (counterSet == nil) {
+        return false;
+    }
 
-            // The counter set is not supported.
-            if (counterSet == nil) {
-                return false;
-            }
+    if (![*counterSet respondsToSelector:@selector(counters)]) {
+        dawn::ErrorLog() << "MTLCounterSet does not respond to selector: counters.";
+        return false;
+    }
+    NSArray<id<MTLCounter>>* countersInSet = (*counterSet).counters;
+    if (countersInSet == nil) {
+        // On some systems, [MTLCounterSet counters] may be null and not an empty array.
+        return false;
+    }
 
-            if (![*counterSet respondsToSelector:@selector(counters)]) {
-                dawn::ErrorLog() << "MTLCounterSet does not respond to selector: counters.";
-                return false;
+    // A GPU might support a counter set, but only support a subset of the counters in that
+    // set, check if the counter set supports all specific counters we need. Return false
+    // if there is a counter unsupported.
+    for (MTLCommonCounter counterName : counterNames) {
+        bool found = false;
+        // Note: Don't do for..in loop to avoid potentially crashy interaction with
+        // NSFastEnumeration.
+        for (NSUInteger i = 0; i < countersInSet.count; ++i) {
+            id<MTLCounter> counter = [countersInSet objectAtIndex:i];
+            if ([counter.name caseInsensitiveCompare:counterName] == NSOrderedSame) {
+                found = true;
+                break;
             }
-            NSArray<id<MTLCounter>>* countersInSet = (*counterSet).counters;
-            if (countersInSet == nil) {
-                // On some systems, [MTLCounterSet counters] may be null and not an empty array.
-                return false;
-            }
-
-            // A GPU might support a counter set, but only support a subset of the counters in that
-            // set, check if the counter set supports all specific counters we need. Return false
-            // if there is a counter unsupported.
-            for (MTLCommonCounter counterName : counterNames) {
-                bool found = false;
-                // Note: Don't do for..in loop to avoid potentially crashy interaction with
-                // NSFastEnumeration.
-                for (NSUInteger i = 0; i < countersInSet.count; ++i) {
-                    id<MTLCounter> counter = [countersInSet objectAtIndex:i];
-                    if ([counter.name caseInsensitiveCompare:counterName] == NSOrderedSame) {
-                        found = true;
-                        break;
-                    }
-                }
-                if (!found) {
-                    return false;
-                }
-            }
-
-            if (@available(macOS 11.0, iOS 14.0, *)) {
-                // Check whether it can read GPU counters at the specified command boundary. Apple
-                // family GPUs do not support sampling between different Metal commands, because
-                // they defer fragment processing until after the GPU processes all the primitives
-                // in the render pass.
-                if (!IsCounterSamplingBoundarySupport(device)) {
-                    return false;
-                }
-            }
-
-            return true;
         }
+        if (!found) {
+            return false;
+        }
+    }
 
-    }  // anonymous namespace
+    if (@available(macOS 11.0, iOS 14.0, *)) {
+        // Check whether it can read GPU counters at the specified command boundary. Apple
+        // family GPUs do not support sampling between different Metal commands, because
+        // they defer fragment processing until after the GPU processes all the primitives
+        // in the render pass.
+        if (!IsCounterSamplingBoundarySupport(device)) {
+            return false;
+        }
+    }
 
-    // The Metal backend's Adapter.
+    return true;
+}
 
-    class Adapter : public AdapterBase {
-      public:
-        Adapter(InstanceBase* instance, id<MTLDevice> device)
-            : AdapterBase(instance, wgpu::BackendType::Metal), mDevice(device) {
-            mName = std::string([[*mDevice name] UTF8String]);
+}  // anonymous namespace
 
-            PCIIDs ids;
-            if (!instance->ConsumedError(GetDevicePCIInfo(device, &ids))) {
-                mVendorId = ids.vendorId;
-                mDeviceId = ids.deviceId;
-            }
+// The Metal backend's Adapter.
+
+class Adapter : public AdapterBase {
+  public:
+    Adapter(InstanceBase* instance, id<MTLDevice> device)
+        : AdapterBase(instance, wgpu::BackendType::Metal), mDevice(device) {
+        mName = std::string([[*mDevice name] UTF8String]);
+
+        PCIIDs ids;
+        if (!instance->ConsumedError(GetDevicePCIInfo(device, &ids))) {
+            mVendorId = ids.vendorId;
+            mDeviceId = ids.deviceId;
+        }
 
 #if defined(DAWN_PLATFORM_IOS)
-            mAdapterType = wgpu::AdapterType::IntegratedGPU;
-            const char* systemName = "iOS ";
+        mAdapterType = wgpu::AdapterType::IntegratedGPU;
+        const char* systemName = "iOS ";
 #elif defined(DAWN_PLATFORM_MACOS)
-            if ([device isLowPower]) {
-                mAdapterType = wgpu::AdapterType::IntegratedGPU;
-            } else {
-                mAdapterType = wgpu::AdapterType::DiscreteGPU;
-            }
-            const char* systemName = "macOS ";
+        if ([device isLowPower]) {
+            mAdapterType = wgpu::AdapterType::IntegratedGPU;
+        } else {
+            mAdapterType = wgpu::AdapterType::DiscreteGPU;
+        }
+        const char* systemName = "macOS ";
 #else
-#    error "Unsupported Apple platform."
+#error "Unsupported Apple platform."
 #endif
 
-            NSString* osVersion = [[NSProcessInfo processInfo] operatingSystemVersionString];
-            mDriverDescription =
-                "Metal driver on " + std::string(systemName) + [osVersion UTF8String];
-        }
+        NSString* osVersion = [[NSProcessInfo processInfo] operatingSystemVersionString];
+        mDriverDescription = "Metal driver on " + std::string(systemName) + [osVersion UTF8String];
+    }
 
-        // AdapterBase Implementation
-        bool SupportsExternalImages() const override {
-            // Via dawn::native::metal::WrapIOSurface
-            return true;
-        }
+    // AdapterBase Implementation
+    bool SupportsExternalImages() const override {
+        // Via dawn::native::metal::WrapIOSurface
+        return true;
+    }
 
-      private:
-        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-            const DeviceDescriptor* descriptor) override {
-            return Device::Create(this, mDevice, descriptor);
-        }
+  private:
+    ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override {
+        return Device::Create(this, mDevice, descriptor);
+    }
 
-        MaybeError InitializeImpl() override {
-            return {};
-        }
+    MaybeError InitializeImpl() override { return {}; }
 
-        MaybeError InitializeSupportedFeaturesImpl() override {
-            // Check compressed texture format with deprecated MTLFeatureSet way.
+    MaybeError InitializeSupportedFeaturesImpl() override {
+        // Check compressed texture format with deprecated MTLFeatureSet way.
 #if defined(DAWN_PLATFORM_MACOS)
-            if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+        if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+            mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+        }
+#endif
+#if defined(DAWN_PLATFORM_IOS)
+        if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+            mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+        }
+        if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+            mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
+        }
+#endif
+
+        // Check compressed texture format with MTLGPUFamily
+        if (@available(macOS 10.15, iOS 13.0, *)) {
+            if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
                 mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
             }
-#endif
-#if defined(DAWN_PLATFORM_IOS)
-            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
                 mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
             }
-            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
                 mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
             }
-#endif
-
-            // Check compressed texture format with MTLGPUFamily
-            if (@available(macOS 10.15, iOS 13.0, *)) {
-                if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
-                    mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
-                    mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
-                    mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
-                }
-            }
-
-            if (@available(macOS 10.15, iOS 14.0, *)) {
-                if (IsGPUCounterSupported(
-                        *mDevice, MTLCommonCounterSetStatistic,
-                        {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
-                         MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
-                         MTLCommonCounterComputeKernelInvocations})) {
-                    mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
-                }
-
-                if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
-                                          {MTLCommonCounterTimestamp})) {
-                    bool enableTimestampQuery = true;
-
-#if defined(DAWN_PLATFORM_MACOS)
-                    // Disable timestamp query on < macOS 11.0 on AMD GPU because WriteTimestamp
-                    // fails to call without any copy commands on MTLBlitCommandEncoder. This issue
-                    // has been fixed on macOS 11.0. See crbug.com/dawn/545.
-                    if (gpu_info::IsAMD(mVendorId) && !IsMacOSVersionAtLeast(11)) {
-                        enableTimestampQuery = false;
-                    }
-#endif
-
-                    if (enableTimestampQuery) {
-                        mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
-                    }
-                }
-            }
-
-            if (@available(macOS 10.11, iOS 11.0, *)) {
-                mSupportedFeatures.EnableFeature(Feature::DepthClamping);
-            }
-
-            if (@available(macOS 10.11, iOS 9.0, *)) {
-                mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
-            }
-
-            // Uses newTextureWithDescriptor::iosurface::plane which is available
-            // on ios 11.0+ and macOS 11.0+
-            if (@available(macOS 10.11, iOS 11.0, *)) {
-                mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
-            }
-
-#if defined(DAWN_PLATFORM_MACOS)
-            // MTLPixelFormatDepth24Unorm_Stencil8 is only available on macOS 10.11+
-            if ([*mDevice isDepth24Stencil8PixelFormatSupported]) {
-                mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
-            }
-#endif
-
-            return {};
         }
 
-        enum class MTLGPUFamily {
-            Apple1,
-            Apple2,
-            Apple3,
-            Apple4,
-            Apple5,
-            Apple6,
-            Apple7,
-            Mac1,
-            Mac2,
-        };
+        if (@available(macOS 10.15, iOS 14.0, *)) {
+            if (IsGPUCounterSupported(
+                    *mDevice, MTLCommonCounterSetStatistic,
+                    {MTLCommonCounterVertexInvocations, MTLCommonCounterClipperInvocations,
+                     MTLCommonCounterClipperPrimitivesOut, MTLCommonCounterFragmentInvocations,
+                     MTLCommonCounterComputeKernelInvocations})) {
+                mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+            }
 
-        ResultOrError<MTLGPUFamily> GetMTLGPUFamily() const {
-            // https://developer.apple.com/documentation/metal/mtldevice/detecting_gpu_features_and_metal_software_versions?language=objc
+            if (IsGPUCounterSupported(*mDevice, MTLCommonCounterSetTimestamp,
+                                      {MTLCommonCounterTimestamp})) {
+                bool enableTimestampQuery = true;
 
-            if (@available(macOS 10.15, iOS 10.13, *)) {
-                if ([*mDevice supportsFamily:MTLGPUFamilyMac2]) {
-                    return MTLGPUFamily::Mac2;
+#if defined(DAWN_PLATFORM_MACOS)
+                // Disable timestamp query on < macOS 11.0 on AMD GPU because WriteTimestamp
+                // fails to call without any copy commands on MTLBlitCommandEncoder. This issue
+                // has been fixed on macOS 11.0. See crbug.com/dawn/545.
+                if (gpu_info::IsAMD(mVendorId) && !IsMacOSVersionAtLeast(11)) {
+                    enableTimestampQuery = false;
                 }
-                if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
-                    return MTLGPUFamily::Mac1;
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple7]) {
-                    return MTLGPUFamily::Apple7;
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple6]) {
-                    return MTLGPUFamily::Apple6;
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple5]) {
-                    return MTLGPUFamily::Apple5;
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple4]) {
-                    return MTLGPUFamily::Apple4;
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
-                    return MTLGPUFamily::Apple3;
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
-                    return MTLGPUFamily::Apple2;
-                }
-                if ([*mDevice supportsFamily:MTLGPUFamilyApple1]) {
-                    return MTLGPUFamily::Apple1;
+#endif
+
+                if (enableTimestampQuery) {
+                    mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
                 }
             }
+        }
+
+        if (@available(macOS 10.11, iOS 11.0, *)) {
+            mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+        }
+
+        if (@available(macOS 10.11, iOS 9.0, *)) {
+            mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+        }
+
+        // Uses newTextureWithDescriptor:iosurface:plane: which is available
+        // on ios 11.0+ and macOS 11.0+
+        if (@available(macOS 10.11, iOS 11.0, *)) {
+            mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+        }
+
+#if defined(DAWN_PLATFORM_MACOS)
+        // MTLPixelFormatDepth24Unorm_Stencil8 is only available on macOS 10.11+
+        if ([*mDevice isDepth24Stencil8PixelFormatSupported]) {
+            mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+        }
+#endif
+
+        return {};
+    }
+
+    enum class MTLGPUFamily {
+        Apple1,
+        Apple2,
+        Apple3,
+        Apple4,
+        Apple5,
+        Apple6,
+        Apple7,
+        Mac1,
+        Mac2,
+    };
+
+    ResultOrError<MTLGPUFamily> GetMTLGPUFamily() const {
+        // https://developer.apple.com/documentation/metal/mtldevice/detecting_gpu_features_and_metal_software_versions?language=objc
+
+        if (@available(macOS 10.15, iOS 10.13, *)) {
+            if ([*mDevice supportsFamily:MTLGPUFamilyMac2]) {
+                return MTLGPUFamily::Mac2;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyMac1]) {
+                return MTLGPUFamily::Mac1;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple7]) {
+                return MTLGPUFamily::Apple7;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple6]) {
+                return MTLGPUFamily::Apple6;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple5]) {
+                return MTLGPUFamily::Apple5;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple4]) {
+                return MTLGPUFamily::Apple4;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple3]) {
+                return MTLGPUFamily::Apple3;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple2]) {
+                return MTLGPUFamily::Apple2;
+            }
+            if ([*mDevice supportsFamily:MTLGPUFamilyApple1]) {
+                return MTLGPUFamily::Apple1;
+            }
+        }
 
 #if TARGET_OS_OSX
-            if (@available(macOS 10.14, *)) {
-                if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily2_v1]) {
-                    return MTLGPUFamily::Mac2;
-                }
+        if (@available(macOS 10.14, *)) {
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily2_v1]) {
+                return MTLGPUFamily::Mac2;
             }
-            if (@available(macOS 10.11, *)) {
-                if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
-                    return MTLGPUFamily::Mac1;
-                }
-            }
-#elif TARGET_OS_IOS
-            if (@available(iOS 10.11, *)) {
-                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily4_v1]) {
-                    return MTLGPUFamily::Apple4;
-                }
-            }
-            if (@available(iOS 9.0, *)) {
-                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
-                    return MTLGPUFamily::Apple3;
-                }
-            }
-            if (@available(iOS 8.0, *)) {
-                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
-                    return MTLGPUFamily::Apple2;
-                }
-            }
-            if (@available(iOS 8.0, *)) {
-                if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
-                    return MTLGPUFamily::Apple1;
-                }
-            }
-#endif
-            return DAWN_INTERNAL_ERROR("Unsupported Metal device");
         }
+        if (@available(macOS 10.11, *)) {
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v1]) {
+                return MTLGPUFamily::Mac1;
+            }
+        }
+#elif TARGET_OS_IOS
+        if (@available(iOS 10.11, *)) {
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily4_v1]) {
+                return MTLGPUFamily::Apple4;
+            }
+        }
+        if (@available(iOS 9.0, *)) {
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1]) {
+                return MTLGPUFamily::Apple3;
+            }
+        }
+        if (@available(iOS 8.0, *)) {
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily2_v1]) {
+                return MTLGPUFamily::Apple2;
+            }
+        }
+        if (@available(iOS 8.0, *)) {
+            if ([*mDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily1_v1]) {
+                return MTLGPUFamily::Apple1;
+            }
+        }
+#endif
+        return DAWN_INTERNAL_ERROR("Unsupported Metal device");
+    }
 
-        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
-            struct MTLDeviceLimits {
-                uint32_t maxVertexAttribsPerDescriptor;
-                uint32_t maxBufferArgumentEntriesPerFunc;
-                uint32_t maxTextureArgumentEntriesPerFunc;
-                uint32_t maxSamplerStateArgumentEntriesPerFunc;
-                uint32_t maxThreadsPerThreadgroup;
-                uint32_t maxTotalThreadgroupMemory;
-                uint32_t maxFragmentInputComponents;
-                uint32_t max1DTextureSize;
-                uint32_t max2DTextureSize;
-                uint32_t max3DTextureSize;
-                uint32_t maxTextureArrayLayers;
-                uint32_t minBufferOffsetAlignment;
-            };
+    MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
+        struct MTLDeviceLimits {
+            uint32_t maxVertexAttribsPerDescriptor;
+            uint32_t maxBufferArgumentEntriesPerFunc;
+            uint32_t maxTextureArgumentEntriesPerFunc;
+            uint32_t maxSamplerStateArgumentEntriesPerFunc;
+            uint32_t maxThreadsPerThreadgroup;
+            uint32_t maxTotalThreadgroupMemory;
+            uint32_t maxFragmentInputComponents;
+            uint32_t max1DTextureSize;
+            uint32_t max2DTextureSize;
+            uint32_t max3DTextureSize;
+            uint32_t maxTextureArrayLayers;
+            uint32_t minBufferOffsetAlignment;
+        };
 
-            struct LimitsForFamily {
-                uint32_t MTLDeviceLimits::*limit;
-                ityp::array<MTLGPUFamily, uint32_t, 9> values;
-            };
+        struct LimitsForFamily {
+            uint32_t MTLDeviceLimits::*limit;
+            ityp::array<MTLGPUFamily, uint32_t, 9> values;
+        };
 
-            // clang-format off
+        // clang-format off
             // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
             //                                                               Apple                                                      Mac
             //                                                                   1,      2,      3,      4,      5,      6,      7,       1,      2
@@ -516,159 +509,158 @@
                 {&MTLDeviceLimits::maxTextureArrayLayers,                 {  2048u,  2048u,  2048u,  2048u,  2048u,  2048u,  2048u,   2048u,  2048u }},
                 {&MTLDeviceLimits::minBufferOffsetAlignment,              {     4u,     4u,     4u,     4u,     4u,     4u,     4u,    256u,   256u }},
             };
-            // clang-format on
+        // clang-format on
 
-            MTLGPUFamily mtlGPUFamily;
-            DAWN_TRY_ASSIGN(mtlGPUFamily, GetMTLGPUFamily());
+        MTLGPUFamily mtlGPUFamily;
+        DAWN_TRY_ASSIGN(mtlGPUFamily, GetMTLGPUFamily());
 
-            MTLDeviceLimits mtlLimits;
-            for (const auto& limitsForFamily : kMTLLimits) {
-                mtlLimits.*limitsForFamily.limit = limitsForFamily.values[mtlGPUFamily];
-            }
-
-            GetDefaultLimits(&limits->v1);
-
-            limits->v1.maxTextureDimension1D = mtlLimits.max1DTextureSize;
-            limits->v1.maxTextureDimension2D = mtlLimits.max2DTextureSize;
-            limits->v1.maxTextureDimension3D = mtlLimits.max3DTextureSize;
-            limits->v1.maxTextureArrayLayers = mtlLimits.maxTextureArrayLayers;
-
-            uint32_t maxBuffersPerStage = mtlLimits.maxBufferArgumentEntriesPerFunc;
-            maxBuffersPerStage -= 1;  // One slot is reserved to store buffer lengths.
-
-            uint32_t baseMaxBuffersPerStage = limits->v1.maxStorageBuffersPerShaderStage +
-                                              limits->v1.maxUniformBuffersPerShaderStage +
-                                              limits->v1.maxVertexBuffers;
-
-            ASSERT(maxBuffersPerStage >= baseMaxBuffersPerStage);
-            {
-                uint32_t additional = maxBuffersPerStage - baseMaxBuffersPerStage;
-                limits->v1.maxStorageBuffersPerShaderStage += additional / 3;
-                limits->v1.maxUniformBuffersPerShaderStage += additional / 3;
-                limits->v1.maxVertexBuffers += (additional - 2 * (additional / 3));
-            }
-
-            uint32_t baseMaxTexturesPerStage = limits->v1.maxSampledTexturesPerShaderStage +
-                                               limits->v1.maxStorageTexturesPerShaderStage;
-
-            ASSERT(mtlLimits.maxTextureArgumentEntriesPerFunc >= baseMaxTexturesPerStage);
-            {
-                uint32_t additional =
-                    mtlLimits.maxTextureArgumentEntriesPerFunc - baseMaxTexturesPerStage;
-                limits->v1.maxSampledTexturesPerShaderStage += additional / 2;
-                limits->v1.maxStorageTexturesPerShaderStage += (additional - additional / 2);
-            }
-
-            limits->v1.maxSamplersPerShaderStage = mtlLimits.maxSamplerStateArgumentEntriesPerFunc;
-
-            // Metal limits are per-function, so the layout limits are the same as the stage
-            // limits. Note: this should likely change if the implementation uses Metal argument
-            // buffers. Non-dynamic buffers will probably be bound argument buffers, but dynamic
-            // buffers may be set directly.
-            //   Mac GPU families with tier 1 argument buffers support 64
-            //   buffers, 128 textures, and 16 samplers. Mac GPU families
-            //   with tier 2 argument buffers support 500000 buffers and
-            //   textures, and 1024 unique samplers
-            limits->v1.maxDynamicUniformBuffersPerPipelineLayout =
-                limits->v1.maxUniformBuffersPerShaderStage;
-            limits->v1.maxDynamicStorageBuffersPerPipelineLayout =
-                limits->v1.maxStorageBuffersPerShaderStage;
-
-            // The WebGPU limit is the limit across all vertex buffers, combined.
-            limits->v1.maxVertexAttributes =
-                limits->v1.maxVertexBuffers * mtlLimits.maxVertexAttribsPerDescriptor;
-
-            limits->v1.maxInterStageShaderComponents = mtlLimits.maxFragmentInputComponents;
-
-            limits->v1.maxComputeWorkgroupStorageSize = mtlLimits.maxTotalThreadgroupMemory;
-            limits->v1.maxComputeInvocationsPerWorkgroup = mtlLimits.maxThreadsPerThreadgroup;
-            limits->v1.maxComputeWorkgroupSizeX = mtlLimits.maxThreadsPerThreadgroup;
-            limits->v1.maxComputeWorkgroupSizeY = mtlLimits.maxThreadsPerThreadgroup;
-            limits->v1.maxComputeWorkgroupSizeZ = mtlLimits.maxThreadsPerThreadgroup;
-
-            limits->v1.minUniformBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
-            limits->v1.minStorageBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
-
-            uint64_t maxBufferSize = Buffer::QueryMaxBufferLength(*mDevice);
-
-            // Metal has no documented limit on the size of a binding. Use the maximum
-            // buffer size.
-            limits->v1.maxUniformBufferBindingSize = maxBufferSize;
-            limits->v1.maxStorageBufferBindingSize = maxBufferSize;
-
-            // TODO(crbug.com/dawn/685):
-            // LIMITS NOT SET:
-            // - maxBindGroups
-            // - maxVertexBufferArrayStride
-
-            return {};
+        MTLDeviceLimits mtlLimits;
+        for (const auto& limitsForFamily : kMTLLimits) {
+            mtlLimits.*limitsForFamily.limit = limitsForFamily.values[mtlGPUFamily];
         }
 
-        NSPRef<id<MTLDevice>> mDevice;
-    };
+        GetDefaultLimits(&limits->v1);
 
-    // Implementation of the Metal backend's BackendConnection
+        limits->v1.maxTextureDimension1D = mtlLimits.max1DTextureSize;
+        limits->v1.maxTextureDimension2D = mtlLimits.max2DTextureSize;
+        limits->v1.maxTextureDimension3D = mtlLimits.max3DTextureSize;
+        limits->v1.maxTextureArrayLayers = mtlLimits.maxTextureArrayLayers;
 
-    Backend::Backend(InstanceBase* instance)
-        : BackendConnection(instance, wgpu::BackendType::Metal) {
-        if (GetInstance()->IsBackendValidationEnabled()) {
-            setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
+        uint32_t maxBuffersPerStage = mtlLimits.maxBufferArgumentEntriesPerFunc;
+        maxBuffersPerStage -= 1;  // One slot is reserved to store buffer lengths.
+
+        uint32_t baseMaxBuffersPerStage = limits->v1.maxStorageBuffersPerShaderStage +
+                                          limits->v1.maxUniformBuffersPerShaderStage +
+                                          limits->v1.maxVertexBuffers;
+
+        ASSERT(maxBuffersPerStage >= baseMaxBuffersPerStage);
+        {
+            uint32_t additional = maxBuffersPerStage - baseMaxBuffersPerStage;
+            limits->v1.maxStorageBuffersPerShaderStage += additional / 3;
+            limits->v1.maxUniformBuffersPerShaderStage += additional / 3;
+            limits->v1.maxVertexBuffers += (additional - 2 * (additional / 3));
         }
+
+        uint32_t baseMaxTexturesPerStage = limits->v1.maxSampledTexturesPerShaderStage +
+                                           limits->v1.maxStorageTexturesPerShaderStage;
+
+        ASSERT(mtlLimits.maxTextureArgumentEntriesPerFunc >= baseMaxTexturesPerStage);
+        {
+            uint32_t additional =
+                mtlLimits.maxTextureArgumentEntriesPerFunc - baseMaxTexturesPerStage;
+            limits->v1.maxSampledTexturesPerShaderStage += additional / 2;
+            limits->v1.maxStorageTexturesPerShaderStage += (additional - additional / 2);
+        }
+
+        limits->v1.maxSamplersPerShaderStage = mtlLimits.maxSamplerStateArgumentEntriesPerFunc;
+
+        // Metal limits are per-function, so the layout limits are the same as the stage
+        // limits. Note: this should likely change if the implementation uses Metal argument
+        // buffers. Non-dynamic buffers will probably be bound argument buffers, but dynamic
+        // buffers may be set directly.
+        //   Mac GPU families with tier 1 argument buffers support 64
+        //   buffers, 128 textures, and 16 samplers. Mac GPU families
+        //   with tier 2 argument buffers support 500000 buffers and
+        //   textures, and 1024 unique samplers
+        limits->v1.maxDynamicUniformBuffersPerPipelineLayout =
+            limits->v1.maxUniformBuffersPerShaderStage;
+        limits->v1.maxDynamicStorageBuffersPerPipelineLayout =
+            limits->v1.maxStorageBuffersPerShaderStage;
+
+        // The WebGPU limit is the limit across all vertex buffers, combined.
+        limits->v1.maxVertexAttributes =
+            limits->v1.maxVertexBuffers * mtlLimits.maxVertexAttribsPerDescriptor;
+
+        limits->v1.maxInterStageShaderComponents = mtlLimits.maxFragmentInputComponents;
+
+        limits->v1.maxComputeWorkgroupStorageSize = mtlLimits.maxTotalThreadgroupMemory;
+        limits->v1.maxComputeInvocationsPerWorkgroup = mtlLimits.maxThreadsPerThreadgroup;
+        limits->v1.maxComputeWorkgroupSizeX = mtlLimits.maxThreadsPerThreadgroup;
+        limits->v1.maxComputeWorkgroupSizeY = mtlLimits.maxThreadsPerThreadgroup;
+        limits->v1.maxComputeWorkgroupSizeZ = mtlLimits.maxThreadsPerThreadgroup;
+
+        limits->v1.minUniformBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+        limits->v1.minStorageBufferOffsetAlignment = mtlLimits.minBufferOffsetAlignment;
+
+        uint64_t maxBufferSize = Buffer::QueryMaxBufferLength(*mDevice);
+
+        // Metal has no documented limit on the size of a binding. Use the maximum
+        // buffer size.
+        limits->v1.maxUniformBufferBindingSize = maxBufferSize;
+        limits->v1.maxStorageBufferBindingSize = maxBufferSize;
+
+        // TODO(crbug.com/dawn/685):
+        // LIMITS NOT SET:
+        // - maxBindGroups
+        // - maxVertexBufferArrayStride
+
+        return {};
     }
 
-    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
-        AdapterDiscoveryOptions options;
-        auto result = DiscoverAdapters(&options);
-        if (result.IsError()) {
-            GetInstance()->ConsumedError(result.AcquireError());
-            return {};
-        }
-        return result.AcquireSuccess();
+    NSPRef<id<MTLDevice>> mDevice;
+};
+
+// Implementation of the Metal backend's BackendConnection
+
+Backend::Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Metal) {
+    if (GetInstance()->IsBackendValidationEnabled()) {
+        setenv("METAL_DEVICE_WRAPPER_TYPE", "1", 1);
     }
+}
 
-    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
-        const AdapterDiscoveryOptionsBase* optionsBase) {
-        ASSERT(optionsBase->backendType == WGPUBackendType_Metal);
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+    AdapterDiscoveryOptions options;
+    auto result = DiscoverAdapters(&options);
+    if (result.IsError()) {
+        GetInstance()->ConsumedError(result.AcquireError());
+        return {};
+    }
+    return result.AcquireSuccess();
+}
 
-        std::vector<Ref<AdapterBase>> adapters;
-        BOOL supportedVersion = NO;
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+    const AdapterDiscoveryOptionsBase* optionsBase) {
+    ASSERT(optionsBase->backendType == WGPUBackendType_Metal);
+
+    std::vector<Ref<AdapterBase>> adapters;
+    BOOL supportedVersion = NO;
 #if defined(DAWN_PLATFORM_MACOS)
-        if (@available(macOS 10.11, *)) {
-            supportedVersion = YES;
+    if (@available(macOS 10.11, *)) {
+        supportedVersion = YES;
 
-            NSRef<NSArray<id<MTLDevice>>> devices = AcquireNSRef(MTLCopyAllDevices());
+        NSRef<NSArray<id<MTLDevice>>> devices = AcquireNSRef(MTLCopyAllDevices());
 
-            for (id<MTLDevice> device in devices.Get()) {
-                Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance(), device));
-                if (!GetInstance()->ConsumedError(adapter->Initialize())) {
-                    adapters.push_back(std::move(adapter));
-                }
-            }
-        }
-#endif
-
-#if defined(DAWN_PLATFORM_IOS)
-        if (@available(iOS 8.0, *)) {
-            supportedVersion = YES;
-            // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
-            Ref<Adapter> adapter =
-                AcquireRef(new Adapter(GetInstance(), MTLCreateSystemDefaultDevice()));
+        for (id<MTLDevice> device in devices.Get()) {
+            Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance(), device));
             if (!GetInstance()->ConsumedError(adapter->Initialize())) {
                 adapters.push_back(std::move(adapter));
             }
         }
+    }
 #endif
-        if (!supportedVersion) {
-            UNREACHABLE();
-        }
-        return adapters;
-    }
 
-    BackendConnection* Connect(InstanceBase* instance) {
-        if (!IsMetalSupported()) {
-            return nullptr;
+#if defined(DAWN_PLATFORM_IOS)
+    if (@available(iOS 8.0, *)) {
+        supportedVersion = YES;
+        // iOS only has a single device so MTLCopyAllDevices doesn't exist there.
+        Ref<Adapter> adapter =
+            AcquireRef(new Adapter(GetInstance(), MTLCreateSystemDefaultDevice()));
+        if (!GetInstance()->ConsumedError(adapter->Initialize())) {
+            adapters.push_back(std::move(adapter));
         }
-        return new Backend(instance);
     }
+#endif
+    if (!supportedVersion) {
+        UNREACHABLE();
+    }
+    return adapters;
+}
+
+BackendConnection* Connect(InstanceBase* instance) {
+    if (!IsMetalSupported()) {
+        return nullptr;
+    }
+    return new Backend(instance);
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/BindGroupLayoutMTL.h b/src/dawn/native/metal/BindGroupLayoutMTL.h
index fbd344c..97688eb 100644
--- a/src/dawn/native/metal/BindGroupLayoutMTL.h
+++ b/src/dawn/native/metal/BindGroupLayoutMTL.h
@@ -20,26 +20,26 @@
 
 namespace dawn::native::metal {
 
-    class BindGroup;
-    class Device;
+class BindGroup;
+class Device;
 
-    class BindGroupLayout final : public BindGroupLayoutBase {
-      public:
-        static Ref<BindGroupLayout> Create(DeviceBase* device,
-                                           const BindGroupLayoutDescriptor* descriptor,
-                                           PipelineCompatibilityToken pipelineCompatibilityToken);
+class BindGroupLayout final : public BindGroupLayoutBase {
+  public:
+    static Ref<BindGroupLayout> Create(DeviceBase* device,
+                                       const BindGroupLayoutDescriptor* descriptor,
+                                       PipelineCompatibilityToken pipelineCompatibilityToken);
 
-        Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
-        void DeallocateBindGroup(BindGroup* bindGroup);
+    Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+    void DeallocateBindGroup(BindGroup* bindGroup);
 
-      private:
-        BindGroupLayout(DeviceBase* device,
-                        const BindGroupLayoutDescriptor* descriptor,
-                        PipelineCompatibilityToken pipelineCompatibilityToken);
-        ~BindGroupLayout() override = default;
+  private:
+    BindGroupLayout(DeviceBase* device,
+                    const BindGroupLayoutDescriptor* descriptor,
+                    PipelineCompatibilityToken pipelineCompatibilityToken);
+    ~BindGroupLayout() override = default;
 
-        SlabAllocator<BindGroup> mBindGroupAllocator;
-    };
+    SlabAllocator<BindGroup> mBindGroupAllocator;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/BindGroupLayoutMTL.mm b/src/dawn/native/metal/BindGroupLayoutMTL.mm
index e413bdd..2f1012c 100644
--- a/src/dawn/native/metal/BindGroupLayoutMTL.mm
+++ b/src/dawn/native/metal/BindGroupLayoutMTL.mm
@@ -18,28 +18,27 @@
 
 namespace dawn::native::metal {
 
-    // static
-    Ref<BindGroupLayout> BindGroupLayout::Create(
-        DeviceBase* device,
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
-    }
+// static
+Ref<BindGroupLayout> BindGroupLayout::Create(
+    DeviceBase* device,
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    return AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+}
 
-    BindGroupLayout::BindGroupLayout(DeviceBase* device,
-                                     const BindGroupLayoutDescriptor* descriptor,
-                                     PipelineCompatibilityToken pipelineCompatibilityToken)
-        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
-          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
-    }
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                 const BindGroupLayoutDescriptor* descriptor,
+                                 PipelineCompatibilityToken pipelineCompatibilityToken)
+    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+      mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {}
 
-    Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
-                                                      const BindGroupDescriptor* descriptor) {
-        return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
-    }
+Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+                                                  const BindGroupDescriptor* descriptor) {
+    return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+}
 
-    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
-        mBindGroupAllocator.Deallocate(bindGroup);
-    }
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+    mBindGroupAllocator.Deallocate(bindGroup);
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/BindGroupMTL.h b/src/dawn/native/metal/BindGroupMTL.h
index 9fd71d2..5e75395 100644
--- a/src/dawn/native/metal/BindGroupMTL.h
+++ b/src/dawn/native/metal/BindGroupMTL.h
@@ -20,19 +20,19 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    class BindGroup final : public BindGroupBase, public PlacementAllocated {
-      public:
-        static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+  public:
+    static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
 
-        BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+    BindGroup(Device* device, const BindGroupDescriptor* descriptor);
 
-      private:
-        ~BindGroup() override;
+  private:
+    ~BindGroup() override;
 
-        void DestroyImpl() override;
-    };
+    void DestroyImpl() override;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/BindGroupMTL.mm b/src/dawn/native/metal/BindGroupMTL.mm
index a8e02a8..90b9e23 100644
--- a/src/dawn/native/metal/BindGroupMTL.mm
+++ b/src/dawn/native/metal/BindGroupMTL.mm
@@ -18,20 +18,19 @@
 #include "dawn/native/metal/DeviceMTL.h"
 namespace dawn::native::metal {
 
-    BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
-        : BindGroupBase(this, device, descriptor) {
-    }
+BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+    : BindGroupBase(this, device, descriptor) {}
 
-    BindGroup::~BindGroup() = default;
+BindGroup::~BindGroup() = default;
 
-    void BindGroup::DestroyImpl() {
-        BindGroupBase::DestroyImpl();
-        ToBackend(GetLayout())->DeallocateBindGroup(this);
-    }
+void BindGroup::DestroyImpl() {
+    BindGroupBase::DestroyImpl();
+    ToBackend(GetLayout())->DeallocateBindGroup(this);
+}
 
-    // static
-    Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
-        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
-    }
+// static
+Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+    return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/BufferMTL.h b/src/dawn/native/metal/BufferMTL.h
index 4e36736..096237b 100644
--- a/src/dawn/native/metal/BufferMTL.h
+++ b/src/dawn/native/metal/BufferMTL.h
@@ -23,44 +23,43 @@
 
 namespace dawn::native::metal {
 
-    class CommandRecordingContext;
-    class Device;
+class CommandRecordingContext;
+class Device;
 
-    class Buffer final : public BufferBase {
-      public:
-        static ResultOrError<Ref<Buffer>> Create(Device* device,
-                                                 const BufferDescriptor* descriptor);
-        id<MTLBuffer> GetMTLBuffer() const;
+class Buffer final : public BufferBase {
+  public:
+    static ResultOrError<Ref<Buffer>> Create(Device* device, const BufferDescriptor* descriptor);
+    id<MTLBuffer> GetMTLBuffer() const;
 
-        bool EnsureDataInitialized(CommandRecordingContext* commandContext);
-        bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
-                                                uint64_t offset,
-                                                uint64_t size);
-        bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
-                                                const CopyTextureToBufferCmd* copy);
+    bool EnsureDataInitialized(CommandRecordingContext* commandContext);
+    bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                            uint64_t offset,
+                                            uint64_t size);
+    bool EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                            const CopyTextureToBufferCmd* copy);
 
-        static uint64_t QueryMaxBufferLength(id<MTLDevice> mtlDevice);
+    static uint64_t QueryMaxBufferLength(id<MTLDevice> mtlDevice);
 
-      private:
-        using BufferBase::BufferBase;
-        MaybeError Initialize(bool mappedAtCreation);
+  private:
+    using BufferBase::BufferBase;
+    MaybeError Initialize(bool mappedAtCreation);
 
-        ~Buffer() override;
-        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
-        void UnmapImpl() override;
-        void DestroyImpl() override;
-        void* GetMappedPointerImpl() override;
-        bool IsCPUWritableAtCreation() const override;
-        MaybeError MapAtCreationImpl() override;
+    ~Buffer() override;
+    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+    void UnmapImpl() override;
+    void DestroyImpl() override;
+    void* GetMappedPointerImpl() override;
+    bool IsCPUWritableAtCreation() const override;
+    MaybeError MapAtCreationImpl() override;
 
-        void InitializeToZero(CommandRecordingContext* commandContext);
-        void ClearBuffer(CommandRecordingContext* commandContext,
-                         uint8_t clearValue,
-                         uint64_t offset = 0,
-                         uint64_t size = 0);
+    void InitializeToZero(CommandRecordingContext* commandContext);
+    void ClearBuffer(CommandRecordingContext* commandContext,
+                     uint8_t clearValue,
+                     uint64_t offset = 0,
+                     uint64_t size = 0);
 
-        NSPRef<id<MTLBuffer>> mMtlBuffer;
-    };
+    NSPRef<id<MTLBuffer>> mMtlBuffer;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/BufferMTL.mm b/src/dawn/native/metal/BufferMTL.mm
index 695872a..e8d74c1 100644
--- a/src/dawn/native/metal/BufferMTL.mm
+++ b/src/dawn/native/metal/BufferMTL.mm
@@ -22,219 +22,216 @@
 #include <limits>
 
 namespace dawn::native::metal {
-    // The size of uniform buffer and storage buffer need to be aligned to 16 bytes which is the
-    // largest alignment of supported data types
-    static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
+// The size of uniform buffer and storage buffer need to be aligned to 16 bytes which is the
+// largest alignment of supported data types
+static constexpr uint32_t kMinUniformOrStorageBufferAlignment = 16u;
 
-    // static
-    ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
-        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
-        DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
-        return std::move(buffer);
+// static
+ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+    Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+    DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+    return std::move(buffer);
+}
+
+// static
+uint64_t Buffer::QueryMaxBufferLength(id<MTLDevice> mtlDevice) {
+    if (@available(iOS 12, tvOS 12, macOS 10.14, *)) {
+        return [mtlDevice maxBufferLength];
     }
 
-    // static
-    uint64_t Buffer::QueryMaxBufferLength(id<MTLDevice> mtlDevice) {
-        if (@available(iOS 12, tvOS 12, macOS 10.14, *)) {
-            return [mtlDevice maxBufferLength];
-        }
-
-        // Earlier versions of Metal had maximums defined in the Metal feature set tables
-        // https://metalbyexample.com/wp-content/uploads/Metal-Feature-Set-Tables-2018.pdf
+    // Earlier versions of Metal had maximums defined in the Metal feature set tables
+    // https://metalbyexample.com/wp-content/uploads/Metal-Feature-Set-Tables-2018.pdf
 #if defined(DAWN_PLATFORM_MACOS)
-        // 10.12 and 10.13 have a 1Gb limit.
-        if (@available(macOS 10.12, *)) {
-            // |maxBufferLength| isn't always available on older systems. If available, use
-            // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
-            // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
-            return 1024 * 1024 * 1024;
-        }
-        // 10.11 has a 256Mb limit
-        if (@available(maxOS 10.11, *)) {
-            return 256 * 1024 * 1024;
-        }
-#else
-        // macOS / tvOS: 256Mb limit in versions without [MTLDevice maxBufferLength]
+    // 10.12 and 10.13 have a 1Gb limit.
+    if (@available(macOS 10.12, *)) {
+        // |maxBufferLength| isn't always available on older systems. If available, use
+        // |recommendedMaxWorkingSetSize| instead. We can probably allocate more than this,
+        // but don't have a way to discover a better limit. MoltenVK also uses this heuristic.
+        return 1024 * 1024 * 1024;
+    }
+    // 10.11 has a 256Mb limit
+    if (@available(macOS 10.11, *)) {
         return 256 * 1024 * 1024;
+    }
+#else
+    // iOS / tvOS: 256Mb limit in versions without [MTLDevice maxBufferLength]
+    return 256 * 1024 * 1024;
 #endif
+}
+
+MaybeError Buffer::Initialize(bool mappedAtCreation) {
+    MTLResourceOptions storageMode;
+    if (GetUsage() & kMappableBufferUsages) {
+        storageMode = MTLResourceStorageModeShared;
+    } else {
+        storageMode = MTLResourceStorageModePrivate;
     }
 
-    MaybeError Buffer::Initialize(bool mappedAtCreation) {
-        MTLResourceOptions storageMode;
-        if (GetUsage() & kMappableBufferUsages) {
-            storageMode = MTLResourceStorageModeShared;
-        } else {
-            storageMode = MTLResourceStorageModePrivate;
-        }
-
-        uint32_t alignment = 1;
+    uint32_t alignment = 1;
 #ifdef DAWN_PLATFORM_MACOS
-        // [MTLBlitCommandEncoder fillBuffer] requires the size to be a multiple of 4 on MacOS.
-        alignment = 4;
+    // [MTLBlitCommandEncoder fillBuffer] requires the size to be a multiple of 4 on MacOS.
+    alignment = 4;
 #endif
 
-        // Metal validation layer requires the size of uniform buffer and storage buffer to be no
-        // less than the size of the buffer block defined in shader, and the overall size of the
-        // buffer must be aligned to the largest alignment of its members.
-        if (GetUsage() &
-            (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
-            ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
-            alignment = kMinUniformOrStorageBufferAlignment;
-        }
-
-        // The vertex pulling transform requires at least 4 bytes in the buffer.
-        // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
-        // after the end.
-        NSUInteger extraBytes = 0u;
-        if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
-            extraBytes = 4u;
-        }
-
-        if (GetSize() > std::numeric_limits<NSUInteger>::max() - extraBytes) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
-        }
-        NSUInteger currentSize =
-            std::max(static_cast<NSUInteger>(GetSize()) + extraBytes, NSUInteger(4));
-
-        if (currentSize > std::numeric_limits<NSUInteger>::max() - alignment) {
-            // Alignment would overlow.
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
-        }
-        currentSize = Align(currentSize, alignment);
-
-        uint64_t maxBufferSize = QueryMaxBufferLength(ToBackend(GetDevice())->GetMTLDevice());
-        if (currentSize > maxBufferSize) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
-        }
-
-        mAllocatedSize = currentSize;
-        mMtlBuffer.Acquire([ToBackend(GetDevice())->GetMTLDevice()
-            newBufferWithLength:currentSize
-                        options:storageMode]);
-        if (mMtlBuffer == nullptr) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
-        }
-
-        // The buffers with mappedAtCreation == true will be initialized in
-        // BufferBase::MapAtCreation().
-        if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
-            !mappedAtCreation) {
-            CommandRecordingContext* commandContext =
-                ToBackend(GetDevice())->GetPendingCommandContext();
-            ClearBuffer(commandContext, uint8_t(1u));
-        }
-
-        // Initialize the padding bytes to zero.
-        if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) &&
-            !mappedAtCreation) {
-            uint32_t paddingBytes = GetAllocatedSize() - GetSize();
-            if (paddingBytes > 0) {
-                uint32_t clearSize = Align(paddingBytes, 4);
-                uint64_t clearOffset = GetAllocatedSize() - clearSize;
-
-                CommandRecordingContext* commandContext =
-                    ToBackend(GetDevice())->GetPendingCommandContext();
-                ClearBuffer(commandContext, 0, clearOffset, clearSize);
-            }
-        }
-        return {};
+    // Metal validation layer requires the size of uniform buffer and storage buffer to be no
+    // less than the size of the buffer block defined in shader, and the overall size of the
+    // buffer must be aligned to the largest alignment of its members.
+    if (GetUsage() &
+        (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+        ASSERT(IsAligned(kMinUniformOrStorageBufferAlignment, alignment));
+        alignment = kMinUniformOrStorageBufferAlignment;
     }
 
-    Buffer::~Buffer() = default;
-
-    id<MTLBuffer> Buffer::GetMTLBuffer() const {
-        return mMtlBuffer.Get();
+    // The vertex pulling transform requires at least 4 bytes in the buffer.
+    // 0-sized vertex buffer bindings are allowed, so we always need an additional 4 bytes
+    // after the end.
+    NSUInteger extraBytes = 0u;
+    if ((GetUsage() & wgpu::BufferUsage::Vertex) != 0) {
+        extraBytes = 4u;
     }
 
-    bool Buffer::IsCPUWritableAtCreation() const {
-        // TODO(enga): Handle CPU-visible memory on UMA
-        return GetUsage() & kMappableBufferUsages;
+    if (GetSize() > std::numeric_limits<NSUInteger>::max() - extraBytes) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+    }
+    NSUInteger currentSize =
+        std::max(static_cast<NSUInteger>(GetSize()) + extraBytes, NSUInteger(4));
+
+    if (currentSize > std::numeric_limits<NSUInteger>::max() - alignment) {
+        // Alignment would overflow.
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+    }
+    currentSize = Align(currentSize, alignment);
+
+    uint64_t maxBufferSize = QueryMaxBufferLength(ToBackend(GetDevice())->GetMTLDevice());
+    if (currentSize > maxBufferSize) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
     }
 
-    MaybeError Buffer::MapAtCreationImpl() {
-        return {};
+    mAllocatedSize = currentSize;
+    mMtlBuffer.Acquire([ToBackend(GetDevice())->GetMTLDevice() newBufferWithLength:currentSize
+                                                                           options:storageMode]);
+    if (mMtlBuffer == nullptr) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation failed");
     }
 
-    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+    // The buffers with mappedAtCreation == true will be initialized in
+    // BufferBase::MapAtCreation().
+    if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+        !mappedAtCreation) {
         CommandRecordingContext* commandContext =
             ToBackend(GetDevice())->GetPendingCommandContext();
-        EnsureDataInitialized(commandContext);
-
-        return {};
+        ClearBuffer(commandContext, uint8_t(1u));
     }
 
-    void* Buffer::GetMappedPointerImpl() {
-        return [*mMtlBuffer contents];
-    }
+    // Initialize the padding bytes to zero.
+    if (GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+        uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+        if (paddingBytes > 0) {
+            uint32_t clearSize = Align(paddingBytes, 4);
+            uint64_t clearOffset = GetAllocatedSize() - clearSize;
 
-    void Buffer::UnmapImpl() {
-        // Nothing to do, Metal StorageModeShared buffers are always mapped.
-    }
-
-    void Buffer::DestroyImpl() {
-        BufferBase::DestroyImpl();
-        mMtlBuffer = nullptr;
-    }
-
-    bool Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
-        if (!NeedsInitialization()) {
-            return false;
+            CommandRecordingContext* commandContext =
+                ToBackend(GetDevice())->GetPendingCommandContext();
+            ClearBuffer(commandContext, 0, clearOffset, clearSize);
         }
+    }
+    return {};
+}
 
-        InitializeToZero(commandContext);
-        return true;
+Buffer::~Buffer() = default;
+
+id<MTLBuffer> Buffer::GetMTLBuffer() const {
+    return mMtlBuffer.Get();
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+    // TODO(enga): Handle CPU-visible memory on UMA
+    return GetUsage() & kMappableBufferUsages;
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+    return {};
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+    CommandRecordingContext* commandContext = ToBackend(GetDevice())->GetPendingCommandContext();
+    EnsureDataInitialized(commandContext);
+
+    return {};
+}
+
+void* Buffer::GetMappedPointerImpl() {
+    return [*mMtlBuffer contents];
+}
+
+void Buffer::UnmapImpl() {
+    // Nothing to do, Metal StorageModeShared buffers are always mapped.
+}
+
+void Buffer::DestroyImpl() {
+    BufferBase::DestroyImpl();
+    mMtlBuffer = nullptr;
+}
+
+bool Buffer::EnsureDataInitialized(CommandRecordingContext* commandContext) {
+    if (!NeedsInitialization()) {
+        return false;
     }
 
-    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
-                                                    uint64_t offset,
-                                                    uint64_t size) {
-        if (!NeedsInitialization()) {
-            return false;
-        }
+    InitializeToZero(commandContext);
+    return true;
+}
 
-        if (IsFullBufferRange(offset, size)) {
-            SetIsDataInitialized();
-            return false;
-        }
-
-        InitializeToZero(commandContext);
-        return true;
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                uint64_t offset,
+                                                uint64_t size) {
+    if (!NeedsInitialization()) {
+        return false;
     }
 
-    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
-                                                    const CopyTextureToBufferCmd* copy) {
-        if (!NeedsInitialization()) {
-            return false;
-        }
-
-        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
-            SetIsDataInitialized();
-            return false;
-        }
-
-        InitializeToZero(commandContext);
-        return true;
-    }
-
-    void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
-        ASSERT(NeedsInitialization());
-
-        ClearBuffer(commandContext, uint8_t(0u));
-
+    if (IsFullBufferRange(offset, size)) {
         SetIsDataInitialized();
-        GetDevice()->IncrementLazyClearCountForTesting();
+        return false;
     }
 
-    void Buffer::ClearBuffer(CommandRecordingContext* commandContext,
-                             uint8_t clearValue,
-                             uint64_t offset,
-                             uint64_t size) {
-        ASSERT(commandContext != nullptr);
-        size = size > 0 ? size : GetAllocatedSize();
-        ASSERT(size > 0);
-        [commandContext->EnsureBlit() fillBuffer:mMtlBuffer.Get()
-                                           range:NSMakeRange(offset, size)
-                                           value:clearValue];
+    InitializeToZero(commandContext);
+    return true;
+}
+
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* commandContext,
+                                                const CopyTextureToBufferCmd* copy) {
+    if (!NeedsInitialization()) {
+        return false;
     }
 
+    if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+        SetIsDataInitialized();
+        return false;
+    }
+
+    InitializeToZero(commandContext);
+    return true;
+}
+
+void Buffer::InitializeToZero(CommandRecordingContext* commandContext) {
+    ASSERT(NeedsInitialization());
+
+    ClearBuffer(commandContext, uint8_t(0u));
+
+    SetIsDataInitialized();
+    GetDevice()->IncrementLazyClearCountForTesting();
+}
+
+void Buffer::ClearBuffer(CommandRecordingContext* commandContext,
+                         uint8_t clearValue,
+                         uint64_t offset,
+                         uint64_t size) {
+    ASSERT(commandContext != nullptr);
+    size = size > 0 ? size : GetAllocatedSize();
+    ASSERT(size > 0);
+    [commandContext->EnsureBlit() fillBuffer:mMtlBuffer.Get()
+                                       range:NSMakeRange(offset, size)
+                                       value:clearValue];
+}
+
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/CommandBufferMTL.h b/src/dawn/native/metal/CommandBufferMTL.h
index d5612d7..8f7a983 100644
--- a/src/dawn/native/metal/CommandBufferMTL.h
+++ b/src/dawn/native/metal/CommandBufferMTL.h
@@ -21,40 +21,40 @@
 #import <Metal/Metal.h>
 
 namespace dawn::native {
-    class CommandEncoder;
+class CommandEncoder;
 }
 
 namespace dawn::native::metal {
 
-    class CommandRecordingContext;
-    class Device;
-    class Texture;
+class CommandRecordingContext;
+class Device;
+class Texture;
 
-    void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
-                                   id<MTLBuffer> mtlBuffer,
-                                   uint64_t bufferSize,
-                                   uint64_t offset,
-                                   uint32_t bytesPerRow,
-                                   uint32_t rowsPerImage,
-                                   Texture* texture,
-                                   uint32_t mipLevel,
-                                   const Origin3D& origin,
-                                   Aspect aspect,
-                                   const Extent3D& copySize);
+void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+                               id<MTLBuffer> mtlBuffer,
+                               uint64_t bufferSize,
+                               uint64_t offset,
+                               uint32_t bytesPerRow,
+                               uint32_t rowsPerImage,
+                               Texture* texture,
+                               uint32_t mipLevel,
+                               const Origin3D& origin,
+                               Aspect aspect,
+                               const Extent3D& copySize);
 
-    class CommandBuffer final : public CommandBufferBase {
-      public:
-        static Ref<CommandBuffer> Create(CommandEncoder* encoder,
-                                         const CommandBufferDescriptor* descriptor);
+class CommandBuffer final : public CommandBufferBase {
+  public:
+    static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+                                     const CommandBufferDescriptor* descriptor);
 
-        MaybeError FillCommands(CommandRecordingContext* commandContext);
+    MaybeError FillCommands(CommandRecordingContext* commandContext);
 
-      private:
-        using CommandBufferBase::CommandBufferBase;
+  private:
+    using CommandBufferBase::CommandBufferBase;
 
-        MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
-        MaybeError EncodeRenderPass(id<MTLRenderCommandEncoder> encoder);
-    };
+    MaybeError EncodeComputePass(CommandRecordingContext* commandContext);
+    MaybeError EncodeRenderPass(id<MTLRenderCommandEncoder> encoder);
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/CommandBufferMTL.mm b/src/dawn/native/metal/CommandBufferMTL.mm
index b8861ff..121198f 100644
--- a/src/dawn/native/metal/CommandBufferMTL.mm
+++ b/src/dawn/native/metal/CommandBufferMTL.mm
@@ -36,1376 +36,1356 @@
 
 namespace dawn::native::metal {
 
-    namespace {
+namespace {
 
-        MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
-            switch (format) {
-                case wgpu::IndexFormat::Uint16:
-                    return MTLIndexTypeUInt16;
-                case wgpu::IndexFormat::Uint32:
-                    return MTLIndexTypeUInt32;
-                case wgpu::IndexFormat::Undefined:
+MTLIndexType MTLIndexFormat(wgpu::IndexFormat format) {
+    switch (format) {
+        case wgpu::IndexFormat::Uint16:
+            return MTLIndexTypeUInt16;
+        case wgpu::IndexFormat::Uint32:
+            return MTLIndexTypeUInt32;
+        case wgpu::IndexFormat::Undefined:
+            UNREACHABLE();
+    }
+}
+
+NSRef<MTLRenderPassDescriptor> CreateMTLRenderPassDescriptor(BeginRenderPassCmd* renderPass) {
+    // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+    NSRef<MTLRenderPassDescriptor> descriptorRef = [MTLRenderPassDescriptor renderPassDescriptor];
+    MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+
+    for (ColorAttachmentIndex attachment :
+         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+        uint8_t i = static_cast<uint8_t>(attachment);
+        auto& attachmentInfo = renderPass->colorAttachments[attachment];
+
+        switch (attachmentInfo.loadOp) {
+            case wgpu::LoadOp::Clear:
+                descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
+                descriptor.colorAttachments[i].clearColor =
+                    MTLClearColorMake(attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
+                                      attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
+                break;
+
+            case wgpu::LoadOp::Load:
+                descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
+                break;
+
+            case wgpu::LoadOp::Undefined:
+                UNREACHABLE();
+                break;
+        }
+
+        auto colorAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
+        descriptor.colorAttachments[i].texture = colorAttachment.texture.Get();
+        descriptor.colorAttachments[i].level = colorAttachment.baseMipLevel;
+        descriptor.colorAttachments[i].slice = colorAttachment.baseArrayLayer;
+
+        bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+        if (hasResolveTarget) {
+            auto resolveAttachment = ToBackend(attachmentInfo.resolveTarget)->GetAttachmentInfo();
+            descriptor.colorAttachments[i].resolveTexture = resolveAttachment.texture.Get();
+            descriptor.colorAttachments[i].resolveLevel = resolveAttachment.baseMipLevel;
+            descriptor.colorAttachments[i].resolveSlice = resolveAttachment.baseArrayLayer;
+
+            switch (attachmentInfo.storeOp) {
+                case wgpu::StoreOp::Store:
+                    descriptor.colorAttachments[i].storeAction =
+                        kMTLStoreActionStoreAndMultisampleResolve;
+                    break;
+                case wgpu::StoreOp::Discard:
+                    descriptor.colorAttachments[i].storeAction = MTLStoreActionMultisampleResolve;
+                    break;
+                case wgpu::StoreOp::Undefined:
                     UNREACHABLE();
+                    break;
+            }
+        } else {
+            switch (attachmentInfo.storeOp) {
+                case wgpu::StoreOp::Store:
+                    descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
+                    break;
+                case wgpu::StoreOp::Discard:
+                    descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
+                    break;
+                case wgpu::StoreOp::Undefined:
+                    UNREACHABLE();
+                    break;
+            }
+        }
+    }
+
+    if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+        auto& attachmentInfo = renderPass->depthStencilAttachment;
+
+        auto depthStencilAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
+        const Format& format = attachmentInfo.view->GetFormat();
+
+        if (format.HasDepth()) {
+            descriptor.depthAttachment.texture = depthStencilAttachment.texture.Get();
+            descriptor.depthAttachment.level = depthStencilAttachment.baseMipLevel;
+            descriptor.depthAttachment.slice = depthStencilAttachment.baseArrayLayer;
+
+            switch (attachmentInfo.depthStoreOp) {
+                case wgpu::StoreOp::Store:
+                    descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+                    break;
+
+                case wgpu::StoreOp::Discard:
+                    descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
+                    break;
+
+                case wgpu::StoreOp::Undefined:
+                    UNREACHABLE();
+                    break;
+            }
+
+            switch (attachmentInfo.depthLoadOp) {
+                case wgpu::LoadOp::Clear:
+                    descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+                    descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
+                    break;
+
+                case wgpu::LoadOp::Load:
+                    descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
+                    break;
+
+                case wgpu::LoadOp::Undefined:
+                    UNREACHABLE();
+                    break;
             }
         }
 
-        NSRef<MTLRenderPassDescriptor> CreateMTLRenderPassDescriptor(
-            BeginRenderPassCmd* renderPass) {
-            // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
-            NSRef<MTLRenderPassDescriptor> descriptorRef =
-                [MTLRenderPassDescriptor renderPassDescriptor];
-            MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+        if (format.HasStencil()) {
+            descriptor.stencilAttachment.texture = depthStencilAttachment.texture.Get();
+            descriptor.stencilAttachment.level = depthStencilAttachment.baseMipLevel;
+            descriptor.stencilAttachment.slice = depthStencilAttachment.baseArrayLayer;
 
-            for (ColorAttachmentIndex attachment :
-                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                uint8_t i = static_cast<uint8_t>(attachment);
-                auto& attachmentInfo = renderPass->colorAttachments[attachment];
+            switch (attachmentInfo.stencilStoreOp) {
+                case wgpu::StoreOp::Store:
+                    descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+                    break;
 
-                switch (attachmentInfo.loadOp) {
-                    case wgpu::LoadOp::Clear:
-                        descriptor.colorAttachments[i].loadAction = MTLLoadActionClear;
-                        descriptor.colorAttachments[i].clearColor = MTLClearColorMake(
-                            attachmentInfo.clearColor.r, attachmentInfo.clearColor.g,
-                            attachmentInfo.clearColor.b, attachmentInfo.clearColor.a);
-                        break;
+                case wgpu::StoreOp::Discard:
+                    descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
+                    break;
 
-                    case wgpu::LoadOp::Load:
-                        descriptor.colorAttachments[i].loadAction = MTLLoadActionLoad;
-                        break;
-
-                    case wgpu::LoadOp::Undefined:
-                        UNREACHABLE();
-                        break;
-                }
-
-                auto colorAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
-                descriptor.colorAttachments[i].texture = colorAttachment.texture.Get();
-                descriptor.colorAttachments[i].level = colorAttachment.baseMipLevel;
-                descriptor.colorAttachments[i].slice = colorAttachment.baseArrayLayer;
-
-                bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
-                if (hasResolveTarget) {
-                    auto resolveAttachment =
-                        ToBackend(attachmentInfo.resolveTarget)->GetAttachmentInfo();
-                    descriptor.colorAttachments[i].resolveTexture = resolveAttachment.texture.Get();
-                    descriptor.colorAttachments[i].resolveLevel = resolveAttachment.baseMipLevel;
-                    descriptor.colorAttachments[i].resolveSlice = resolveAttachment.baseArrayLayer;
-
-                    switch (attachmentInfo.storeOp) {
-                        case wgpu::StoreOp::Store:
-                            descriptor.colorAttachments[i].storeAction =
-                                kMTLStoreActionStoreAndMultisampleResolve;
-                            break;
-                        case wgpu::StoreOp::Discard:
-                            descriptor.colorAttachments[i].storeAction =
-                                MTLStoreActionMultisampleResolve;
-                            break;
-                        case wgpu::StoreOp::Undefined:
-                            UNREACHABLE();
-                            break;
-                    }
-                } else {
-                    switch (attachmentInfo.storeOp) {
-                        case wgpu::StoreOp::Store:
-                            descriptor.colorAttachments[i].storeAction = MTLStoreActionStore;
-                            break;
-                        case wgpu::StoreOp::Discard:
-                            descriptor.colorAttachments[i].storeAction = MTLStoreActionDontCare;
-                            break;
-                        case wgpu::StoreOp::Undefined:
-                            UNREACHABLE();
-                            break;
-                    }
-                }
+                case wgpu::StoreOp::Undefined:
+                    UNREACHABLE();
+                    break;
             }
 
-            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-                auto& attachmentInfo = renderPass->depthStencilAttachment;
+            switch (attachmentInfo.stencilLoadOp) {
+                case wgpu::LoadOp::Clear:
+                    descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+                    descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
+                    break;
 
-                auto depthStencilAttachment = ToBackend(attachmentInfo.view)->GetAttachmentInfo();
-                const Format& format = attachmentInfo.view->GetFormat();
+                case wgpu::LoadOp::Load:
+                    descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
+                    break;
 
-                if (format.HasDepth()) {
-                    descriptor.depthAttachment.texture = depthStencilAttachment.texture.Get();
-                    descriptor.depthAttachment.level = depthStencilAttachment.baseMipLevel;
-                    descriptor.depthAttachment.slice = depthStencilAttachment.baseArrayLayer;
-
-                    switch (attachmentInfo.depthStoreOp) {
-                        case wgpu::StoreOp::Store:
-                            descriptor.depthAttachment.storeAction = MTLStoreActionStore;
-                            break;
-
-                        case wgpu::StoreOp::Discard:
-                            descriptor.depthAttachment.storeAction = MTLStoreActionDontCare;
-                            break;
-
-                        case wgpu::StoreOp::Undefined:
-                            UNREACHABLE();
-                            break;
-                    }
-
-                    switch (attachmentInfo.depthLoadOp) {
-                        case wgpu::LoadOp::Clear:
-                            descriptor.depthAttachment.loadAction = MTLLoadActionClear;
-                            descriptor.depthAttachment.clearDepth = attachmentInfo.clearDepth;
-                            break;
-
-                        case wgpu::LoadOp::Load:
-                            descriptor.depthAttachment.loadAction = MTLLoadActionLoad;
-                            break;
-
-                        case wgpu::LoadOp::Undefined:
-                            UNREACHABLE();
-                            break;
-                    }
-                }
-
-                if (format.HasStencil()) {
-                    descriptor.stencilAttachment.texture = depthStencilAttachment.texture.Get();
-                    descriptor.stencilAttachment.level = depthStencilAttachment.baseMipLevel;
-                    descriptor.stencilAttachment.slice = depthStencilAttachment.baseArrayLayer;
-
-                    switch (attachmentInfo.stencilStoreOp) {
-                        case wgpu::StoreOp::Store:
-                            descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
-                            break;
-
-                        case wgpu::StoreOp::Discard:
-                            descriptor.stencilAttachment.storeAction = MTLStoreActionDontCare;
-                            break;
-
-                        case wgpu::StoreOp::Undefined:
-                            UNREACHABLE();
-                            break;
-                    }
-
-                    switch (attachmentInfo.stencilLoadOp) {
-                        case wgpu::LoadOp::Clear:
-                            descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
-                            descriptor.stencilAttachment.clearStencil = attachmentInfo.clearStencil;
-                            break;
-
-                        case wgpu::LoadOp::Load:
-                            descriptor.stencilAttachment.loadAction = MTLLoadActionLoad;
-                            break;
-
-                        case wgpu::LoadOp::Undefined:
-                            UNREACHABLE();
-                            break;
-                    }
-                }
+                case wgpu::LoadOp::Undefined:
+                    UNREACHABLE();
+                    break;
             }
+        }
+    }
 
-            if (renderPass->occlusionQuerySet.Get() != nullptr) {
-                descriptor.visibilityResultBuffer =
-                    ToBackend(renderPass->occlusionQuerySet.Get())->GetVisibilityBuffer();
-            }
+    if (renderPass->occlusionQuerySet.Get() != nullptr) {
+        descriptor.visibilityResultBuffer =
+            ToBackend(renderPass->occlusionQuerySet.Get())->GetVisibilityBuffer();
+    }
 
-            return descriptorRef;
+    return descriptorRef;
+}
+
+// Metal uses a physical addressing mode which means buffers in the shading language are
+// just pointers to the virtual address of their start. This means there is no way to know
+// the length of a buffer to compute the length() of unsized arrays at the end of storage
+// buffers. Tint implements the length() of unsized arrays by requiring an extra
+// buffer that contains the length of other buffers. This structure that keeps track of the
+// length of storage buffers and can apply them to the reserved "buffer length buffer" when
+// needed for a draw or a dispatch.
+struct StorageBufferLengthTracker {
+    wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
+
+    // The lengths of buffers are stored as 32bit integers because that is the width the
+    // MSL code generated by Tint expects.
+    // UBOs require we align the max buffer count to 4 elements (16 bytes).
+    static constexpr size_t MaxBufferCount = ((kGenericMetalBufferSlots + 3) / 4) * 4;
+    PerStage<std::array<uint32_t, MaxBufferCount>> data;
+
+    void Apply(id<MTLRenderCommandEncoder> render,
+               RenderPipeline* pipeline,
+               bool enableVertexPulling) {
+        wgpu::ShaderStage stagesToApply =
+            dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
+
+        if (stagesToApply == wgpu::ShaderStage::None) {
+            return;
         }
 
-        // Metal uses a physical addressing mode which means buffers in the shading language are
-        // just pointers to the virtual address of their start. This means there is no way to know
-        // the length of a buffer to compute the length() of unsized arrays at the end of storage
-        // buffers. Tint implements the length() of unsized arrays by requiring an extra
-        // buffer that contains the length of other buffers. This structure that keeps track of the
-        // length of storage buffers and can apply them to the reserved "buffer length buffer" when
-        // needed for a draw or a dispatch.
-        struct StorageBufferLengthTracker {
-            wgpu::ShaderStage dirtyStages = wgpu::ShaderStage::None;
+        if (stagesToApply & wgpu::ShaderStage::Vertex) {
+            uint32_t bufferCount =
+                ToBackend(pipeline->GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
 
-            // The lengths of buffers are stored as 32bit integers because that is the width the
-            // MSL code generated by Tint expects.
-            // UBOs require we align the max buffer count to 4 elements (16 bytes).
-            static constexpr size_t MaxBufferCount = ((kGenericMetalBufferSlots + 3) / 4) * 4;
-            PerStage<std::array<uint32_t, MaxBufferCount>> data;
+            if (enableVertexPulling) {
+                bufferCount += pipeline->GetVertexBufferCount();
+            }
 
-            void Apply(id<MTLRenderCommandEncoder> render,
-                       RenderPipeline* pipeline,
-                       bool enableVertexPulling) {
-                wgpu::ShaderStage stagesToApply =
-                    dirtyStages & pipeline->GetStagesRequiringStorageBufferLength();
+            bufferCount = Align(bufferCount, 4);
+            ASSERT(bufferCount <= data[SingleShaderStage::Vertex].size());
 
-                if (stagesToApply == wgpu::ShaderStage::None) {
-                    return;
-                }
+            [render setVertexBytes:data[SingleShaderStage::Vertex].data()
+                            length:sizeof(uint32_t) * bufferCount
+                           atIndex:kBufferLengthBufferSlot];
+        }
 
-                if (stagesToApply & wgpu::ShaderStage::Vertex) {
-                    uint32_t bufferCount = ToBackend(pipeline->GetLayout())
-                                               ->GetBufferBindingCount(SingleShaderStage::Vertex);
+        if (stagesToApply & wgpu::ShaderStage::Fragment) {
+            uint32_t bufferCount = ToBackend(pipeline->GetLayout())
+                                       ->GetBufferBindingCount(SingleShaderStage::Fragment);
+            bufferCount = Align(bufferCount, 4);
+            ASSERT(bufferCount <= data[SingleShaderStage::Fragment].size());
 
-                    if (enableVertexPulling) {
-                        bufferCount += pipeline->GetVertexBufferCount();
+            [render setFragmentBytes:data[SingleShaderStage::Fragment].data()
+                              length:sizeof(uint32_t) * bufferCount
+                             atIndex:kBufferLengthBufferSlot];
+        }
+
+        // Only mark clean stages that were actually applied.
+        dirtyStages ^= stagesToApply;
+    }
+
+    void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
+        if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
+            return;
+        }
+
+        if (!pipeline->RequiresStorageBufferLength()) {
+            return;
+        }
+
+        uint32_t bufferCount =
+            ToBackend(pipeline->GetLayout())->GetBufferBindingCount(SingleShaderStage::Compute);
+        bufferCount = Align(bufferCount, 4);
+        ASSERT(bufferCount <= data[SingleShaderStage::Compute].size());
+
+        [compute setBytes:data[SingleShaderStage::Compute].data()
+                   length:sizeof(uint32_t) * bufferCount
+                  atIndex:kBufferLengthBufferSlot];
+
+        dirtyStages ^= wgpu::ShaderStage::Compute;
+    }
+};
+
+// Keeps track of the dirty bind groups so they can be lazily applied when we know the
+// pipeline state.
+// Bind groups may be inherited because bind groups are packed in the buffer /
+// texture tables in contiguous order.
+class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
+  public:
+    explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
+        : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {}
+
+    template <typename Encoder>
+    void Apply(Encoder encoder) {
+        BeforeApply();
+        for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+            ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
+                           mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
+                           ToBackend(mPipelineLayout));
+        }
+        AfterApply();
+    }
+
+  private:
+    // Handles a call to SetBindGroup, directing the commands to the correct encoder.
+    // There is a single function that takes both encoders to factor code. Other approaches
+    // like templates wouldn't work because the name of methods are different between the
+    // two encoder types.
+    void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
+                            id<MTLComputeCommandEncoder> compute,
+                            BindGroupIndex index,
+                            BindGroup* group,
+                            uint32_t dynamicOffsetCount,
+                            uint64_t* dynamicOffsets,
+                            PipelineLayout* pipelineLayout) {
+        uint32_t currentDynamicBufferIndex = 0;
+
+        // TODO(crbug.com/dawn/854): Maintain buffers and offsets arrays in BindGroup
+        // so that we only have to do one setVertexBuffers and one setFragmentBuffers
+        // call here.
+        for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+             ++bindingIndex) {
+            const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+            bool hasVertStage =
+                bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nullptr;
+            bool hasFragStage =
+                bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nullptr;
+            bool hasComputeStage =
+                bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nullptr;
+
+            uint32_t vertIndex = 0;
+            uint32_t fragIndex = 0;
+            uint32_t computeIndex = 0;
+
+            if (hasVertStage) {
+                vertIndex = pipelineLayout->GetBindingIndexInfo(
+                    SingleShaderStage::Vertex)[index][bindingIndex];
+            }
+            if (hasFragStage) {
+                fragIndex = pipelineLayout->GetBindingIndexInfo(
+                    SingleShaderStage::Fragment)[index][bindingIndex];
+            }
+            if (hasComputeStage) {
+                computeIndex = pipelineLayout->GetBindingIndexInfo(
+                    SingleShaderStage::Compute)[index][bindingIndex];
+            }
+
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer: {
+                    const BufferBinding& binding = group->GetBindingAsBufferBinding(bindingIndex);
+                    const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
+                    NSUInteger offset = binding.offset;
+
+                    // TODO(crbug.com/dawn/854): Record bound buffer status to use
+                    // setBufferOffset to achieve better performance.
+                    if (bindingInfo.buffer.hasDynamicOffset) {
+                        offset += dynamicOffsets[currentDynamicBufferIndex];
+                        currentDynamicBufferIndex++;
                     }
 
-                    bufferCount = Align(bufferCount, 4);
-                    ASSERT(bufferCount <= data[SingleShaderStage::Vertex].size());
-
-                    [render setVertexBytes:data[SingleShaderStage::Vertex].data()
-                                    length:sizeof(uint32_t) * bufferCount
-                                   atIndex:kBufferLengthBufferSlot];
-                }
-
-                if (stagesToApply & wgpu::ShaderStage::Fragment) {
-                    uint32_t bufferCount = ToBackend(pipeline->GetLayout())
-                                               ->GetBufferBindingCount(SingleShaderStage::Fragment);
-                    bufferCount = Align(bufferCount, 4);
-                    ASSERT(bufferCount <= data[SingleShaderStage::Fragment].size());
-
-                    [render setFragmentBytes:data[SingleShaderStage::Fragment].data()
-                                      length:sizeof(uint32_t) * bufferCount
-                                     atIndex:kBufferLengthBufferSlot];
-                }
-
-                // Only mark clean stages that were actually applied.
-                dirtyStages ^= stagesToApply;
-            }
-
-            void Apply(id<MTLComputeCommandEncoder> compute, ComputePipeline* pipeline) {
-                if (!(dirtyStages & wgpu::ShaderStage::Compute)) {
-                    return;
-                }
-
-                if (!pipeline->RequiresStorageBufferLength()) {
-                    return;
-                }
-
-                uint32_t bufferCount = ToBackend(pipeline->GetLayout())
-                                           ->GetBufferBindingCount(SingleShaderStage::Compute);
-                bufferCount = Align(bufferCount, 4);
-                ASSERT(bufferCount <= data[SingleShaderStage::Compute].size());
-
-                [compute setBytes:data[SingleShaderStage::Compute].data()
-                           length:sizeof(uint32_t) * bufferCount
-                          atIndex:kBufferLengthBufferSlot];
-
-                dirtyStages ^= wgpu::ShaderStage::Compute;
-            }
-        };
-
-        // Keeps track of the dirty bind groups so they can be lazily applied when we know the
-        // pipeline state.
-        // Bind groups may be inherited because bind groups are packed in the buffer /
-        // texture tables in contiguous order.
-        class BindGroupTracker : public BindGroupTrackerBase<true, uint64_t> {
-          public:
-            explicit BindGroupTracker(StorageBufferLengthTracker* lengthTracker)
-                : BindGroupTrackerBase(), mLengthTracker(lengthTracker) {
-            }
-
-            template <typename Encoder>
-            void Apply(Encoder encoder) {
-                BeforeApply();
-                for (BindGroupIndex index :
-                     IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
-                    ApplyBindGroup(encoder, index, ToBackend(mBindGroups[index]),
-                                   mDynamicOffsetCounts[index], mDynamicOffsets[index].data(),
-                                   ToBackend(mPipelineLayout));
-                }
-                AfterApply();
-            }
-
-          private:
-            // Handles a call to SetBindGroup, directing the commands to the correct encoder.
-            // There is a single function that takes both encoders to factor code. Other approaches
-            // like templates wouldn't work because the name of methods are different between the
-            // two encoder types.
-            void ApplyBindGroupImpl(id<MTLRenderCommandEncoder> render,
-                                    id<MTLComputeCommandEncoder> compute,
-                                    BindGroupIndex index,
-                                    BindGroup* group,
-                                    uint32_t dynamicOffsetCount,
-                                    uint64_t* dynamicOffsets,
-                                    PipelineLayout* pipelineLayout) {
-                uint32_t currentDynamicBufferIndex = 0;
-
-                // TODO(crbug.com/dawn/854): Maintain buffers and offsets arrays in BindGroup
-                // so that we only have to do one setVertexBuffers and one setFragmentBuffers
-                // call here.
-                for (BindingIndex bindingIndex{0};
-                     bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
-                    const BindingInfo& bindingInfo =
-                        group->GetLayout()->GetBindingInfo(bindingIndex);
-
-                    bool hasVertStage =
-                        bindingInfo.visibility & wgpu::ShaderStage::Vertex && render != nullptr;
-                    bool hasFragStage =
-                        bindingInfo.visibility & wgpu::ShaderStage::Fragment && render != nullptr;
-                    bool hasComputeStage =
-                        bindingInfo.visibility & wgpu::ShaderStage::Compute && compute != nullptr;
-
-                    uint32_t vertIndex = 0;
-                    uint32_t fragIndex = 0;
-                    uint32_t computeIndex = 0;
-
                     if (hasVertStage) {
-                        vertIndex = pipelineLayout->GetBindingIndexInfo(
-                            SingleShaderStage::Vertex)[index][bindingIndex];
+                        mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] = binding.size;
+                        mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
+                        [render setVertexBuffers:&buffer
+                                         offsets:&offset
+                                       withRange:NSMakeRange(vertIndex, 1)];
                     }
                     if (hasFragStage) {
-                        fragIndex = pipelineLayout->GetBindingIndexInfo(
-                            SingleShaderStage::Fragment)[index][bindingIndex];
+                        mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] = binding.size;
+                        mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
+                        [render setFragmentBuffers:&buffer
+                                           offsets:&offset
+                                         withRange:NSMakeRange(fragIndex, 1)];
                     }
                     if (hasComputeStage) {
-                        computeIndex = pipelineLayout->GetBindingIndexInfo(
-                            SingleShaderStage::Compute)[index][bindingIndex];
+                        mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
+                            binding.size;
+                        mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
+                        [compute setBuffers:&buffer
+                                    offsets:&offset
+                                  withRange:NSMakeRange(computeIndex, 1)];
                     }
 
-                    switch (bindingInfo.bindingType) {
-                        case BindingInfoType::Buffer: {
-                            const BufferBinding& binding =
-                                group->GetBindingAsBufferBinding(bindingIndex);
-                            const id<MTLBuffer> buffer = ToBackend(binding.buffer)->GetMTLBuffer();
-                            NSUInteger offset = binding.offset;
-
-                            // TODO(crbug.com/dawn/854): Record bound buffer status to use
-                            // setBufferOffset to achieve better performance.
-                            if (bindingInfo.buffer.hasDynamicOffset) {
-                                offset += dynamicOffsets[currentDynamicBufferIndex];
-                                currentDynamicBufferIndex++;
-                            }
-
-                            if (hasVertStage) {
-                                mLengthTracker->data[SingleShaderStage::Vertex][vertIndex] =
-                                    binding.size;
-                                mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
-                                [render setVertexBuffers:&buffer
-                                                 offsets:&offset
-                                               withRange:NSMakeRange(vertIndex, 1)];
-                            }
-                            if (hasFragStage) {
-                                mLengthTracker->data[SingleShaderStage::Fragment][fragIndex] =
-                                    binding.size;
-                                mLengthTracker->dirtyStages |= wgpu::ShaderStage::Fragment;
-                                [render setFragmentBuffers:&buffer
-                                                   offsets:&offset
-                                                 withRange:NSMakeRange(fragIndex, 1)];
-                            }
-                            if (hasComputeStage) {
-                                mLengthTracker->data[SingleShaderStage::Compute][computeIndex] =
-                                    binding.size;
-                                mLengthTracker->dirtyStages |= wgpu::ShaderStage::Compute;
-                                [compute setBuffers:&buffer
-                                            offsets:&offset
-                                          withRange:NSMakeRange(computeIndex, 1)];
-                            }
-
-                            break;
-                        }
-
-                        case BindingInfoType::Sampler: {
-                            auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
-                            if (hasVertStage) {
-                                [render setVertexSamplerState:sampler->GetMTLSamplerState()
-                                                      atIndex:vertIndex];
-                            }
-                            if (hasFragStage) {
-                                [render setFragmentSamplerState:sampler->GetMTLSamplerState()
-                                                        atIndex:fragIndex];
-                            }
-                            if (hasComputeStage) {
-                                [compute setSamplerState:sampler->GetMTLSamplerState()
-                                                 atIndex:computeIndex];
-                            }
-                            break;
-                        }
-
-                        case BindingInfoType::Texture:
-                        case BindingInfoType::StorageTexture: {
-                            auto textureView =
-                                ToBackend(group->GetBindingAsTextureView(bindingIndex));
-                            if (hasVertStage) {
-                                [render setVertexTexture:textureView->GetMTLTexture()
-                                                 atIndex:vertIndex];
-                            }
-                            if (hasFragStage) {
-                                [render setFragmentTexture:textureView->GetMTLTexture()
-                                                   atIndex:fragIndex];
-                            }
-                            if (hasComputeStage) {
-                                [compute setTexture:textureView->GetMTLTexture()
-                                            atIndex:computeIndex];
-                            }
-                            break;
-                        }
-
-                        case BindingInfoType::ExternalTexture:
-                            UNREACHABLE();
-                    }
-                }
-            }
-
-            template <typename... Args>
-            void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
-                ApplyBindGroupImpl(encoder, nullptr, std::forward<Args&&>(args)...);
-            }
-
-            template <typename... Args>
-            void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
-                ApplyBindGroupImpl(nullptr, encoder, std::forward<Args&&>(args)...);
-            }
-
-            StorageBufferLengthTracker* mLengthTracker;
-        };
-
-        // Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
-        // all the relevant state.
-        class VertexBufferTracker {
-          public:
-            explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
-                : mLengthTracker(lengthTracker) {
-            }
-
-            void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset) {
-                mVertexBuffers[slot] = buffer->GetMTLBuffer();
-                mVertexBufferOffsets[slot] = offset;
-
-                ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
-                mVertexBufferBindingSizes[slot] =
-                    static_cast<uint32_t>(buffer->GetAllocatedSize() - offset);
-                mDirtyVertexBuffers.set(slot);
-            }
-
-            void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
-                // When a new pipeline is bound we must set all the vertex buffers again because
-                // they might have been offset by the pipeline layout, and they might be packed
-                // differently from the previous pipeline.
-                mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
-            }
-
-            void Apply(id<MTLRenderCommandEncoder> encoder,
-                       RenderPipeline* pipeline,
-                       bool enableVertexPulling) {
-                const auto& vertexBuffersToApply =
-                    mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
-
-                for (VertexBufferSlot slot : IterateBitSet(vertexBuffersToApply)) {
-                    uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(slot);
-
-                    if (enableVertexPulling) {
-                        // Insert lengths for vertex buffers bound as storage buffers
-                        mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
-                            mVertexBufferBindingSizes[slot];
-                        mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
-                    }
-
-                    [encoder setVertexBuffers:&mVertexBuffers[slot]
-                                      offsets:&mVertexBufferOffsets[slot]
-                                    withRange:NSMakeRange(metalIndex, 1)];
-                }
-
-                mDirtyVertexBuffers.reset();
-            }
-
-          private:
-            // All the indices in these arrays are Dawn vertex buffer indices
-            ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
-            ityp::array<VertexBufferSlot, id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
-            ityp::array<VertexBufferSlot, NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
-            ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
-
-            StorageBufferLengthTracker* mLengthTracker;
-        };
-
-    }  // anonymous namespace
-
-    void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
-                                   id<MTLBuffer> mtlBuffer,
-                                   uint64_t bufferSize,
-                                   uint64_t offset,
-                                   uint32_t bytesPerRow,
-                                   uint32_t rowsPerImage,
-                                   Texture* texture,
-                                   uint32_t mipLevel,
-                                   const Origin3D& origin,
-                                   Aspect aspect,
-                                   const Extent3D& copySize) {
-        TextureBufferCopySplit splitCopies =
-            ComputeTextureBufferCopySplit(texture, mipLevel, origin, copySize, bufferSize, offset,
-                                          bytesPerRow, rowsPerImage, aspect);
-
-        MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
-
-        for (const auto& copyInfo : splitCopies) {
-            uint64_t bufferOffset = copyInfo.bufferOffset;
-            switch (texture->GetDimension()) {
-                case wgpu::TextureDimension::e1D: {
-                    [commandContext->EnsureBlit()
-                             copyFromBuffer:mtlBuffer
-                               sourceOffset:bufferOffset
-                          sourceBytesPerRow:copyInfo.bytesPerRow
-                        sourceBytesPerImage:copyInfo.bytesPerImage
-                                 sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1, 1)
-                                  toTexture:texture->GetMTLTexture()
-                           destinationSlice:0
-                           destinationLevel:mipLevel
-                          destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0, 0)
-                                    options:blitOption];
                     break;
                 }
-                case wgpu::TextureDimension::e2D: {
-                    const MTLOrigin textureOrigin =
-                        MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
-                    const MTLSize copyExtent =
-                        MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
 
-                    for (uint32_t z = copyInfo.textureOrigin.z;
-                         z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
-                         ++z) {
-                        [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
-                                                        sourceOffset:bufferOffset
-                                                   sourceBytesPerRow:copyInfo.bytesPerRow
-                                                 sourceBytesPerImage:copyInfo.bytesPerImage
-                                                          sourceSize:copyExtent
-                                                           toTexture:texture->GetMTLTexture()
-                                                    destinationSlice:z
-                                                    destinationLevel:mipLevel
-                                                   destinationOrigin:textureOrigin
-                                                             options:blitOption];
-                        bufferOffset += copyInfo.bytesPerImage;
+                case BindingInfoType::Sampler: {
+                    auto sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+                    if (hasVertStage) {
+                        [render setVertexSamplerState:sampler->GetMTLSamplerState()
+                                              atIndex:vertIndex];
+                    }
+                    if (hasFragStage) {
+                        [render setFragmentSamplerState:sampler->GetMTLSamplerState()
+                                                atIndex:fragIndex];
+                    }
+                    if (hasComputeStage) {
+                        [compute setSamplerState:sampler->GetMTLSamplerState()
+                                         atIndex:computeIndex];
                     }
                     break;
                 }
-                case wgpu::TextureDimension::e3D: {
-                    [commandContext->EnsureBlit()
-                             copyFromBuffer:mtlBuffer
-                               sourceOffset:bufferOffset
-                          sourceBytesPerRow:copyInfo.bytesPerRow
-                        sourceBytesPerImage:copyInfo.bytesPerImage
-                                 sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
-                                                        copyInfo.copyExtent.height,
-                                                        copyInfo.copyExtent.depthOrArrayLayers)
-                                  toTexture:texture->GetMTLTexture()
-                           destinationSlice:0
-                           destinationLevel:mipLevel
-                          destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
-                                                          copyInfo.textureOrigin.y,
-                                                          copyInfo.textureOrigin.z)
-                                    options:blitOption];
+
+                case BindingInfoType::Texture:
+                case BindingInfoType::StorageTexture: {
+                    auto textureView = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+                    if (hasVertStage) {
+                        [render setVertexTexture:textureView->GetMTLTexture() atIndex:vertIndex];
+                    }
+                    if (hasFragStage) {
+                        [render setFragmentTexture:textureView->GetMTLTexture() atIndex:fragIndex];
+                    }
+                    if (hasComputeStage) {
+                        [compute setTexture:textureView->GetMTLTexture() atIndex:computeIndex];
+                    }
                     break;
                 }
+
+                case BindingInfoType::ExternalTexture:
+                    UNREACHABLE();
             }
         }
     }
 
-    // static
-    Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
-                                             const CommandBufferDescriptor* descriptor) {
-        return AcquireRef(new CommandBuffer(encoder, descriptor));
+    template <typename... Args>
+    void ApplyBindGroup(id<MTLRenderCommandEncoder> encoder, Args&&... args) {
+        ApplyBindGroupImpl(encoder, nullptr, std::forward<Args&&>(args)...);
     }
 
-    MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
-        size_t nextComputePassNumber = 0;
-        size_t nextRenderPassNumber = 0;
+    template <typename... Args>
+    void ApplyBindGroup(id<MTLComputeCommandEncoder> encoder, Args&&... args) {
+        ApplyBindGroupImpl(nullptr, encoder, std::forward<Args&&>(args)...);
+    }
 
-        auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
-                                     CommandRecordingContext* commandContext) {
-            for (size_t i = 0; i < scope.textures.size(); ++i) {
-                Texture* texture = ToBackend(scope.textures[i]);
+    StorageBufferLengthTracker* mLengthTracker;
+};
 
-                // Clear subresources that are not render attachments. Render attachments will be
-                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
-                // subresource has not been initialized before the render pass.
-                scope.textureUsages[i].Iterate(
-                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
-                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                            texture->EnsureSubresourceContentInitialized(commandContext, range);
-                        }
-                    });
+// Keeps track of the dirty vertex buffer values so they can be lazily applied when we know
+// all the relevant state.
+class VertexBufferTracker {
+  public:
+    explicit VertexBufferTracker(StorageBufferLengthTracker* lengthTracker)
+        : mLengthTracker(lengthTracker) {}
+
+    void OnSetVertexBuffer(VertexBufferSlot slot, Buffer* buffer, uint64_t offset) {
+        mVertexBuffers[slot] = buffer->GetMTLBuffer();
+        mVertexBufferOffsets[slot] = offset;
+
+        ASSERT(buffer->GetSize() < std::numeric_limits<uint32_t>::max());
+        mVertexBufferBindingSizes[slot] =
+            static_cast<uint32_t>(buffer->GetAllocatedSize() - offset);
+        mDirtyVertexBuffers.set(slot);
+    }
+
+    void OnSetPipeline(RenderPipeline* lastPipeline, RenderPipeline* pipeline) {
+        // When a new pipeline is bound we must set all the vertex buffers again because
+        // they might have been offset by the pipeline layout, and they might be packed
+        // differently from the previous pipeline.
+        mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+    }
+
+    void Apply(id<MTLRenderCommandEncoder> encoder,
+               RenderPipeline* pipeline,
+               bool enableVertexPulling) {
+        const auto& vertexBuffersToApply =
+            mDirtyVertexBuffers & pipeline->GetVertexBufferSlotsUsed();
+
+        for (VertexBufferSlot slot : IterateBitSet(vertexBuffersToApply)) {
+            uint32_t metalIndex = pipeline->GetMtlVertexBufferIndex(slot);
+
+            if (enableVertexPulling) {
+                // Insert lengths for vertex buffers bound as storage buffers
+                mLengthTracker->data[SingleShaderStage::Vertex][metalIndex] =
+                    mVertexBufferBindingSizes[slot];
+                mLengthTracker->dirtyStages |= wgpu::ShaderStage::Vertex;
             }
-            for (BufferBase* bufferBase : scope.buffers) {
-                ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
+
+            [encoder setVertexBuffers:&mVertexBuffers[slot]
+                              offsets:&mVertexBufferOffsets[slot]
+                            withRange:NSMakeRange(metalIndex, 1)];
+        }
+
+        mDirtyVertexBuffers.reset();
+    }
+
+  private:
+    // All the indices in these arrays are Dawn vertex buffer indices
+    ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+    ityp::array<VertexBufferSlot, id<MTLBuffer>, kMaxVertexBuffers> mVertexBuffers;
+    ityp::array<VertexBufferSlot, NSUInteger, kMaxVertexBuffers> mVertexBufferOffsets;
+    ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mVertexBufferBindingSizes;
+
+    StorageBufferLengthTracker* mLengthTracker;
+};
+
+}  // anonymous namespace
+
+void RecordCopyBufferToTexture(CommandRecordingContext* commandContext,
+                               id<MTLBuffer> mtlBuffer,
+                               uint64_t bufferSize,
+                               uint64_t offset,
+                               uint32_t bytesPerRow,
+                               uint32_t rowsPerImage,
+                               Texture* texture,
+                               uint32_t mipLevel,
+                               const Origin3D& origin,
+                               Aspect aspect,
+                               const Extent3D& copySize) {
+    TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+        texture, mipLevel, origin, copySize, bufferSize, offset, bytesPerRow, rowsPerImage, aspect);
+
+    MTLBlitOption blitOption = ComputeMTLBlitOption(texture->GetFormat(), aspect);
+
+    for (const auto& copyInfo : splitCopies) {
+        uint64_t bufferOffset = copyInfo.bufferOffset;
+        switch (texture->GetDimension()) {
+            case wgpu::TextureDimension::e1D: {
+                [commandContext->EnsureBlit()
+                         copyFromBuffer:mtlBuffer
+                           sourceOffset:bufferOffset
+                      sourceBytesPerRow:copyInfo.bytesPerRow
+                    sourceBytesPerImage:copyInfo.bytesPerImage
+                             sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1, 1)
+                              toTexture:texture->GetMTLTexture()
+                       destinationSlice:0
+                       destinationLevel:mipLevel
+                      destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0, 0)
+                                options:blitOption];
+                break;
             }
-        };
+            case wgpu::TextureDimension::e2D: {
+                const MTLOrigin textureOrigin =
+                    MTLOriginMake(copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+                const MTLSize copyExtent =
+                    MTLSizeMake(copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
 
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::BeginComputePass: {
-                    mCommands.NextCommand<BeginComputePassCmd>();
-
-                    for (const SyncScopeResourceUsage& scope :
-                         GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
-                        LazyClearSyncScope(scope, commandContext);
-                    }
-                    commandContext->EndBlit();
-
-                    DAWN_TRY(EncodeComputePass(commandContext));
-
-                    nextComputePassNumber++;
-                    break;
-                }
-
-                case Command::BeginRenderPass: {
-                    BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
-
-                    LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
-                                       commandContext);
-                    commandContext->EndBlit();
-
-                    LazyClearRenderPassAttachments(cmd);
-                    NSRef<MTLRenderPassDescriptor> descriptor = CreateMTLRenderPassDescriptor(cmd);
-                    DAWN_TRY(EncodeMetalRenderPass(
-                        ToBackend(GetDevice()), commandContext, descriptor.Get(), cmd->width,
-                        cmd->height, [this](id<MTLRenderCommandEncoder> encoder) -> MaybeError {
-                            return this->EncodeRenderPass(encoder);
-                        }));
-
-                    nextRenderPassNumber++;
-                    break;
-                }
-
-                case Command::CopyBufferToBuffer: {
-                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
-                    if (copy->size == 0) {
-                        // Skip no-op copies.
-                        break;
-                    }
-
-                    ToBackend(copy->source)->EnsureDataInitialized(commandContext);
-                    ToBackend(copy->destination)
-                        ->EnsureDataInitializedAsDestination(commandContext,
-                                                             copy->destinationOffset, copy->size);
-
-                    [commandContext->EnsureBlit()
-                           copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
-                             sourceOffset:copy->sourceOffset
-                                 toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
-                        destinationOffset:copy->destinationOffset
-                                     size:copy->size];
-                    break;
-                }
-
-                case Command::CopyBufferToTexture: {
-                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    auto& src = copy->source;
-                    auto& dst = copy->destination;
-                    auto& copySize = copy->copySize;
-                    Buffer* buffer = ToBackend(src.buffer.Get());
-                    Texture* texture = ToBackend(dst.texture.Get());
-
-                    buffer->EnsureDataInitialized(commandContext);
-                    EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
-
-                    RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(),
-                                              buffer->GetSize(), src.offset, src.bytesPerRow,
-                                              src.rowsPerImage, texture, dst.mipLevel, dst.origin,
-                                              dst.aspect, copySize);
-                    break;
-                }
-
-                case Command::CopyTextureToBuffer: {
-                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    auto& src = copy->source;
-                    auto& dst = copy->destination;
-                    auto& copySize = copy->copySize;
-                    Texture* texture = ToBackend(src.texture.Get());
-                    Buffer* buffer = ToBackend(dst.buffer.Get());
-
-                    buffer->EnsureDataInitializedAsDestination(commandContext, copy);
-
-                    texture->EnsureSubresourceContentInitialized(
-                        commandContext, GetSubresourcesAffectedByCopy(src, copySize));
-
-                    TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
-                        texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
-                        dst.bytesPerRow, dst.rowsPerImage, src.aspect);
-
-                    for (const auto& copyInfo : splitCopies) {
-                        MTLBlitOption blitOption =
-                            ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
-                        uint64_t bufferOffset = copyInfo.bufferOffset;
-
-                        switch (texture->GetDimension()) {
-                            case wgpu::TextureDimension::e1D: {
-                                [commandContext->EnsureBlit()
-                                             copyFromTexture:texture->GetMTLTexture()
-                                                 sourceSlice:0
-                                                 sourceLevel:src.mipLevel
-                                                sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
-                                                                           0, 0)
-                                                  sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
-                                                                         1, 1)
-                                                    toBuffer:buffer->GetMTLBuffer()
-                                           destinationOffset:bufferOffset
-                                      destinationBytesPerRow:copyInfo.bytesPerRow
-                                    destinationBytesPerImage:copyInfo.bytesPerImage
-                                                     options:blitOption];
-                                break;
-                            }
-
-                            case wgpu::TextureDimension::e2D: {
-                                const MTLOrigin textureOrigin = MTLOriginMake(
-                                    copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
-                                const MTLSize copyExtent = MTLSizeMake(
-                                    copyInfo.copyExtent.width, copyInfo.copyExtent.height, 1);
-
-                                for (uint32_t z = copyInfo.textureOrigin.z;
-                                     z < copyInfo.textureOrigin.z +
-                                             copyInfo.copyExtent.depthOrArrayLayers;
-                                     ++z) {
-                                    [commandContext->EnsureBlit()
-                                                 copyFromTexture:texture->GetMTLTexture()
-                                                     sourceSlice:z
-                                                     sourceLevel:src.mipLevel
-                                                    sourceOrigin:textureOrigin
+                for (uint32_t z = copyInfo.textureOrigin.z;
+                     z < copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers; ++z) {
+                    [commandContext->EnsureBlit() copyFromBuffer:mtlBuffer
+                                                    sourceOffset:bufferOffset
+                                               sourceBytesPerRow:copyInfo.bytesPerRow
+                                             sourceBytesPerImage:copyInfo.bytesPerImage
                                                       sourceSize:copyExtent
-                                                        toBuffer:buffer->GetMTLBuffer()
-                                               destinationOffset:bufferOffset
-                                          destinationBytesPerRow:copyInfo.bytesPerRow
-                                        destinationBytesPerImage:copyInfo.bytesPerImage
+                                                       toTexture:texture->GetMTLTexture()
+                                                destinationSlice:z
+                                                destinationLevel:mipLevel
+                                               destinationOrigin:textureOrigin
                                                          options:blitOption];
-                                    bufferOffset += copyInfo.bytesPerImage;
-                                }
-                                break;
-                            }
-                            case wgpu::TextureDimension::e3D: {
+                    bufferOffset += copyInfo.bytesPerImage;
+                }
+                break;
+            }
+            case wgpu::TextureDimension::e3D: {
+                [commandContext->EnsureBlit()
+                         copyFromBuffer:mtlBuffer
+                           sourceOffset:bufferOffset
+                      sourceBytesPerRow:copyInfo.bytesPerRow
+                    sourceBytesPerImage:copyInfo.bytesPerImage
+                             sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
+                                                    copyInfo.copyExtent.height,
+                                                    copyInfo.copyExtent.depthOrArrayLayers)
+                              toTexture:texture->GetMTLTexture()
+                       destinationSlice:0
+                       destinationLevel:mipLevel
+                      destinationOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+                                                      copyInfo.textureOrigin.y,
+                                                      copyInfo.textureOrigin.z)
+                                options:blitOption];
+                break;
+            }
+        }
+    }
+}
+
+// static
+Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor) {
+    return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+
+MaybeError CommandBuffer::FillCommands(CommandRecordingContext* commandContext) {
+    size_t nextComputePassNumber = 0;
+    size_t nextRenderPassNumber = 0;
+
+    auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope,
+                                 CommandRecordingContext* commandContext) {
+        for (size_t i = 0; i < scope.textures.size(); ++i) {
+            Texture* texture = ToBackend(scope.textures[i]);
+
+            // Clear subresources that are not render attachments. Render attachments will be
+            // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+            // subresource has not been initialized before the render pass.
+            scope.textureUsages[i].Iterate(
+                [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                    if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                        texture->EnsureSubresourceContentInitialized(commandContext, range);
+                    }
+                });
+        }
+        for (BufferBase* bufferBase : scope.buffers) {
+            ToBackend(bufferBase)->EnsureDataInitialized(commandContext);
+        }
+    };
+
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::BeginComputePass: {
+                mCommands.NextCommand<BeginComputePassCmd>();
+
+                for (const SyncScopeResourceUsage& scope :
+                     GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+                    LazyClearSyncScope(scope, commandContext);
+                }
+                commandContext->EndBlit();
+
+                DAWN_TRY(EncodeComputePass(commandContext));
+
+                nextComputePassNumber++;
+                break;
+            }
+
+            case Command::BeginRenderPass: {
+                BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+
+                LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber],
+                                   commandContext);
+                commandContext->EndBlit();
+
+                LazyClearRenderPassAttachments(cmd);
+                NSRef<MTLRenderPassDescriptor> descriptor = CreateMTLRenderPassDescriptor(cmd);
+                DAWN_TRY(EncodeMetalRenderPass(
+                    ToBackend(GetDevice()), commandContext, descriptor.Get(), cmd->width,
+                    cmd->height, [this](id<MTLRenderCommandEncoder> encoder) -> MaybeError {
+                        return this->EncodeRenderPass(encoder);
+                    }));
+
+                nextRenderPassNumber++;
+                break;
+            }
+
+            case Command::CopyBufferToBuffer: {
+                CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                if (copy->size == 0) {
+                    // Skip no-op copies.
+                    break;
+                }
+
+                ToBackend(copy->source)->EnsureDataInitialized(commandContext);
+                ToBackend(copy->destination)
+                    ->EnsureDataInitializedAsDestination(commandContext, copy->destinationOffset,
+                                                         copy->size);
+
+                [commandContext->EnsureBlit()
+                       copyFromBuffer:ToBackend(copy->source)->GetMTLBuffer()
+                         sourceOffset:copy->sourceOffset
+                             toBuffer:ToBackend(copy->destination)->GetMTLBuffer()
+                    destinationOffset:copy->destinationOffset
+                                 size:copy->size];
+                break;
+            }
+
+            case Command::CopyBufferToTexture: {
+                CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                auto& src = copy->source;
+                auto& dst = copy->destination;
+                auto& copySize = copy->copySize;
+                Buffer* buffer = ToBackend(src.buffer.Get());
+                Texture* texture = ToBackend(dst.texture.Get());
+
+                buffer->EnsureDataInitialized(commandContext);
+                EnsureDestinationTextureInitialized(commandContext, texture, dst, copySize);
+
+                RecordCopyBufferToTexture(commandContext, buffer->GetMTLBuffer(), buffer->GetSize(),
+                                          src.offset, src.bytesPerRow, src.rowsPerImage, texture,
+                                          dst.mipLevel, dst.origin, dst.aspect, copySize);
+                break;
+            }
+
+            case Command::CopyTextureToBuffer: {
+                CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                auto& src = copy->source;
+                auto& dst = copy->destination;
+                auto& copySize = copy->copySize;
+                Texture* texture = ToBackend(src.texture.Get());
+                Buffer* buffer = ToBackend(dst.buffer.Get());
+
+                buffer->EnsureDataInitializedAsDestination(commandContext, copy);
+
+                texture->EnsureSubresourceContentInitialized(
+                    commandContext, GetSubresourcesAffectedByCopy(src, copySize));
+
+                TextureBufferCopySplit splitCopies = ComputeTextureBufferCopySplit(
+                    texture, src.mipLevel, src.origin, copySize, buffer->GetSize(), dst.offset,
+                    dst.bytesPerRow, dst.rowsPerImage, src.aspect);
+
+                for (const auto& copyInfo : splitCopies) {
+                    MTLBlitOption blitOption =
+                        ComputeMTLBlitOption(texture->GetFormat(), src.aspect);
+                    uint64_t bufferOffset = copyInfo.bufferOffset;
+
+                    switch (texture->GetDimension()) {
+                        case wgpu::TextureDimension::e1D: {
+                            [commandContext->EnsureBlit()
+                                         copyFromTexture:texture->GetMTLTexture()
+                                             sourceSlice:0
+                                             sourceLevel:src.mipLevel
+                                            sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x, 0,
+                                                                       0)
+                                              sourceSize:MTLSizeMake(copyInfo.copyExtent.width, 1,
+                                                                     1)
+                                                toBuffer:buffer->GetMTLBuffer()
+                                       destinationOffset:bufferOffset
+                                  destinationBytesPerRow:copyInfo.bytesPerRow
+                                destinationBytesPerImage:copyInfo.bytesPerImage
+                                                 options:blitOption];
+                            break;
+                        }
+
+                        case wgpu::TextureDimension::e2D: {
+                            const MTLOrigin textureOrigin = MTLOriginMake(
+                                copyInfo.textureOrigin.x, copyInfo.textureOrigin.y, 0);
+                            const MTLSize copyExtent = MTLSizeMake(copyInfo.copyExtent.width,
+                                                                   copyInfo.copyExtent.height, 1);
+
+                            for (uint32_t z = copyInfo.textureOrigin.z;
+                                 z <
+                                 copyInfo.textureOrigin.z + copyInfo.copyExtent.depthOrArrayLayers;
+                                 ++z) {
                                 [commandContext->EnsureBlit()
                                              copyFromTexture:texture->GetMTLTexture()
-                                                 sourceSlice:0
+                                                 sourceSlice:z
                                                  sourceLevel:src.mipLevel
-                                                sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
-                                                                           copyInfo.textureOrigin.y,
-                                                                           copyInfo.textureOrigin.z)
-                                                  sourceSize:MTLSizeMake(copyInfo.copyExtent.width,
-                                                                         copyInfo.copyExtent.height,
-                                                                         copyInfo.copyExtent
-                                                                             .depthOrArrayLayers)
+                                                sourceOrigin:textureOrigin
+                                                  sourceSize:copyExtent
                                                     toBuffer:buffer->GetMTLBuffer()
                                            destinationOffset:bufferOffset
                                       destinationBytesPerRow:copyInfo.bytesPerRow
                                     destinationBytesPerImage:copyInfo.bytesPerImage
                                                      options:blitOption];
-                                break;
+                                bufferOffset += copyInfo.bytesPerImage;
                             }
+                            break;
                         }
-                    }
-                    break;
-                }
-
-                case Command::CopyTextureToTexture: {
-                    CopyTextureToTextureCmd* copy =
-                        mCommands.NextCommand<CopyTextureToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    Texture* srcTexture = ToBackend(copy->source.texture.Get());
-                    Texture* dstTexture = ToBackend(copy->destination.texture.Get());
-
-                    srcTexture->EnsureSubresourceContentInitialized(
-                        commandContext,
-                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
-                    EnsureDestinationTextureInitialized(commandContext, dstTexture,
-                                                        copy->destination, copy->copySize);
-
-                    const MTLSize sizeOneSlice =
-                        MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
-
-                    uint32_t sourceLayer = 0;
-                    uint32_t sourceOriginZ = 0;
-
-                    uint32_t destinationLayer = 0;
-                    uint32_t destinationOriginZ = 0;
-
-                    uint32_t* sourceZPtr;
-                    if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
-                        sourceZPtr = &sourceLayer;
-                    } else {
-                        sourceZPtr = &sourceOriginZ;
-                    }
-
-                    uint32_t* destinationZPtr;
-                    if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
-                        destinationZPtr = &destinationLayer;
-                    } else {
-                        destinationZPtr = &destinationOriginZ;
-                    }
-
-                    // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 1D or 3D.
-                    for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
-                        *sourceZPtr = copy->source.origin.z + z;
-                        *destinationZPtr = copy->destination.origin.z + z;
-
-                        // Hold the ref until out of scope
-                        NSPRef<id<MTLTexture>> dstTextureView =
-                            dstTexture->CreateFormatView(srcTexture->GetFormat().format);
-
-                        [commandContext->EnsureBlit()
-                              copyFromTexture:srcTexture->GetMTLTexture()
-                                  sourceSlice:sourceLayer
-                                  sourceLevel:copy->source.mipLevel
-                                 sourceOrigin:MTLOriginMake(copy->source.origin.x,
-                                                            copy->source.origin.y, sourceOriginZ)
-                                   sourceSize:sizeOneSlice
-                                    toTexture:dstTextureView.Get()
-                             destinationSlice:destinationLayer
-                             destinationLevel:copy->destination.mipLevel
-                            destinationOrigin:MTLOriginMake(copy->destination.origin.x,
-                                                            copy->destination.origin.y,
-                                                            destinationOriginZ)];
-                    }
-                    break;
-                }
-
-                case Command::ClearBuffer: {
-                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
-                    if (cmd->size == 0) {
-                        // Skip no-op copies.
-                        break;
-                    }
-                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
-
-                    bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
-                        commandContext, cmd->offset, cmd->size);
-
-                    if (!clearedToZero) {
-                        [commandContext->EnsureBlit() fillBuffer:dstBuffer->GetMTLBuffer()
-                                                           range:NSMakeRange(cmd->offset, cmd->size)
-                                                           value:0u];
-                    }
-
-                    break;
-                }
-
-                case Command::ResolveQuerySet: {
-                    ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-                    Buffer* destination = ToBackend(cmd->destination.Get());
-
-                    destination->EnsureDataInitializedAsDestination(
-                        commandContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
-
-                    if (querySet->GetQueryType() == wgpu::QueryType::Occlusion) {
-                        [commandContext->EnsureBlit()
-                               copyFromBuffer:querySet->GetVisibilityBuffer()
-                                 sourceOffset:NSUInteger(cmd->firstQuery * sizeof(uint64_t))
-                                     toBuffer:destination->GetMTLBuffer()
-                            destinationOffset:NSUInteger(cmd->destinationOffset)
-                                         size:NSUInteger(cmd->queryCount * sizeof(uint64_t))];
-                    } else {
-                        if (@available(macos 10.15, iOS 14.0, *)) {
+                        case wgpu::TextureDimension::e3D: {
                             [commandContext->EnsureBlit()
-                                  resolveCounters:querySet->GetCounterSampleBuffer()
-                                          inRange:NSMakeRange(cmd->firstQuery, cmd->queryCount)
-                                destinationBuffer:destination->GetMTLBuffer()
-                                destinationOffset:NSUInteger(cmd->destinationOffset)];
-                        } else {
-                            UNREACHABLE();
+                                         copyFromTexture:texture->GetMTLTexture()
+                                             sourceSlice:0
+                                             sourceLevel:src.mipLevel
+                                            sourceOrigin:MTLOriginMake(copyInfo.textureOrigin.x,
+                                                                       copyInfo.textureOrigin.y,
+                                                                       copyInfo.textureOrigin.z)
+                                              sourceSize:MTLSizeMake(
+                                                             copyInfo.copyExtent.width,
+                                                             copyInfo.copyExtent.height,
+                                                             copyInfo.copyExtent.depthOrArrayLayers)
+                                                toBuffer:buffer->GetMTLBuffer()
+                                       destinationOffset:bufferOffset
+                                  destinationBytesPerRow:copyInfo.bytesPerRow
+                                destinationBytesPerImage:copyInfo.bytesPerImage
+                                                 options:blitOption];
+                            break;
                         }
                     }
-                    break;
+                }
+                break;
+            }
+
+            case Command::CopyTextureToTexture: {
+                CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                Texture* srcTexture = ToBackend(copy->source.texture.Get());
+                Texture* dstTexture = ToBackend(copy->destination.texture.Get());
+
+                srcTexture->EnsureSubresourceContentInitialized(
+                    commandContext, GetSubresourcesAffectedByCopy(copy->source, copy->copySize));
+                EnsureDestinationTextureInitialized(commandContext, dstTexture, copy->destination,
+                                                    copy->copySize);
+
+                const MTLSize sizeOneSlice =
+                    MTLSizeMake(copy->copySize.width, copy->copySize.height, 1);
+
+                uint32_t sourceLayer = 0;
+                uint32_t sourceOriginZ = 0;
+
+                uint32_t destinationLayer = 0;
+                uint32_t destinationOriginZ = 0;
+
+                uint32_t* sourceZPtr;
+                if (srcTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    sourceZPtr = &sourceLayer;
+                } else {
+                    sourceZPtr = &sourceOriginZ;
                 }
 
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
-                    if (@available(macos 10.15, iOS 14.0, *)) {
-                        [commandContext->EnsureBlit()
-                            sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
-                                     atSampleIndex:NSUInteger(cmd->queryIndex)
-                                       withBarrier:YES];
-                    } else {
-                        UNREACHABLE();
-                    }
-                    break;
+                uint32_t* destinationZPtr;
+                if (dstTexture->GetDimension() == wgpu::TextureDimension::e2D) {
+                    destinationZPtr = &destinationLayer;
+                } else {
+                    destinationZPtr = &destinationOriginZ;
                 }
 
-                case Command::InsertDebugMarker: {
-                    // MTLCommandBuffer does not implement insertDebugSignpost
-                    SkipCommand(&mCommands, type);
-                    break;
-                }
+                // TODO(crbug.com/dawn/782): Do a single T2T copy if both are 1D or 3D.
+                for (uint32_t z = 0; z < copy->copySize.depthOrArrayLayers; ++z) {
+                    *sourceZPtr = copy->source.origin.z + z;
+                    *destinationZPtr = copy->destination.origin.z + z;
 
-                case Command::PopDebugGroup: {
-                    mCommands.NextCommand<PopDebugGroupCmd>();
-
-                    if (@available(macos 10.13, *)) {
-                        [commandContext->GetCommands() popDebugGroup];
-                    }
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
-                    char* label = mCommands.NextData<char>(cmd->length + 1);
-
-                    if (@available(macos 10.13, *)) {
-                        NSRef<NSString> mtlLabel =
-                            AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
-                        [commandContext->GetCommands() pushDebugGroup:mtlLabel.Get()];
-                    }
-
-                    break;
-                }
-
-                case Command::WriteBuffer: {
-                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
-                    const uint64_t offset = write->offset;
-                    const uint64_t size = write->size;
-                    if (size == 0) {
-                        continue;
-                    }
-
-                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
-                    uint8_t* data = mCommands.NextData<uint8_t>(size);
-                    Device* device = ToBackend(GetDevice());
-
-                    UploadHandle uploadHandle;
-                    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                                      size, device->GetPendingCommandSerial(),
-                                                      kCopyBufferToBufferOffsetAlignment));
-                    ASSERT(uploadHandle.mappedBuffer != nullptr);
-                    memcpy(uploadHandle.mappedBuffer, data, size);
-
-                    dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
+                    // Hold the ref until out of scope
+                    NSPRef<id<MTLTexture>> dstTextureView =
+                        dstTexture->CreateFormatView(srcTexture->GetFormat().format);
 
                     [commandContext->EnsureBlit()
-                           copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
-                             sourceOffset:uploadHandle.startOffset
-                                 toBuffer:dstBuffer->GetMTLBuffer()
-                        destinationOffset:offset
-                                     size:size];
-                    break;
+                          copyFromTexture:srcTexture->GetMTLTexture()
+                              sourceSlice:sourceLayer
+                              sourceLevel:copy->source.mipLevel
+                             sourceOrigin:MTLOriginMake(copy->source.origin.x,
+                                                        copy->source.origin.y, sourceOriginZ)
+                               sourceSize:sizeOneSlice
+                                toTexture:dstTextureView.Get()
+                         destinationSlice:destinationLayer
+                         destinationLevel:copy->destination.mipLevel
+                        destinationOrigin:MTLOriginMake(copy->destination.origin.x,
+                                                        copy->destination.origin.y,
+                                                        destinationOriginZ)];
                 }
-
-                default:
-                    UNREACHABLE();
+                break;
             }
-        }
 
-        commandContext->EndBlit();
-        return {};
-    }
-
-    MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
-        ComputePipeline* lastPipeline = nullptr;
-        StorageBufferLengthTracker storageBufferLengths = {};
-        BindGroupTracker bindGroups(&storageBufferLengths);
-
-        id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
-
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::EndComputePass: {
-                    mCommands.NextCommand<EndComputePassCmd>();
-                    commandContext->EndCompute();
-                    return {};
-                }
-
-                case Command::Dispatch: {
-                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-
-                    // Skip noop dispatches, it can causes issues on some systems.
-                    if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
-                        break;
-                    }
-
-                    bindGroups.Apply(encoder);
-                    storageBufferLengths.Apply(encoder, lastPipeline);
-
-                    [encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
-                            threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
+            case Command::ClearBuffer: {
+                ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                if (cmd->size == 0) {
+                    // Skip no-op copies.
                     break;
                 }
+                Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
 
-                case Command::DispatchIndirect: {
-                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+                bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+                    commandContext, cmd->offset, cmd->size);
 
-                    bindGroups.Apply(encoder);
-                    storageBufferLengths.Apply(encoder, lastPipeline);
-
-                    Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
-                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
-                    [encoder dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
-                                               indirectBufferOffset:dispatch->indirectOffset
-                                              threadsPerThreadgroup:lastPipeline
-                                                                        ->GetLocalWorkGroupSize()];
-                    break;
+                if (!clearedToZero) {
+                    [commandContext->EnsureBlit() fillBuffer:dstBuffer->GetMTLBuffer()
+                                                       range:NSMakeRange(cmd->offset, cmd->size)
+                                                       value:0u];
                 }
 
-                case Command::SetComputePipeline: {
-                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
-                    lastPipeline = ToBackend(cmd->pipeline).Get();
+                break;
+            }
 
-                    bindGroups.OnSetPipeline(lastPipeline);
+            case Command::ResolveQuerySet: {
+                ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                Buffer* destination = ToBackend(cmd->destination.Get());
 
-                    lastPipeline->Encode(encoder);
-                    break;
-                }
+                destination->EnsureDataInitializedAsDestination(
+                    commandContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
 
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
-                    uint32_t* dynamicOffsets = nullptr;
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-
-                    bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
-                                              cmd->dynamicOffsetCount, dynamicOffsets);
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
-                    char* label = mCommands.NextData<char>(cmd->length + 1);
-                    NSRef<NSString> mtlLabel =
-                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
-                    [encoder insertDebugSignpost:mtlLabel.Get()];
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    mCommands.NextCommand<PopDebugGroupCmd>();
-
-                    [encoder popDebugGroup];
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
-                    char* label = mCommands.NextData<char>(cmd->length + 1);
-                    NSRef<NSString> mtlLabel =
-                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
-                    [encoder pushDebugGroup:mtlLabel.Get()];
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
+                if (querySet->GetQueryType() == wgpu::QueryType::Occlusion) {
+                    [commandContext->EnsureBlit()
+                           copyFromBuffer:querySet->GetVisibilityBuffer()
+                             sourceOffset:NSUInteger(cmd->firstQuery * sizeof(uint64_t))
+                                 toBuffer:destination->GetMTLBuffer()
+                        destinationOffset:NSUInteger(cmd->destinationOffset)
+                                     size:NSUInteger(cmd->queryCount * sizeof(uint64_t))];
+                } else {
                     if (@available(macos 10.15, iOS 14.0, *)) {
-                        [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
-                                          atSampleIndex:NSUInteger(cmd->queryIndex)
-                                            withBarrier:YES];
+                        [commandContext->EnsureBlit()
+                              resolveCounters:querySet->GetCounterSampleBuffer()
+                                      inRange:NSMakeRange(cmd->firstQuery, cmd->queryCount)
+                            destinationBuffer:destination->GetMTLBuffer()
+                            destinationOffset:NSUInteger(cmd->destinationOffset)];
                     } else {
                         UNREACHABLE();
                     }
-                    break;
                 }
-
-                default: {
-                    UNREACHABLE();
-                    break;
-                }
+                break;
             }
-        }
 
-        // EndComputePass should have been called
-        UNREACHABLE();
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+                if (@available(macos 10.15, iOS 14.0, *)) {
+                    [commandContext->EnsureBlit()
+                        sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+                                 atSampleIndex:NSUInteger(cmd->queryIndex)
+                                   withBarrier:YES];
+                } else {
+                    UNREACHABLE();
+                }
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                // MTLCommandBuffer does not implement insertDebugSignpost
+                SkipCommand(&mCommands, type);
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                mCommands.NextCommand<PopDebugGroupCmd>();
+
+                if (@available(macos 10.13, *)) {
+                    [commandContext->GetCommands() popDebugGroup];
+                }
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                char* label = mCommands.NextData<char>(cmd->length + 1);
+
+                if (@available(macos 10.13, *)) {
+                    NSRef<NSString> mtlLabel =
+                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                    [commandContext->GetCommands() pushDebugGroup:mtlLabel.Get()];
+                }
+
+                break;
+            }
+
+            case Command::WriteBuffer: {
+                WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                const uint64_t offset = write->offset;
+                const uint64_t size = write->size;
+                if (size == 0) {
+                    continue;
+                }
+
+                Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                uint8_t* data = mCommands.NextData<uint8_t>(size);
+                Device* device = ToBackend(GetDevice());
+
+                UploadHandle uploadHandle;
+                DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                                  size, device->GetPendingCommandSerial(),
+                                                  kCopyBufferToBufferOffsetAlignment));
+                ASSERT(uploadHandle.mappedBuffer != nullptr);
+                memcpy(uploadHandle.mappedBuffer, data, size);
+
+                dstBuffer->EnsureDataInitializedAsDestination(commandContext, offset, size);
+
+                [commandContext->EnsureBlit()
+                       copyFromBuffer:ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle()
+                         sourceOffset:uploadHandle.startOffset
+                             toBuffer:dstBuffer->GetMTLBuffer()
+                    destinationOffset:offset
+                                 size:size];
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+        }
     }
 
-    MaybeError CommandBuffer::EncodeRenderPass(id<MTLRenderCommandEncoder> encoder) {
-        bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
-        RenderPipeline* lastPipeline = nullptr;
-        id<MTLBuffer> indexBuffer = nullptr;
-        uint32_t indexBufferBaseOffset = 0;
-        MTLIndexType indexBufferType;
-        uint64_t indexFormatSize = 0;
+    commandContext->EndBlit();
+    return {};
+}
 
-        StorageBufferLengthTracker storageBufferLengths = {};
-        VertexBufferTracker vertexBuffers(&storageBufferLengths);
-        BindGroupTracker bindGroups(&storageBufferLengths);
+MaybeError CommandBuffer::EncodeComputePass(CommandRecordingContext* commandContext) {
+    ComputePipeline* lastPipeline = nullptr;
+    StorageBufferLengthTracker storageBufferLengths = {};
+    BindGroupTracker bindGroups(&storageBufferLengths);
 
-        auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
-            switch (type) {
-                case Command::Draw: {
-                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
+    id<MTLComputeCommandEncoder> encoder = commandContext->BeginCompute();
 
-                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
-                    bindGroups.Apply(encoder);
-                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
-                    // The instance count must be non-zero, otherwise no-op
-                    if (draw->instanceCount != 0) {
-                        // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
-                        if (draw->firstInstance == 0) {
-                            [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
-                                        vertexStart:draw->firstVertex
-                                        vertexCount:draw->vertexCount
-                                      instanceCount:draw->instanceCount];
-                        } else {
-                            [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
-                                        vertexStart:draw->firstVertex
-                                        vertexCount:draw->vertexCount
-                                      instanceCount:draw->instanceCount
-                                       baseInstance:draw->firstInstance];
-                        }
-                    }
-                    break;
-                }
-
-                case Command::DrawIndexed: {
-                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
-
-                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
-                    bindGroups.Apply(encoder);
-                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
-                    // The index and instance count must be non-zero, otherwise no-op
-                    if (draw->indexCount != 0 && draw->instanceCount != 0) {
-                        // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
-                        // baseVertex.
-                        if (draw->baseVertex == 0 && draw->firstInstance == 0) {
-                            [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
-                                                indexCount:draw->indexCount
-                                                 indexType:indexBufferType
-                                               indexBuffer:indexBuffer
-                                         indexBufferOffset:indexBufferBaseOffset +
-                                                           draw->firstIndex * indexFormatSize
-                                             instanceCount:draw->instanceCount];
-                        } else {
-                            [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
-                                                indexCount:draw->indexCount
-                                                 indexType:indexBufferType
-                                               indexBuffer:indexBuffer
-                                         indexBufferOffset:indexBufferBaseOffset +
-                                                           draw->firstIndex * indexFormatSize
-                                             instanceCount:draw->instanceCount
-                                                baseVertex:draw->baseVertex
-                                              baseInstance:draw->firstInstance];
-                        }
-                    }
-                    break;
-                }
-
-                case Command::DrawIndirect: {
-                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
-
-                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
-                    bindGroups.Apply(encoder);
-                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
-                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
-                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
-                    [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
-                              indirectBuffer:indirectBuffer
-                        indirectBufferOffset:draw->indirectOffset];
-                    break;
-                }
-
-                case Command::DrawIndexedIndirect: {
-                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
-
-                    vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
-                    bindGroups.Apply(encoder);
-                    storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
-
-                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
-                    ASSERT(buffer != nullptr);
-
-                    id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
-                    [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
-                                         indexType:indexBufferType
-                                       indexBuffer:indexBuffer
-                                 indexBufferOffset:indexBufferBaseOffset
-                                    indirectBuffer:indirectBuffer
-                              indirectBufferOffset:draw->indirectOffset];
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
-                    char* label = iter->NextData<char>(cmd->length + 1);
-                    NSRef<NSString> mtlLabel =
-                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
-                    [encoder insertDebugSignpost:mtlLabel.Get()];
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    iter->NextCommand<PopDebugGroupCmd>();
-
-                    [encoder popDebugGroup];
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
-                    char* label = iter->NextData<char>(cmd->length + 1);
-                    NSRef<NSString> mtlLabel =
-                        AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
-                    [encoder pushDebugGroup:mtlLabel.Get()];
-                    break;
-                }
-
-                case Command::SetRenderPipeline: {
-                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
-                    RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
-
-                    vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
-                    bindGroups.OnSetPipeline(newPipeline);
-
-                    [encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
-                    [encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
-                    [encoder setCullMode:newPipeline->GetMTLCullMode()];
-                    [encoder setDepthBias:newPipeline->GetDepthBias()
-                               slopeScale:newPipeline->GetDepthBiasSlopeScale()
-                                    clamp:newPipeline->GetDepthBiasClamp()];
-                    if (@available(macOS 10.11, iOS 11.0, *)) {
-                        MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth()
-                                                        ? MTLDepthClipModeClamp
-                                                        : MTLDepthClipModeClip;
-                        [encoder setDepthClipMode:clipMode];
-                    }
-                    newPipeline->Encode(encoder);
-
-                    lastPipeline = newPipeline;
-                    break;
-                }
-
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
-                    uint32_t* dynamicOffsets = nullptr;
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-
-                    bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
-                                              cmd->dynamicOffsetCount, dynamicOffsets);
-                    break;
-                }
-
-                case Command::SetIndexBuffer: {
-                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
-                    auto b = ToBackend(cmd->buffer.Get());
-                    indexBuffer = b->GetMTLBuffer();
-                    indexBufferBaseOffset = cmd->offset;
-                    indexBufferType = MTLIndexFormat(cmd->format);
-                    indexFormatSize = IndexFormatSize(cmd->format);
-                    break;
-                }
-
-                case Command::SetVertexBuffer: {
-                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
-
-                    vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
-                                                    cmd->offset);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-                    break;
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::EndComputePass: {
+                mCommands.NextCommand<EndComputePassCmd>();
+                commandContext->EndCompute();
+                return {};
             }
-        };
 
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::EndRenderPass: {
-                    mCommands.NextCommand<EndRenderPassCmd>();
-                    return {};
-                }
+            case Command::Dispatch: {
+                DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
 
-                case Command::SetStencilReference: {
-                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
-                    [encoder setStencilReferenceValue:cmd->reference];
+                // Skip noop dispatches, it can causes issues on some systems.
+                if (dispatch->x == 0 || dispatch->y == 0 || dispatch->z == 0) {
                     break;
                 }
 
-                case Command::SetViewport: {
-                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
-                    MTLViewport viewport;
-                    viewport.originX = cmd->x;
-                    viewport.originY = cmd->y;
-                    viewport.width = cmd->width;
-                    viewport.height = cmd->height;
-                    viewport.znear = cmd->minDepth;
-                    viewport.zfar = cmd->maxDepth;
+                bindGroups.Apply(encoder);
+                storageBufferLengths.Apply(encoder, lastPipeline);
 
-                    [encoder setViewport:viewport];
-                    break;
+                [encoder dispatchThreadgroups:MTLSizeMake(dispatch->x, dispatch->y, dispatch->z)
+                        threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
+                break;
+            }
+
+            case Command::DispatchIndirect: {
+                DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+
+                bindGroups.Apply(encoder);
+                storageBufferLengths.Apply(encoder, lastPipeline);
+
+                Buffer* buffer = ToBackend(dispatch->indirectBuffer.Get());
+                id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                [encoder
+                    dispatchThreadgroupsWithIndirectBuffer:indirectBuffer
+                                      indirectBufferOffset:dispatch->indirectOffset
+                                     threadsPerThreadgroup:lastPipeline->GetLocalWorkGroupSize()];
+                break;
+            }
+
+            case Command::SetComputePipeline: {
+                SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                lastPipeline = ToBackend(cmd->pipeline).Get();
+
+                bindGroups.OnSetPipeline(lastPipeline);
+
+                lastPipeline->Encode(encoder);
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                uint32_t* dynamicOffsets = nullptr;
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
                 }
 
-                case Command::SetScissorRect: {
-                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
-                    MTLScissorRect rect;
-                    rect.x = cmd->x;
-                    rect.y = cmd->y;
-                    rect.width = cmd->width;
-                    rect.height = cmd->height;
+                bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+                                          cmd->dynamicOffsetCount, dynamicOffsets);
+                break;
+            }
 
-                    [encoder setScissorRect:rect];
-                    break;
+            case Command::InsertDebugMarker: {
+                InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                char* label = mCommands.NextData<char>(cmd->length + 1);
+                NSRef<NSString> mtlLabel =
+                    AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                [encoder insertDebugSignpost:mtlLabel.Get()];
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                mCommands.NextCommand<PopDebugGroupCmd>();
+
+                [encoder popDebugGroup];
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                char* label = mCommands.NextData<char>(cmd->length + 1);
+                NSRef<NSString> mtlLabel =
+                    AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                [encoder pushDebugGroup:mtlLabel.Get()];
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+                if (@available(macos 10.15, iOS 14.0, *)) {
+                    [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+                                      atSampleIndex:NSUInteger(cmd->queryIndex)
+                                        withBarrier:YES];
+                } else {
+                    UNREACHABLE();
                 }
+                break;
+            }
 
-                case Command::SetBlendConstant: {
-                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
-                    [encoder setBlendColorRed:cmd->color.r
-                                        green:cmd->color.g
-                                         blue:cmd->color.b
-                                        alpha:cmd->color.a];
-                    break;
-                }
+            default: {
+                UNREACHABLE();
+                break;
+            }
+        }
+    }
 
-                case Command::ExecuteBundles: {
-                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
-                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+    // EndComputePass should have been called
+    UNREACHABLE();
+}
 
-                    for (uint32_t i = 0; i < cmd->count; ++i) {
-                        CommandIterator* iter = bundles[i]->GetCommands();
-                        iter->Reset();
-                        while (iter->NextCommandId(&type)) {
-                            EncodeRenderBundleCommand(iter, type);
-                        }
-                    }
-                    break;
-                }
+MaybeError CommandBuffer::EncodeRenderPass(id<MTLRenderCommandEncoder> encoder) {
+    bool enableVertexPulling = GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling);
+    RenderPipeline* lastPipeline = nullptr;
+    id<MTLBuffer> indexBuffer = nullptr;
+    uint32_t indexBufferBaseOffset = 0;
+    MTLIndexType indexBufferType;
+    uint64_t indexFormatSize = 0;
 
-                case Command::BeginOcclusionQuery: {
-                    BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+    StorageBufferLengthTracker storageBufferLengths = {};
+    VertexBufferTracker vertexBuffers(&storageBufferLengths);
+    BindGroupTracker bindGroups(&storageBufferLengths);
 
-                    [encoder setVisibilityResultMode:MTLVisibilityResultModeBoolean
-                                              offset:cmd->queryIndex * sizeof(uint64_t)];
-                    break;
-                }
+    auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+        switch (type) {
+            case Command::Draw: {
+                DrawCmd* draw = iter->NextCommand<DrawCmd>();
 
-                case Command::EndOcclusionQuery: {
-                    EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+                vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                bindGroups.Apply(encoder);
+                storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
 
-                    [encoder setVisibilityResultMode:MTLVisibilityResultModeDisabled
-                                              offset:cmd->queryIndex * sizeof(uint64_t)];
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
-                    if (@available(macos 10.15, iOS 14.0, *)) {
-                        [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
-                                          atSampleIndex:NSUInteger(cmd->queryIndex)
-                                            withBarrier:YES];
+                // The instance count must be non-zero, otherwise no-op
+                if (draw->instanceCount != 0) {
+                    // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance
+                    if (draw->firstInstance == 0) {
+                        [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                    vertexStart:draw->firstVertex
+                                    vertexCount:draw->vertexCount
+                                  instanceCount:draw->instanceCount];
                     } else {
-                        UNREACHABLE();
+                        [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                    vertexStart:draw->firstVertex
+                                    vertexCount:draw->vertexCount
+                                  instanceCount:draw->instanceCount
+                                   baseInstance:draw->firstInstance];
                     }
-                    break;
+                }
+                break;
+            }
+
+            case Command::DrawIndexed: {
+                DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+                vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                bindGroups.Apply(encoder);
+                storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                // The index and instance count must be non-zero, otherwise no-op
+                if (draw->indexCount != 0 && draw->instanceCount != 0) {
+                    // MTLFeatureSet_iOS_GPUFamily3_v1 does not support baseInstance and
+                    // baseVertex.
+                    if (draw->baseVertex == 0 && draw->firstInstance == 0) {
+                        [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                            indexCount:draw->indexCount
+                                             indexType:indexBufferType
+                                           indexBuffer:indexBuffer
+                                     indexBufferOffset:indexBufferBaseOffset +
+                                                       draw->firstIndex * indexFormatSize
+                                         instanceCount:draw->instanceCount];
+                    } else {
+                        [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                            indexCount:draw->indexCount
+                                             indexType:indexBufferType
+                                           indexBuffer:indexBuffer
+                                     indexBufferOffset:indexBufferBaseOffset +
+                                                       draw->firstIndex * indexFormatSize
+                                         instanceCount:draw->instanceCount
+                                            baseVertex:draw->baseVertex
+                                          baseInstance:draw->firstInstance];
+                    }
+                }
+                break;
+            }
+
+            case Command::DrawIndirect: {
+                DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+
+                vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                bindGroups.Apply(encoder);
+                storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                [encoder drawPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                          indirectBuffer:indirectBuffer
+                    indirectBufferOffset:draw->indirectOffset];
+                break;
+            }
+
+            case Command::DrawIndexedIndirect: {
+                DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                vertexBuffers.Apply(encoder, lastPipeline, enableVertexPulling);
+                bindGroups.Apply(encoder);
+                storageBufferLengths.Apply(encoder, lastPipeline, enableVertexPulling);
+
+                Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                ASSERT(buffer != nullptr);
+
+                id<MTLBuffer> indirectBuffer = buffer->GetMTLBuffer();
+                [encoder drawIndexedPrimitives:lastPipeline->GetMTLPrimitiveTopology()
+                                     indexType:indexBufferType
+                                   indexBuffer:indexBuffer
+                             indexBufferOffset:indexBufferBaseOffset
+                                indirectBuffer:indirectBuffer
+                          indirectBufferOffset:draw->indirectOffset];
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                char* label = iter->NextData<char>(cmd->length + 1);
+                NSRef<NSString> mtlLabel =
+                    AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                [encoder insertDebugSignpost:mtlLabel.Get()];
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                iter->NextCommand<PopDebugGroupCmd>();
+
+                [encoder popDebugGroup];
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                char* label = iter->NextData<char>(cmd->length + 1);
+                NSRef<NSString> mtlLabel =
+                    AcquireNSRef([[NSString alloc] initWithUTF8String:label]);
+                [encoder pushDebugGroup:mtlLabel.Get()];
+                break;
+            }
+
+            case Command::SetRenderPipeline: {
+                SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                RenderPipeline* newPipeline = ToBackend(cmd->pipeline).Get();
+
+                vertexBuffers.OnSetPipeline(lastPipeline, newPipeline);
+                bindGroups.OnSetPipeline(newPipeline);
+
+                [encoder setDepthStencilState:newPipeline->GetMTLDepthStencilState()];
+                [encoder setFrontFacingWinding:newPipeline->GetMTLFrontFace()];
+                [encoder setCullMode:newPipeline->GetMTLCullMode()];
+                [encoder setDepthBias:newPipeline->GetDepthBias()
+                           slopeScale:newPipeline->GetDepthBiasSlopeScale()
+                                clamp:newPipeline->GetDepthBiasClamp()];
+                if (@available(macOS 10.11, iOS 11.0, *)) {
+                    MTLDepthClipMode clipMode = newPipeline->ShouldClampDepth()
+                                                    ? MTLDepthClipModeClamp
+                                                    : MTLDepthClipModeClip;
+                    [encoder setDepthClipMode:clipMode];
+                }
+                newPipeline->Encode(encoder);
+
+                lastPipeline = newPipeline;
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                uint32_t* dynamicOffsets = nullptr;
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
                 }
 
-                default: {
-                    EncodeRenderBundleCommand(&mCommands, type);
-                    break;
+                bindGroups.OnSetBindGroup(cmd->index, ToBackend(cmd->group.Get()),
+                                          cmd->dynamicOffsetCount, dynamicOffsets);
+                break;
+            }
+
+            case Command::SetIndexBuffer: {
+                SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+                auto b = ToBackend(cmd->buffer.Get());
+                indexBuffer = b->GetMTLBuffer();
+                indexBufferBaseOffset = cmd->offset;
+                indexBufferType = MTLIndexFormat(cmd->format);
+                indexFormatSize = IndexFormatSize(cmd->format);
+                break;
+            }
+
+            case Command::SetVertexBuffer: {
+                SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+
+                vertexBuffers.OnSetVertexBuffer(cmd->slot, ToBackend(cmd->buffer.Get()),
+                                                cmd->offset);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+    };
+
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::EndRenderPass: {
+                mCommands.NextCommand<EndRenderPassCmd>();
+                return {};
+            }
+
+            case Command::SetStencilReference: {
+                SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                [encoder setStencilReferenceValue:cmd->reference];
+                break;
+            }
+
+            case Command::SetViewport: {
+                SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                MTLViewport viewport;
+                viewport.originX = cmd->x;
+                viewport.originY = cmd->y;
+                viewport.width = cmd->width;
+                viewport.height = cmd->height;
+                viewport.znear = cmd->minDepth;
+                viewport.zfar = cmd->maxDepth;
+
+                [encoder setViewport:viewport];
+                break;
+            }
+
+            case Command::SetScissorRect: {
+                SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                MTLScissorRect rect;
+                rect.x = cmd->x;
+                rect.y = cmd->y;
+                rect.width = cmd->width;
+                rect.height = cmd->height;
+
+                [encoder setScissorRect:rect];
+                break;
+            }
+
+            case Command::SetBlendConstant: {
+                SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                [encoder setBlendColorRed:cmd->color.r
+                                    green:cmd->color.g
+                                     blue:cmd->color.b
+                                    alpha:cmd->color.a];
+                break;
+            }
+
+            case Command::ExecuteBundles: {
+                ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                for (uint32_t i = 0; i < cmd->count; ++i) {
+                    CommandIterator* iter = bundles[i]->GetCommands();
+                    iter->Reset();
+                    while (iter->NextCommandId(&type)) {
+                        EncodeRenderBundleCommand(iter, type);
+                    }
                 }
+                break;
+            }
+
+            case Command::BeginOcclusionQuery: {
+                BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+
+                [encoder setVisibilityResultMode:MTLVisibilityResultModeBoolean
+                                          offset:cmd->queryIndex * sizeof(uint64_t)];
+                break;
+            }
+
+            case Command::EndOcclusionQuery: {
+                EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
+
+                [encoder setVisibilityResultMode:MTLVisibilityResultModeDisabled
+                                          offset:cmd->queryIndex * sizeof(uint64_t)];
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+                if (@available(macos 10.15, iOS 14.0, *)) {
+                    [encoder sampleCountersInBuffer:querySet->GetCounterSampleBuffer()
+                                      atSampleIndex:NSUInteger(cmd->queryIndex)
+                                        withBarrier:YES];
+                } else {
+                    UNREACHABLE();
+                }
+                break;
+            }
+
+            default: {
+                EncodeRenderBundleCommand(&mCommands, type);
+                break;
             }
         }
-
-        // EndRenderPass should have been called
-        UNREACHABLE();
     }
 
+    // EndRenderPass should have been called
+    UNREACHABLE();
+}
+
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/CommandRecordingContext.h b/src/dawn/native/metal/CommandRecordingContext.h
index 925d8fa..b5ec3de 100644
--- a/src/dawn/native/metal/CommandRecordingContext.h
+++ b/src/dawn/native/metal/CommandRecordingContext.h
@@ -22,37 +22,37 @@
 
 namespace dawn::native::metal {
 
-    // This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
-    // Only one encoder may be open at a time.
-    class CommandRecordingContext : NonMovable {
-      public:
-        CommandRecordingContext();
-        ~CommandRecordingContext();
+// This class wraps a MTLCommandBuffer and tracks which Metal encoder is open.
+// Only one encoder may be open at a time.
+class CommandRecordingContext : NonMovable {
+  public:
+    CommandRecordingContext();
+    ~CommandRecordingContext();
 
-        id<MTLCommandBuffer> GetCommands();
-        void MarkUsed();
-        bool WasUsed() const;
+    id<MTLCommandBuffer> GetCommands();
+    void MarkUsed();
+    bool WasUsed() const;
 
-        MaybeError PrepareNextCommandBuffer(id<MTLCommandQueue> queue);
-        NSPRef<id<MTLCommandBuffer>> AcquireCommands();
+    MaybeError PrepareNextCommandBuffer(id<MTLCommandQueue> queue);
+    NSPRef<id<MTLCommandBuffer>> AcquireCommands();
 
-        id<MTLBlitCommandEncoder> EnsureBlit();
-        void EndBlit();
+    id<MTLBlitCommandEncoder> EnsureBlit();
+    void EndBlit();
 
-        id<MTLComputeCommandEncoder> BeginCompute();
-        void EndCompute();
+    id<MTLComputeCommandEncoder> BeginCompute();
+    void EndCompute();
 
-        id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
-        void EndRender();
+    id<MTLRenderCommandEncoder> BeginRender(MTLRenderPassDescriptor* descriptor);
+    void EndRender();
 
-      private:
-        NSPRef<id<MTLCommandBuffer>> mCommands;
-        NSPRef<id<MTLBlitCommandEncoder>> mBlit;
-        NSPRef<id<MTLComputeCommandEncoder>> mCompute;
-        NSPRef<id<MTLRenderCommandEncoder>> mRender;
-        bool mInEncoder = false;
-        bool mUsed = false;
-    };
+  private:
+    NSPRef<id<MTLCommandBuffer>> mCommands;
+    NSPRef<id<MTLBlitCommandEncoder>> mBlit;
+    NSPRef<id<MTLComputeCommandEncoder>> mCompute;
+    NSPRef<id<MTLRenderCommandEncoder>> mRender;
+    bool mInEncoder = false;
+    bool mUsed = false;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/CommandRecordingContext.mm b/src/dawn/native/metal/CommandRecordingContext.mm
index cced9a7..d4bbef0 100644
--- a/src/dawn/native/metal/CommandRecordingContext.mm
+++ b/src/dawn/native/metal/CommandRecordingContext.mm
@@ -18,115 +18,115 @@
 
 namespace dawn::native::metal {
 
-    CommandRecordingContext::CommandRecordingContext() = default;
+CommandRecordingContext::CommandRecordingContext() = default;
 
-    CommandRecordingContext::~CommandRecordingContext() {
-        // Commands must be acquired.
-        ASSERT(mCommands == nullptr);
+CommandRecordingContext::~CommandRecordingContext() {
+    // Commands must be acquired.
+    ASSERT(mCommands == nullptr);
+}
+
+id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
+    return mCommands.Get();
+}
+
+void CommandRecordingContext::MarkUsed() {
+    mUsed = true;
+}
+bool CommandRecordingContext::WasUsed() const {
+    return mUsed;
+}
+
+MaybeError CommandRecordingContext::PrepareNextCommandBuffer(id<MTLCommandQueue> queue) {
+    ASSERT(mCommands == nil);
+    ASSERT(!mUsed);
+
+    // The MTLCommandBuffer will be autoreleased by default.
+    // The autorelease pool may drain before the command buffer is submitted. Retain so it stays
+    // alive.
+    mCommands = AcquireNSPRef([[queue commandBuffer] retain]);
+    if (mCommands == nil) {
+        return DAWN_INTERNAL_ERROR("Failed to allocate an MTLCommandBuffer");
     }
 
-    id<MTLCommandBuffer> CommandRecordingContext::GetCommands() {
-        return mCommands.Get();
+    return {};
+}
+
+NSPRef<id<MTLCommandBuffer>> CommandRecordingContext::AcquireCommands() {
+    // A blit encoder can be left open from WriteBuffer, make sure we close it.
+    if (mCommands != nullptr) {
+        EndBlit();
     }
 
-    void CommandRecordingContext::MarkUsed() {
-        mUsed = true;
-    }
-    bool CommandRecordingContext::WasUsed() const {
-        return mUsed;
-    }
+    ASSERT(!mInEncoder);
+    mUsed = false;
+    return std::move(mCommands);
+}
 
-    MaybeError CommandRecordingContext::PrepareNextCommandBuffer(id<MTLCommandQueue> queue) {
-        ASSERT(mCommands == nil);
-        ASSERT(!mUsed);
+id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
+    ASSERT(mCommands != nullptr);
 
-        // The MTLCommandBuffer will be autoreleased by default.
-        // The autorelease pool may drain before the command buffer is submitted. Retain so it stays
-        // alive.
-        mCommands = AcquireNSPRef([[queue commandBuffer] retain]);
-        if (mCommands == nil) {
-            return DAWN_INTERNAL_ERROR("Failed to allocate an MTLCommandBuffer");
-        }
-
-        return {};
-    }
-
-    NSPRef<id<MTLCommandBuffer>> CommandRecordingContext::AcquireCommands() {
-        // A blit encoder can be left open from WriteBuffer, make sure we close it.
-        if (mCommands != nullptr) {
-            EndBlit();
-        }
-
+    if (mBlit == nullptr) {
         ASSERT(!mInEncoder);
-        mUsed = false;
-        return std::move(mCommands);
-    }
-
-    id<MTLBlitCommandEncoder> CommandRecordingContext::EnsureBlit() {
-        ASSERT(mCommands != nullptr);
-
-        if (mBlit == nullptr) {
-            ASSERT(!mInEncoder);
-            mInEncoder = true;
-
-            // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
-            // draining from under us.
-            mBlit.Acquire([[*mCommands blitCommandEncoder] retain]);
-        }
-        return mBlit.Get();
-    }
-
-    void CommandRecordingContext::EndBlit() {
-        ASSERT(mCommands != nullptr);
-
-        if (mBlit != nullptr) {
-            [*mBlit endEncoding];
-            mBlit = nullptr;
-            mInEncoder = false;
-        }
-    }
-
-    id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
-        ASSERT(mCommands != nullptr);
-        ASSERT(mCompute == nullptr);
-        ASSERT(!mInEncoder);
-
         mInEncoder = true;
+
         // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
         // draining from under us.
-        mCompute.Acquire([[*mCommands computeCommandEncoder] retain]);
-        return mCompute.Get();
+        mBlit.Acquire([[*mCommands blitCommandEncoder] retain]);
     }
+    return mBlit.Get();
+}
 
-    void CommandRecordingContext::EndCompute() {
-        ASSERT(mCommands != nullptr);
-        ASSERT(mCompute != nullptr);
+void CommandRecordingContext::EndBlit() {
+    ASSERT(mCommands != nullptr);
 
-        [*mCompute endEncoding];
-        mCompute = nullptr;
+    if (mBlit != nullptr) {
+        [*mBlit endEncoding];
+        mBlit = nullptr;
         mInEncoder = false;
     }
+}
 
-    id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
-        MTLRenderPassDescriptor* descriptor) {
-        ASSERT(mCommands != nullptr);
-        ASSERT(mRender == nullptr);
-        ASSERT(!mInEncoder);
+id<MTLComputeCommandEncoder> CommandRecordingContext::BeginCompute() {
+    ASSERT(mCommands != nullptr);
+    ASSERT(mCompute == nullptr);
+    ASSERT(!mInEncoder);
 
-        mInEncoder = true;
-        // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
-        // draining from under us.
-        mRender.Acquire([[*mCommands renderCommandEncoderWithDescriptor:descriptor] retain]);
-        return mRender.Get();
-    }
+    mInEncoder = true;
+    // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+    // draining from under us.
+    mCompute.Acquire([[*mCommands computeCommandEncoder] retain]);
+    return mCompute.Get();
+}
 
-    void CommandRecordingContext::EndRender() {
-        ASSERT(mCommands != nullptr);
-        ASSERT(mRender != nullptr);
+void CommandRecordingContext::EndCompute() {
+    ASSERT(mCommands != nullptr);
+    ASSERT(mCompute != nullptr);
 
-        [*mRender endEncoding];
-        mRender = nullptr;
-        mInEncoder = false;
-    }
+    [*mCompute endEncoding];
+    mCompute = nullptr;
+    mInEncoder = false;
+}
+
+id<MTLRenderCommandEncoder> CommandRecordingContext::BeginRender(
+    MTLRenderPassDescriptor* descriptor) {
+    ASSERT(mCommands != nullptr);
+    ASSERT(mRender == nullptr);
+    ASSERT(!mInEncoder);
+
+    mInEncoder = true;
+    // The encoder is created autoreleased. Retain it to avoid the autoreleasepool from
+    // draining from under us.
+    mRender.Acquire([[*mCommands renderCommandEncoderWithDescriptor:descriptor] retain]);
+    return mRender.Get();
+}
+
+void CommandRecordingContext::EndRender() {
+    ASSERT(mCommands != nullptr);
+    ASSERT(mRender != nullptr);
+
+    [*mRender endEncoding];
+    mRender = nullptr;
+    mInEncoder = false;
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/ComputePipelineMTL.h b/src/dawn/native/metal/ComputePipelineMTL.h
index d6c9aa3..22ac635 100644
--- a/src/dawn/native/metal/ComputePipelineMTL.h
+++ b/src/dawn/native/metal/ComputePipelineMTL.h
@@ -25,30 +25,29 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    class ComputePipeline final : public ComputePipelineBase {
-      public:
-        static Ref<ComputePipeline> CreateUninitialized(
-            Device* device,
-            const ComputePipelineDescriptor* descriptor);
-        static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
-                                    WGPUCreateComputePipelineAsyncCallback callback,
-                                    void* userdata);
+class ComputePipeline final : public ComputePipelineBase {
+  public:
+    static Ref<ComputePipeline> CreateUninitialized(Device* device,
+                                                    const ComputePipelineDescriptor* descriptor);
+    static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                WGPUCreateComputePipelineAsyncCallback callback,
+                                void* userdata);
 
-        void Encode(id<MTLComputeCommandEncoder> encoder);
-        MTLSize GetLocalWorkGroupSize() const;
-        bool RequiresStorageBufferLength() const;
+    void Encode(id<MTLComputeCommandEncoder> encoder);
+    MTLSize GetLocalWorkGroupSize() const;
+    bool RequiresStorageBufferLength() const;
 
-      private:
-        using ComputePipelineBase::ComputePipelineBase;
-        MaybeError Initialize() override;
+  private:
+    using ComputePipelineBase::ComputePipelineBase;
+    MaybeError Initialize() override;
 
-        NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
-        MTLSize mLocalWorkgroupSize;
-        bool mRequiresStorageBufferLength;
-        std::vector<uint32_t> mWorkgroupAllocations;
-    };
+    NSPRef<id<MTLComputePipelineState>> mMtlComputePipelineState;
+    MTLSize mLocalWorkgroupSize;
+    bool mRequiresStorageBufferLength;
+    std::vector<uint32_t> mWorkgroupAllocations;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/ComputePipelineMTL.mm b/src/dawn/native/metal/ComputePipelineMTL.mm
index 71d5a01..f350071 100644
--- a/src/dawn/native/metal/ComputePipelineMTL.mm
+++ b/src/dawn/native/metal/ComputePipelineMTL.mm
@@ -22,68 +22,67 @@
 
 namespace dawn::native::metal {
 
-    // static
-    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
-        Device* device,
-        const ComputePipelineDescriptor* descriptor) {
-        return AcquireRef(new ComputePipeline(device, descriptor));
+// static
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+    Device* device,
+    const ComputePipelineDescriptor* descriptor) {
+    return AcquireRef(new ComputePipeline(device, descriptor));
+}
+
+MaybeError ComputePipeline::Initialize() {
+    auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+
+    const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+    ShaderModule::MetalFunctionData computeData;
+
+    DAWN_TRY(CreateMTLFunction(computeStage, SingleShaderStage::Compute, ToBackend(GetLayout()),
+                               &computeData));
+
+    NSError* error = nullptr;
+    mMtlComputePipelineState.Acquire(
+        [mtlDevice newComputePipelineStateWithFunction:computeData.function.Get() error:&error]);
+    if (error != nullptr) {
+        return DAWN_INTERNAL_ERROR("Error creating pipeline state " +
+                                   std::string([error.localizedDescription UTF8String]));
     }
+    ASSERT(mMtlComputePipelineState != nil);
 
-    MaybeError ComputePipeline::Initialize() {
-        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+    // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
+    Origin3D localSize = GetStage(SingleShaderStage::Compute).metadata->localWorkgroupSize;
+    mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
 
-        const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
-        ShaderModule::MetalFunctionData computeData;
+    mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
+    mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
+    return {};
+}
 
-        DAWN_TRY(CreateMTLFunction(computeStage, SingleShaderStage::Compute, ToBackend(GetLayout()),
-                                   &computeData));
-
-        NSError* error = nullptr;
-        mMtlComputePipelineState.Acquire([mtlDevice
-            newComputePipelineStateWithFunction:computeData.function.Get()
-                                          error:&error]);
-        if (error != nullptr) {
-            return DAWN_INTERNAL_ERROR("Error creating pipeline state " +
-                                       std::string([error.localizedDescription UTF8String]));
+void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
+    [encoder setComputePipelineState:mMtlComputePipelineState.Get()];
+    for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
+        if (mWorkgroupAllocations[i] == 0) {
+            continue;
         }
-        ASSERT(mMtlComputePipelineState != nil);
-
-        // Copy over the local workgroup size as it is passed to dispatch explicitly in Metal
-        Origin3D localSize = GetStage(SingleShaderStage::Compute).metadata->localWorkgroupSize;
-        mLocalWorkgroupSize = MTLSizeMake(localSize.x, localSize.y, localSize.z);
-
-        mRequiresStorageBufferLength = computeData.needsStorageBufferLength;
-        mWorkgroupAllocations = std::move(computeData.workgroupAllocations);
-        return {};
+        // Size must be a multiple of 16 bytes.
+        uint32_t rounded = Align<uint32_t>(mWorkgroupAllocations[i], 16);
+        [encoder setThreadgroupMemoryLength:rounded atIndex:i];
     }
+}
 
-    void ComputePipeline::Encode(id<MTLComputeCommandEncoder> encoder) {
-        [encoder setComputePipelineState:mMtlComputePipelineState.Get()];
-        for (size_t i = 0; i < mWorkgroupAllocations.size(); ++i) {
-            if (mWorkgroupAllocations[i] == 0) {
-                continue;
-            }
-            // Size must be a multiple of 16 bytes.
-            uint32_t rounded = Align<uint32_t>(mWorkgroupAllocations[i], 16);
-            [encoder setThreadgroupMemoryLength:rounded atIndex:i];
-        }
-    }
+MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
+    return mLocalWorkgroupSize;
+}
 
-    MTLSize ComputePipeline::GetLocalWorkGroupSize() const {
-        return mLocalWorkgroupSize;
-    }
+bool ComputePipeline::RequiresStorageBufferLength() const {
+    return mRequiresStorageBufferLength;
+}
 
-    bool ComputePipeline::RequiresStorageBufferLength() const {
-        return mRequiresStorageBufferLength;
-    }
-
-    void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
-                                          WGPUCreateComputePipelineAsyncCallback callback,
-                                          void* userdata) {
-        std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
-            std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
-                                                             userdata);
-        CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
-    }
+void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                      WGPUCreateComputePipelineAsyncCallback callback,
+                                      void* userdata) {
+    std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+        std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+                                                         userdata);
+    CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/DeviceMTL.h b/src/dawn/native/metal/DeviceMTL.h
index 9794dd5..dcf8454 100644
--- a/src/dawn/native/metal/DeviceMTL.h
+++ b/src/dawn/native/metal/DeviceMTL.h
@@ -32,119 +32,116 @@
 
 namespace dawn::native::metal {
 
-    struct KalmanInfo;
+struct KalmanInfo;
 
-    class Device final : public DeviceBase {
-      public:
-        static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
-                                                 NSPRef<id<MTLDevice>> mtlDevice,
-                                                 const DeviceDescriptor* descriptor);
-        ~Device() override;
+class Device final : public DeviceBase {
+  public:
+    static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+                                             NSPRef<id<MTLDevice>> mtlDevice,
+                                             const DeviceDescriptor* descriptor);
+    ~Device() override;
 
-        MaybeError Initialize(const DeviceDescriptor* descriptor);
+    MaybeError Initialize(const DeviceDescriptor* descriptor);
 
-        MaybeError TickImpl() override;
+    MaybeError TickImpl() override;
 
-        id<MTLDevice> GetMTLDevice();
-        id<MTLCommandQueue> GetMTLQueue();
+    id<MTLDevice> GetMTLDevice();
+    id<MTLCommandQueue> GetMTLQueue();
 
-        CommandRecordingContext* GetPendingCommandContext();
-        MaybeError SubmitPendingCommandBuffer();
+    CommandRecordingContext* GetPendingCommandContext();
+    MaybeError SubmitPendingCommandBuffer();
 
-        Ref<Texture> CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
-                                                    IOSurfaceRef ioSurface);
-        void WaitForCommandsToBeScheduled();
+    Ref<Texture> CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+                                                IOSurfaceRef ioSurface);
+    void WaitForCommandsToBeScheduled();
 
-        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
-        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
-                                           uint64_t sourceOffset,
-                                           BufferBase* destination,
-                                           uint64_t destinationOffset,
-                                           uint64_t size) override;
-        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
-                                            const TextureDataLayout& dataLayout,
-                                            TextureCopy* dst,
-                                            const Extent3D& copySizePixels) override;
+    ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+    MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                       uint64_t sourceOffset,
+                                       BufferBase* destination,
+                                       uint64_t destinationOffset,
+                                       uint64_t size) override;
+    MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                        const TextureDataLayout& dataLayout,
+                                        TextureCopy* dst,
+                                        const Extent3D& copySizePixels) override;
 
-        uint32_t GetOptimalBytesPerRowAlignment() const override;
-        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+    uint32_t GetOptimalBytesPerRowAlignment() const override;
+    uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
 
-        float GetTimestampPeriodInNS() const override;
+    float GetTimestampPeriodInNS() const override;
 
-      private:
-        Device(AdapterBase* adapter,
-               NSPRef<id<MTLDevice>> mtlDevice,
-               const DeviceDescriptor* descriptor);
+  private:
+    Device(AdapterBase* adapter,
+           NSPRef<id<MTLDevice>> mtlDevice,
+           const DeviceDescriptor* descriptor);
 
-        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
-            const BindGroupDescriptor* descriptor) override;
-        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken) override;
-        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-            const BufferDescriptor* descriptor) override;
-        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
-            CommandEncoder* encoder,
-            const CommandBufferDescriptor* descriptor) override;
-        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
-            const PipelineLayoutDescriptor* descriptor) override;
-        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
-            const QuerySetDescriptor* descriptor) override;
-        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
-            const SamplerDescriptor* descriptor) override;
-        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
-            const ShaderModuleDescriptor* descriptor,
-            ShaderModuleParseResult* parseResult) override;
-        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
-            Surface* surface,
-            NewSwapChainBase* previousSwapChain,
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
-            const TextureDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
-            TextureBase* texture,
-            const TextureViewDescriptor* descriptor) override;
-        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
-            const ComputePipelineDescriptor* descriptor) override;
-        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
-            const RenderPipelineDescriptor* descriptor) override;
-        void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
-                                                WGPUCreateComputePipelineAsyncCallback callback,
-                                                void* userdata) override;
-        void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
-                                               WGPUCreateRenderPipelineAsyncCallback callback,
-                                               void* userdata) override;
+    ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) override;
+    ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) override;
+    ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+    ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) override;
+    ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) override;
+    ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) override;
+    ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) override;
+    ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) override;
+    Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) override;
+    Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) override;
+    void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                            WGPUCreateComputePipelineAsyncCallback callback,
+                                            void* userdata) override;
+    void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                           WGPUCreateRenderPipelineAsyncCallback callback,
+                                           void* userdata) override;
 
-        void InitTogglesFromDriver();
-        void DestroyImpl() override;
-        MaybeError WaitForIdleForDestruction() override;
-        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+    void InitTogglesFromDriver();
+    void DestroyImpl() override;
+    MaybeError WaitForIdleForDestruction() override;
+    ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
 
-        NSPRef<id<MTLDevice>> mMtlDevice;
-        NSPRef<id<MTLCommandQueue>> mCommandQueue;
+    NSPRef<id<MTLDevice>> mMtlDevice;
+    NSPRef<id<MTLCommandQueue>> mCommandQueue;
 
-        CommandRecordingContext mCommandContext;
+    CommandRecordingContext mCommandContext;
 
-        // The completed serial is updated in a Metal completion handler that can be fired on a
-        // different thread, so it needs to be atomic.
-        std::atomic<uint64_t> mCompletedSerial;
+    // The completed serial is updated in a Metal completion handler that can be fired on a
+    // different thread, so it needs to be atomic.
+    std::atomic<uint64_t> mCompletedSerial;
 
-        // mLastSubmittedCommands will be accessed in a Metal schedule handler that can be fired on
-        // a different thread so we guard access to it with a mutex.
-        std::mutex mLastSubmittedCommandsMutex;
-        NSPRef<id<MTLCommandBuffer>> mLastSubmittedCommands;
+    // mLastSubmittedCommands will be accessed in a Metal schedule handler that can be fired on
+    // a different thread so we guard access to it with a mutex.
+    std::mutex mLastSubmittedCommandsMutex;
+    NSPRef<id<MTLCommandBuffer>> mLastSubmittedCommands;
 
-        // The current estimation of timestamp period
-        float mTimestampPeriod = 1.0f;
-        // The base of CPU timestamp and GPU timestamp to measure the linear regression between GPU
-        // and CPU timestamps.
-        MTLTimestamp mCpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
-        MTLTimestamp mGpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
-        // The parameters for kalman filter
-        std::unique_ptr<KalmanInfo> mKalmanInfo;
-    };
+    // The current estimation of timestamp period
+    float mTimestampPeriod = 1.0f;
+    // The base of CPU timestamp and GPU timestamp to measure the linear regression between GPU
+    // and CPU timestamps.
+    MTLTimestamp mCpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+    MTLTimestamp mGpuTimestamp API_AVAILABLE(macos(10.15), ios(14.0)) = 0;
+    // The parameters for kalman filter
+    std::unique_ptr<KalmanInfo> mKalmanInfo;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/DeviceMTL.mm b/src/dawn/native/metal/DeviceMTL.mm
index fd8bf74..6cb7271 100644
--- a/src/dawn/native/metal/DeviceMTL.mm
+++ b/src/dawn/native/metal/DeviceMTL.mm
@@ -42,464 +42,455 @@
 
 namespace dawn::native::metal {
 
-    struct KalmanInfo {
-        float filterValue;  // The estimation value
-        float kalmanGain;   // The kalman gain
-        float R;            // The covariance of the observation noise
-        float P;            // The a posteriori estimate covariance
-    };
+struct KalmanInfo {
+    float filterValue;  // The estimation value
+    float kalmanGain;   // The kalman gain
+    float R;            // The covariance of the observation noise
+    float P;            // The a posteriori estimate covariance
+};
 
-    namespace {
+namespace {
 
-        // The time interval for each round of kalman filter
-        static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
+// The time interval for each round of kalman filter
+static constexpr uint64_t kFilterIntervalInMs = static_cast<uint64_t>(NSEC_PER_SEC / 10);
 
-        // A simplified kalman filter for estimating timestamp period based on measured values
-        float KalmanFilter(KalmanInfo* info, float measuredValue) {
-            // Optimize kalman gain
-            info->kalmanGain = info->P / (info->P + info->R);
+// A simplified kalman filter for estimating timestamp period based on measured values
+float KalmanFilter(KalmanInfo* info, float measuredValue) {
+    // Optimize kalman gain
+    info->kalmanGain = info->P / (info->P + info->R);
 
-            // Correct filter value
-            info->filterValue =
-                info->kalmanGain * measuredValue + (1.0 - info->kalmanGain) * info->filterValue;
-            // Update estimate covariance
-            info->P = (1.0f - info->kalmanGain) * info->P;
-            return info->filterValue;
-        }
+    // Correct filter value
+    info->filterValue =
+        info->kalmanGain * measuredValue + (1.0 - info->kalmanGain) * info->filterValue;
+    // Update estimate covariance
+    info->P = (1.0f - info->kalmanGain) * info->P;
+    return info->filterValue;
+}
 
-        void API_AVAILABLE(macos(10.15), ios(14))
-            UpdateTimestampPeriod(id<MTLDevice> device,
-                                  KalmanInfo* info,
-                                  MTLTimestamp* cpuTimestampStart,
-                                  MTLTimestamp* gpuTimestampStart,
-                                  float* timestampPeriod) {
-            // The filter value is converged to an optimal value when the kalman gain is less than
-            // 0.01. At this time, the weight of the measured value is too small to change the next
-            // filter value, the sampling and calculations do not need to continue anymore.
-            if (info->kalmanGain < 0.01f) {
-                return;
-            }
-
-            MTLTimestamp cpuTimestampEnd = 0, gpuTimestampEnd = 0;
-            [device sampleTimestamps:&cpuTimestampEnd gpuTimestamp:&gpuTimestampEnd];
-
-            // Update the timestamp start values when timestamp reset happens
-            if (cpuTimestampEnd < *cpuTimestampStart || gpuTimestampEnd < *gpuTimestampStart) {
-                *cpuTimestampStart = cpuTimestampEnd;
-                *gpuTimestampStart = gpuTimestampEnd;
-                return;
-            }
-
-            if (cpuTimestampEnd - *cpuTimestampStart >= kFilterIntervalInMs) {
-                // The measured timestamp period
-                float measurement = (cpuTimestampEnd - *cpuTimestampStart) /
-                                    static_cast<float>(gpuTimestampEnd - *gpuTimestampStart);
-
-                // Measurement update
-                *timestampPeriod = KalmanFilter(info, measurement);
-
-                *cpuTimestampStart = cpuTimestampEnd;
-                *gpuTimestampStart = gpuTimestampEnd;
-            }
-        }
-
-    }  // namespace
-
-    // static
-    ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
-                                              NSPRef<id<MTLDevice>> mtlDevice,
-                                              const DeviceDescriptor* descriptor) {
-        Ref<Device> device = AcquireRef(new Device(adapter, std::move(mtlDevice), descriptor));
-        DAWN_TRY(device->Initialize(descriptor));
-        return device;
+void API_AVAILABLE(macos(10.15), ios(14)) UpdateTimestampPeriod(id<MTLDevice> device,
+                                                                KalmanInfo* info,
+                                                                MTLTimestamp* cpuTimestampStart,
+                                                                MTLTimestamp* gpuTimestampStart,
+                                                                float* timestampPeriod) {
+    // The filter value is converged to an optimal value when the kalman gain is less than
+    // 0.01. At this time, the weight of the measured value is too small to change the next
+    // filter value, the sampling and calculations do not need to continue anymore.
+    if (info->kalmanGain < 0.01f) {
+        return;
     }
 
-    Device::Device(AdapterBase* adapter,
-                   NSPRef<id<MTLDevice>> mtlDevice,
-                   const DeviceDescriptor* descriptor)
-        : DeviceBase(adapter, descriptor), mMtlDevice(std::move(mtlDevice)), mCompletedSerial(0) {
+    MTLTimestamp cpuTimestampEnd = 0, gpuTimestampEnd = 0;
+    [device sampleTimestamps:&cpuTimestampEnd gpuTimestamp:&gpuTimestampEnd];
+
+    // Update the timestamp start values when timestamp reset happens
+    if (cpuTimestampEnd < *cpuTimestampStart || gpuTimestampEnd < *gpuTimestampStart) {
+        *cpuTimestampStart = cpuTimestampEnd;
+        *gpuTimestampStart = gpuTimestampEnd;
+        return;
     }
 
-    Device::~Device() {
-        Destroy();
+    if (cpuTimestampEnd - *cpuTimestampStart >= kFilterIntervalInMs) {
+        // The measured timestamp period
+        float measurement = (cpuTimestampEnd - *cpuTimestampStart) /
+                            static_cast<float>(gpuTimestampEnd - *gpuTimestampStart);
+
+        // Measurement update
+        *timestampPeriod = KalmanFilter(info, measurement);
+
+        *cpuTimestampStart = cpuTimestampEnd;
+        *gpuTimestampStart = gpuTimestampEnd;
+    }
+}
+
+}  // namespace
+
+// static
+ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+                                          NSPRef<id<MTLDevice>> mtlDevice,
+                                          const DeviceDescriptor* descriptor) {
+    Ref<Device> device = AcquireRef(new Device(adapter, std::move(mtlDevice), descriptor));
+    DAWN_TRY(device->Initialize(descriptor));
+    return device;
+}
+
+Device::Device(AdapterBase* adapter,
+               NSPRef<id<MTLDevice>> mtlDevice,
+               const DeviceDescriptor* descriptor)
+    : DeviceBase(adapter, descriptor), mMtlDevice(std::move(mtlDevice)), mCompletedSerial(0) {}
+
+Device::~Device() {
+    Destroy();
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+    InitTogglesFromDriver();
+
+    mCommandQueue.Acquire([*mMtlDevice newCommandQueue]);
+    if (mCommandQueue == nil) {
+        return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
     }
 
-    MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
-        InitTogglesFromDriver();
+    DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
 
-        mCommandQueue.Acquire([*mMtlDevice newCommandQueue]);
-        if (mCommandQueue == nil) {
-            return DAWN_INTERNAL_ERROR("Failed to allocate MTLCommandQueue.");
+    if (IsFeatureEnabled(Feature::TimestampQuery) &&
+        !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
+        // Make a best guess of timestamp period based on device vendor info, and converge it to
+        // an accurate value by the following calculations.
+        mTimestampPeriod = gpu_info::IsIntel(GetAdapter()->GetVendorId()) ? 83.333f : 1.0f;
+
+        // Initialize kalman filter parameters
+        mKalmanInfo = std::make_unique<KalmanInfo>();
+        mKalmanInfo->filterValue = 0.0f;
+        mKalmanInfo->kalmanGain = 0.5f;
+        mKalmanInfo->R = 0.0001f;  // The smaller this value is, the smaller the error of measured
+                                   // value is, the more we can trust the measured value.
+        mKalmanInfo->P = 1.0f;
+
+        if (@available(macos 10.15, iOS 14.0, *)) {
+            // Sample CPU timestamp and GPU timestamp for first time at device creation
+            [*mMtlDevice sampleTimestamps:&mCpuTimestamp gpuTimestamp:&mGpuTimestamp];
         }
-
-        DAWN_TRY(mCommandContext.PrepareNextCommandBuffer(*mCommandQueue));
-
-        if (IsFeatureEnabled(Feature::TimestampQuery) &&
-            !IsToggleEnabled(Toggle::DisableTimestampQueryConversion)) {
-            // Make a best guess of timestamp period based on device vendor info, and converge it to
-            // an accurate value by the following calculations.
-            mTimestampPeriod = gpu_info::IsIntel(GetAdapter()->GetVendorId()) ? 83.333f : 1.0f;
-
-            // Initialize kalman filter parameters
-            mKalmanInfo = std::make_unique<KalmanInfo>();
-            mKalmanInfo->filterValue = 0.0f;
-            mKalmanInfo->kalmanGain = 0.5f;
-            mKalmanInfo->R =
-                0.0001f;  // The smaller this value is, the smaller the error of measured value is,
-                          // the more we can trust the measured value.
-            mKalmanInfo->P = 1.0f;
-
-            if (@available(macos 10.15, iOS 14.0, *)) {
-                // Sample CPU timestamp and GPU timestamp for first time at device creation
-                [*mMtlDevice sampleTimestamps:&mCpuTimestamp gpuTimestamp:&mGpuTimestamp];
-            }
-        }
-
-        return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
     }
 
-    void Device::InitTogglesFromDriver() {
-        {
-            bool haveStoreAndMSAAResolve = false;
+    return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
+}
+
+void Device::InitTogglesFromDriver() {
+    {
+        bool haveStoreAndMSAAResolve = false;
 #if defined(DAWN_PLATFORM_MACOS)
-            if (@available(macOS 10.12, *)) {
-                haveStoreAndMSAAResolve =
-                    [*mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
-            }
-#elif defined(DAWN_PLATFORM_IOS)
+        if (@available(macOS 10.12, *)) {
             haveStoreAndMSAAResolve =
-                [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
+                [*mMtlDevice supportsFeatureSet:MTLFeatureSet_macOS_GPUFamily1_v2];
+        }
+#elif defined(DAWN_PLATFORM_IOS)
+        haveStoreAndMSAAResolve = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v2];
 #endif
-            // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
-            SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
+        // On tvOS, we would need MTLFeatureSet_tvOS_GPUFamily2_v1.
+        SetToggle(Toggle::EmulateStoreAndMSAAResolve, !haveStoreAndMSAAResolve);
 
-            bool haveSamplerCompare = true;
+        bool haveSamplerCompare = true;
 #if defined(DAWN_PLATFORM_IOS)
-            haveSamplerCompare = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+        haveSamplerCompare = [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
 #endif
-            // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
-            SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
+        // TODO(crbug.com/dawn/342): Investigate emulation -- possibly expensive.
+        SetToggle(Toggle::MetalDisableSamplerCompare, !haveSamplerCompare);
 
-            bool haveBaseVertexBaseInstance = true;
+        bool haveBaseVertexBaseInstance = true;
 #if defined(DAWN_PLATFORM_IOS)
-            haveBaseVertexBaseInstance =
-                [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
+        haveBaseVertexBaseInstance =
+            [*mMtlDevice supportsFeatureSet:MTLFeatureSet_iOS_GPUFamily3_v1];
 #endif
-            // TODO(crbug.com/dawn/343): Investigate emulation.
-            SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
-            SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
+        // TODO(crbug.com/dawn/343): Investigate emulation.
+        SetToggle(Toggle::DisableBaseVertex, !haveBaseVertexBaseInstance);
+        SetToggle(Toggle::DisableBaseInstance, !haveBaseVertexBaseInstance);
+    }
+
+    // Vertex buffer robustness is implemented by using programmable vertex pulling. Enable
+    // that code path if it isn't explicitly disabled.
+    if (IsRobustnessEnabled()) {
+        SetToggle(Toggle::MetalEnableVertexPulling, true);
+    }
+
+    // TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
+    SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
+
+    uint32_t deviceId = GetAdapter()->GetDeviceId();
+    uint32_t vendorId = GetAdapter()->GetVendorId();
+
+    // TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
+    // creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
+    // create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
+    if (@available(macOS 10.15, iOS 14.0, *)) {
+        bool useSharedMode = gpu_info::IsIntel(vendorId);
+        SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
+    }
+
+    // Rendering R8Unorm and RG8Unorm to small mip doesn't work properly on Intel.
+    // TODO(crbug.com/dawn/1071): Tighten the workaround when this issue is fixed.
+    if (gpu_info::IsIntel(vendorId)) {
+        SetToggle(Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture, true);
+    }
+
+    // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
+    // shader provided. Create a placeholder fragment shader module to work around this issue.
+    if (gpu_info::IsIntel(vendorId)) {
+        bool usePlaceholderFragmentShader = true;
+        if (gpu_info::IsSkylake(deviceId)) {
+            usePlaceholderFragmentShader = false;
         }
+        SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, usePlaceholderFragmentShader);
+    }
+}
 
-        // Vertex buffer robustness is implemented by using programmable vertex pulling. Enable
-        // that code path if it isn't explicitly disabled.
-        if (IsRobustnessEnabled()) {
-            SetToggle(Toggle::MetalEnableVertexPulling, true);
-        }
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+    const BindGroupDescriptor* descriptor) {
+    return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+    return Buffer::Create(this, descriptor);
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+    CommandEncoder* encoder,
+    const CommandBufferDescriptor* descriptor) {
+    return CommandBuffer::Create(encoder, descriptor);
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+    const ComputePipelineDescriptor* descriptor) {
+    return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+    const PipelineLayoutDescriptor* descriptor) {
+    return PipelineLayout::Create(this, descriptor);
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+    return QuerySet::Create(this, descriptor);
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+    const RenderPipelineDescriptor* descriptor) {
+    return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+    return Sampler::Create(this, descriptor);
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+    const ShaderModuleDescriptor* descriptor,
+    ShaderModuleParseResult* parseResult) {
+    return ShaderModule::Create(this, descriptor, parseResult);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+    const SwapChainDescriptor* descriptor) {
+    return OldSwapChain::Create(this, descriptor);
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+    Surface* surface,
+    NewSwapChainBase* previousSwapChain,
+    const SwapChainDescriptor* descriptor) {
+    return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+    return Texture::Create(this, descriptor);
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+    TextureBase* texture,
+    const TextureViewDescriptor* descriptor) {
+    return TextureView::Create(texture, descriptor);
+}
+void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                WGPUCreateComputePipelineAsyncCallback callback,
+                                                void* userdata) {
+    ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+}
+void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                               WGPUCreateRenderPipelineAsyncCallback callback,
+                                               void* userdata) {
+    RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+}
 
-        // TODO(crbug.com/dawn/846): tighten this workaround when the driver bug is fixed.
-        SetToggle(Toggle::AlwaysResolveIntoZeroLevelAndLayer, true);
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+    uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
+    if (frontendCompletedSerial > mCompletedSerial) {
+        // Sometimes we increase the serials, in which case the completed serial in
+        // the device base will surpass the completed serial we have in the metal backend, so we
+        // must update ours when we see that the completed serial from device base has
+        // increased.
+        mCompletedSerial = frontendCompletedSerial;
+    }
+    return ExecutionSerial(mCompletedSerial.load());
+}
 
-        uint32_t deviceId = GetAdapter()->GetDeviceId();
-        uint32_t vendorId = GetAdapter()->GetVendorId();
+MaybeError Device::TickImpl() {
+    DAWN_TRY(SubmitPendingCommandBuffer());
 
-        // TODO(crbug.com/dawn/847): Use MTLStorageModeShared instead of MTLStorageModePrivate when
-        // creating MTLCounterSampleBuffer in QuerySet on Intel platforms, otherwise it fails to
-        // create the buffer. Change to use MTLStorageModePrivate when the bug is fixed.
-        if (@available(macOS 10.15, iOS 14.0, *)) {
-            bool useSharedMode = gpu_info::IsIntel(vendorId);
-            SetToggle(Toggle::MetalUseSharedModeForCounterSampleBuffer, useSharedMode);
-        }
-
-        // Rendering R8Unorm and RG8Unorm to small mip doesn't work properly on Intel.
-        // TODO(crbug.com/dawn/1071): Tighten the workaround when this issue is fixed.
-        if (gpu_info::IsIntel(vendorId)) {
-            SetToggle(Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture, true);
-        }
-
-        // On some Intel GPU vertex only render pipeline get wrong depth result if no fragment
-        // shader provided. Create a placeholder fragment shader module to work around this issue.
-        if (gpu_info::IsIntel(vendorId)) {
-            bool usePlaceholderFragmentShader = true;
-            if (gpu_info::IsSkylake(deviceId)) {
-                usePlaceholderFragmentShader = false;
-            }
-            SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline,
-                      usePlaceholderFragmentShader);
+    // Just run timestamp period calculation when timestamp feature is enabled.
+    if (IsFeatureEnabled(Feature::TimestampQuery)) {
+        if (@available(macos 10.15, iOS 14.0, *)) {
+            UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp, &mGpuTimestamp,
+                                  &mTimestampPeriod);
         }
     }
 
-    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
-        const BindGroupDescriptor* descriptor) {
-        return BindGroup::Create(this, descriptor);
-    }
-    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
-    }
-    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
-        return Buffer::Create(this, descriptor);
-    }
-    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
-        CommandEncoder* encoder,
-        const CommandBufferDescriptor* descriptor) {
-        return CommandBuffer::Create(encoder, descriptor);
-    }
-    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
-        const ComputePipelineDescriptor* descriptor) {
-        return ComputePipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
-        const PipelineLayoutDescriptor* descriptor) {
-        return PipelineLayout::Create(this, descriptor);
-    }
-    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
-        const QuerySetDescriptor* descriptor) {
-        return QuerySet::Create(this, descriptor);
-    }
-    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
-        const RenderPipelineDescriptor* descriptor) {
-        return RenderPipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
-        return Sampler::Create(this, descriptor);
-    }
-    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
-        const ShaderModuleDescriptor* descriptor,
-        ShaderModuleParseResult* parseResult) {
-        return ShaderModule::Create(this, descriptor, parseResult);
-    }
-    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
-        const SwapChainDescriptor* descriptor) {
-        return OldSwapChain::Create(this, descriptor);
-    }
-    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
-        Surface* surface,
-        NewSwapChainBase* previousSwapChain,
-        const SwapChainDescriptor* descriptor) {
-        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
-    }
-    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
-        return Texture::Create(this, descriptor);
-    }
-    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
-        TextureBase* texture,
-        const TextureViewDescriptor* descriptor) {
-        return TextureView::Create(texture, descriptor);
-    }
-    void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
-                                                    WGPUCreateComputePipelineAsyncCallback callback,
-                                                    void* userdata) {
-        ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
-    }
-    void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
-                                                   WGPUCreateRenderPipelineAsyncCallback callback,
-                                                   void* userdata) {
-        RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
-    }
+    return {};
+}
 
-    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
-        uint64_t frontendCompletedSerial{GetCompletedCommandSerial()};
-        if (frontendCompletedSerial > mCompletedSerial) {
-            // sometimes we increase the serials, in which case the completed serial in
-            // the device base will surpass the completed serial we have in the metal backend, so we
-            // must update ours when we see that the completed serial from device base has
-            // increased.
-            mCompletedSerial = frontendCompletedSerial;
-        }
-        return ExecutionSerial(mCompletedSerial.load());
-    }
+id<MTLDevice> Device::GetMTLDevice() {
+    return mMtlDevice.Get();
+}
 
-    MaybeError Device::TickImpl() {
-        DAWN_TRY(SubmitPendingCommandBuffer());
+id<MTLCommandQueue> Device::GetMTLQueue() {
+    return mCommandQueue.Get();
+}
 
-        // Just run timestamp period calculation when timestamp feature is enabled.
-        if (IsFeatureEnabled(Feature::TimestampQuery)) {
-            if (@available(macos 10.15, iOS 14.0, *)) {
-                UpdateTimestampPeriod(GetMTLDevice(), mKalmanInfo.get(), &mCpuTimestamp,
-                                      &mGpuTimestamp, &mTimestampPeriod);
-            }
-        }
+CommandRecordingContext* Device::GetPendingCommandContext() {
+    mCommandContext.MarkUsed();
+    return &mCommandContext;
+}
 
+MaybeError Device::SubmitPendingCommandBuffer() {
+    if (!mCommandContext.WasUsed()) {
         return {};
     }
 
-    id<MTLDevice> Device::GetMTLDevice() {
-        return mMtlDevice.Get();
+    IncrementLastSubmittedCommandSerial();
+
+    // Acquire the pending command buffer, which is retained. It must be released later.
+    NSPRef<id<MTLCommandBuffer>> pendingCommands = mCommandContext.AcquireCommands();
+
+    // Replace mLastSubmittedCommands with the mutex held so we avoid races between the
+    // schedule handler and this code.
+    {
+        std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+        mLastSubmittedCommands = pendingCommands;
     }
 
-    id<MTLCommandQueue> Device::GetMTLQueue() {
-        return mCommandQueue.Get();
-    }
-
-    CommandRecordingContext* Device::GetPendingCommandContext() {
-        mCommandContext.MarkUsed();
-        return &mCommandContext;
-    }
-
-    MaybeError Device::SubmitPendingCommandBuffer() {
-        if (!mCommandContext.WasUsed()) {
-            return {};
+    // Make a local copy of the pointer to the commands because it's not clear how ObjC blocks
+    // handle types with copy / move constructors being referenced in the block.
+    id<MTLCommandBuffer> pendingCommandsPointer = pendingCommands.Get();
+    [*pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
+        // This is DRF because we hold the mutex for mLastSubmittedCommands and pendingCommands
+        // is a local value (and not the member itself).
+        std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+        if (this->mLastSubmittedCommands.Get() == pendingCommandsPointer) {
+            this->mLastSubmittedCommands = nullptr;
         }
+    }];
 
-        IncrementLastSubmittedCommandSerial();
+    // Update the completed serial once the completed handler is fired. Make a local copy of
+    // mLastSubmittedSerial so it is captured by value.
+    ExecutionSerial pendingSerial = GetLastSubmittedCommandSerial();
+    // This ObjC block runs on a different thread.
+    [*pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
+        TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+                               uint64_t(pendingSerial));
+        ASSERT(uint64_t(pendingSerial) > mCompletedSerial.load());
+        this->mCompletedSerial = uint64_t(pendingSerial);
+    }];
 
-        // Acquire the pending command buffer, which is retained. It must be released later.
-        NSPRef<id<MTLCommandBuffer>> pendingCommands = mCommandContext.AcquireCommands();
+    TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
+                             uint64_t(pendingSerial));
+    [*pendingCommands commit];
 
-        // Replace mLastSubmittedCommands with the mutex held so we avoid races between the
-        // schedule handler and this code.
-        {
-            std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
-            mLastSubmittedCommands = pendingCommands;
-        }
+    return mCommandContext.PrepareNextCommandBuffer(*mCommandQueue);
+}
 
-        // Make a local copy of the pointer to the commands because it's not clear how ObjC blocks
-        // handle types with copy / move constructors being referenced in the block..
-        id<MTLCommandBuffer> pendingCommandsPointer = pendingCommands.Get();
-        [*pendingCommands addScheduledHandler:^(id<MTLCommandBuffer>) {
-            // This is DRF because we hold the mutex for mLastSubmittedCommands and pendingCommands
-            // is a local value (and not the member itself).
-            std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
-            if (this->mLastSubmittedCommands.Get() == pendingCommandsPointer) {
-                this->mLastSubmittedCommands = nullptr;
-            }
-        }];
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+    std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+    DAWN_TRY(stagingBuffer->Initialize());
+    return std::move(stagingBuffer);
+}
 
-        // Update the completed serial once the completed handler is fired. Make a local copy of
-        // mLastSubmittedSerial so it is captured by value.
-        ExecutionSerial pendingSerial = GetLastSubmittedCommandSerial();
-        // this ObjC block runs on a different thread
-        [*pendingCommands addCompletedHandler:^(id<MTLCommandBuffer>) {
-            TRACE_EVENT_ASYNC_END0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
-                                   uint64_t(pendingSerial));
-            ASSERT(uint64_t(pendingSerial) > mCompletedSerial.load());
-            this->mCompletedSerial = uint64_t(pendingSerial);
-        }];
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) {
+    // Metal validation layers forbid 0-sized copies, assert it is skipped prior to calling
+    // this function.
+    ASSERT(size != 0);
 
-        TRACE_EVENT_ASYNC_BEGIN0(GetPlatform(), GPUWork, "DeviceMTL::SubmitPendingCommandBuffer",
-                                 uint64_t(pendingSerial));
-        [*pendingCommands commit];
+    ToBackend(destination)
+        ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset, size);
 
-        return mCommandContext.PrepareNextCommandBuffer(*mCommandQueue);
+    id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
+    id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
+    [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
+                                                sourceOffset:sourceOffset
+                                                    toBuffer:buffer
+                                           destinationOffset:destinationOffset
+                                                        size:size];
+    return {};
+}
+
+// In Metal we don't write from the CPU to the texture directly which can be done using the
+// replaceRegion function, because the function requires a non-private storage mode and Dawn
+// sets the private storage mode by default for all textures except IOSurfaces on macOS.
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& dataLayout,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) {
+    Texture* texture = ToBackend(dst->texture.Get());
+    EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst, copySizePixels);
+
+    RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
+                              source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
+                              dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
+                              dst->aspect, copySizePixels);
+    return {};
+}
+
+Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
+                                                    IOSurfaceRef ioSurface) {
+    const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+    if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+        return nullptr;
+    }
+    if (ConsumedError(ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface))) {
+        return nullptr;
     }
 
-    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
-        std::unique_ptr<StagingBufferBase> stagingBuffer =
-            std::make_unique<StagingBuffer>(size, this);
-        DAWN_TRY(stagingBuffer->Initialize());
-        return std::move(stagingBuffer);
+    Ref<Texture> result;
+    if (ConsumedError(Texture::CreateFromIOSurface(this, descriptor, ioSurface), &result)) {
+        return nullptr;
+    }
+    return result;
+}
+
+void Device::WaitForCommandsToBeScheduled() {
+    if (ConsumedError(SubmitPendingCommandBuffer())) {
+        return;
     }
 
-    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
-                                               uint64_t sourceOffset,
-                                               BufferBase* destination,
-                                               uint64_t destinationOffset,
-                                               uint64_t size) {
-        // Metal validation layers forbid  0-sized copies, assert it is skipped prior to calling
-        // this function.
-        ASSERT(size != 0);
-
-        ToBackend(destination)
-            ->EnsureDataInitializedAsDestination(GetPendingCommandContext(), destinationOffset,
-                                                 size);
-
-        id<MTLBuffer> uploadBuffer = ToBackend(source)->GetBufferHandle();
-        id<MTLBuffer> buffer = ToBackend(destination)->GetMTLBuffer();
-        [GetPendingCommandContext()->EnsureBlit() copyFromBuffer:uploadBuffer
-                                                    sourceOffset:sourceOffset
-                                                        toBuffer:buffer
-                                               destinationOffset:destinationOffset
-                                                            size:size];
-        return {};
+    // Only lock the object while we take a reference to it, otherwise we could block further
+    // progress if the driver calls the scheduled handler (which also acquires the lock) before
+    // finishing the waitUntilScheduled.
+    NSPRef<id<MTLCommandBuffer>> lastSubmittedCommands;
+    {
+        std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
+        lastSubmittedCommands = mLastSubmittedCommands;
     }
+    [*lastSubmittedCommands waitUntilScheduled];
+}
 
-    // In Metal we don't write from the CPU to the texture directly which can be done using the
-    // replaceRegion function, because the function requires a non-private storage mode and Dawn
-    // sets the private storage mode by default for all textures except IOSurfaces on macOS.
-    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
-                                                const TextureDataLayout& dataLayout,
-                                                TextureCopy* dst,
-                                                const Extent3D& copySizePixels) {
-        Texture* texture = ToBackend(dst->texture.Get());
-        EnsureDestinationTextureInitialized(GetPendingCommandContext(), texture, *dst,
-                                            copySizePixels);
+MaybeError Device::WaitForIdleForDestruction() {
+    // Forget all pending commands.
+    mCommandContext.AcquireCommands();
+    DAWN_TRY(CheckPassedSerials());
 
-        RecordCopyBufferToTexture(GetPendingCommandContext(), ToBackend(source)->GetBufferHandle(),
-                                  source->GetSize(), dataLayout.offset, dataLayout.bytesPerRow,
-                                  dataLayout.rowsPerImage, texture, dst->mipLevel, dst->origin,
-                                  dst->aspect, copySizePixels);
-        return {};
-    }
-
-    Ref<Texture> Device::CreateTextureWrappingIOSurface(const ExternalImageDescriptor* descriptor,
-                                                        IOSurfaceRef ioSurface) {
-        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
-        if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
-            return nullptr;
-        }
-        if (ConsumedError(ValidateIOSurfaceCanBeWrapped(this, textureDescriptor, ioSurface))) {
-            return nullptr;
-        }
-
-        Ref<Texture> result;
-        if (ConsumedError(Texture::CreateFromIOSurface(this, descriptor, ioSurface), &result)) {
-            return nullptr;
-        }
-        return result;
-    }
-
-    void Device::WaitForCommandsToBeScheduled() {
-        if (ConsumedError(SubmitPendingCommandBuffer())) {
-            return;
-        }
-
-        // Only lock the object while we take a reference to it, otherwise we could block further
-        // progress if the driver calls the scheduled handler (which also acquires the lock) before
-        // finishing the waitUntilScheduled.
-        NSPRef<id<MTLCommandBuffer>> lastSubmittedCommands;
-        {
-            std::lock_guard<std::mutex> lock(mLastSubmittedCommandsMutex);
-            lastSubmittedCommands = mLastSubmittedCommands;
-        }
-        [*lastSubmittedCommands waitUntilScheduled];
-    }
-
-    MaybeError Device::WaitForIdleForDestruction() {
-        // Forget all pending commands.
-        mCommandContext.AcquireCommands();
+    // Wait for all commands to be finished so we can free resources
+    while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
+        usleep(100);
         DAWN_TRY(CheckPassedSerials());
-
-        // Wait for all commands to be finished so we can free resources
-        while (GetCompletedCommandSerial() != GetLastSubmittedCommandSerial()) {
-            usleep(100);
-            DAWN_TRY(CheckPassedSerials());
-        }
-
-        return {};
     }
 
-    void Device::DestroyImpl() {
-        ASSERT(GetState() == State::Disconnected);
+    return {};
+}
 
-        // Forget all pending commands.
-        mCommandContext.AcquireCommands();
+void Device::DestroyImpl() {
+    ASSERT(GetState() == State::Disconnected);
 
-        mCommandQueue = nullptr;
-        mMtlDevice = nullptr;
-    }
+    // Forget all pending commands.
+    mCommandContext.AcquireCommands();
 
-    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
-        return 1;
-    }
+    mCommandQueue = nullptr;
+    mMtlDevice = nullptr;
+}
 
-    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
-        return 1;
-    }
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+    return 1;
+}
 
-    float Device::GetTimestampPeriodInNS() const {
-        return mTimestampPeriod;
-    }
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+    return 1;
+}
+
+float Device::GetTimestampPeriodInNS() const {
+    return mTimestampPeriod;
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/Forward.h b/src/dawn/native/metal/Forward.h
index ab85da5..44f7804 100644
--- a/src/dawn/native/metal/Forward.h
+++ b/src/dawn/native/metal/Forward.h
@@ -19,49 +19,49 @@
 
 namespace dawn::native::metal {
 
-    class Adapter;
-    class BindGroup;
-    class BindGroupLayout;
-    class Buffer;
-    class CommandBuffer;
-    class ComputePipeline;
-    class Device;
-    class Framebuffer;
-    class PipelineLayout;
-    class QuerySet;
-    class Queue;
-    class RenderPipeline;
-    class Sampler;
-    class ShaderModule;
-    class StagingBuffer;
-    class SwapChain;
-    class Texture;
-    class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class Framebuffer;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class StagingBuffer;
+class SwapChain;
+class Texture;
+class TextureView;
 
-    struct MetalBackendTraits {
-        using AdapterType = Adapter;
-        using BindGroupType = BindGroup;
-        using BindGroupLayoutType = BindGroupLayout;
-        using BufferType = Buffer;
-        using CommandBufferType = CommandBuffer;
-        using ComputePipelineType = ComputePipeline;
-        using DeviceType = Device;
-        using PipelineLayoutType = PipelineLayout;
-        using QuerySetType = QuerySet;
-        using QueueType = Queue;
-        using RenderPipelineType = RenderPipeline;
-        using SamplerType = Sampler;
-        using ShaderModuleType = ShaderModule;
-        using StagingBufferType = StagingBuffer;
-        using SwapChainType = SwapChain;
-        using TextureType = Texture;
-        using TextureViewType = TextureView;
-    };
+struct MetalBackendTraits {
+    using AdapterType = Adapter;
+    using BindGroupType = BindGroup;
+    using BindGroupLayoutType = BindGroupLayout;
+    using BufferType = Buffer;
+    using CommandBufferType = CommandBuffer;
+    using ComputePipelineType = ComputePipeline;
+    using DeviceType = Device;
+    using PipelineLayoutType = PipelineLayout;
+    using QuerySetType = QuerySet;
+    using QueueType = Queue;
+    using RenderPipelineType = RenderPipeline;
+    using SamplerType = Sampler;
+    using ShaderModuleType = ShaderModule;
+    using StagingBufferType = StagingBuffer;
+    using SwapChainType = SwapChain;
+    using TextureType = Texture;
+    using TextureViewType = TextureView;
+};
 
-    template <typename T>
-    auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
-        return ToBackendBase<MetalBackendTraits>(common);
-    }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<MetalBackendTraits>(common)) {
+    return ToBackendBase<MetalBackendTraits>(common);
+}
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/MetalBackend.mm b/src/dawn/native/metal/MetalBackend.mm
index c0214e5..f6cbbdc 100644
--- a/src/dawn/native/metal/MetalBackend.mm
+++ b/src/dawn/native/metal/MetalBackend.mm
@@ -22,28 +22,25 @@
 
 namespace dawn::native::metal {
 
-    id<MTLDevice> GetMetalDevice(WGPUDevice device) {
-        return ToBackend(FromAPI(device))->GetMTLDevice();
-    }
+id<MTLDevice> GetMetalDevice(WGPUDevice device) {
+    return ToBackend(FromAPI(device))->GetMTLDevice();
+}
 
-    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
-        : AdapterDiscoveryOptionsBase(WGPUBackendType_Metal) {
-    }
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+    : AdapterDiscoveryOptionsBase(WGPUBackendType_Metal) {}
 
-    ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
-        : ExternalImageDescriptor(ExternalImageType::IOSurface) {
-    }
+ExternalImageDescriptorIOSurface::ExternalImageDescriptorIOSurface()
+    : ExternalImageDescriptor(ExternalImageType::IOSurface) {}
 
-    WGPUTexture WrapIOSurface(WGPUDevice device,
-                              const ExternalImageDescriptorIOSurface* cDescriptor) {
-        Device* backendDevice = ToBackend(FromAPI(device));
-        Ref<TextureBase> texture =
-            backendDevice->CreateTextureWrappingIOSurface(cDescriptor, cDescriptor->ioSurface);
-        return ToAPI(texture.Detach());
-    }
+WGPUTexture WrapIOSurface(WGPUDevice device, const ExternalImageDescriptorIOSurface* cDescriptor) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+    Ref<TextureBase> texture =
+        backendDevice->CreateTextureWrappingIOSurface(cDescriptor, cDescriptor->ioSurface);
+    return ToAPI(texture.Detach());
+}
 
-    void WaitForCommandsToBeScheduled(WGPUDevice device) {
-        ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
-    }
+void WaitForCommandsToBeScheduled(WGPUDevice device) {
+    ToBackend(FromAPI(device))->WaitForCommandsToBeScheduled();
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/PipelineLayoutMTL.h b/src/dawn/native/metal/PipelineLayoutMTL.h
index 9e2ee15..1651954 100644
--- a/src/dawn/native/metal/PipelineLayoutMTL.h
+++ b/src/dawn/native/metal/PipelineLayoutMTL.h
@@ -25,37 +25,36 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    // The number of Metal buffers usable by applications in general
-    static constexpr size_t kMetalBufferTableSize = 31;
-    // The Metal buffer slot that Dawn reserves for its own use to pass more data to shaders
-    static constexpr size_t kBufferLengthBufferSlot = kMetalBufferTableSize - 1;
-    // The number of Metal buffers Dawn can use in a generic way (i.e. that aren't reserved)
-    static constexpr size_t kGenericMetalBufferSlots = kMetalBufferTableSize - 1;
+// The number of Metal buffers usable by applications in general
+static constexpr size_t kMetalBufferTableSize = 31;
+// The Metal buffer slot that Dawn reserves for its own use to pass more data to shaders
+static constexpr size_t kBufferLengthBufferSlot = kMetalBufferTableSize - 1;
+// The number of Metal buffers Dawn can use in a generic way (i.e. that aren't reserved)
+static constexpr size_t kGenericMetalBufferSlots = kMetalBufferTableSize - 1;
 
-    static constexpr BindGroupIndex kPullingBufferBindingSet = BindGroupIndex(kMaxBindGroups);
+static constexpr BindGroupIndex kPullingBufferBindingSet = BindGroupIndex(kMaxBindGroups);
 
-    class PipelineLayout final : public PipelineLayoutBase {
-      public:
-        static Ref<PipelineLayout> Create(Device* device,
-                                          const PipelineLayoutDescriptor* descriptor);
+class PipelineLayout final : public PipelineLayoutBase {
+  public:
+    static Ref<PipelineLayout> Create(Device* device, const PipelineLayoutDescriptor* descriptor);
 
-        using BindingIndexInfo =
-            ityp::array<BindGroupIndex,
-                        ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
-                        kMaxBindGroups>;
-        const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
+    using BindingIndexInfo =
+        ityp::array<BindGroupIndex,
+                    ityp::stack_vec<BindingIndex, uint32_t, kMaxOptimalBindingsPerGroup>,
+                    kMaxBindGroups>;
+    const BindingIndexInfo& GetBindingIndexInfo(SingleShaderStage stage) const;
 
-        // The number of Metal vertex stage buffers used for the whole pipeline layout.
-        uint32_t GetBufferBindingCount(SingleShaderStage stage);
+    // The number of Metal vertex stage buffers used for the whole pipeline layout.
+    uint32_t GetBufferBindingCount(SingleShaderStage stage);
 
-      private:
-        PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
-        ~PipelineLayout() override = default;
-        PerStage<BindingIndexInfo> mIndexInfo;
-        PerStage<uint32_t> mBufferBindingCount;
-    };
+  private:
+    PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+    ~PipelineLayout() override = default;
+    PerStage<BindingIndexInfo> mIndexInfo;
+    PerStage<uint32_t> mBufferBindingCount;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/PipelineLayoutMTL.mm b/src/dawn/native/metal/PipelineLayoutMTL.mm
index 5f789ea..2dfad2e 100644
--- a/src/dawn/native/metal/PipelineLayoutMTL.mm
+++ b/src/dawn/native/metal/PipelineLayoutMTL.mm
@@ -20,63 +20,63 @@
 
 namespace dawn::native::metal {
 
-    // static
-    Ref<PipelineLayout> PipelineLayout::Create(Device* device,
-                                               const PipelineLayoutDescriptor* descriptor) {
-        return AcquireRef(new PipelineLayout(device, descriptor));
-    }
+// static
+Ref<PipelineLayout> PipelineLayout::Create(Device* device,
+                                           const PipelineLayoutDescriptor* descriptor) {
+    return AcquireRef(new PipelineLayout(device, descriptor));
+}
 
-    PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
-        : PipelineLayoutBase(device, descriptor) {
-        // Each stage has its own numbering namespace in CompilerMSL.
-        for (auto stage : IterateStages(kAllStages)) {
-            uint32_t bufferIndex = 0;
-            uint32_t samplerIndex = 0;
-            uint32_t textureIndex = 0;
+PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+    : PipelineLayoutBase(device, descriptor) {
+    // Each stage has its own numbering namespace in CompilerMSL.
+    for (auto stage : IterateStages(kAllStages)) {
+        uint32_t bufferIndex = 0;
+        uint32_t samplerIndex = 0;
+        uint32_t textureIndex = 0;
 
-            for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
-                mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
+        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+            mIndexInfo[stage][group].resize(GetBindGroupLayout(group)->GetBindingCount());
 
-                for (BindingIndex bindingIndex{0};
-                     bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
-                    const BindingInfo& bindingInfo =
-                        GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
-                    if (!(bindingInfo.visibility & StageBit(stage))) {
-                        continue;
-                    }
+            for (BindingIndex bindingIndex{0};
+                 bindingIndex < GetBindGroupLayout(group)->GetBindingCount(); ++bindingIndex) {
+                const BindingInfo& bindingInfo =
+                    GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+                if (!(bindingInfo.visibility & StageBit(stage))) {
+                    continue;
+                }
 
-                    switch (bindingInfo.bindingType) {
-                        case BindingInfoType::Buffer:
-                            mIndexInfo[stage][group][bindingIndex] = bufferIndex;
-                            bufferIndex++;
-                            break;
+                switch (bindingInfo.bindingType) {
+                    case BindingInfoType::Buffer:
+                        mIndexInfo[stage][group][bindingIndex] = bufferIndex;
+                        bufferIndex++;
+                        break;
 
-                        case BindingInfoType::Sampler:
-                            mIndexInfo[stage][group][bindingIndex] = samplerIndex;
-                            samplerIndex++;
-                            break;
+                    case BindingInfoType::Sampler:
+                        mIndexInfo[stage][group][bindingIndex] = samplerIndex;
+                        samplerIndex++;
+                        break;
 
-                        case BindingInfoType::Texture:
-                        case BindingInfoType::StorageTexture:
-                        case BindingInfoType::ExternalTexture:
-                            mIndexInfo[stage][group][bindingIndex] = textureIndex;
-                            textureIndex++;
-                            break;
-                    }
+                    case BindingInfoType::Texture:
+                    case BindingInfoType::StorageTexture:
+                    case BindingInfoType::ExternalTexture:
+                        mIndexInfo[stage][group][bindingIndex] = textureIndex;
+                        textureIndex++;
+                        break;
                 }
             }
-
-            mBufferBindingCount[stage] = bufferIndex;
         }
-    }
 
-    const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(
-        SingleShaderStage stage) const {
-        return mIndexInfo[stage];
+        mBufferBindingCount[stage] = bufferIndex;
     }
+}
 
-    uint32_t PipelineLayout::GetBufferBindingCount(SingleShaderStage stage) {
-        return mBufferBindingCount[stage];
-    }
+const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo(
+    SingleShaderStage stage) const {
+    return mIndexInfo[stage];
+}
+
+uint32_t PipelineLayout::GetBufferBindingCount(SingleShaderStage stage) {
+    return mBufferBindingCount[stage];
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/QuerySetMTL.h b/src/dawn/native/metal/QuerySetMTL.h
index 67cda0f..9f0e6bb 100644
--- a/src/dawn/native/metal/QuerySetMTL.h
+++ b/src/dawn/native/metal/QuerySetMTL.h
@@ -23,31 +23,31 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    class QuerySet final : public QuerySetBase {
-      public:
-        static ResultOrError<Ref<QuerySet>> Create(Device* device,
-                                                   const QuerySetDescriptor* descriptor);
+class QuerySet final : public QuerySetBase {
+  public:
+    static ResultOrError<Ref<QuerySet>> Create(Device* device,
+                                               const QuerySetDescriptor* descriptor);
 
-        id<MTLBuffer> GetVisibilityBuffer() const;
-        id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
-            API_AVAILABLE(macos(10.15), ios(14.0));
+    id<MTLBuffer> GetVisibilityBuffer() const;
+    id<MTLCounterSampleBuffer> GetCounterSampleBuffer() const
+        API_AVAILABLE(macos(10.15), ios(14.0));
 
-      private:
-        ~QuerySet() override;
-        using QuerySetBase::QuerySetBase;
-        MaybeError Initialize();
+  private:
+    ~QuerySet() override;
+    using QuerySetBase::QuerySetBase;
+    MaybeError Initialize();
 
-        // Dawn API
-        void DestroyImpl() override;
+    // Dawn API
+    void DestroyImpl() override;
 
-        NSPRef<id<MTLBuffer>> mVisibilityBuffer;
-        // Note that mCounterSampleBuffer cannot be an NSRef because the API_AVAILABLE macros don't
-        // propagate nicely through templates.
-        id<MTLCounterSampleBuffer> mCounterSampleBuffer API_AVAILABLE(macos(10.15),
-                                                                      ios(14.0)) = nullptr;
-    };
+    NSPRef<id<MTLBuffer>> mVisibilityBuffer;
+    // Note that mCounterSampleBuffer cannot be an NSRef because the API_AVAILABLE macros don't
+    // propagate nicely through templates.
+    id<MTLCounterSampleBuffer> mCounterSampleBuffer API_AVAILABLE(macos(10.15),
+                                                                  ios(14.0)) = nullptr;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/QuerySetMTL.mm b/src/dawn/native/metal/QuerySetMTL.mm
index 4882fee..1e19963 100644
--- a/src/dawn/native/metal/QuerySetMTL.mm
+++ b/src/dawn/native/metal/QuerySetMTL.mm
@@ -20,120 +20,119 @@
 
 namespace dawn::native::metal {
 
-    namespace {
+namespace {
 
-        ResultOrError<id<MTLCounterSampleBuffer>> CreateCounterSampleBuffer(
-            Device* device,
-            MTLCommonCounterSet counterSet,
-            uint32_t count) API_AVAILABLE(macos(10.15), ios(14.0)) {
-            NSRef<MTLCounterSampleBufferDescriptor> descriptorRef =
-                AcquireNSRef([MTLCounterSampleBufferDescriptor new]);
-            MTLCounterSampleBufferDescriptor* descriptor = descriptorRef.Get();
+ResultOrError<id<MTLCounterSampleBuffer>> CreateCounterSampleBuffer(Device* device,
+                                                                    MTLCommonCounterSet counterSet,
+                                                                    uint32_t count)
+    API_AVAILABLE(macos(10.15), ios(14.0)) {
+    NSRef<MTLCounterSampleBufferDescriptor> descriptorRef =
+        AcquireNSRef([MTLCounterSampleBufferDescriptor new]);
+    MTLCounterSampleBufferDescriptor* descriptor = descriptorRef.Get();
 
-            // To determine which counters are available from a device, we need to iterate through
-            // the counterSets property of a MTLDevice. Then configure which counters will be
-            // sampled by creating a MTLCounterSampleBufferDescriptor and setting its counterSet
-            // property to the matched one of the available set.
-            for (id<MTLCounterSet> set in device->GetMTLDevice().counterSets) {
-                if ([set.name isEqualToString:counterSet]) {
-                    descriptor.counterSet = set;
-                    break;
-                }
-            }
-            ASSERT(descriptor.counterSet != nullptr);
-
-            descriptor.sampleCount = static_cast<NSUInteger>(std::max(count, uint32_t(1u)));
-            descriptor.storageMode = MTLStorageModePrivate;
-            if (device->IsToggleEnabled(Toggle::MetalUseSharedModeForCounterSampleBuffer)) {
-                descriptor.storageMode = MTLStorageModeShared;
-            }
-
-            NSError* error = nullptr;
-            id<MTLCounterSampleBuffer> counterSampleBuffer =
-                [device->GetMTLDevice() newCounterSampleBufferWithDescriptor:descriptor
-                                                                       error:&error];
-            if (error != nullptr) {
-                return DAWN_OUT_OF_MEMORY_ERROR(std::string("Error creating query set: ") +
-                                                [error.localizedDescription UTF8String]);
-            }
-
-            return counterSampleBuffer;
+    // To determine which counters are available from a device, we need to iterate through
+    // the counterSets property of a MTLDevice. Then configure which counters will be
+    // sampled by creating a MTLCounterSampleBufferDescriptor and setting its counterSet
+    // property to the matched one of the available set.
+    for (id<MTLCounterSet> set in device->GetMTLDevice().counterSets) {
+        if ([set.name isEqualToString:counterSet]) {
+            descriptor.counterSet = set;
+            break;
         }
     }
+    ASSERT(descriptor.counterSet != nullptr);
 
-    // static
-    ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
-                                                  const QuerySetDescriptor* descriptor) {
-        Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
-        DAWN_TRY(queryset->Initialize());
-        return queryset;
+    descriptor.sampleCount = static_cast<NSUInteger>(std::max(count, uint32_t(1u)));
+    descriptor.storageMode = MTLStorageModePrivate;
+    if (device->IsToggleEnabled(Toggle::MetalUseSharedModeForCounterSampleBuffer)) {
+        descriptor.storageMode = MTLStorageModeShared;
     }
 
-    MaybeError QuerySet::Initialize() {
-        Device* device = ToBackend(GetDevice());
+    NSError* error = nullptr;
+    id<MTLCounterSampleBuffer> counterSampleBuffer =
+        [device->GetMTLDevice() newCounterSampleBufferWithDescriptor:descriptor error:&error];
+    if (error != nullptr) {
+        return DAWN_OUT_OF_MEMORY_ERROR(std::string("Error creating query set: ") +
+                                        [error.localizedDescription UTF8String]);
+    }
 
-        switch (GetQueryType()) {
-            case wgpu::QueryType::Occlusion: {
-                // Create buffer for writing 64-bit results.
-                NSUInteger bufferSize = static_cast<NSUInteger>(
-                    std::max(GetQueryCount() * sizeof(uint64_t), size_t(4u)));
-                mVisibilityBuffer = AcquireNSPRef([device->GetMTLDevice()
-                    newBufferWithLength:bufferSize
-                                options:MTLResourceStorageModePrivate]);
+    return counterSampleBuffer;
+}
+}  // namespace
 
-                if (mVisibilityBuffer == nil) {
-                    return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate query set.");
-                }
-                break;
+// static
+ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+                                              const QuerySetDescriptor* descriptor) {
+    Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+    DAWN_TRY(queryset->Initialize());
+    return queryset;
+}
+
+MaybeError QuerySet::Initialize() {
+    Device* device = ToBackend(GetDevice());
+
+    switch (GetQueryType()) {
+        case wgpu::QueryType::Occlusion: {
+            // Create buffer for writing 64-bit results.
+            NSUInteger bufferSize =
+                static_cast<NSUInteger>(std::max(GetQueryCount() * sizeof(uint64_t), size_t(4u)));
+            mVisibilityBuffer = AcquireNSPRef([device->GetMTLDevice()
+                newBufferWithLength:bufferSize
+                            options:MTLResourceStorageModePrivate]);
+
+            if (mVisibilityBuffer == nil) {
+                return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate query set.");
             }
-            case wgpu::QueryType::PipelineStatistics:
-                if (@available(macOS 10.15, iOS 14.0, *)) {
-                    DAWN_TRY_ASSIGN(mCounterSampleBuffer,
-                                    CreateCounterSampleBuffer(device, MTLCommonCounterSetStatistic,
-                                                              GetQueryCount()));
-                } else {
-                    UNREACHABLE();
-                }
-                break;
-            case wgpu::QueryType::Timestamp:
-                if (@available(macOS 10.15, iOS 14.0, *)) {
-                    DAWN_TRY_ASSIGN(mCounterSampleBuffer,
-                                    CreateCounterSampleBuffer(device, MTLCommonCounterSetTimestamp,
-                                                              GetQueryCount()));
-                } else {
-                    UNREACHABLE();
-                }
-                break;
-            default:
+            break;
+        }
+        case wgpu::QueryType::PipelineStatistics:
+            if (@available(macOS 10.15, iOS 14.0, *)) {
+                DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+                                CreateCounterSampleBuffer(device, MTLCommonCounterSetStatistic,
+                                                          GetQueryCount()));
+            } else {
                 UNREACHABLE();
-                break;
-        }
-
-        return {};
+            }
+            break;
+        case wgpu::QueryType::Timestamp:
+            if (@available(macOS 10.15, iOS 14.0, *)) {
+                DAWN_TRY_ASSIGN(mCounterSampleBuffer,
+                                CreateCounterSampleBuffer(device, MTLCommonCounterSetTimestamp,
+                                                          GetQueryCount()));
+            } else {
+                UNREACHABLE();
+            }
+            break;
+        default:
+            UNREACHABLE();
+            break;
     }
 
-    id<MTLBuffer> QuerySet::GetVisibilityBuffer() const {
-        return mVisibilityBuffer.Get();
+    return {};
+}
+
+id<MTLBuffer> QuerySet::GetVisibilityBuffer() const {
+    return mVisibilityBuffer.Get();
+}
+
+id<MTLCounterSampleBuffer> QuerySet::GetCounterSampleBuffer() const
+    API_AVAILABLE(macos(10.15), ios(14.0)) {
+    return mCounterSampleBuffer;
+}
+
+QuerySet::~QuerySet() = default;
+
+void QuerySet::DestroyImpl() {
+    QuerySetBase::DestroyImpl();
+
+    mVisibilityBuffer = nullptr;
+
+    // mCounterSampleBuffer isn't an NSRef because API_AVAILABLE doesn't work well with
+    // templates.
+    if (@available(macOS 10.15, iOS 14.0, *)) {
+        [mCounterSampleBuffer release];
+        mCounterSampleBuffer = nullptr;
     }
-
-    id<MTLCounterSampleBuffer> QuerySet::GetCounterSampleBuffer() const
-        API_AVAILABLE(macos(10.15), ios(14.0)) {
-        return mCounterSampleBuffer;
-    }
-
-    QuerySet::~QuerySet() = default;
-
-    void QuerySet::DestroyImpl() {
-        QuerySetBase::DestroyImpl();
-
-        mVisibilityBuffer = nullptr;
-
-        // mCounterSampleBuffer isn't an NSRef because API_AVAILABLE doesn't work will with
-        // templates.
-        if (@available(macOS 10.15, iOS 14.0, *)) {
-            [mCounterSampleBuffer release];
-            mCounterSampleBuffer = nullptr;
-        }
-    }
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/QueueMTL.h b/src/dawn/native/metal/QueueMTL.h
index 7c1070e..e1a37b9 100644
--- a/src/dawn/native/metal/QueueMTL.h
+++ b/src/dawn/native/metal/QueueMTL.h
@@ -19,15 +19,15 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    class Queue final : public QueueBase {
-      public:
-        Queue(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+  public:
+    Queue(Device* device, const QueueDescriptor* descriptor);
 
-      private:
-        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-    };
+  private:
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/QueueMTL.mm b/src/dawn/native/metal/QueueMTL.mm
index 2bf50c5..c129c5d 100644
--- a/src/dawn/native/metal/QueueMTL.mm
+++ b/src/dawn/native/metal/QueueMTL.mm
@@ -26,24 +26,22 @@
 
 namespace dawn::native::metal {
 
-    Queue::Queue(Device* device, const QueueDescriptor* descriptor)
-        : QueueBase(device, descriptor) {
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
+
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+    Device* device = ToBackend(GetDevice());
+
+    DAWN_TRY(device->Tick());
+
+    CommandRecordingContext* commandContext = device->GetPendingCommandContext();
+
+    TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
+    for (uint32_t i = 0; i < commandCount; ++i) {
+        DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
     }
+    TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
 
-    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
-        Device* device = ToBackend(GetDevice());
-
-        DAWN_TRY(device->Tick());
-
-        CommandRecordingContext* commandContext = device->GetPendingCommandContext();
-
-        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
-        for (uint32_t i = 0; i < commandCount; ++i) {
-            DAWN_TRY(ToBackend(commands[i])->FillCommands(commandContext));
-        }
-        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferMTL::FillCommands");
-
-        return device->SubmitPendingCommandBuffer();
-    }
+    return device->SubmitPendingCommandBuffer();
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/RenderPipelineMTL.h b/src/dawn/native/metal/RenderPipelineMTL.h
index 338a3e9..cd8111d 100644
--- a/src/dawn/native/metal/RenderPipelineMTL.h
+++ b/src/dawn/native/metal/RenderPipelineMTL.h
@@ -23,47 +23,46 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    class RenderPipeline final : public RenderPipelineBase {
-      public:
-        static Ref<RenderPipelineBase> CreateUninitialized(
-            Device* device,
-            const RenderPipelineDescriptor* descriptor);
-        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
-                                    WGPUCreateRenderPipelineAsyncCallback callback,
-                                    void* userdata);
+class RenderPipeline final : public RenderPipelineBase {
+  public:
+    static Ref<RenderPipelineBase> CreateUninitialized(Device* device,
+                                                       const RenderPipelineDescriptor* descriptor);
+    static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                WGPUCreateRenderPipelineAsyncCallback callback,
+                                void* userdata);
 
-        MTLPrimitiveType GetMTLPrimitiveTopology() const;
-        MTLWinding GetMTLFrontFace() const;
-        MTLCullMode GetMTLCullMode() const;
+    MTLPrimitiveType GetMTLPrimitiveTopology() const;
+    MTLWinding GetMTLFrontFace() const;
+    MTLCullMode GetMTLCullMode() const;
 
-        void Encode(id<MTLRenderCommandEncoder> encoder);
+    void Encode(id<MTLRenderCommandEncoder> encoder);
 
-        id<MTLDepthStencilState> GetMTLDepthStencilState();
+    id<MTLDepthStencilState> GetMTLDepthStencilState();
 
-        // For each Dawn vertex buffer, give the index in which it will be positioned in the Metal
-        // vertex buffer table.
-        uint32_t GetMtlVertexBufferIndex(VertexBufferSlot slot) const;
+    // For each Dawn vertex buffer, give the index in which it will be positioned in the Metal
+    // vertex buffer table.
+    uint32_t GetMtlVertexBufferIndex(VertexBufferSlot slot) const;
 
-        wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
+    wgpu::ShaderStage GetStagesRequiringStorageBufferLength() const;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-      private:
-        using RenderPipelineBase::RenderPipelineBase;
+  private:
+    using RenderPipelineBase::RenderPipelineBase;
 
-        NSRef<MTLVertexDescriptor> MakeVertexDesc();
+    NSRef<MTLVertexDescriptor> MakeVertexDesc();
 
-        MTLPrimitiveType mMtlPrimitiveTopology;
-        MTLWinding mMtlFrontFace;
-        MTLCullMode mMtlCullMode;
-        NSPRef<id<MTLRenderPipelineState>> mMtlRenderPipelineState;
-        NSPRef<id<MTLDepthStencilState>> mMtlDepthStencilState;
-        ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
+    MTLPrimitiveType mMtlPrimitiveTopology;
+    MTLWinding mMtlFrontFace;
+    MTLCullMode mMtlCullMode;
+    NSPRef<id<MTLRenderPipelineState>> mMtlRenderPipelineState;
+    NSPRef<id<MTLDepthStencilState>> mMtlDepthStencilState;
+    ityp::array<VertexBufferSlot, uint32_t, kMaxVertexBuffers> mMtlVertexBufferIndices;
 
-        wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
-    };
+    wgpu::ShaderStage mStagesRequiringStorageBufferLength = wgpu::ShaderStage::None;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/RenderPipelineMTL.mm b/src/dawn/native/metal/RenderPipelineMTL.mm
index 4c0f4b1..2e10ed9 100644
--- a/src/dawn/native/metal/RenderPipelineMTL.mm
+++ b/src/dawn/native/metal/RenderPipelineMTL.mm
@@ -24,483 +24,475 @@
 
 namespace dawn::native::metal {
 
-    namespace {
-        MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
-            switch (format) {
-                case wgpu::VertexFormat::Uint8x2:
-                    return MTLVertexFormatUChar2;
-                case wgpu::VertexFormat::Uint8x4:
-                    return MTLVertexFormatUChar4;
-                case wgpu::VertexFormat::Sint8x2:
-                    return MTLVertexFormatChar2;
-                case wgpu::VertexFormat::Sint8x4:
-                    return MTLVertexFormatChar4;
-                case wgpu::VertexFormat::Unorm8x2:
-                    return MTLVertexFormatUChar2Normalized;
-                case wgpu::VertexFormat::Unorm8x4:
-                    return MTLVertexFormatUChar4Normalized;
-                case wgpu::VertexFormat::Snorm8x2:
-                    return MTLVertexFormatChar2Normalized;
-                case wgpu::VertexFormat::Snorm8x4:
-                    return MTLVertexFormatChar4Normalized;
-                case wgpu::VertexFormat::Uint16x2:
-                    return MTLVertexFormatUShort2;
-                case wgpu::VertexFormat::Uint16x4:
-                    return MTLVertexFormatUShort4;
-                case wgpu::VertexFormat::Sint16x2:
-                    return MTLVertexFormatShort2;
-                case wgpu::VertexFormat::Sint16x4:
-                    return MTLVertexFormatShort4;
-                case wgpu::VertexFormat::Unorm16x2:
-                    return MTLVertexFormatUShort2Normalized;
-                case wgpu::VertexFormat::Unorm16x4:
-                    return MTLVertexFormatUShort4Normalized;
-                case wgpu::VertexFormat::Snorm16x2:
-                    return MTLVertexFormatShort2Normalized;
-                case wgpu::VertexFormat::Snorm16x4:
-                    return MTLVertexFormatShort4Normalized;
-                case wgpu::VertexFormat::Float16x2:
-                    return MTLVertexFormatHalf2;
-                case wgpu::VertexFormat::Float16x4:
-                    return MTLVertexFormatHalf4;
-                case wgpu::VertexFormat::Float32:
-                    return MTLVertexFormatFloat;
-                case wgpu::VertexFormat::Float32x2:
-                    return MTLVertexFormatFloat2;
-                case wgpu::VertexFormat::Float32x3:
-                    return MTLVertexFormatFloat3;
-                case wgpu::VertexFormat::Float32x4:
-                    return MTLVertexFormatFloat4;
-                case wgpu::VertexFormat::Uint32:
-                    return MTLVertexFormatUInt;
-                case wgpu::VertexFormat::Uint32x2:
-                    return MTLVertexFormatUInt2;
-                case wgpu::VertexFormat::Uint32x3:
-                    return MTLVertexFormatUInt3;
-                case wgpu::VertexFormat::Uint32x4:
-                    return MTLVertexFormatUInt4;
-                case wgpu::VertexFormat::Sint32:
-                    return MTLVertexFormatInt;
-                case wgpu::VertexFormat::Sint32x2:
-                    return MTLVertexFormatInt2;
-                case wgpu::VertexFormat::Sint32x3:
-                    return MTLVertexFormatInt3;
-                case wgpu::VertexFormat::Sint32x4:
-                    return MTLVertexFormatInt4;
-                default:
-                    UNREACHABLE();
-            }
-        }
+namespace {
+MTLVertexFormat VertexFormatType(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Uint8x2:
+            return MTLVertexFormatUChar2;
+        case wgpu::VertexFormat::Uint8x4:
+            return MTLVertexFormatUChar4;
+        case wgpu::VertexFormat::Sint8x2:
+            return MTLVertexFormatChar2;
+        case wgpu::VertexFormat::Sint8x4:
+            return MTLVertexFormatChar4;
+        case wgpu::VertexFormat::Unorm8x2:
+            return MTLVertexFormatUChar2Normalized;
+        case wgpu::VertexFormat::Unorm8x4:
+            return MTLVertexFormatUChar4Normalized;
+        case wgpu::VertexFormat::Snorm8x2:
+            return MTLVertexFormatChar2Normalized;
+        case wgpu::VertexFormat::Snorm8x4:
+            return MTLVertexFormatChar4Normalized;
+        case wgpu::VertexFormat::Uint16x2:
+            return MTLVertexFormatUShort2;
+        case wgpu::VertexFormat::Uint16x4:
+            return MTLVertexFormatUShort4;
+        case wgpu::VertexFormat::Sint16x2:
+            return MTLVertexFormatShort2;
+        case wgpu::VertexFormat::Sint16x4:
+            return MTLVertexFormatShort4;
+        case wgpu::VertexFormat::Unorm16x2:
+            return MTLVertexFormatUShort2Normalized;
+        case wgpu::VertexFormat::Unorm16x4:
+            return MTLVertexFormatUShort4Normalized;
+        case wgpu::VertexFormat::Snorm16x2:
+            return MTLVertexFormatShort2Normalized;
+        case wgpu::VertexFormat::Snorm16x4:
+            return MTLVertexFormatShort4Normalized;
+        case wgpu::VertexFormat::Float16x2:
+            return MTLVertexFormatHalf2;
+        case wgpu::VertexFormat::Float16x4:
+            return MTLVertexFormatHalf4;
+        case wgpu::VertexFormat::Float32:
+            return MTLVertexFormatFloat;
+        case wgpu::VertexFormat::Float32x2:
+            return MTLVertexFormatFloat2;
+        case wgpu::VertexFormat::Float32x3:
+            return MTLVertexFormatFloat3;
+        case wgpu::VertexFormat::Float32x4:
+            return MTLVertexFormatFloat4;
+        case wgpu::VertexFormat::Uint32:
+            return MTLVertexFormatUInt;
+        case wgpu::VertexFormat::Uint32x2:
+            return MTLVertexFormatUInt2;
+        case wgpu::VertexFormat::Uint32x3:
+            return MTLVertexFormatUInt3;
+        case wgpu::VertexFormat::Uint32x4:
+            return MTLVertexFormatUInt4;
+        case wgpu::VertexFormat::Sint32:
+            return MTLVertexFormatInt;
+        case wgpu::VertexFormat::Sint32x2:
+            return MTLVertexFormatInt2;
+        case wgpu::VertexFormat::Sint32x3:
+            return MTLVertexFormatInt3;
+        case wgpu::VertexFormat::Sint32x4:
+            return MTLVertexFormatInt4;
+        default:
+            UNREACHABLE();
+    }
+}
 
-        MTLVertexStepFunction VertexStepModeFunction(wgpu::VertexStepMode mode) {
-            switch (mode) {
-                case wgpu::VertexStepMode::Vertex:
-                    return MTLVertexStepFunctionPerVertex;
-                case wgpu::VertexStepMode::Instance:
-                    return MTLVertexStepFunctionPerInstance;
-            }
-        }
+MTLVertexStepFunction VertexStepModeFunction(wgpu::VertexStepMode mode) {
+    switch (mode) {
+        case wgpu::VertexStepMode::Vertex:
+            return MTLVertexStepFunctionPerVertex;
+        case wgpu::VertexStepMode::Instance:
+            return MTLVertexStepFunctionPerInstance;
+    }
+}
 
-        MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
-            switch (primitiveTopology) {
-                case wgpu::PrimitiveTopology::PointList:
-                    return MTLPrimitiveTypePoint;
-                case wgpu::PrimitiveTopology::LineList:
-                    return MTLPrimitiveTypeLine;
-                case wgpu::PrimitiveTopology::LineStrip:
-                    return MTLPrimitiveTypeLineStrip;
-                case wgpu::PrimitiveTopology::TriangleList:
-                    return MTLPrimitiveTypeTriangle;
-                case wgpu::PrimitiveTopology::TriangleStrip:
-                    return MTLPrimitiveTypeTriangleStrip;
-            }
-        }
+MTLPrimitiveType MTLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+    switch (primitiveTopology) {
+        case wgpu::PrimitiveTopology::PointList:
+            return MTLPrimitiveTypePoint;
+        case wgpu::PrimitiveTopology::LineList:
+            return MTLPrimitiveTypeLine;
+        case wgpu::PrimitiveTopology::LineStrip:
+            return MTLPrimitiveTypeLineStrip;
+        case wgpu::PrimitiveTopology::TriangleList:
+            return MTLPrimitiveTypeTriangle;
+        case wgpu::PrimitiveTopology::TriangleStrip:
+            return MTLPrimitiveTypeTriangleStrip;
+    }
+}
 
-        MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(
-            wgpu::PrimitiveTopology primitiveTopology) {
-            switch (primitiveTopology) {
-                case wgpu::PrimitiveTopology::PointList:
-                    return MTLPrimitiveTopologyClassPoint;
-                case wgpu::PrimitiveTopology::LineList:
-                case wgpu::PrimitiveTopology::LineStrip:
-                    return MTLPrimitiveTopologyClassLine;
-                case wgpu::PrimitiveTopology::TriangleList:
-                case wgpu::PrimitiveTopology::TriangleStrip:
-                    return MTLPrimitiveTopologyClassTriangle;
-            }
-        }
+MTLPrimitiveTopologyClass MTLInputPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+    switch (primitiveTopology) {
+        case wgpu::PrimitiveTopology::PointList:
+            return MTLPrimitiveTopologyClassPoint;
+        case wgpu::PrimitiveTopology::LineList:
+        case wgpu::PrimitiveTopology::LineStrip:
+            return MTLPrimitiveTopologyClassLine;
+        case wgpu::PrimitiveTopology::TriangleList:
+        case wgpu::PrimitiveTopology::TriangleStrip:
+            return MTLPrimitiveTopologyClassTriangle;
+    }
+}
 
-        MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
-            switch (factor) {
-                case wgpu::BlendFactor::Zero:
-                    return MTLBlendFactorZero;
-                case wgpu::BlendFactor::One:
-                    return MTLBlendFactorOne;
-                case wgpu::BlendFactor::Src:
-                    return MTLBlendFactorSourceColor;
-                case wgpu::BlendFactor::OneMinusSrc:
-                    return MTLBlendFactorOneMinusSourceColor;
-                case wgpu::BlendFactor::SrcAlpha:
-                    return MTLBlendFactorSourceAlpha;
-                case wgpu::BlendFactor::OneMinusSrcAlpha:
-                    return MTLBlendFactorOneMinusSourceAlpha;
-                case wgpu::BlendFactor::Dst:
-                    return MTLBlendFactorDestinationColor;
-                case wgpu::BlendFactor::OneMinusDst:
-                    return MTLBlendFactorOneMinusDestinationColor;
-                case wgpu::BlendFactor::DstAlpha:
-                    return MTLBlendFactorDestinationAlpha;
-                case wgpu::BlendFactor::OneMinusDstAlpha:
-                    return MTLBlendFactorOneMinusDestinationAlpha;
-                case wgpu::BlendFactor::SrcAlphaSaturated:
-                    return MTLBlendFactorSourceAlphaSaturated;
-                case wgpu::BlendFactor::Constant:
-                    return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
-                case wgpu::BlendFactor::OneMinusConstant:
-                    return alpha ? MTLBlendFactorOneMinusBlendAlpha
-                                 : MTLBlendFactorOneMinusBlendColor;
-            }
-        }
+MTLBlendFactor MetalBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+    switch (factor) {
+        case wgpu::BlendFactor::Zero:
+            return MTLBlendFactorZero;
+        case wgpu::BlendFactor::One:
+            return MTLBlendFactorOne;
+        case wgpu::BlendFactor::Src:
+            return MTLBlendFactorSourceColor;
+        case wgpu::BlendFactor::OneMinusSrc:
+            return MTLBlendFactorOneMinusSourceColor;
+        case wgpu::BlendFactor::SrcAlpha:
+            return MTLBlendFactorSourceAlpha;
+        case wgpu::BlendFactor::OneMinusSrcAlpha:
+            return MTLBlendFactorOneMinusSourceAlpha;
+        case wgpu::BlendFactor::Dst:
+            return MTLBlendFactorDestinationColor;
+        case wgpu::BlendFactor::OneMinusDst:
+            return MTLBlendFactorOneMinusDestinationColor;
+        case wgpu::BlendFactor::DstAlpha:
+            return MTLBlendFactorDestinationAlpha;
+        case wgpu::BlendFactor::OneMinusDstAlpha:
+            return MTLBlendFactorOneMinusDestinationAlpha;
+        case wgpu::BlendFactor::SrcAlphaSaturated:
+            return MTLBlendFactorSourceAlphaSaturated;
+        case wgpu::BlendFactor::Constant:
+            return alpha ? MTLBlendFactorBlendAlpha : MTLBlendFactorBlendColor;
+        case wgpu::BlendFactor::OneMinusConstant:
+            return alpha ? MTLBlendFactorOneMinusBlendAlpha : MTLBlendFactorOneMinusBlendColor;
+    }
+}
 
-        MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
-            switch (operation) {
-                case wgpu::BlendOperation::Add:
-                    return MTLBlendOperationAdd;
-                case wgpu::BlendOperation::Subtract:
-                    return MTLBlendOperationSubtract;
-                case wgpu::BlendOperation::ReverseSubtract:
-                    return MTLBlendOperationReverseSubtract;
-                case wgpu::BlendOperation::Min:
-                    return MTLBlendOperationMin;
-                case wgpu::BlendOperation::Max:
-                    return MTLBlendOperationMax;
-            }
-        }
+MTLBlendOperation MetalBlendOperation(wgpu::BlendOperation operation) {
+    switch (operation) {
+        case wgpu::BlendOperation::Add:
+            return MTLBlendOperationAdd;
+        case wgpu::BlendOperation::Subtract:
+            return MTLBlendOperationSubtract;
+        case wgpu::BlendOperation::ReverseSubtract:
+            return MTLBlendOperationReverseSubtract;
+        case wgpu::BlendOperation::Min:
+            return MTLBlendOperationMin;
+        case wgpu::BlendOperation::Max:
+            return MTLBlendOperationMax;
+    }
+}
 
-        MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
-                                              bool isDeclaredInFragmentShader) {
-            if (!isDeclaredInFragmentShader) {
-                return MTLColorWriteMaskNone;
-            }
-
-            MTLColorWriteMask mask = MTLColorWriteMaskNone;
-
-            if (writeMask & wgpu::ColorWriteMask::Red) {
-                mask |= MTLColorWriteMaskRed;
-            }
-            if (writeMask & wgpu::ColorWriteMask::Green) {
-                mask |= MTLColorWriteMaskGreen;
-            }
-            if (writeMask & wgpu::ColorWriteMask::Blue) {
-                mask |= MTLColorWriteMaskBlue;
-            }
-            if (writeMask & wgpu::ColorWriteMask::Alpha) {
-                mask |= MTLColorWriteMaskAlpha;
-            }
-
-            return mask;
-        }
-
-        void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
-                              const ColorTargetState* state,
-                              bool isDeclaredInFragmentShader) {
-            attachment.blendingEnabled = state->blend != nullptr;
-            if (attachment.blendingEnabled) {
-                attachment.sourceRGBBlendFactor =
-                    MetalBlendFactor(state->blend->color.srcFactor, false);
-                attachment.destinationRGBBlendFactor =
-                    MetalBlendFactor(state->blend->color.dstFactor, false);
-                attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
-                attachment.sourceAlphaBlendFactor =
-                    MetalBlendFactor(state->blend->alpha.srcFactor, true);
-                attachment.destinationAlphaBlendFactor =
-                    MetalBlendFactor(state->blend->alpha.dstFactor, true);
-                attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
-            }
-            attachment.writeMask =
-                MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
-        }
-
-        MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
-            switch (stencilOperation) {
-                case wgpu::StencilOperation::Keep:
-                    return MTLStencilOperationKeep;
-                case wgpu::StencilOperation::Zero:
-                    return MTLStencilOperationZero;
-                case wgpu::StencilOperation::Replace:
-                    return MTLStencilOperationReplace;
-                case wgpu::StencilOperation::Invert:
-                    return MTLStencilOperationInvert;
-                case wgpu::StencilOperation::IncrementClamp:
-                    return MTLStencilOperationIncrementClamp;
-                case wgpu::StencilOperation::DecrementClamp:
-                    return MTLStencilOperationDecrementClamp;
-                case wgpu::StencilOperation::IncrementWrap:
-                    return MTLStencilOperationIncrementWrap;
-                case wgpu::StencilOperation::DecrementWrap:
-                    return MTLStencilOperationDecrementWrap;
-            }
-        }
-
-        NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
-            NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
-                AcquireNSRef([MTLDepthStencilDescriptor new]);
-            MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
-
-            mtlDepthStencilDescriptor.depthCompareFunction =
-                ToMetalCompareFunction(descriptor->depthCompare);
-            mtlDepthStencilDescriptor.depthWriteEnabled = descriptor->depthWriteEnabled;
-
-            if (StencilTestEnabled(descriptor)) {
-                NSRef<MTLStencilDescriptor> backFaceStencilRef =
-                    AcquireNSRef([MTLStencilDescriptor new]);
-                MTLStencilDescriptor* backFaceStencil = backFaceStencilRef.Get();
-                NSRef<MTLStencilDescriptor> frontFaceStencilRef =
-                    AcquireNSRef([MTLStencilDescriptor new]);
-                MTLStencilDescriptor* frontFaceStencil = frontFaceStencilRef.Get();
-
-                backFaceStencil.stencilCompareFunction =
-                    ToMetalCompareFunction(descriptor->stencilBack.compare);
-                backFaceStencil.stencilFailureOperation =
-                    MetalStencilOperation(descriptor->stencilBack.failOp);
-                backFaceStencil.depthFailureOperation =
-                    MetalStencilOperation(descriptor->stencilBack.depthFailOp);
-                backFaceStencil.depthStencilPassOperation =
-                    MetalStencilOperation(descriptor->stencilBack.passOp);
-                backFaceStencil.readMask = descriptor->stencilReadMask;
-                backFaceStencil.writeMask = descriptor->stencilWriteMask;
-
-                frontFaceStencil.stencilCompareFunction =
-                    ToMetalCompareFunction(descriptor->stencilFront.compare);
-                frontFaceStencil.stencilFailureOperation =
-                    MetalStencilOperation(descriptor->stencilFront.failOp);
-                frontFaceStencil.depthFailureOperation =
-                    MetalStencilOperation(descriptor->stencilFront.depthFailOp);
-                frontFaceStencil.depthStencilPassOperation =
-                    MetalStencilOperation(descriptor->stencilFront.passOp);
-                frontFaceStencil.readMask = descriptor->stencilReadMask;
-                frontFaceStencil.writeMask = descriptor->stencilWriteMask;
-
-                mtlDepthStencilDescriptor.backFaceStencil = backFaceStencil;
-                mtlDepthStencilDescriptor.frontFaceStencil = frontFaceStencil;
-            }
-
-            return mtlDepthStencilDescRef;
-        }
-
-        MTLWinding MTLFrontFace(wgpu::FrontFace face) {
-            switch (face) {
-                case wgpu::FrontFace::CW:
-                    return MTLWindingClockwise;
-                case wgpu::FrontFace::CCW:
-                    return MTLWindingCounterClockwise;
-            }
-        }
-
-        MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
-            switch (mode) {
-                case wgpu::CullMode::None:
-                    return MTLCullModeNone;
-                case wgpu::CullMode::Front:
-                    return MTLCullModeFront;
-                case wgpu::CullMode::Back:
-                    return MTLCullModeBack;
-            }
-        }
-
-    }  // anonymous namespace
-
-    // static
-    Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
-        Device* device,
-        const RenderPipelineDescriptor* descriptor) {
-        return AcquireRef(new RenderPipeline(device, descriptor));
+MTLColorWriteMask MetalColorWriteMask(wgpu::ColorWriteMask writeMask,
+                                      bool isDeclaredInFragmentShader) {
+    if (!isDeclaredInFragmentShader) {
+        return MTLColorWriteMaskNone;
     }
 
-    MaybeError RenderPipeline::Initialize() {
-        mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
-        mMtlFrontFace = MTLFrontFace(GetFrontFace());
-        mMtlCullMode = ToMTLCullMode(GetCullMode());
-        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+    MTLColorWriteMask mask = MTLColorWriteMaskNone;
 
-        NSRef<MTLRenderPipelineDescriptor> descriptorMTLRef =
-            AcquireNSRef([MTLRenderPipelineDescriptor new]);
-        MTLRenderPipelineDescriptor* descriptorMTL = descriptorMTLRef.Get();
+    if (writeMask & wgpu::ColorWriteMask::Red) {
+        mask |= MTLColorWriteMaskRed;
+    }
+    if (writeMask & wgpu::ColorWriteMask::Green) {
+        mask |= MTLColorWriteMaskGreen;
+    }
+    if (writeMask & wgpu::ColorWriteMask::Blue) {
+        mask |= MTLColorWriteMaskBlue;
+    }
+    if (writeMask & wgpu::ColorWriteMask::Alpha) {
+        mask |= MTLColorWriteMaskAlpha;
+    }
 
-        // TODO(dawn:1384): MakeVertexDesc should be const in the future, so we don't need to call
-        // it here when vertex pulling is enabled
-        NSRef<MTLVertexDescriptor> vertexDesc = MakeVertexDesc();
+    return mask;
+}
 
-        // Calling MakeVertexDesc first is important since it sets indices for packed bindings
-        if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
-            vertexDesc = AcquireNSRef([MTLVertexDescriptor new]);
-        }
-        descriptorMTL.vertexDescriptor = vertexDesc.Get();
+void ComputeBlendDesc(MTLRenderPipelineColorAttachmentDescriptor* attachment,
+                      const ColorTargetState* state,
+                      bool isDeclaredInFragmentShader) {
+    attachment.blendingEnabled = state->blend != nullptr;
+    if (attachment.blendingEnabled) {
+        attachment.sourceRGBBlendFactor = MetalBlendFactor(state->blend->color.srcFactor, false);
+        attachment.destinationRGBBlendFactor =
+            MetalBlendFactor(state->blend->color.dstFactor, false);
+        attachment.rgbBlendOperation = MetalBlendOperation(state->blend->color.operation);
+        attachment.sourceAlphaBlendFactor = MetalBlendFactor(state->blend->alpha.srcFactor, true);
+        attachment.destinationAlphaBlendFactor =
+            MetalBlendFactor(state->blend->alpha.dstFactor, true);
+        attachment.alphaBlendOperation = MetalBlendOperation(state->blend->alpha.operation);
+    }
+    attachment.writeMask = MetalColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+}
 
-        const PerStage<ProgrammableStage>& allStages = GetAllStages();
-        const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
-        ShaderModule::MetalFunctionData vertexData;
-        DAWN_TRY(CreateMTLFunction(vertexStage, SingleShaderStage::Vertex, ToBackend(GetLayout()),
-                                   &vertexData, 0xFFFFFFFF, this));
+MTLStencilOperation MetalStencilOperation(wgpu::StencilOperation stencilOperation) {
+    switch (stencilOperation) {
+        case wgpu::StencilOperation::Keep:
+            return MTLStencilOperationKeep;
+        case wgpu::StencilOperation::Zero:
+            return MTLStencilOperationZero;
+        case wgpu::StencilOperation::Replace:
+            return MTLStencilOperationReplace;
+        case wgpu::StencilOperation::Invert:
+            return MTLStencilOperationInvert;
+        case wgpu::StencilOperation::IncrementClamp:
+            return MTLStencilOperationIncrementClamp;
+        case wgpu::StencilOperation::DecrementClamp:
+            return MTLStencilOperationDecrementClamp;
+        case wgpu::StencilOperation::IncrementWrap:
+            return MTLStencilOperationIncrementWrap;
+        case wgpu::StencilOperation::DecrementWrap:
+            return MTLStencilOperationDecrementWrap;
+    }
+}
 
-        descriptorMTL.vertexFunction = vertexData.function.Get();
-        if (vertexData.needsStorageBufferLength) {
-            mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
+NSRef<MTLDepthStencilDescriptor> MakeDepthStencilDesc(const DepthStencilState* descriptor) {
+    NSRef<MTLDepthStencilDescriptor> mtlDepthStencilDescRef =
+        AcquireNSRef([MTLDepthStencilDescriptor new]);
+    MTLDepthStencilDescriptor* mtlDepthStencilDescriptor = mtlDepthStencilDescRef.Get();
+
+    mtlDepthStencilDescriptor.depthCompareFunction =
+        ToMetalCompareFunction(descriptor->depthCompare);
+    mtlDepthStencilDescriptor.depthWriteEnabled = descriptor->depthWriteEnabled;
+
+    if (StencilTestEnabled(descriptor)) {
+        NSRef<MTLStencilDescriptor> backFaceStencilRef = AcquireNSRef([MTLStencilDescriptor new]);
+        MTLStencilDescriptor* backFaceStencil = backFaceStencilRef.Get();
+        NSRef<MTLStencilDescriptor> frontFaceStencilRef = AcquireNSRef([MTLStencilDescriptor new]);
+        MTLStencilDescriptor* frontFaceStencil = frontFaceStencilRef.Get();
+
+        backFaceStencil.stencilCompareFunction =
+            ToMetalCompareFunction(descriptor->stencilBack.compare);
+        backFaceStencil.stencilFailureOperation =
+            MetalStencilOperation(descriptor->stencilBack.failOp);
+        backFaceStencil.depthFailureOperation =
+            MetalStencilOperation(descriptor->stencilBack.depthFailOp);
+        backFaceStencil.depthStencilPassOperation =
+            MetalStencilOperation(descriptor->stencilBack.passOp);
+        backFaceStencil.readMask = descriptor->stencilReadMask;
+        backFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+        frontFaceStencil.stencilCompareFunction =
+            ToMetalCompareFunction(descriptor->stencilFront.compare);
+        frontFaceStencil.stencilFailureOperation =
+            MetalStencilOperation(descriptor->stencilFront.failOp);
+        frontFaceStencil.depthFailureOperation =
+            MetalStencilOperation(descriptor->stencilFront.depthFailOp);
+        frontFaceStencil.depthStencilPassOperation =
+            MetalStencilOperation(descriptor->stencilFront.passOp);
+        frontFaceStencil.readMask = descriptor->stencilReadMask;
+        frontFaceStencil.writeMask = descriptor->stencilWriteMask;
+
+        mtlDepthStencilDescriptor.backFaceStencil = backFaceStencil;
+        mtlDepthStencilDescriptor.frontFaceStencil = frontFaceStencil;
+    }
+
+    return mtlDepthStencilDescRef;
+}
+
+MTLWinding MTLFrontFace(wgpu::FrontFace face) {
+    switch (face) {
+        case wgpu::FrontFace::CW:
+            return MTLWindingClockwise;
+        case wgpu::FrontFace::CCW:
+            return MTLWindingCounterClockwise;
+    }
+}
+
+MTLCullMode ToMTLCullMode(wgpu::CullMode mode) {
+    switch (mode) {
+        case wgpu::CullMode::None:
+            return MTLCullModeNone;
+        case wgpu::CullMode::Front:
+            return MTLCullModeFront;
+        case wgpu::CullMode::Back:
+            return MTLCullModeBack;
+    }
+}
+
+}  // anonymous namespace
+
+// static
+Ref<RenderPipelineBase> RenderPipeline::CreateUninitialized(
+    Device* device,
+    const RenderPipelineDescriptor* descriptor) {
+    return AcquireRef(new RenderPipeline(device, descriptor));
+}
+
+MaybeError RenderPipeline::Initialize() {
+    mMtlPrimitiveTopology = MTLPrimitiveTopology(GetPrimitiveTopology());
+    mMtlFrontFace = MTLFrontFace(GetFrontFace());
+    mMtlCullMode = ToMTLCullMode(GetCullMode());
+    auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+
+    NSRef<MTLRenderPipelineDescriptor> descriptorMTLRef =
+        AcquireNSRef([MTLRenderPipelineDescriptor new]);
+    MTLRenderPipelineDescriptor* descriptorMTL = descriptorMTLRef.Get();
+
+    // TODO(dawn:1384): MakeVertexDesc should be const in the future, so we don't need to call
+    // it here when vertex pulling is enabled
+    NSRef<MTLVertexDescriptor> vertexDesc = MakeVertexDesc();
+
+    // Calling MakeVertexDesc first is important since it sets indices for packed bindings
+    if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+        vertexDesc = AcquireNSRef([MTLVertexDescriptor new]);
+    }
+    descriptorMTL.vertexDescriptor = vertexDesc.Get();
+
+    const PerStage<ProgrammableStage>& allStages = GetAllStages();
+    const ProgrammableStage& vertexStage = allStages[wgpu::ShaderStage::Vertex];
+    ShaderModule::MetalFunctionData vertexData;
+    DAWN_TRY(CreateMTLFunction(vertexStage, SingleShaderStage::Vertex, ToBackend(GetLayout()),
+                               &vertexData, 0xFFFFFFFF, this));
+
+    descriptorMTL.vertexFunction = vertexData.function.Get();
+    if (vertexData.needsStorageBufferLength) {
+        mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Vertex;
+    }
+
+    if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+        const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
+        ShaderModule::MetalFunctionData fragmentData;
+        DAWN_TRY(CreateMTLFunction(fragmentStage, SingleShaderStage::Fragment,
+                                   ToBackend(GetLayout()), &fragmentData, GetSampleMask()));
+
+        descriptorMTL.fragmentFunction = fragmentData.function.Get();
+        if (fragmentData.needsStorageBufferLength) {
+            mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
         }
 
-        if (GetStageMask() & wgpu::ShaderStage::Fragment) {
-            const ProgrammableStage& fragmentStage = allStages[wgpu::ShaderStage::Fragment];
-            ShaderModule::MetalFunctionData fragmentData;
-            DAWN_TRY(CreateMTLFunction(fragmentStage, SingleShaderStage::Fragment,
-                                       ToBackend(GetLayout()), &fragmentData, GetSampleMask()));
-
-            descriptorMTL.fragmentFunction = fragmentData.function.Get();
-            if (fragmentData.needsStorageBufferLength) {
-                mStagesRequiringStorageBufferLength |= wgpu::ShaderStage::Fragment;
-            }
-
-            const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
-            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
-                descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
-                    MetalPixelFormat(GetColorAttachmentFormat(i));
-                const ColorTargetState* descriptor = GetColorTargetState(i);
-                ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)],
-                                 descriptor, fragmentOutputsWritten[i]);
-            }
+        const auto& fragmentOutputsWritten = fragmentStage.metadata->fragmentOutputsWritten;
+        for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+            descriptorMTL.colorAttachments[static_cast<uint8_t>(i)].pixelFormat =
+                MetalPixelFormat(GetColorAttachmentFormat(i));
+            const ColorTargetState* descriptor = GetColorTargetState(i);
+            ComputeBlendDesc(descriptorMTL.colorAttachments[static_cast<uint8_t>(i)], descriptor,
+                             fragmentOutputsWritten[i]);
         }
+    }
 
-        if (HasDepthStencilAttachment()) {
-            wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
-            const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
-            MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
+    if (HasDepthStencilAttachment()) {
+        wgpu::TextureFormat depthStencilFormat = GetDepthStencilFormat();
+        const Format& internalFormat = GetDevice()->GetValidInternalFormat(depthStencilFormat);
+        MTLPixelFormat metalFormat = MetalPixelFormat(depthStencilFormat);
 
-            if (internalFormat.HasDepth()) {
-                descriptorMTL.depthAttachmentPixelFormat = metalFormat;
-            }
-            if (internalFormat.HasStencil()) {
-                descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
-            }
+        if (internalFormat.HasDepth()) {
+            descriptorMTL.depthAttachmentPixelFormat = metalFormat;
         }
-
-        descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
-        descriptorMTL.sampleCount = GetSampleCount();
-        descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
-
-        NSError* error = nullptr;
-        mMtlRenderPipelineState =
-            AcquireNSPRef([mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL
-                                                                    error:&error]);
-        if (error != nullptr) {
-            return DAWN_INTERNAL_ERROR(std::string("Error creating pipeline state ") +
-                                       [error.localizedDescription UTF8String]);
+        if (internalFormat.HasStencil()) {
+            descriptorMTL.stencilAttachmentPixelFormat = metalFormat;
         }
-        ASSERT(mMtlRenderPipelineState != nil);
-
-        // Create depth stencil state and cache it, fetch the cached depth stencil state when we
-        // call setDepthStencilState() for a given render pipeline in CommandEncoder, in order
-        // to improve performance.
-        NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
-            MakeDepthStencilDesc(GetDepthStencilState());
-        mMtlDepthStencilState =
-            AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
-
-        return {};
     }
 
-    MTLPrimitiveType RenderPipeline::GetMTLPrimitiveTopology() const {
-        return mMtlPrimitiveTopology;
+    descriptorMTL.inputPrimitiveTopology = MTLInputPrimitiveTopology(GetPrimitiveTopology());
+    descriptorMTL.sampleCount = GetSampleCount();
+    descriptorMTL.alphaToCoverageEnabled = IsAlphaToCoverageEnabled();
+
+    NSError* error = nullptr;
+    mMtlRenderPipelineState =
+        AcquireNSPRef([mtlDevice newRenderPipelineStateWithDescriptor:descriptorMTL error:&error]);
+    if (error != nullptr) {
+        return DAWN_INTERNAL_ERROR(std::string("Error creating pipeline state ") +
+                                   [error.localizedDescription UTF8String]);
     }
+    ASSERT(mMtlRenderPipelineState != nil);
 
-    MTLWinding RenderPipeline::GetMTLFrontFace() const {
-        return mMtlFrontFace;
-    }
+    // Create depth stencil state and cache it, fetch the cached depth stencil state when we
+    // call setDepthStencilState() for a given render pipeline in CommandEncoder, in order
+    // to improve performance.
+    NSRef<MTLDepthStencilDescriptor> depthStencilDesc =
+        MakeDepthStencilDesc(GetDepthStencilState());
+    mMtlDepthStencilState =
+        AcquireNSPRef([mtlDevice newDepthStencilStateWithDescriptor:depthStencilDesc.Get()]);
 
-    MTLCullMode RenderPipeline::GetMTLCullMode() const {
-        return mMtlCullMode;
-    }
+    return {};
+}
 
-    void RenderPipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
-        [encoder setRenderPipelineState:mMtlRenderPipelineState.Get()];
-    }
+MTLPrimitiveType RenderPipeline::GetMTLPrimitiveTopology() const {
+    return mMtlPrimitiveTopology;
+}
 
-    id<MTLDepthStencilState> RenderPipeline::GetMTLDepthStencilState() {
-        return mMtlDepthStencilState.Get();
-    }
+MTLWinding RenderPipeline::GetMTLFrontFace() const {
+    return mMtlFrontFace;
+}
 
-    uint32_t RenderPipeline::GetMtlVertexBufferIndex(VertexBufferSlot slot) const {
-        ASSERT(slot < kMaxVertexBuffersTyped);
-        return mMtlVertexBufferIndices[slot];
-    }
+MTLCullMode RenderPipeline::GetMTLCullMode() const {
+    return mMtlCullMode;
+}
 
-    wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
-        return mStagesRequiringStorageBufferLength;
-    }
+void RenderPipeline::Encode(id<MTLRenderCommandEncoder> encoder) {
+    [encoder setRenderPipelineState:mMtlRenderPipelineState.Get()];
+}
 
-    NSRef<MTLVertexDescriptor> RenderPipeline::MakeVertexDesc() {
-        MTLVertexDescriptor* mtlVertexDescriptor = [MTLVertexDescriptor new];
+id<MTLDepthStencilState> RenderPipeline::GetMTLDepthStencilState() {
+    return mMtlDepthStencilState.Get();
+}
 
-        // Vertex buffers are packed after all the buffers for the bind groups.
-        uint32_t mtlVertexBufferIndex =
-            ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
+uint32_t RenderPipeline::GetMtlVertexBufferIndex(VertexBufferSlot slot) const {
+    ASSERT(slot < kMaxVertexBuffersTyped);
+    return mMtlVertexBufferIndices[slot];
+}
 
-        for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
-            const VertexBufferInfo& info = GetVertexBuffer(slot);
+wgpu::ShaderStage RenderPipeline::GetStagesRequiringStorageBufferLength() const {
+    return mStagesRequiringStorageBufferLength;
+}
 
-            MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
-            if (info.arrayStride == 0) {
-                // For MTLVertexStepFunctionConstant, the stepRate must be 0,
-                // but the arrayStride must NOT be 0, so we made up it with
-                // max(attrib.offset + sizeof(attrib) for each attrib)
-                size_t maxArrayStride = 0;
-                for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
-                    const VertexAttributeInfo& attrib = GetAttribute(loc);
-                    // Only use the attributes that use the current input
-                    if (attrib.vertexBufferSlot != slot) {
-                        continue;
-                    }
-                    maxArrayStride =
-                        std::max(maxArrayStride, GetVertexFormatInfo(attrib.format).byteSize +
-                                                     size_t(attrib.offset));
+NSRef<MTLVertexDescriptor> RenderPipeline::MakeVertexDesc() {
+    MTLVertexDescriptor* mtlVertexDescriptor = [MTLVertexDescriptor new];
+
+    // Vertex buffers are packed after all the buffers for the bind groups.
+    uint32_t mtlVertexBufferIndex =
+        ToBackend(GetLayout())->GetBufferBindingCount(SingleShaderStage::Vertex);
+
+    for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+        const VertexBufferInfo& info = GetVertexBuffer(slot);
+
+        MTLVertexBufferLayoutDescriptor* layoutDesc = [MTLVertexBufferLayoutDescriptor new];
+        if (info.arrayStride == 0) {
+            // For MTLVertexStepFunctionConstant, the stepRate must be 0,
+            // but the arrayStride must NOT be 0, so we made up it with
+            // max(attrib.offset + sizeof(attrib) for each attrib)
+            size_t maxArrayStride = 0;
+            for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+                const VertexAttributeInfo& attrib = GetAttribute(loc);
+                // Only use the attributes that use the current input
+                if (attrib.vertexBufferSlot != slot) {
+                    continue;
                 }
-                layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
-                layoutDesc.stepRate = 0;
-                // Metal requires the stride must be a multiple of 4 bytes, align it with next
-                // multiple of 4 if it's not.
-                layoutDesc.stride = Align(maxArrayStride, 4);
-            } else {
-                layoutDesc.stepFunction = VertexStepModeFunction(info.stepMode);
-                layoutDesc.stepRate = 1;
-                layoutDesc.stride = info.arrayStride;
+                maxArrayStride =
+                    std::max(maxArrayStride,
+                             GetVertexFormatInfo(attrib.format).byteSize + size_t(attrib.offset));
             }
-
-            mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
-            [layoutDesc release];
-
-            mMtlVertexBufferIndices[slot] = mtlVertexBufferIndex;
-            mtlVertexBufferIndex++;
+            layoutDesc.stepFunction = MTLVertexStepFunctionConstant;
+            layoutDesc.stepRate = 0;
+            // Metal requires the stride must be a multiple of 4 bytes, align it with next
+            // multiple of 4 if it's not.
+            layoutDesc.stride = Align(maxArrayStride, 4);
+        } else {
+            layoutDesc.stepFunction = VertexStepModeFunction(info.stepMode);
+            layoutDesc.stepRate = 1;
+            layoutDesc.stride = info.arrayStride;
         }
 
-        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
-            const VertexAttributeInfo& info = GetAttribute(loc);
+        mtlVertexDescriptor.layouts[mtlVertexBufferIndex] = layoutDesc;
+        [layoutDesc release];
 
-            auto attribDesc = [MTLVertexAttributeDescriptor new];
-            attribDesc.format = VertexFormatType(info.format);
-            attribDesc.offset = info.offset;
-            attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
-            mtlVertexDescriptor.attributes[static_cast<uint8_t>(loc)] = attribDesc;
-            [attribDesc release];
-        }
-
-        return AcquireNSRef(mtlVertexDescriptor);
+        mMtlVertexBufferIndices[slot] = mtlVertexBufferIndex;
+        mtlVertexBufferIndex++;
     }
 
-    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
-                                         WGPUCreateRenderPipelineAsyncCallback callback,
-                                         void* userdata) {
-        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
-            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
-                                                            userdata);
-        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+    for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+        const VertexAttributeInfo& info = GetAttribute(loc);
+
+        auto attribDesc = [MTLVertexAttributeDescriptor new];
+        attribDesc.format = VertexFormatType(info.format);
+        attribDesc.offset = info.offset;
+        attribDesc.bufferIndex = mMtlVertexBufferIndices[info.vertexBufferSlot];
+        mtlVertexDescriptor.attributes[static_cast<uint8_t>(loc)] = attribDesc;
+        [attribDesc release];
     }
 
+    return AcquireNSRef(mtlVertexDescriptor);
+}
+
+void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                     WGPUCreateRenderPipelineAsyncCallback callback,
+                                     void* userdata) {
+    std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+        std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                        userdata);
+    CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
+
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/SamplerMTL.h b/src/dawn/native/metal/SamplerMTL.h
index c71c884..2960817 100644
--- a/src/dawn/native/metal/SamplerMTL.h
+++ b/src/dawn/native/metal/SamplerMTL.h
@@ -23,21 +23,20 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    class Sampler final : public SamplerBase {
-      public:
-        static ResultOrError<Ref<Sampler>> Create(Device* device,
-                                                  const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+  public:
+    static ResultOrError<Ref<Sampler>> Create(Device* device, const SamplerDescriptor* descriptor);
 
-        id<MTLSamplerState> GetMTLSamplerState();
+    id<MTLSamplerState> GetMTLSamplerState();
 
-      private:
-        using SamplerBase::SamplerBase;
-        MaybeError Initialize(const SamplerDescriptor* descriptor);
+  private:
+    using SamplerBase::SamplerBase;
+    MaybeError Initialize(const SamplerDescriptor* descriptor);
 
-        NSPRef<id<MTLSamplerState>> mMtlSamplerState;
-    };
+    NSPRef<id<MTLSamplerState>> mMtlSamplerState;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/SamplerMTL.mm b/src/dawn/native/metal/SamplerMTL.mm
index 235b2f8..329cd28 100644
--- a/src/dawn/native/metal/SamplerMTL.mm
+++ b/src/dawn/native/metal/SamplerMTL.mm
@@ -19,88 +19,87 @@
 
 namespace dawn::native::metal {
 
-    namespace {
-        MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
-            switch (mode) {
-                case wgpu::FilterMode::Nearest:
-                    return MTLSamplerMinMagFilterNearest;
-                case wgpu::FilterMode::Linear:
-                    return MTLSamplerMinMagFilterLinear;
-            }
-        }
+namespace {
+MTLSamplerMinMagFilter FilterModeToMinMagFilter(wgpu::FilterMode mode) {
+    switch (mode) {
+        case wgpu::FilterMode::Nearest:
+            return MTLSamplerMinMagFilterNearest;
+        case wgpu::FilterMode::Linear:
+            return MTLSamplerMinMagFilterLinear;
+    }
+}
 
-        MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
-            switch (mode) {
-                case wgpu::FilterMode::Nearest:
-                    return MTLSamplerMipFilterNearest;
-                case wgpu::FilterMode::Linear:
-                    return MTLSamplerMipFilterLinear;
-            }
-        }
+MTLSamplerMipFilter FilterModeToMipFilter(wgpu::FilterMode mode) {
+    switch (mode) {
+        case wgpu::FilterMode::Nearest:
+            return MTLSamplerMipFilterNearest;
+        case wgpu::FilterMode::Linear:
+            return MTLSamplerMipFilterLinear;
+    }
+}
 
-        MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
-            switch (mode) {
-                case wgpu::AddressMode::Repeat:
-                    return MTLSamplerAddressModeRepeat;
-                case wgpu::AddressMode::MirrorRepeat:
-                    return MTLSamplerAddressModeMirrorRepeat;
-                case wgpu::AddressMode::ClampToEdge:
-                    return MTLSamplerAddressModeClampToEdge;
-            }
-        }
+MTLSamplerAddressMode AddressMode(wgpu::AddressMode mode) {
+    switch (mode) {
+        case wgpu::AddressMode::Repeat:
+            return MTLSamplerAddressModeRepeat;
+        case wgpu::AddressMode::MirrorRepeat:
+            return MTLSamplerAddressModeMirrorRepeat;
+        case wgpu::AddressMode::ClampToEdge:
+            return MTLSamplerAddressModeClampToEdge;
+    }
+}
+}  // namespace
+
+// static
+ResultOrError<Ref<Sampler>> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+    DAWN_INVALID_IF(
+        descriptor->compare != wgpu::CompareFunction::Undefined &&
+            device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare),
+        "Sampler compare function (%s) not supported. Compare functions are disabled with the "
+        "Metal backend.",
+        descriptor->compare);
+
+    Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+    DAWN_TRY(sampler->Initialize(descriptor));
+    return sampler;
+}
+
+MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+    NSRef<MTLSamplerDescriptor> mtlDescRef = AcquireNSRef([MTLSamplerDescriptor new]);
+    MTLSamplerDescriptor* mtlDesc = mtlDescRef.Get();
+
+    mtlDesc.minFilter = FilterModeToMinMagFilter(descriptor->minFilter);
+    mtlDesc.magFilter = FilterModeToMinMagFilter(descriptor->magFilter);
+    mtlDesc.mipFilter = FilterModeToMipFilter(descriptor->mipmapFilter);
+
+    mtlDesc.sAddressMode = AddressMode(descriptor->addressModeU);
+    mtlDesc.tAddressMode = AddressMode(descriptor->addressModeV);
+    mtlDesc.rAddressMode = AddressMode(descriptor->addressModeW);
+
+    mtlDesc.lodMinClamp = descriptor->lodMinClamp;
+    mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
+    // https://developer.apple.com/documentation/metal/mtlsamplerdescriptor/1516164-maxanisotropy
+    mtlDesc.maxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
+
+    if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+        // Sampler compare is unsupported before A9, which we validate in
+        // Sampler::Create.
+        mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
+        // The value is default-initialized in the else-case, and we don't set it or the
+        // Metal debug device errors.
     }
 
-    // static
-    ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
-                                                const SamplerDescriptor* descriptor) {
-        DAWN_INVALID_IF(
-            descriptor->compare != wgpu::CompareFunction::Undefined &&
-                device->IsToggleEnabled(Toggle::MetalDisableSamplerCompare),
-            "Sampler compare function (%s) not supported. Compare functions are disabled with the "
-            "Metal backend.",
-            descriptor->compare);
+    mMtlSamplerState = AcquireNSPRef(
+        [ToBackend(GetDevice())->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc]);
 
-        Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
-        DAWN_TRY(sampler->Initialize(descriptor));
-        return sampler;
+    if (mMtlSamplerState == nil) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate sampler.");
     }
+    return {};
+}
 
-    MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
-        NSRef<MTLSamplerDescriptor> mtlDescRef = AcquireNSRef([MTLSamplerDescriptor new]);
-        MTLSamplerDescriptor* mtlDesc = mtlDescRef.Get();
-
-        mtlDesc.minFilter = FilterModeToMinMagFilter(descriptor->minFilter);
-        mtlDesc.magFilter = FilterModeToMinMagFilter(descriptor->magFilter);
-        mtlDesc.mipFilter = FilterModeToMipFilter(descriptor->mipmapFilter);
-
-        mtlDesc.sAddressMode = AddressMode(descriptor->addressModeU);
-        mtlDesc.tAddressMode = AddressMode(descriptor->addressModeV);
-        mtlDesc.rAddressMode = AddressMode(descriptor->addressModeW);
-
-        mtlDesc.lodMinClamp = descriptor->lodMinClamp;
-        mtlDesc.lodMaxClamp = descriptor->lodMaxClamp;
-        // https://developer.apple.com/documentation/metal/mtlsamplerdescriptor/1516164-maxanisotropy
-        mtlDesc.maxAnisotropy = std::min<uint16_t>(GetMaxAnisotropy(), 16u);
-
-        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
-            // Sampler compare is unsupported before A9, which we validate in
-            // Sampler::Create.
-            mtlDesc.compareFunction = ToMetalCompareFunction(descriptor->compare);
-            // The value is default-initialized in the else-case, and we don't set it or the
-            // Metal debug device errors.
-        }
-
-        mMtlSamplerState = AcquireNSPRef(
-            [ToBackend(GetDevice())->GetMTLDevice() newSamplerStateWithDescriptor:mtlDesc]);
-
-        if (mMtlSamplerState == nil) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate sampler.");
-        }
-        return {};
-    }
-
-    id<MTLSamplerState> Sampler::GetMTLSamplerState() {
-        return mMtlSamplerState.Get();
-    }
+id<MTLSamplerState> Sampler::GetMTLSamplerState() {
+    return mMtlSamplerState.Get();
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/ShaderModuleMTL.h b/src/dawn/native/metal/ShaderModuleMTL.h
index 560d8cf..82547b8 100644
--- a/src/dawn/native/metal/ShaderModuleMTL.h
+++ b/src/dawn/native/metal/ShaderModuleMTL.h
@@ -27,47 +27,47 @@
 
 namespace dawn::native::metal {
 
-    class Device;
-    class PipelineLayout;
-    class RenderPipeline;
+class Device;
+class PipelineLayout;
+class RenderPipeline;
 
-    class ShaderModule final : public ShaderModuleBase {
-      public:
-        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
-                                                       const ShaderModuleDescriptor* descriptor,
-                                                       ShaderModuleParseResult* parseResult);
+class ShaderModule final : public ShaderModuleBase {
+  public:
+    static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                   const ShaderModuleDescriptor* descriptor,
+                                                   ShaderModuleParseResult* parseResult);
 
-        struct MetalFunctionData {
-            NSPRef<id<MTLFunction>> function;
-            bool needsStorageBufferLength;
-            std::vector<uint32_t> workgroupAllocations;
-        };
-
-        // MTLFunctionConstantValues needs @available tag to compile
-        // Use id (like void*) in function signature as workaround and do static cast inside
-        MaybeError CreateFunction(const char* entryPointName,
-                                  SingleShaderStage stage,
-                                  const PipelineLayout* layout,
-                                  MetalFunctionData* out,
-                                  id constantValues = nil,
-                                  uint32_t sampleMask = 0xFFFFFFFF,
-                                  const RenderPipeline* renderPipeline = nullptr);
-
-      private:
-        ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
-                                                  SingleShaderStage stage,
-                                                  const PipelineLayout* layout,
-                                                  uint32_t sampleMask,
-                                                  const RenderPipeline* renderPipeline,
-                                                  std::string* remappedEntryPointName,
-                                                  bool* needsStorageBufferLength,
-                                                  bool* hasInvariantAttribute,
-                                                  std::vector<uint32_t>* workgroupAllocations);
-        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
-        ~ShaderModule() override = default;
-        MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    struct MetalFunctionData {
+        NSPRef<id<MTLFunction>> function;
+        bool needsStorageBufferLength;
+        std::vector<uint32_t> workgroupAllocations;
     };
 
+    // MTLFunctionConstantValues needs @available tag to compile
+    // Use id (like void*) in function signature as workaround and do static cast inside
+    MaybeError CreateFunction(const char* entryPointName,
+                              SingleShaderStage stage,
+                              const PipelineLayout* layout,
+                              MetalFunctionData* out,
+                              id constantValues = nil,
+                              uint32_t sampleMask = 0xFFFFFFFF,
+                              const RenderPipeline* renderPipeline = nullptr);
+
+  private:
+    ResultOrError<std::string> TranslateToMSL(const char* entryPointName,
+                                              SingleShaderStage stage,
+                                              const PipelineLayout* layout,
+                                              uint32_t sampleMask,
+                                              const RenderPipeline* renderPipeline,
+                                              std::string* remappedEntryPointName,
+                                              bool* needsStorageBufferLength,
+                                              bool* hasInvariantAttribute,
+                                              std::vector<uint32_t>* workgroupAllocations);
+    ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+    ~ShaderModule() override = default;
+    MaybeError Initialize(ShaderModuleParseResult* parseResult);
+};
+
 }  // namespace dawn::native::metal
 
 #endif  // SRC_DAWN_NATIVE_METAL_SHADERMODULEMTL_H_
diff --git a/src/dawn/native/metal/ShaderModuleMTL.mm b/src/dawn/native/metal/ShaderModuleMTL.mm
index e182898..a32433a 100644
--- a/src/dawn/native/metal/ShaderModuleMTL.mm
+++ b/src/dawn/native/metal/ShaderModuleMTL.mm
@@ -28,251 +28,247 @@
 
 namespace dawn::native::metal {
 
-    // static
-    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
-                                                          const ShaderModuleDescriptor* descriptor,
-                                                          ShaderModuleParseResult* parseResult) {
-        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
-        DAWN_TRY(module->Initialize(parseResult));
-        return module;
-    }
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                      const ShaderModuleDescriptor* descriptor,
+                                                      ShaderModuleParseResult* parseResult) {
+    Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+    DAWN_TRY(module->Initialize(parseResult));
+    return module;
+}
 
-    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
-        : ShaderModuleBase(device, descriptor) {
-    }
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+    : ShaderModuleBase(device, descriptor) {}
 
-    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
-        ScopedTintICEHandler scopedICEHandler(GetDevice());
-        return InitializeBase(parseResult);
-    }
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+    ScopedTintICEHandler scopedICEHandler(GetDevice());
+    return InitializeBase(parseResult);
+}
 
-    ResultOrError<std::string> ShaderModule::TranslateToMSL(
-        const char* entryPointName,
-        SingleShaderStage stage,
-        const PipelineLayout* layout,
-        uint32_t sampleMask,
-        const RenderPipeline* renderPipeline,
-        std::string* remappedEntryPointName,
-        bool* needsStorageBufferLength,
-        bool* hasInvariantAttribute,
-        std::vector<uint32_t>* workgroupAllocations) {
-        ScopedTintICEHandler scopedICEHandler(GetDevice());
+ResultOrError<std::string> ShaderModule::TranslateToMSL(
+    const char* entryPointName,
+    SingleShaderStage stage,
+    const PipelineLayout* layout,
+    uint32_t sampleMask,
+    const RenderPipeline* renderPipeline,
+    std::string* remappedEntryPointName,
+    bool* needsStorageBufferLength,
+    bool* hasInvariantAttribute,
+    std::vector<uint32_t>* workgroupAllocations) {
+    ScopedTintICEHandler scopedICEHandler(GetDevice());
 
-        std::ostringstream errorStream;
-        errorStream << "Tint MSL failure:" << std::endl;
+    std::ostringstream errorStream;
+    errorStream << "Tint MSL failure:" << std::endl;
 
-        // Remap BindingNumber to BindingIndex in WGSL shader
-        using BindingRemapper = tint::transform::BindingRemapper;
-        using BindingPoint = tint::transform::BindingPoint;
-        BindingRemapper::BindingPoints bindingPoints;
-        BindingRemapper::AccessControls accessControls;
+    // Remap BindingNumber to BindingIndex in WGSL shader
+    using BindingRemapper = tint::transform::BindingRemapper;
+    using BindingPoint = tint::transform::BindingPoint;
+    BindingRemapper::BindingPoints bindingPoints;
+    BindingRemapper::AccessControls accessControls;
 
-        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            const BindGroupLayoutBase::BindingMap& bindingMap =
-                layout->GetBindGroupLayout(group)->GetBindingMap();
-            for (const auto [bindingNumber, bindingIndex] : bindingMap) {
-                const BindingInfo& bindingInfo =
-                    layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+    for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        const BindGroupLayoutBase::BindingMap& bindingMap =
+            layout->GetBindGroupLayout(group)->GetBindingMap();
+        for (const auto [bindingNumber, bindingIndex] : bindingMap) {
+            const BindingInfo& bindingInfo =
+                layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
 
-                if (!(bindingInfo.visibility & StageBit(stage))) {
-                    continue;
-                }
+            if (!(bindingInfo.visibility & StageBit(stage))) {
+                continue;
+            }
 
-                uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
+            uint32_t shaderIndex = layout->GetBindingIndexInfo(stage)[group][bindingIndex];
 
-                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
-                                             static_cast<uint32_t>(bindingNumber)};
-                BindingPoint dstBindingPoint{0, shaderIndex};
-                if (srcBindingPoint != dstBindingPoint) {
-                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
-                }
+            BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                         static_cast<uint32_t>(bindingNumber)};
+            BindingPoint dstBindingPoint{0, shaderIndex};
+            if (srcBindingPoint != dstBindingPoint) {
+                bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
             }
         }
+    }
 
-        tint::transform::Manager transformManager;
-        tint::transform::DataMap transformInputs;
+    tint::transform::Manager transformManager;
+    tint::transform::DataMap transformInputs;
 
-        // We only remap bindings for the target entry point, so we need to strip all other entry
-        // points to avoid generating invalid bindings for them.
-        transformManager.Add<tint::transform::SingleEntryPoint>();
-        transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+    // We only remap bindings for the target entry point, so we need to strip all other entry
+    // points to avoid generating invalid bindings for them.
+    transformManager.Add<tint::transform::SingleEntryPoint>();
+    transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
 
-        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+    AddExternalTextureTransform(layout, &transformManager, &transformInputs);
 
-        if (stage == SingleShaderStage::Vertex &&
-            GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
-            transformManager.Add<tint::transform::VertexPulling>();
-            AddVertexPullingTransformConfig(*renderPipeline, entryPointName,
-                                            kPullingBufferBindingSet, &transformInputs);
+    if (stage == SingleShaderStage::Vertex &&
+        GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling)) {
+        transformManager.Add<tint::transform::VertexPulling>();
+        AddVertexPullingTransformConfig(*renderPipeline, entryPointName, kPullingBufferBindingSet,
+                                        &transformInputs);
 
-            for (VertexBufferSlot slot :
-                 IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
-                uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(slot);
+        for (VertexBufferSlot slot : IterateBitSet(renderPipeline->GetVertexBufferSlotsUsed())) {
+            uint32_t metalIndex = renderPipeline->GetMtlVertexBufferIndex(slot);
 
-                // Tell Tint to map (kPullingBufferBindingSet, slot) to this MSL buffer index.
-                BindingPoint srcBindingPoint{static_cast<uint32_t>(kPullingBufferBindingSet),
-                                             static_cast<uint8_t>(slot)};
-                BindingPoint dstBindingPoint{0, metalIndex};
-                if (srcBindingPoint != dstBindingPoint) {
-                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
-                }
+            // Tell Tint to map (kPullingBufferBindingSet, slot) to this MSL buffer index.
+            BindingPoint srcBindingPoint{static_cast<uint32_t>(kPullingBufferBindingSet),
+                                         static_cast<uint8_t>(slot)};
+            BindingPoint dstBindingPoint{0, metalIndex};
+            if (srcBindingPoint != dstBindingPoint) {
+                bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
             }
         }
-        if (GetDevice()->IsRobustnessEnabled()) {
-            transformManager.Add<tint::transform::Robustness>();
-        }
-        transformManager.Add<tint::transform::BindingRemapper>();
-        transformManager.Add<tint::transform::Renamer>();
+    }
+    if (GetDevice()->IsRobustnessEnabled()) {
+        transformManager.Add<tint::transform::Robustness>();
+    }
+    transformManager.Add<tint::transform::BindingRemapper>();
+    transformManager.Add<tint::transform::Renamer>();
 
-        if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
-            // We still need to rename MSL reserved keywords
-            transformInputs.Add<tint::transform::Renamer::Config>(
-                tint::transform::Renamer::Target::kMslKeywords);
-        }
+    if (GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming)) {
+        // We still need to rename MSL reserved keywords
+        transformInputs.Add<tint::transform::Renamer::Config>(
+            tint::transform::Renamer::Target::kMslKeywords);
+    }
 
-        transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
-                                                         std::move(accessControls),
-                                                         /* mayCollide */ true);
+    transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+                                                     std::move(accessControls),
+                                                     /* mayCollide */ true);
 
-        tint::Program program;
-        tint::transform::DataMap transformOutputs;
-        {
-            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
-            DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
-                                                   transformInputs, &transformOutputs, nullptr));
-        }
+    tint::Program program;
+    tint::transform::DataMap transformOutputs;
+    {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+        DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+                                               &transformOutputs, nullptr));
+    }
 
-        if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
-            auto it = data->remappings.find(entryPointName);
-            if (it != data->remappings.end()) {
-                *remappedEntryPointName = it->second;
-            } else {
-                DAWN_INVALID_IF(!GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming),
-                                "Could not find remapped name for entry point.");
-
-                *remappedEntryPointName = entryPointName;
-            }
+    if (auto* data = transformOutputs.Get<tint::transform::Renamer::Data>()) {
+        auto it = data->remappings.find(entryPointName);
+        if (it != data->remappings.end()) {
+            *remappedEntryPointName = it->second;
         } else {
-            return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
+            DAWN_INVALID_IF(!GetDevice()->IsToggleEnabled(Toggle::DisableSymbolRenaming),
+                            "Could not find remapped name for entry point.");
+
+            *remappedEntryPointName = entryPointName;
         }
-
-        tint::writer::msl::Options options;
-        options.buffer_size_ubo_index = kBufferLengthBufferSlot;
-        options.fixed_sample_mask = sampleMask;
-        options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
-        options.emit_vertex_point_size =
-            stage == SingleShaderStage::Vertex &&
-            renderPipeline->GetPrimitiveTopology() == wgpu::PrimitiveTopology::PointList;
-        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::msl::Generate");
-        auto result = tint::writer::msl::Generate(&program, options);
-        DAWN_INVALID_IF(!result.success, "An error occured while generating MSL: %s.",
-                        result.error);
-
-        *needsStorageBufferLength = result.needs_storage_buffer_sizes;
-        *hasInvariantAttribute = result.has_invariant_attribute;
-        *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
-
-        return std::move(result.msl);
+    } else {
+        return DAWN_FORMAT_VALIDATION_ERROR("Transform output missing renamer data.");
     }
 
-    MaybeError ShaderModule::CreateFunction(const char* entryPointName,
-                                            SingleShaderStage stage,
-                                            const PipelineLayout* layout,
-                                            ShaderModule::MetalFunctionData* out,
-                                            id constantValuesPointer,
-                                            uint32_t sampleMask,
-                                            const RenderPipeline* renderPipeline) {
-        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleMTL::CreateFunction");
+    tint::writer::msl::Options options;
+    options.buffer_size_ubo_index = kBufferLengthBufferSlot;
+    options.fixed_sample_mask = sampleMask;
+    options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+    options.emit_vertex_point_size =
+        stage == SingleShaderStage::Vertex &&
+        renderPipeline->GetPrimitiveTopology() == wgpu::PrimitiveTopology::PointList;
+    TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::msl::Generate");
+    auto result = tint::writer::msl::Generate(&program, options);
+    DAWN_INVALID_IF(!result.success, "An error occurred while generating MSL: %s.", result.error);
 
-        ASSERT(!IsError());
-        ASSERT(out);
+    *needsStorageBufferLength = result.needs_storage_buffer_sizes;
+    *hasInvariantAttribute = result.has_invariant_attribute;
+    *workgroupAllocations = std::move(result.workgroup_allocations[*remappedEntryPointName]);
 
-        // Vertex stages must specify a renderPipeline
-        if (stage == SingleShaderStage::Vertex) {
-            ASSERT(renderPipeline != nullptr);
-        }
+    return std::move(result.msl);
+}
 
-        std::string remappedEntryPointName;
-        std::string msl;
-        bool hasInvariantAttribute = false;
-        DAWN_TRY_ASSIGN(msl,
-                        TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
-                                       &remappedEntryPointName, &out->needsStorageBufferLength,
-                                       &hasInvariantAttribute, &out->workgroupAllocations));
+MaybeError ShaderModule::CreateFunction(const char* entryPointName,
+                                        SingleShaderStage stage,
+                                        const PipelineLayout* layout,
+                                        ShaderModule::MetalFunctionData* out,
+                                        id constantValuesPointer,
+                                        uint32_t sampleMask,
+                                        const RenderPipeline* renderPipeline) {
+    TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleMTL::CreateFunction");
 
-        // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
-        // category. -Wunused-variable in particular comes up a lot in generated code, and some
-        // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
-        // of a warning.
-        msl = R"(
+    ASSERT(!IsError());
+    ASSERT(out);
+
+    // Vertex stages must specify a renderPipeline
+    if (stage == SingleShaderStage::Vertex) {
+        ASSERT(renderPipeline != nullptr);
+    }
+
+    std::string remappedEntryPointName;
+    std::string msl;
+    bool hasInvariantAttribute = false;
+    DAWN_TRY_ASSIGN(msl, TranslateToMSL(entryPointName, stage, layout, sampleMask, renderPipeline,
+                                        &remappedEntryPointName, &out->needsStorageBufferLength,
+                                        &hasInvariantAttribute, &out->workgroupAllocations));
+
+    // Metal uses Clang to compile the shader as C++14. Disable everything in the -Wall
+    // category. -Wunused-variable in particular comes up a lot in generated code, and some
+    // (old?) Metal drivers accidentally treat it as a MTLLibraryErrorCompileError instead
+    // of a warning.
+    msl = R"(
 #ifdef __clang__
 #pragma clang diagnostic ignored "-Wall"
 #endif
 )" + msl;
 
-        if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
-            std::ostringstream dumpedMsg;
-            dumpedMsg << "/* Dumped generated MSL */" << std::endl << msl;
-            GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
-        }
-
-        NSRef<NSString> mslSource = AcquireNSRef([[NSString alloc] initWithUTF8String:msl.c_str()]);
-
-        NSRef<MTLCompileOptions> compileOptions = AcquireNSRef([[MTLCompileOptions alloc] init]);
-        if (hasInvariantAttribute) {
-            if (@available(macOS 11.0, iOS 13.0, *)) {
-                (*compileOptions).preserveInvariance = true;
-            }
-        }
-        auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
-        NSError* error = nullptr;
-
-        NSPRef<id<MTLLibrary>> library;
-        {
-            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLDevice::newLibraryWithSource");
-            library = AcquireNSPRef([mtlDevice newLibraryWithSource:mslSource.Get()
-                                                            options:compileOptions.Get()
-                                                              error:&error]);
-        }
-
-        if (error != nullptr) {
-            DAWN_INVALID_IF(error.code != MTLLibraryErrorCompileWarning,
-                            "Unable to create library object: %s.",
-                            [error.localizedDescription UTF8String]);
-        }
-        ASSERT(library != nil);
-
-        NSRef<NSString> name =
-            AcquireNSRef([[NSString alloc] initWithUTF8String:remappedEntryPointName.c_str()]);
-
-        {
-            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLLibrary::newFunctionWithName");
-            if (constantValuesPointer != nil) {
-                if (@available(macOS 10.12, *)) {
-                    MTLFunctionConstantValues* constantValues = constantValuesPointer;
-                    out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()
-                                                                 constantValues:constantValues
-                                                                          error:&error]);
-                    if (error != nullptr) {
-                        if (error.code != MTLLibraryErrorCompileWarning) {
-                            return DAWN_VALIDATION_ERROR(std::string("Function compile error: ") +
-                                                         [error.localizedDescription UTF8String]);
-                        }
-                    }
-                    ASSERT(out->function != nil);
-                } else {
-                    UNREACHABLE();
-                }
-            } else {
-                out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()]);
-            }
-        }
-
-        if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
-            GetEntryPoint(entryPointName).usedVertexInputs.any()) {
-            out->needsStorageBufferLength = true;
-        }
-
-        return {};
+    if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+        std::ostringstream dumpedMsg;
+        dumpedMsg << "/* Dumped generated MSL */" << std::endl << msl;
+        GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
     }
+
+    NSRef<NSString> mslSource = AcquireNSRef([[NSString alloc] initWithUTF8String:msl.c_str()]);
+
+    NSRef<MTLCompileOptions> compileOptions = AcquireNSRef([[MTLCompileOptions alloc] init]);
+    if (hasInvariantAttribute) {
+        if (@available(macOS 11.0, iOS 13.0, *)) {
+            (*compileOptions).preserveInvariance = true;
+        }
+    }
+    auto mtlDevice = ToBackend(GetDevice())->GetMTLDevice();
+    NSError* error = nullptr;
+
+    NSPRef<id<MTLLibrary>> library;
+    {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLDevice::newLibraryWithSource");
+        library = AcquireNSPRef([mtlDevice newLibraryWithSource:mslSource.Get()
+                                                        options:compileOptions.Get()
+                                                          error:&error]);
+    }
+
+    if (error != nullptr) {
+        DAWN_INVALID_IF(error.code != MTLLibraryErrorCompileWarning,
+                        "Unable to create library object: %s.",
+                        [error.localizedDescription UTF8String]);
+    }
+    ASSERT(library != nil);
+
+    NSRef<NSString> name =
+        AcquireNSRef([[NSString alloc] initWithUTF8String:remappedEntryPointName.c_str()]);
+
+    {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "MTLLibrary::newFunctionWithName");
+        if (constantValuesPointer != nil) {
+            if (@available(macOS 10.12, *)) {
+                MTLFunctionConstantValues* constantValues = constantValuesPointer;
+                out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()
+                                                             constantValues:constantValues
+                                                                      error:&error]);
+                if (error != nullptr) {
+                    if (error.code != MTLLibraryErrorCompileWarning) {
+                        return DAWN_VALIDATION_ERROR(std::string("Function compile error: ") +
+                                                     [error.localizedDescription UTF8String]);
+                    }
+                }
+                ASSERT(out->function != nil);
+            } else {
+                UNREACHABLE();
+            }
+        } else {
+            out->function = AcquireNSPRef([*library newFunctionWithName:name.Get()]);
+        }
+    }
+
+    if (GetDevice()->IsToggleEnabled(Toggle::MetalEnableVertexPulling) &&
+        GetEntryPoint(entryPointName).usedVertexInputs.any()) {
+        out->needsStorageBufferLength = true;
+    }
+
+    return {};
+}
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/StagingBufferMTL.h b/src/dawn/native/metal/StagingBufferMTL.h
index 7c6636f..afd2eac 100644
--- a/src/dawn/native/metal/StagingBufferMTL.h
+++ b/src/dawn/native/metal/StagingBufferMTL.h
@@ -23,20 +23,20 @@
 
 namespace dawn::native::metal {
 
-    class Device;
+class Device;
 
-    class StagingBuffer : public StagingBufferBase {
-      public:
-        StagingBuffer(size_t size, Device* device);
+class StagingBuffer : public StagingBufferBase {
+  public:
+    StagingBuffer(size_t size, Device* device);
 
-        id<MTLBuffer> GetBufferHandle() const;
+    id<MTLBuffer> GetBufferHandle() const;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-      private:
-        Device* mDevice;
-        NSPRef<id<MTLBuffer>> mBuffer;
-    };
+  private:
+    Device* mDevice;
+    NSPRef<id<MTLBuffer>> mBuffer;
+};
 }  // namespace dawn::native::metal
 
 #endif  // SRC_DAWN_NATIVE_METAL_STAGINGBUFFERMTL_H_
diff --git a/src/dawn/native/metal/StagingBufferMTL.mm b/src/dawn/native/metal/StagingBufferMTL.mm
index a3fd91f..6bdc9e3 100644
--- a/src/dawn/native/metal/StagingBufferMTL.mm
+++ b/src/dawn/native/metal/StagingBufferMTL.mm
@@ -17,30 +17,29 @@
 
 namespace dawn::native::metal {
 
-    StagingBuffer::StagingBuffer(size_t size, Device* device)
-        : StagingBufferBase(size), mDevice(device) {
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+    : StagingBufferBase(size), mDevice(device) {}
+
+MaybeError StagingBuffer::Initialize() {
+    const size_t bufferSize = GetSize();
+    mBuffer =
+        AcquireNSPRef([mDevice->GetMTLDevice() newBufferWithLength:bufferSize
+                                                           options:MTLResourceStorageModeShared]);
+
+    if (mBuffer == nullptr) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
     }
 
-    MaybeError StagingBuffer::Initialize() {
-        const size_t bufferSize = GetSize();
-        mBuffer = AcquireNSPRef([mDevice->GetMTLDevice()
-            newBufferWithLength:bufferSize
-                        options:MTLResourceStorageModeShared]);
-
-        if (mBuffer == nullptr) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
-        }
-
-        mMappedPointer = [*mBuffer contents];
-        if (mMappedPointer == nullptr) {
-            return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
-        }
-
-        return {};
+    mMappedPointer = [*mBuffer contents];
+    if (mMappedPointer == nullptr) {
+        return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
     }
 
-    id<MTLBuffer> StagingBuffer::GetBufferHandle() const {
-        return mBuffer.Get();
-    }
+    return {};
+}
+
+id<MTLBuffer> StagingBuffer::GetBufferHandle() const {
+    return mBuffer.Get();
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/SwapChainMTL.h b/src/dawn/native/metal/SwapChainMTL.h
index a5a1e48..003629c 100644
--- a/src/dawn/native/metal/SwapChainMTL.h
+++ b/src/dawn/native/metal/SwapChainMTL.h
@@ -24,43 +24,43 @@
 
 namespace dawn::native::metal {
 
-    class Device;
-    class Texture;
+class Device;
+class Texture;
 
-    class OldSwapChain final : public OldSwapChainBase {
-      public:
-        static Ref<OldSwapChain> Create(Device* deivce, const SwapChainDescriptor* descriptor);
+class OldSwapChain final : public OldSwapChainBase {
+  public:
+    static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
 
-      protected:
-        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
-        ~OldSwapChain() override;
-        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
-        MaybeError OnBeforePresent(TextureViewBase* view) override;
-    };
+  protected:
+    OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+    ~OldSwapChain() override;
+    TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+    MaybeError OnBeforePresent(TextureViewBase* view) override;
+};
 
-    class SwapChain final : public NewSwapChainBase {
-      public:
-        static ResultOrError<Ref<SwapChain>> Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor);
-        ~SwapChain() override;
+class SwapChain final : public NewSwapChainBase {
+  public:
+    static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor);
+    ~SwapChain() override;
 
-      private:
-        void DestroyImpl() override;
+  private:
+    void DestroyImpl() override;
 
-        using NewSwapChainBase::NewSwapChainBase;
-        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+    using NewSwapChainBase::NewSwapChainBase;
+    MaybeError Initialize(NewSwapChainBase* previousSwapChain);
 
-        NSRef<CAMetalLayer> mLayer;
+    NSRef<CAMetalLayer> mLayer;
 
-        NSPRef<id<CAMetalDrawable>> mCurrentDrawable;
-        Ref<Texture> mTexture;
+    NSPRef<id<CAMetalDrawable>> mCurrentDrawable;
+    Ref<Texture> mTexture;
 
-        MaybeError PresentImpl() override;
-        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
-        void DetachFromSurfaceImpl() override;
-    };
+    MaybeError PresentImpl() override;
+    ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+    void DetachFromSurfaceImpl() override;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/SwapChainMTL.mm b/src/dawn/native/metal/SwapChainMTL.mm
index 758ea20..bf68884 100644
--- a/src/dawn/native/metal/SwapChainMTL.mm
+++ b/src/dawn/native/metal/SwapChainMTL.mm
@@ -24,131 +24,130 @@
 
 namespace dawn::native::metal {
 
-    // OldSwapChain
+// OldSwapChain
 
-    // static
-    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
-        return AcquireRef(new OldSwapChain(device, descriptor));
+// static
+Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+    return AcquireRef(new OldSwapChain(device, descriptor));
+}
+
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+    : OldSwapChainBase(device, descriptor) {
+    const auto& im = GetImplementation();
+    DawnWSIContextMetal wsiContext = {};
+    wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
+    wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
+    im.Init(im.userData, &wsiContext);
+}
+
+OldSwapChain::~OldSwapChain() {}
+
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+    const auto& im = GetImplementation();
+    DawnSwapChainNextTexture next = {};
+    DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+    if (error) {
+        GetDevice()->HandleError(InternalErrorType::Internal, error);
+        return nullptr;
     }
 
-    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
-        : OldSwapChainBase(device, descriptor) {
-        const auto& im = GetImplementation();
-        DawnWSIContextMetal wsiContext = {};
-        wsiContext.device = ToBackend(GetDevice())->GetMTLDevice();
-        wsiContext.queue = ToBackend(GetDevice())->GetMTLQueue();
-        im.Init(im.userData, &wsiContext);
+    id<MTLTexture> nativeTexture = reinterpret_cast<id<MTLTexture>>(next.texture.ptr);
+
+    return Texture::CreateWrapping(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
+}
+
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+    return {};
+}
+
+// SwapChain
+
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor) {
+    Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+    DAWN_TRY(swapchain->Initialize(previousSwapChain));
+    return swapchain;
+}
+
+SwapChain::~SwapChain() = default;
+
+void SwapChain::DestroyImpl() {
+    SwapChainBase::DestroyImpl();
+    DetachFromSurface();
+}
+
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+    ASSERT(GetSurface()->GetType() == Surface::Type::MetalLayer);
+
+    if (previousSwapChain != nullptr) {
+        // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+        // multiple backends one after the other. It probably needs to block until the backend
+        // and GPU are completely finished with the previous swapchain.
+        DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Metal,
+                        "Metal SwapChain cannot switch backend types from %s to %s.",
+                        previousSwapChain->GetBackendType(), wgpu::BackendType::Metal);
+
+        previousSwapChain->DetachFromSurface();
     }
 
-    OldSwapChain::~OldSwapChain() {
-    }
+    mLayer = static_cast<CAMetalLayer*>(GetSurface()->GetMetalLayer());
+    ASSERT(mLayer != nullptr);
 
-    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
-        const auto& im = GetImplementation();
-        DawnSwapChainNextTexture next = {};
-        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
-        if (error) {
-            GetDevice()->HandleError(InternalErrorType::Internal, error);
-            return nullptr;
-        }
+    CGSize size = {};
+    size.width = GetWidth();
+    size.height = GetHeight();
+    [*mLayer setDrawableSize:size];
 
-        id<MTLTexture> nativeTexture = reinterpret_cast<id<MTLTexture>>(next.texture.ptr);
-
-        return Texture::CreateWrapping(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
-    }
-
-    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
-        return {};
-    }
-
-    // SwapChain
-
-    // static
-    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor) {
-        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
-        DAWN_TRY(swapchain->Initialize(previousSwapChain));
-        return swapchain;
-    }
-
-    SwapChain::~SwapChain() = default;
-
-    void SwapChain::DestroyImpl() {
-        SwapChainBase::DestroyImpl();
-        DetachFromSurface();
-    }
-
-    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
-        ASSERT(GetSurface()->GetType() == Surface::Type::MetalLayer);
-
-        if (previousSwapChain != nullptr) {
-            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
-            // multiple backends one after the other. It probably needs to block until the backend
-            // and GPU are completely finished with the previous swapchain.
-            DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Metal,
-                            "Metal SwapChain cannot switch backend types from %s to %s.",
-                            previousSwapChain->GetBackendType(), wgpu::BackendType::Metal);
-
-            previousSwapChain->DetachFromSurface();
-        }
-
-        mLayer = static_cast<CAMetalLayer*>(GetSurface()->GetMetalLayer());
-        ASSERT(mLayer != nullptr);
-
-        CGSize size = {};
-        size.width = GetWidth();
-        size.height = GetHeight();
-        [*mLayer setDrawableSize:size];
-
-        [*mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::RenderAttachment)];
-        [*mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
-        [*mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
+    [*mLayer setFramebufferOnly:(GetUsage() == wgpu::TextureUsage::RenderAttachment)];
+    [*mLayer setDevice:ToBackend(GetDevice())->GetMTLDevice()];
+    [*mLayer setPixelFormat:MetalPixelFormat(GetFormat())];
 
 #if defined(DAWN_PLATFORM_MACOS)
-        if (@available(macos 10.13, *)) {
-            [*mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
-        }
+    if (@available(macos 10.13, *)) {
+        [*mLayer setDisplaySyncEnabled:(GetPresentMode() != wgpu::PresentMode::Immediate)];
+    }
 #endif  // defined(DAWN_PLATFORM_MACOS)
 
-        // There is no way to control Fifo vs. Mailbox in Metal.
+    // There is no way to control Fifo vs. Mailbox in Metal.
 
-        return {};
-    }
+    return {};
+}
 
-    MaybeError SwapChain::PresentImpl() {
-        ASSERT(mCurrentDrawable != nullptr);
-        [*mCurrentDrawable present];
+MaybeError SwapChain::PresentImpl() {
+    ASSERT(mCurrentDrawable != nullptr);
+    [*mCurrentDrawable present];
 
+    mTexture->APIDestroy();
+    mTexture = nullptr;
+
+    mCurrentDrawable = nullptr;
+
+    return {};
+}
+
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+    ASSERT(mCurrentDrawable == nullptr);
+    mCurrentDrawable = [*mLayer nextDrawable];
+
+    TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+
+    mTexture =
+        Texture::CreateWrapping(ToBackend(GetDevice()), &textureDesc, [*mCurrentDrawable texture]);
+    return mTexture->CreateView();
+}
+
+void SwapChain::DetachFromSurfaceImpl() {
+    ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
+
+    if (mTexture != nullptr) {
         mTexture->APIDestroy();
         mTexture = nullptr;
 
         mCurrentDrawable = nullptr;
-
-        return {};
     }
-
-    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
-        ASSERT(mCurrentDrawable == nullptr);
-        mCurrentDrawable = [*mLayer nextDrawable];
-
-        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
-
-        mTexture = Texture::CreateWrapping(ToBackend(GetDevice()), &textureDesc,
-                                           [*mCurrentDrawable texture]);
-        return mTexture->CreateView();
-    }
-
-    void SwapChain::DetachFromSurfaceImpl() {
-        ASSERT((mTexture == nullptr) == (mCurrentDrawable == nullptr));
-
-        if (mTexture != nullptr) {
-            mTexture->APIDestroy();
-            mTexture = nullptr;
-
-            mCurrentDrawable = nullptr;
-        }
-    }
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/TextureMTL.h b/src/dawn/native/metal/TextureMTL.h
index 484a1b4..d8b8ccd 100644
--- a/src/dawn/native/metal/TextureMTL.h
+++ b/src/dawn/native/metal/TextureMTL.h
@@ -26,79 +26,77 @@
 
 namespace dawn::native::metal {
 
-    class CommandRecordingContext;
-    class Device;
+class CommandRecordingContext;
+class Device;
 
-    MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
-    MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
-                                             const TextureDescriptor* descriptor,
-                                             IOSurfaceRef ioSurface);
+MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format);
+MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase* device,
+                                         const TextureDescriptor* descriptor,
+                                         IOSurfaceRef ioSurface);
 
-    class Texture final : public TextureBase {
-      public:
-        static ResultOrError<Ref<Texture>> Create(Device* device,
-                                                  const TextureDescriptor* descriptor);
-        static ResultOrError<Ref<Texture>> CreateFromIOSurface(
-            Device* device,
-            const ExternalImageDescriptor* descriptor,
-            IOSurfaceRef ioSurface);
-        static Ref<Texture> CreateWrapping(Device* device,
-                                           const TextureDescriptor* descriptor,
-                                           NSPRef<id<MTLTexture>> wrapped);
+class Texture final : public TextureBase {
+  public:
+    static ResultOrError<Ref<Texture>> Create(Device* device, const TextureDescriptor* descriptor);
+    static ResultOrError<Ref<Texture>> CreateFromIOSurface(
+        Device* device,
+        const ExternalImageDescriptor* descriptor,
+        IOSurfaceRef ioSurface);
+    static Ref<Texture> CreateWrapping(Device* device,
+                                       const TextureDescriptor* descriptor,
+                                       NSPRef<id<MTLTexture>> wrapped);
 
-        id<MTLTexture> GetMTLTexture() const;
-        IOSurfaceRef GetIOSurface();
-        NSPRef<id<MTLTexture>> CreateFormatView(wgpu::TextureFormat format);
+    id<MTLTexture> GetMTLTexture() const;
+    IOSurfaceRef GetIOSurface();
+    NSPRef<id<MTLTexture>> CreateFormatView(wgpu::TextureFormat format);
 
-        void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                                 const SubresourceRange& range);
+    void EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                             const SubresourceRange& range);
 
-      private:
-        using TextureBase::TextureBase;
-        ~Texture() override;
+  private:
+    using TextureBase::TextureBase;
+    ~Texture() override;
 
-        NSRef<MTLTextureDescriptor> CreateMetalTextureDescriptor() const;
+    NSRef<MTLTextureDescriptor> CreateMetalTextureDescriptor() const;
 
-        MaybeError InitializeAsInternalTexture(const TextureDescriptor* descriptor);
-        MaybeError InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
-                                           const TextureDescriptor* textureDescriptor,
-                                           IOSurfaceRef ioSurface);
-        void InitializeAsWrapping(const TextureDescriptor* descriptor,
-                                  NSPRef<id<MTLTexture>> wrapped);
+    MaybeError InitializeAsInternalTexture(const TextureDescriptor* descriptor);
+    MaybeError InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+                                       const TextureDescriptor* textureDescriptor,
+                                       IOSurfaceRef ioSurface);
+    void InitializeAsWrapping(const TextureDescriptor* descriptor, NSPRef<id<MTLTexture>> wrapped);
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        MaybeError ClearTexture(CommandRecordingContext* commandContext,
-                                const SubresourceRange& range,
-                                TextureBase::ClearValue clearValue);
+    MaybeError ClearTexture(CommandRecordingContext* commandContext,
+                            const SubresourceRange& range,
+                            TextureBase::ClearValue clearValue);
 
-        NSPRef<id<MTLTexture>> mMtlTexture;
+    NSPRef<id<MTLTexture>> mMtlTexture;
 
-        MTLTextureUsage mMtlUsage;
-        CFRef<IOSurfaceRef> mIOSurface = nullptr;
+    MTLTextureUsage mMtlUsage;
+    CFRef<IOSurfaceRef> mIOSurface = nullptr;
+};
+
+class TextureView final : public TextureViewBase {
+  public:
+    static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+                                                  const TextureViewDescriptor* descriptor);
+
+    id<MTLTexture> GetMTLTexture() const;
+
+    struct AttachmentInfo {
+        NSPRef<id<MTLTexture>> texture;
+        uint32_t baseMipLevel;
+        uint32_t baseArrayLayer;
     };
+    AttachmentInfo GetAttachmentInfo() const;
 
-    class TextureView final : public TextureViewBase {
-      public:
-        static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
-                                                      const TextureViewDescriptor* descriptor);
+  private:
+    using TextureViewBase::TextureViewBase;
+    MaybeError Initialize(const TextureViewDescriptor* descriptor);
 
-        id<MTLTexture> GetMTLTexture() const;
-
-        struct AttachmentInfo {
-            NSPRef<id<MTLTexture>> texture;
-            uint32_t baseMipLevel;
-            uint32_t baseArrayLayer;
-        };
-        AttachmentInfo GetAttachmentInfo() const;
-
-      private:
-        using TextureViewBase::TextureViewBase;
-        MaybeError Initialize(const TextureViewDescriptor* descriptor);
-
-        // TODO(crbug.com/dawn/1355): Clear this reference on texture destroy.
-        NSPRef<id<MTLTexture>> mMtlTextureView;
-    };
+    // TODO(crbug.com/dawn/1355): Clear this reference on texture destroy.
+    NSPRef<id<MTLTexture>> mMtlTextureView;
+};
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/TextureMTL.mm b/src/dawn/native/metal/TextureMTL.mm
index 98c5fdb..429d60c 100644
--- a/src/dawn/native/metal/TextureMTL.mm
+++ b/src/dawn/native/metal/TextureMTL.mm
@@ -27,957 +27,814 @@
 
 namespace dawn::native::metal {
 
-    namespace {
+namespace {
 
-        MTLTextureUsage MetalTextureUsage(const Format& format,
-                                          wgpu::TextureUsage usage,
-                                          uint32_t sampleCount) {
-            MTLTextureUsage result = MTLTextureUsageUnknown;  // This is 0
+MTLTextureUsage MetalTextureUsage(const Format& format,
+                                  wgpu::TextureUsage usage,
+                                  uint32_t sampleCount) {
+    MTLTextureUsage result = MTLTextureUsageUnknown;  // This is 0
 
-            if (usage & (wgpu::TextureUsage::StorageBinding)) {
-                result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
-            }
+    if (usage & (wgpu::TextureUsage::StorageBinding)) {
+        result |= MTLTextureUsageShaderWrite | MTLTextureUsageShaderRead;
+    }
 
-            if (usage & (wgpu::TextureUsage::TextureBinding)) {
-                result |= MTLTextureUsageShaderRead;
+    if (usage & (wgpu::TextureUsage::TextureBinding)) {
+        result |= MTLTextureUsageShaderRead;
 
-                // For sampling stencil aspect of combined depth/stencil.
-                // See TextureView::Initialize.
-                // Depth views for depth/stencil textures in Metal simply use the original
-                // texture's format, but stencil views require format reinterpretation.
-                if (@available(macOS 10.12, iOS 10.0, *)) {
-                    if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
-                        result |= MTLTextureUsagePixelFormatView;
-                    }
-                }
-            }
-
-            // MTLTextureUsageRenderTarget is needed to clear multisample textures.
-            if (usage & (wgpu::TextureUsage::RenderAttachment) || sampleCount > 1) {
-                result |= MTLTextureUsageRenderTarget;
-            }
-
-            return result;
-        }
-
-        MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
-                                            unsigned int sampleCount) {
-            switch (dimension) {
-                case wgpu::TextureViewDimension::e1D:
-                    return MTLTextureType1D;
-                case wgpu::TextureViewDimension::e2D:
-                    return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
-                case wgpu::TextureViewDimension::e2DArray:
-                    return MTLTextureType2DArray;
-                case wgpu::TextureViewDimension::Cube:
-                    return MTLTextureTypeCube;
-                case wgpu::TextureViewDimension::CubeArray:
-                    return MTLTextureTypeCubeArray;
-                case wgpu::TextureViewDimension::e3D:
-                    return MTLTextureType3D;
-
-                case wgpu::TextureViewDimension::Undefined:
-                    UNREACHABLE();
+        // For sampling stencil aspect of combined depth/stencil.
+        // See TextureView::Initialize.
+        // Depth views for depth/stencil textures in Metal simply use the original
+        // texture's format, but stencil views require format reinterpretation.
+        if (@available(macOS 10.12, iOS 10.0, *)) {
+            if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+                result |= MTLTextureUsagePixelFormatView;
             }
         }
+    }
 
-        bool RequiresCreatingNewTextureView(const TextureBase* texture,
-                                            const TextureViewDescriptor* textureViewDescriptor) {
-            constexpr wgpu::TextureUsage kShaderUsageNeedsView =
-                wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
-            constexpr wgpu::TextureUsage kUsageNeedsView =
-                kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
-            if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
-                return false;
-            }
+    // MTLTextureUsageRenderTarget is needed to clear multisample textures.
+    if (usage & (wgpu::TextureUsage::RenderAttachment) || sampleCount > 1) {
+        result |= MTLTextureUsageRenderTarget;
+    }
 
-            if (texture->GetFormat().format != textureViewDescriptor->format &&
-                !texture->GetFormat().HasDepthOrStencil()) {
-                // Color format reinterpretation required.
-                // Note: Depth/stencil formats don't support reinterpretation.
-                // See also TextureView::GetAttachmentInfo when modifying this condition.
-                return true;
-            }
+    return result;
+}
 
-            // Reinterpretation not required. Now, we only need a new view if the view dimension or
-            // set of subresources for the shader is different from the base texture.
-            if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
-                return false;
-            }
+MTLTextureType MetalTextureViewType(wgpu::TextureViewDimension dimension,
+                                    unsigned int sampleCount) {
+    switch (dimension) {
+        case wgpu::TextureViewDimension::e1D:
+            return MTLTextureType1D;
+        case wgpu::TextureViewDimension::e2D:
+            return (sampleCount > 1) ? MTLTextureType2DMultisample : MTLTextureType2D;
+        case wgpu::TextureViewDimension::e2DArray:
+            return MTLTextureType2DArray;
+        case wgpu::TextureViewDimension::Cube:
+            return MTLTextureTypeCube;
+        case wgpu::TextureViewDimension::CubeArray:
+            return MTLTextureTypeCubeArray;
+        case wgpu::TextureViewDimension::e3D:
+            return MTLTextureType3D;
 
-            if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
-                (texture->GetArrayLayers() == 1 &&
-                 texture->GetDimension() == wgpu::TextureDimension::e2D &&
-                 textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
-                // If the view has a different number of array layers, we need a new view.
-                // And, if the original texture is a 2D texture with one array layer, we need a new
-                // view to view it as a 2D array texture.
-                return true;
-            }
+        case wgpu::TextureViewDimension::Undefined:
+            UNREACHABLE();
+    }
+}
 
-            if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
-                return true;
-            }
+bool RequiresCreatingNewTextureView(const TextureBase* texture,
+                                    const TextureViewDescriptor* textureViewDescriptor) {
+    constexpr wgpu::TextureUsage kShaderUsageNeedsView =
+        wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+    constexpr wgpu::TextureUsage kUsageNeedsView =
+        kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
+    if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
+        return false;
+    }
 
-            // If the texture is created with MTLTextureUsagePixelFormatView, we need
-            // a new view to perform format reinterpretation.
-            if ((MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage(),
-                                   texture->GetSampleCount()) &
-                 MTLTextureUsagePixelFormatView) != 0) {
-                return true;
-            }
+    if (texture->GetFormat().format != textureViewDescriptor->format &&
+        !texture->GetFormat().HasDepthOrStencil()) {
+        // Color format reinterpretation required.
+        // Note: Depth/stencil formats don't support reinterpretation.
+        // See also TextureView::GetAttachmentInfo when modifying this condition.
+        return true;
+    }
 
-            switch (textureViewDescriptor->dimension) {
-                case wgpu::TextureViewDimension::Cube:
-                case wgpu::TextureViewDimension::CubeArray:
-                    return true;
-                default:
-                    break;
-            }
+    // Reinterpretation not required. Now, we only need a new view if the view dimension or
+    // set of subresources for the shader is different from the base texture.
+    if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
+        return false;
+    }
 
+    if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
+        (texture->GetArrayLayers() == 1 && texture->GetDimension() == wgpu::TextureDimension::e2D &&
+         textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
+        // If the view has a different number of array layers, we need a new view.
+        // And, if the original texture is a 2D texture with one array layer, we need a new
+        // view to view it as a 2D array texture.
+        return true;
+    }
+
+    if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+        return true;
+    }
+
+    // If the texture is created with MTLTextureUsagePixelFormatView, we need
+    // a new view to perform format reinterpretation.
+    if ((MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage(),
+                           texture->GetSampleCount()) &
+         MTLTextureUsagePixelFormatView) != 0) {
+        return true;
+    }
+
+    switch (textureViewDescriptor->dimension) {
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+            return true;
+        default:
+            break;
+    }
+
+    return false;
+}
+
+// Metal only allows format reinterpretation to happen on swizzle pattern or conversion
+// between linear space and sRGB without setting MTLTextureUsagePixelFormatView flag. For
+// example, creating bgra8Unorm texture view on rgba8Unorm texture or creating
+// rgba8Unorm_srgb texture view on rgba8Unorm texture.
+bool AllowFormatReinterpretationWithoutFlag(MTLPixelFormat origin,
+                                            MTLPixelFormat reinterpretation) {
+    switch (origin) {
+        case MTLPixelFormatRGBA8Unorm:
+            return reinterpretation == MTLPixelFormatBGRA8Unorm ||
+                   reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB;
+        case MTLPixelFormatBGRA8Unorm:
+            return reinterpretation == MTLPixelFormatRGBA8Unorm ||
+                   reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB;
+        case MTLPixelFormatRGBA8Unorm_sRGB:
+            return reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB ||
+                   reinterpretation == MTLPixelFormatRGBA8Unorm;
+        case MTLPixelFormatBGRA8Unorm_sRGB:
+            return reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB ||
+                   reinterpretation == MTLPixelFormatBGRA8Unorm;
+#if defined(DAWN_PLATFORM_MACOS)
+        case MTLPixelFormatBC1_RGBA:
+            return reinterpretation == MTLPixelFormatBC1_RGBA_sRGB;
+        case MTLPixelFormatBC1_RGBA_sRGB:
+            return reinterpretation == MTLPixelFormatBC1_RGBA;
+        case MTLPixelFormatBC2_RGBA:
+            return reinterpretation == MTLPixelFormatBC2_RGBA_sRGB;
+        case MTLPixelFormatBC2_RGBA_sRGB:
+            return reinterpretation == MTLPixelFormatBC2_RGBA;
+        case MTLPixelFormatBC3_RGBA:
+            return reinterpretation == MTLPixelFormatBC3_RGBA_sRGB;
+        case MTLPixelFormatBC3_RGBA_sRGB:
+            return reinterpretation == MTLPixelFormatBC3_RGBA;
+        case MTLPixelFormatBC7_RGBAUnorm:
+            return reinterpretation == MTLPixelFormatBC7_RGBAUnorm_sRGB;
+        case MTLPixelFormatBC7_RGBAUnorm_sRGB:
+            return reinterpretation == MTLPixelFormatBC7_RGBAUnorm;
+#endif
+
+        default:
             return false;
-        }
+    }
+}
 
-        // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
-        // between linear space and sRGB without setting MTLTextureUsagePixelFormatView flag. For
-        // example, creating bgra8Unorm texture view on rgba8Unorm texture or creating
-        // rgba8Unorm_srgb texture view on rgab8Unorm texture.
-        bool AllowFormatReinterpretationWithoutFlag(MTLPixelFormat origin,
-                                                    MTLPixelFormat reinterpretation) {
-            switch (origin) {
-                case MTLPixelFormatRGBA8Unorm:
-                    return reinterpretation == MTLPixelFormatBGRA8Unorm ||
-                           reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB;
-                case MTLPixelFormatBGRA8Unorm:
-                    return reinterpretation == MTLPixelFormatRGBA8Unorm ||
-                           reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB;
-                case MTLPixelFormatRGBA8Unorm_sRGB:
-                    return reinterpretation == MTLPixelFormatBGRA8Unorm_sRGB ||
-                           reinterpretation == MTLPixelFormatRGBA8Unorm;
-                case MTLPixelFormatBGRA8Unorm_sRGB:
-                    return reinterpretation == MTLPixelFormatRGBA8Unorm_sRGB ||
-                           reinterpretation == MTLPixelFormatBGRA8Unorm;
-#if defined(DAWN_PLATFORM_MACOS)
-                case MTLPixelFormatBC1_RGBA:
-                    return reinterpretation == MTLPixelFormatBC1_RGBA_sRGB;
-                case MTLPixelFormatBC1_RGBA_sRGB:
-                    return reinterpretation == MTLPixelFormatBC1_RGBA;
-                case MTLPixelFormatBC2_RGBA:
-                    return reinterpretation == MTLPixelFormatBC2_RGBA_sRGB;
-                case MTLPixelFormatBC2_RGBA_sRGB:
-                    return reinterpretation == MTLPixelFormatBC2_RGBA;
-                case MTLPixelFormatBC3_RGBA:
-                    return reinterpretation == MTLPixelFormatBC3_RGBA_sRGB;
-                case MTLPixelFormatBC3_RGBA_sRGB:
-                    return reinterpretation == MTLPixelFormatBC3_RGBA;
-                case MTLPixelFormatBC7_RGBAUnorm:
-                    return reinterpretation == MTLPixelFormatBC7_RGBAUnorm_sRGB;
-                case MTLPixelFormatBC7_RGBAUnorm_sRGB:
-                    return reinterpretation == MTLPixelFormatBC7_RGBAUnorm;
-#endif
+ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
+    switch (format) {
+        case kCVPixelFormatType_64RGBAHalf:
+            return wgpu::TextureFormat::RGBA16Float;
+        case kCVPixelFormatType_TwoComponent16Half:
+            return wgpu::TextureFormat::RG16Float;
+        case kCVPixelFormatType_OneComponent16Half:
+            return wgpu::TextureFormat::R16Float;
+        case kCVPixelFormatType_ARGB2101010LEPacked:
+            return wgpu::TextureFormat::RGB10A2Unorm;
+        case kCVPixelFormatType_32RGBA:
+            return wgpu::TextureFormat::RGBA8Unorm;
+        case kCVPixelFormatType_32BGRA:
+            return wgpu::TextureFormat::BGRA8Unorm;
+        case kCVPixelFormatType_TwoComponent8:
+            return wgpu::TextureFormat::RG8Unorm;
+        case kCVPixelFormatType_OneComponent8:
+            return wgpu::TextureFormat::R8Unorm;
+        case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
+            return wgpu::TextureFormat::R8BG8Biplanar420Unorm;
+        default:
+            return DAWN_FORMAT_VALIDATION_ERROR("Unsupported IOSurface format (%x).", format);
+    }
+}
 
-                default:
-                    return false;
-            }
-        }
-
-        ResultOrError<wgpu::TextureFormat> GetFormatEquivalentToIOSurfaceFormat(uint32_t format) {
-            switch (format) {
-                case kCVPixelFormatType_64RGBAHalf:
-                    return wgpu::TextureFormat::RGBA16Float;
-                case kCVPixelFormatType_TwoComponent16Half:
-                    return wgpu::TextureFormat::RG16Float;
-                case kCVPixelFormatType_OneComponent16Half:
-                    return wgpu::TextureFormat::R16Float;
-                case kCVPixelFormatType_ARGB2101010LEPacked:
-                    return wgpu::TextureFormat::RGB10A2Unorm;
-                case kCVPixelFormatType_32RGBA:
-                    return wgpu::TextureFormat::RGBA8Unorm;
-                case kCVPixelFormatType_32BGRA:
-                    return wgpu::TextureFormat::BGRA8Unorm;
-                case kCVPixelFormatType_TwoComponent8:
-                    return wgpu::TextureFormat::RG8Unorm;
-                case kCVPixelFormatType_OneComponent8:
-                    return wgpu::TextureFormat::R8Unorm;
-                case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
-                    return wgpu::TextureFormat::R8BG8Biplanar420Unorm;
-                default:
-                    return DAWN_FORMAT_VALIDATION_ERROR("Unsupported IOSurface format (%x).",
-                                                        format);
-            }
-        }
-
-        uint32_t GetIOSurfacePlane(wgpu::TextureAspect aspect) {
-            switch (aspect) {
-                case wgpu::TextureAspect::Plane0Only:
-                    return 0;
-                case wgpu::TextureAspect::Plane1Only:
-                    return 1;
-                default:
-                    UNREACHABLE();
-            }
-        }
+uint32_t GetIOSurfacePlane(wgpu::TextureAspect aspect) {
+    switch (aspect) {
+        case wgpu::TextureAspect::Plane0Only:
+            return 0;
+        case wgpu::TextureAspect::Plane1Only:
+            return 1;
+        default:
+            UNREACHABLE();
+    }
+}
 
 #if defined(DAWN_PLATFORM_MACOS)
-        MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
+MTLStorageMode kIOSurfaceStorageMode = MTLStorageModeManaged;
 #elif defined(DAWN_PLATFORM_IOS)
-        MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
+MTLStorageMode kIOSurfaceStorageMode = MTLStorageModePrivate;
 #else
-#    error "Unsupported Apple platform."
+#error "Unsupported Apple platform."
 #endif
-    }
+}  // namespace
 
-    MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
-        switch (format) {
-            case wgpu::TextureFormat::R8Unorm:
-                return MTLPixelFormatR8Unorm;
-            case wgpu::TextureFormat::R8Snorm:
-                return MTLPixelFormatR8Snorm;
-            case wgpu::TextureFormat::R8Uint:
-                return MTLPixelFormatR8Uint;
-            case wgpu::TextureFormat::R8Sint:
-                return MTLPixelFormatR8Sint;
+MTLPixelFormat MetalPixelFormat(wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::R8Unorm:
+            return MTLPixelFormatR8Unorm;
+        case wgpu::TextureFormat::R8Snorm:
+            return MTLPixelFormatR8Snorm;
+        case wgpu::TextureFormat::R8Uint:
+            return MTLPixelFormatR8Uint;
+        case wgpu::TextureFormat::R8Sint:
+            return MTLPixelFormatR8Sint;
 
-            case wgpu::TextureFormat::R16Uint:
-                return MTLPixelFormatR16Uint;
-            case wgpu::TextureFormat::R16Sint:
-                return MTLPixelFormatR16Sint;
-            case wgpu::TextureFormat::R16Float:
-                return MTLPixelFormatR16Float;
-            case wgpu::TextureFormat::RG8Unorm:
-                return MTLPixelFormatRG8Unorm;
-            case wgpu::TextureFormat::RG8Snorm:
-                return MTLPixelFormatRG8Snorm;
-            case wgpu::TextureFormat::RG8Uint:
-                return MTLPixelFormatRG8Uint;
-            case wgpu::TextureFormat::RG8Sint:
-                return MTLPixelFormatRG8Sint;
+        case wgpu::TextureFormat::R16Uint:
+            return MTLPixelFormatR16Uint;
+        case wgpu::TextureFormat::R16Sint:
+            return MTLPixelFormatR16Sint;
+        case wgpu::TextureFormat::R16Float:
+            return MTLPixelFormatR16Float;
+        case wgpu::TextureFormat::RG8Unorm:
+            return MTLPixelFormatRG8Unorm;
+        case wgpu::TextureFormat::RG8Snorm:
+            return MTLPixelFormatRG8Snorm;
+        case wgpu::TextureFormat::RG8Uint:
+            return MTLPixelFormatRG8Uint;
+        case wgpu::TextureFormat::RG8Sint:
+            return MTLPixelFormatRG8Sint;
 
-            case wgpu::TextureFormat::R32Uint:
-                return MTLPixelFormatR32Uint;
-            case wgpu::TextureFormat::R32Sint:
-                return MTLPixelFormatR32Sint;
-            case wgpu::TextureFormat::R32Float:
-                return MTLPixelFormatR32Float;
-            case wgpu::TextureFormat::RG16Uint:
-                return MTLPixelFormatRG16Uint;
-            case wgpu::TextureFormat::RG16Sint:
-                return MTLPixelFormatRG16Sint;
-            case wgpu::TextureFormat::RG16Float:
-                return MTLPixelFormatRG16Float;
-            case wgpu::TextureFormat::RGBA8Unorm:
-                return MTLPixelFormatRGBA8Unorm;
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-                return MTLPixelFormatRGBA8Unorm_sRGB;
-            case wgpu::TextureFormat::RGBA8Snorm:
-                return MTLPixelFormatRGBA8Snorm;
-            case wgpu::TextureFormat::RGBA8Uint:
-                return MTLPixelFormatRGBA8Uint;
-            case wgpu::TextureFormat::RGBA8Sint:
-                return MTLPixelFormatRGBA8Sint;
-            case wgpu::TextureFormat::BGRA8Unorm:
-                return MTLPixelFormatBGRA8Unorm;
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-                return MTLPixelFormatBGRA8Unorm_sRGB;
-            case wgpu::TextureFormat::RGB10A2Unorm:
-                return MTLPixelFormatRGB10A2Unorm;
-            case wgpu::TextureFormat::RG11B10Ufloat:
-                return MTLPixelFormatRG11B10Float;
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-                return MTLPixelFormatRGB9E5Float;
+        case wgpu::TextureFormat::R32Uint:
+            return MTLPixelFormatR32Uint;
+        case wgpu::TextureFormat::R32Sint:
+            return MTLPixelFormatR32Sint;
+        case wgpu::TextureFormat::R32Float:
+            return MTLPixelFormatR32Float;
+        case wgpu::TextureFormat::RG16Uint:
+            return MTLPixelFormatRG16Uint;
+        case wgpu::TextureFormat::RG16Sint:
+            return MTLPixelFormatRG16Sint;
+        case wgpu::TextureFormat::RG16Float:
+            return MTLPixelFormatRG16Float;
+        case wgpu::TextureFormat::RGBA8Unorm:
+            return MTLPixelFormatRGBA8Unorm;
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+            return MTLPixelFormatRGBA8Unorm_sRGB;
+        case wgpu::TextureFormat::RGBA8Snorm:
+            return MTLPixelFormatRGBA8Snorm;
+        case wgpu::TextureFormat::RGBA8Uint:
+            return MTLPixelFormatRGBA8Uint;
+        case wgpu::TextureFormat::RGBA8Sint:
+            return MTLPixelFormatRGBA8Sint;
+        case wgpu::TextureFormat::BGRA8Unorm:
+            return MTLPixelFormatBGRA8Unorm;
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+            return MTLPixelFormatBGRA8Unorm_sRGB;
+        case wgpu::TextureFormat::RGB10A2Unorm:
+            return MTLPixelFormatRGB10A2Unorm;
+        case wgpu::TextureFormat::RG11B10Ufloat:
+            return MTLPixelFormatRG11B10Float;
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+            return MTLPixelFormatRGB9E5Float;
 
-            case wgpu::TextureFormat::RG32Uint:
-                return MTLPixelFormatRG32Uint;
-            case wgpu::TextureFormat::RG32Sint:
-                return MTLPixelFormatRG32Sint;
-            case wgpu::TextureFormat::RG32Float:
-                return MTLPixelFormatRG32Float;
-            case wgpu::TextureFormat::RGBA16Uint:
-                return MTLPixelFormatRGBA16Uint;
-            case wgpu::TextureFormat::RGBA16Sint:
-                return MTLPixelFormatRGBA16Sint;
-            case wgpu::TextureFormat::RGBA16Float:
-                return MTLPixelFormatRGBA16Float;
+        case wgpu::TextureFormat::RG32Uint:
+            return MTLPixelFormatRG32Uint;
+        case wgpu::TextureFormat::RG32Sint:
+            return MTLPixelFormatRG32Sint;
+        case wgpu::TextureFormat::RG32Float:
+            return MTLPixelFormatRG32Float;
+        case wgpu::TextureFormat::RGBA16Uint:
+            return MTLPixelFormatRGBA16Uint;
+        case wgpu::TextureFormat::RGBA16Sint:
+            return MTLPixelFormatRGBA16Sint;
+        case wgpu::TextureFormat::RGBA16Float:
+            return MTLPixelFormatRGBA16Float;
 
-            case wgpu::TextureFormat::RGBA32Uint:
-                return MTLPixelFormatRGBA32Uint;
-            case wgpu::TextureFormat::RGBA32Sint:
-                return MTLPixelFormatRGBA32Sint;
-            case wgpu::TextureFormat::RGBA32Float:
-                return MTLPixelFormatRGBA32Float;
+        case wgpu::TextureFormat::RGBA32Uint:
+            return MTLPixelFormatRGBA32Uint;
+        case wgpu::TextureFormat::RGBA32Sint:
+            return MTLPixelFormatRGBA32Sint;
+        case wgpu::TextureFormat::RGBA32Float:
+            return MTLPixelFormatRGBA32Float;
 
-            case wgpu::TextureFormat::Depth32Float:
-                return MTLPixelFormatDepth32Float;
-            case wgpu::TextureFormat::Depth24Plus:
-                return MTLPixelFormatDepth32Float;
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-            case wgpu::TextureFormat::Depth32FloatStencil8:
-                return MTLPixelFormatDepth32Float_Stencil8;
-            case wgpu::TextureFormat::Depth16Unorm:
-                if (@available(macOS 10.12, iOS 13.0, *)) {
-                    return MTLPixelFormatDepth16Unorm;
-                } else {
-                    // TODO(dawn:1181): Allow non-conformant implementation on macOS 10.11
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::Stencil8:
-                return MTLPixelFormatStencil8;
+        case wgpu::TextureFormat::Depth32Float:
+            return MTLPixelFormatDepth32Float;
+        case wgpu::TextureFormat::Depth24Plus:
+            return MTLPixelFormatDepth32Float;
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+            return MTLPixelFormatDepth32Float_Stencil8;
+        case wgpu::TextureFormat::Depth16Unorm:
+            if (@available(macOS 10.12, iOS 13.0, *)) {
+                return MTLPixelFormatDepth16Unorm;
+            } else {
+                // TODO(dawn:1181): Allow non-conformant implementation on macOS 10.11
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::Stencil8:
+            return MTLPixelFormatStencil8;
 
 #if defined(DAWN_PLATFORM_MACOS)
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-                return MTLPixelFormatDepth24Unorm_Stencil8;
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+            return MTLPixelFormatDepth24Unorm_Stencil8;
 
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-                return MTLPixelFormatBC1_RGBA;
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-                return MTLPixelFormatBC1_RGBA_sRGB;
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-                return MTLPixelFormatBC2_RGBA;
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-                return MTLPixelFormatBC2_RGBA_sRGB;
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-                return MTLPixelFormatBC3_RGBA;
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-                return MTLPixelFormatBC3_RGBA_sRGB;
-            case wgpu::TextureFormat::BC4RSnorm:
-                return MTLPixelFormatBC4_RSnorm;
-            case wgpu::TextureFormat::BC4RUnorm:
-                return MTLPixelFormatBC4_RUnorm;
-            case wgpu::TextureFormat::BC5RGSnorm:
-                return MTLPixelFormatBC5_RGSnorm;
-            case wgpu::TextureFormat::BC5RGUnorm:
-                return MTLPixelFormatBC5_RGUnorm;
-            case wgpu::TextureFormat::BC6HRGBFloat:
-                return MTLPixelFormatBC6H_RGBFloat;
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-                return MTLPixelFormatBC6H_RGBUfloat;
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-                return MTLPixelFormatBC7_RGBAUnorm;
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-                return MTLPixelFormatBC7_RGBAUnorm_sRGB;
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+            return MTLPixelFormatBC1_RGBA;
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            return MTLPixelFormatBC1_RGBA_sRGB;
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+            return MTLPixelFormatBC2_RGBA;
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            return MTLPixelFormatBC2_RGBA_sRGB;
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+            return MTLPixelFormatBC3_RGBA;
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            return MTLPixelFormatBC3_RGBA_sRGB;
+        case wgpu::TextureFormat::BC4RSnorm:
+            return MTLPixelFormatBC4_RSnorm;
+        case wgpu::TextureFormat::BC4RUnorm:
+            return MTLPixelFormatBC4_RUnorm;
+        case wgpu::TextureFormat::BC5RGSnorm:
+            return MTLPixelFormatBC5_RGSnorm;
+        case wgpu::TextureFormat::BC5RGUnorm:
+            return MTLPixelFormatBC5_RGUnorm;
+        case wgpu::TextureFormat::BC6HRGBFloat:
+            return MTLPixelFormatBC6H_RGBFloat;
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+            return MTLPixelFormatBC6H_RGBUfloat;
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+            return MTLPixelFormatBC7_RGBAUnorm;
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            return MTLPixelFormatBC7_RGBAUnorm_sRGB;
 #else
-            case wgpu::TextureFormat::Depth24UnormStencil8:
+        case wgpu::TextureFormat::Depth24UnormStencil8:
 
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC4RSnorm:
-            case wgpu::TextureFormat::BC4RUnorm:
-            case wgpu::TextureFormat::BC5RGSnorm:
-            case wgpu::TextureFormat::BC5RGUnorm:
-            case wgpu::TextureFormat::BC6HRGBFloat:
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC4RSnorm:
+        case wgpu::TextureFormat::BC4RUnorm:
+        case wgpu::TextureFormat::BC5RGSnorm:
+        case wgpu::TextureFormat::BC5RGUnorm:
+        case wgpu::TextureFormat::BC6HRGBFloat:
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
 #endif
 
-            case wgpu::TextureFormat::ETC2RGB8Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatETC2_RGB8;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatETC2_RGB8_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatETC2_RGB8A1;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatETC2_RGB8A1_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ETC2RGBA8Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatEAC_RGBA8;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatEAC_RGBA8_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::EACR11Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatEAC_R11Unorm;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::EACR11Snorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatEAC_R11Snorm;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::EACRG11Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatEAC_RG11Unorm;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::EACRG11Snorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatEAC_RG11Snorm;
-                } else {
-                    UNREACHABLE();
-                }
-
-            case wgpu::TextureFormat::ASTC4x4Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_4x4_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_4x4_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC5x4Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_5x4_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_5x4_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC5x5Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_5x5_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_5x5_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC6x5Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_6x5_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_6x5_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC6x6Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_6x6_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_6x6_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC8x5Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_8x5_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_8x5_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC8x6Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_8x6_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_8x6_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC8x8Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_8x8_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_8x8_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x5Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x5_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x5_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x6Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x6_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x6_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x8Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x8_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x8_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x10Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x10_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_10x10_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC12x10Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_12x10_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_12x10_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC12x12Unorm:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_12x12_LDR;
-                } else {
-                    UNREACHABLE();
-                }
-            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-                if (@available(macOS 11.0, iOS 8.0, *)) {
-                    return MTLPixelFormatASTC_12x12_sRGB;
-                } else {
-                    UNREACHABLE();
-                }
-
-            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-            case wgpu::TextureFormat::Undefined:
-                UNREACHABLE();
-        }
-    }
-
-    MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase*,
-                                             const TextureDescriptor* descriptor,
-                                             IOSurfaceRef ioSurface) {
-        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
-                        "Texture dimension (%s) is not %s.", descriptor->dimension,
-                        wgpu::TextureDimension::e2D);
-
-        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
-                        descriptor->mipLevelCount);
-
-        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
-                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
-        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
-                        descriptor->sampleCount);
-
-        uint32_t surfaceWidth = IOSurfaceGetWidth(ioSurface);
-        uint32_t surfaceHeight = IOSurfaceGetHeight(ioSurface);
-
-        DAWN_INVALID_IF(
-            descriptor->size.width != surfaceWidth || descriptor->size.height != surfaceHeight ||
-                descriptor->size.depthOrArrayLayers != 1,
-            "IOSurface size (width: %u, height %u, depth: 1) doesn't match descriptor size %s.",
-            surfaceWidth, surfaceHeight, &descriptor->size);
-
-        wgpu::TextureFormat ioSurfaceFormat;
-        DAWN_TRY_ASSIGN(ioSurfaceFormat,
-                        GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
-        DAWN_INVALID_IF(descriptor->format != ioSurfaceFormat,
-                        "IOSurface format (%s) doesn't match the descriptor format (%s).",
-                        ioSurfaceFormat, descriptor->format);
-
-        return {};
-    }
-
-    NSRef<MTLTextureDescriptor> Texture::CreateMetalTextureDescriptor() const {
-        NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
-        MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
-
-        mtlDesc.width = GetWidth();
-        mtlDesc.sampleCount = GetSampleCount();
-        // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
-        // between linear space and sRGB. For example, creating bgra8Unorm texture view on
-        // rgba8Unorm texture or creating rgba8Unorm_srgb texture view on rgab8Unorm texture.
-        mtlDesc.usage = MetalTextureUsage(GetFormat(), GetInternalUsage(), GetSampleCount());
-        mtlDesc.pixelFormat = MetalPixelFormat(GetFormat().format);
-        mtlDesc.mipmapLevelCount = GetNumMipLevels();
-        mtlDesc.storageMode = MTLStorageModePrivate;
-
-        // Choose the correct MTLTextureType and paper over differences in how the array layer count
-        // is specified.
-        switch (GetDimension()) {
-            case wgpu::TextureDimension::e1D:
-                mtlDesc.arrayLength = 1;
-                mtlDesc.depth = 1;
-                ASSERT(mtlDesc.sampleCount == 1);
-                mtlDesc.textureType = MTLTextureType1D;
-                break;
-
-            case wgpu::TextureDimension::e2D:
-                mtlDesc.height = GetHeight();
-                mtlDesc.arrayLength = GetArrayLayers();
-                mtlDesc.depth = 1;
-                if (mtlDesc.arrayLength > 1) {
-                    ASSERT(mtlDesc.sampleCount == 1);
-                    mtlDesc.textureType = MTLTextureType2DArray;
-                } else if (mtlDesc.sampleCount > 1) {
-                    mtlDesc.textureType = MTLTextureType2DMultisample;
-                } else {
-                    mtlDesc.textureType = MTLTextureType2D;
-                }
-                break;
-            case wgpu::TextureDimension::e3D:
-                mtlDesc.height = GetHeight();
-                mtlDesc.depth = GetDepth();
-                mtlDesc.arrayLength = 1;
-                ASSERT(mtlDesc.sampleCount == 1);
-                mtlDesc.textureType = MTLTextureType3D;
-                break;
-        }
-
-        return mtlDescRef;
-    }
-
-    // static
-    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
-                                                const TextureDescriptor* descriptor) {
-        Ref<Texture> texture =
-            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
-        DAWN_TRY(texture->InitializeAsInternalTexture(descriptor));
-        return texture;
-    }
-
-    // static
-    ResultOrError<Ref<Texture>> Texture::CreateFromIOSurface(
-        Device* device,
-        const ExternalImageDescriptor* descriptor,
-        IOSurfaceRef ioSurface) {
-        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
-        Ref<Texture> texture =
-            AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
-        DAWN_TRY(texture->InitializeFromIOSurface(descriptor, textureDescriptor, ioSurface));
-        return texture;
-    }
-
-    // static
-    Ref<Texture> Texture::CreateWrapping(Device* device,
-                                         const TextureDescriptor* descriptor,
-                                         NSPRef<id<MTLTexture>> wrapped) {
-        Ref<Texture> texture =
-            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
-        texture->InitializeAsWrapping(descriptor, std::move(wrapped));
-        return texture;
-    }
-
-    MaybeError Texture::InitializeAsInternalTexture(const TextureDescriptor* descriptor) {
-        Device* device = ToBackend(GetDevice());
-
-        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
-        mMtlUsage = [*mtlDesc usage];
-        mMtlTexture =
-            AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
-
-        if (mMtlTexture == nil) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture.");
-        }
-
-        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-            DAWN_TRY(ClearTexture(device->GetPendingCommandContext(), GetAllSubresources(),
-                                  TextureBase::ClearValue::NonZero));
-        }
-
-        return {};
-    }
-
-    void Texture::InitializeAsWrapping(const TextureDescriptor* descriptor,
-                                       NSPRef<id<MTLTexture>> wrapped) {
-        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
-        mMtlUsage = [*mtlDesc usage];
-        mMtlTexture = std::move(wrapped);
-    }
-
-    MaybeError Texture::InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
-                                                const TextureDescriptor* textureDescriptor,
-                                                IOSurfaceRef ioSurface) {
-        mIOSurface = ioSurface;
-
-        // Uses WGPUTexture which wraps multiplanar ioSurface needs to create
-        // texture view explicitly. Wrap the ioSurface and delay to extract
-        // MTLTexture from the plane of it when creating texture view.
-        // WGPUTexture which wraps non-multplanar ioSurface needs to support
-        // ops that doesn't require creating texture view(e.g. copy). Extract
-        // MTLTexture from such ioSurface to support this.
-        if (!GetFormat().IsMultiPlanar()) {
-            Device* device = ToBackend(GetDevice());
-
-            NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
-            [*mtlDesc setStorageMode:kIOSurfaceStorageMode];
-
-            mMtlUsage = [*mtlDesc usage];
-            mMtlTexture =
-                AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()
-                                                                     iosurface:ioSurface
-                                                                         plane:0]);
-        }
-        SetIsSubresourceContentInitialized(descriptor->isInitialized, GetAllSubresources());
-        return {};
-    }
-
-    Texture::~Texture() {
-    }
-
-    void Texture::DestroyImpl() {
-        TextureBase::DestroyImpl();
-        mMtlTexture = nullptr;
-        mIOSurface = nullptr;
-    }
-
-    id<MTLTexture> Texture::GetMTLTexture() const {
-        return mMtlTexture.Get();
-    }
-
-    IOSurfaceRef Texture::GetIOSurface() {
-        return mIOSurface.Get();
-    }
-
-    NSPRef<id<MTLTexture>> Texture::CreateFormatView(wgpu::TextureFormat format) {
-        if (GetFormat().format == format) {
-            return mMtlTexture;
-        }
-
-        ASSERT(AllowFormatReinterpretationWithoutFlag(MetalPixelFormat(GetFormat().format),
-                                                      MetalPixelFormat(format)));
-        return AcquireNSPRef(
-            [mMtlTexture.Get() newTextureViewWithPixelFormat:MetalPixelFormat(format)]);
-    }
-
-    MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
-                                     const SubresourceRange& range,
-                                     TextureBase::ClearValue clearValue) {
-        Device* device = ToBackend(GetDevice());
-
-        const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
-        const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
-
-        if ((mMtlUsage & MTLTextureUsageRenderTarget) != 0) {
-            ASSERT(GetFormat().isRenderable);
-
-            // End the blit encoder if it is open.
-            commandContext->EndBlit();
-
-            if (GetFormat().HasDepthOrStencil()) {
-                // Create a render pass to clear each subresource.
-                for (uint32_t level = range.baseMipLevel;
-                     level < range.baseMipLevel + range.levelCount; ++level) {
-                    for (uint32_t arrayLayer = range.baseArrayLayer;
-                         arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
-                        if (clearValue == TextureBase::ClearValue::Zero &&
-                            IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
-                                level, arrayLayer, range.aspects))) {
-                            // Skip lazy clears if already initialized.
-                            continue;
-                        }
-
-                        // Note that this creates a descriptor that's autoreleased so we don't use
-                        // AcquireNSRef
-                        NSRef<MTLRenderPassDescriptor> descriptorRef =
-                            [MTLRenderPassDescriptor renderPassDescriptor];
-                        MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
-
-                        // At least one aspect needs clearing. Iterate the aspects individually to
-                        // determine which to clear.
-                        for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                            if (clearValue == TextureBase::ClearValue::Zero &&
-                                IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
-                                    level, arrayLayer, aspect))) {
-                                // Skip lazy clears if already initialized.
-                                continue;
-                            }
-
-                            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
-                            switch (aspect) {
-                                case Aspect::Depth:
-                                    descriptor.depthAttachment.texture = GetMTLTexture();
-                                    descriptor.depthAttachment.level = level;
-                                    descriptor.depthAttachment.slice = arrayLayer;
-                                    descriptor.depthAttachment.loadAction = MTLLoadActionClear;
-                                    descriptor.depthAttachment.storeAction = MTLStoreActionStore;
-                                    descriptor.depthAttachment.clearDepth = dClearColor;
-                                    break;
-                                case Aspect::Stencil:
-                                    descriptor.stencilAttachment.texture = GetMTLTexture();
-                                    descriptor.stencilAttachment.level = level;
-                                    descriptor.stencilAttachment.slice = arrayLayer;
-                                    descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
-                                    descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
-                                    descriptor.stencilAttachment.clearStencil =
-                                        static_cast<uint32_t>(clearColor);
-                                    break;
-                                default:
-                                    UNREACHABLE();
-                            }
-                        }
-
-                        DAWN_TRY(EncodeEmptyMetalRenderPass(device, commandContext, descriptor,
-                                                            GetMipLevelVirtualSize(level)));
-                    }
-                }
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatETC2_RGB8;
             } else {
-                ASSERT(GetFormat().IsColor());
-                for (uint32_t level = range.baseMipLevel;
-                     level < range.baseMipLevel + range.levelCount; ++level) {
-                    // Create multiple render passes with each subresource as a color attachment to
-                    // clear them all. Only do this for array layers to ensure all attachments have
-                    // the same size.
-                    NSRef<MTLRenderPassDescriptor> descriptor;
-                    uint32_t attachment = 0;
-
-                    uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;
-
-                    for (uint32_t arrayLayer = range.baseArrayLayer;
-                         arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
-                        if (clearValue == TextureBase::ClearValue::Zero &&
-                            IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
-                                level, arrayLayer, Aspect::Color))) {
-                            // Skip lazy clears if already initialized.
-                            continue;
-                        }
-
-                        for (uint32_t z = 0; z < numZSlices; ++z) {
-                            if (descriptor == nullptr) {
-                                // Note that this creates a descriptor that's autoreleased so we
-                                // don't use AcquireNSRef
-                                descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
-                            }
-
-                            [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
-                            [*descriptor colorAttachments][attachment].loadAction =
-                                MTLLoadActionClear;
-                            [*descriptor colorAttachments][attachment].storeAction =
-                                MTLStoreActionStore;
-                            [*descriptor colorAttachments][attachment].clearColor =
-                                MTLClearColorMake(dClearColor, dClearColor, dClearColor,
-                                                  dClearColor);
-                            [*descriptor colorAttachments][attachment].level = level;
-                            [*descriptor colorAttachments][attachment].slice = arrayLayer;
-                            [*descriptor colorAttachments][attachment].depthPlane = z;
-
-                            attachment++;
-
-                            if (attachment == kMaxColorAttachments) {
-                                attachment = 0;
-                                DAWN_TRY(EncodeEmptyMetalRenderPass(device, commandContext,
-                                                                    descriptor.Get(),
-                                                                    GetMipLevelVirtualSize(level)));
-                                descriptor = nullptr;
-                            }
-                        }
-                    }
-
-                    if (descriptor != nullptr) {
-                        DAWN_TRY(EncodeEmptyMetalRenderPass(device, commandContext,
-                                                            descriptor.Get(),
-                                                            GetMipLevelVirtualSize(level)));
-                    }
-                }
+                UNREACHABLE();
             }
-        } else {
-            Extent3D largestMipSize = GetMipLevelVirtualSize(range.baseMipLevel);
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatETC2_RGB8_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatETC2_RGB8A1;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatETC2_RGB8A1_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatEAC_RGBA8;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatEAC_RGBA8_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::EACR11Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatEAC_R11Unorm;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::EACR11Snorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatEAC_R11Snorm;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::EACRG11Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatEAC_RG11Unorm;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::EACRG11Snorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatEAC_RG11Snorm;
+            } else {
+                UNREACHABLE();
+            }
 
-            // Encode a buffer to texture copy to clear each subresource.
-            for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                // Compute the buffer size big enough to fill the largest mip.
-                const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_4x4_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_4x4_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_5x4_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_5x4_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_5x5_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_5x5_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_6x5_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_6x5_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_6x6_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_6x6_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_8x5_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_8x5_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_8x6_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_8x6_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_8x8_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_8x8_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x5_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x5_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x6_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x6_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x8_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x8_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x10_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_10x10_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_12x10_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_12x10_sRGB;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_12x12_LDR;
+            } else {
+                UNREACHABLE();
+            }
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+            if (@available(macOS 11.0, iOS 8.0, *)) {
+                return MTLPixelFormatASTC_12x12_sRGB;
+            } else {
+                UNREACHABLE();
+            }
 
-                // Metal validation layers: sourceBytesPerRow must be at least 64.
-                uint32_t largestMipBytesPerRow =
-                    std::max((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 64u);
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+        case wgpu::TextureFormat::Undefined:
+            UNREACHABLE();
+    }
+}
 
-                // Metal validation layers: sourceBytesPerImage must be at least 512.
-                uint64_t largestMipBytesPerImage =
-                    std::max(static_cast<uint64_t>(largestMipBytesPerRow) *
-                                 (largestMipSize.height / blockInfo.height),
-                             512llu);
+MaybeError ValidateIOSurfaceCanBeWrapped(const DeviceBase*,
+                                         const TextureDescriptor* descriptor,
+                                         IOSurfaceRef ioSurface) {
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                    "Texture dimension (%s) is not %s.", descriptor->dimension,
+                    wgpu::TextureDimension::e2D);
 
-                uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
+    DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                    descriptor->mipLevelCount);
 
-                if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
-                    return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
-                }
+    DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+                    descriptor->size.depthOrArrayLayers);
 
-                DynamicUploader* uploader = device->GetDynamicUploader();
-                UploadHandle uploadHandle;
-                DAWN_TRY_ASSIGN(uploadHandle,
-                                uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
-                                                   blockInfo.byteSize));
-                memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+    DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                    descriptor->sampleCount);
 
-                id<MTLBuffer> uploadBuffer =
-                    ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
+    uint32_t surfaceWidth = IOSurfaceGetWidth(ioSurface);
+    uint32_t surfaceHeight = IOSurfaceGetHeight(ioSurface);
 
-                for (uint32_t level = range.baseMipLevel;
-                     level < range.baseMipLevel + range.levelCount; ++level) {
-                    Extent3D virtualSize = GetMipLevelVirtualSize(level);
+    DAWN_INVALID_IF(
+        descriptor->size.width != surfaceWidth || descriptor->size.height != surfaceHeight ||
+            descriptor->size.depthOrArrayLayers != 1,
+        "IOSurface size (width: %u, height %u, depth: 1) doesn't match descriptor size %s.",
+        surfaceWidth, surfaceHeight, &descriptor->size);
 
-                    for (uint32_t arrayLayer = range.baseArrayLayer;
-                         arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+    wgpu::TextureFormat ioSurfaceFormat;
+    DAWN_TRY_ASSIGN(ioSurfaceFormat,
+                    GetFormatEquivalentToIOSurfaceFormat(IOSurfaceGetPixelFormat(ioSurface)));
+    DAWN_INVALID_IF(descriptor->format != ioSurfaceFormat,
+                    "IOSurface format (%s) doesn't match the descriptor format (%s).",
+                    ioSurfaceFormat, descriptor->format);
+
+    return {};
+}
+
+NSRef<MTLTextureDescriptor> Texture::CreateMetalTextureDescriptor() const {
+    NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+    MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+    mtlDesc.width = GetWidth();
+    mtlDesc.sampleCount = GetSampleCount();
+    // Metal only allows format reinterpretation to happen on swizzle pattern or conversion
+    // between linear space and sRGB. For example, creating bgra8Unorm texture view on
+    // rgba8Unorm texture or creating rgba8Unorm_srgb texture view on rgba8Unorm texture.
+    mtlDesc.usage = MetalTextureUsage(GetFormat(), GetInternalUsage(), GetSampleCount());
+    mtlDesc.pixelFormat = MetalPixelFormat(GetFormat().format);
+    mtlDesc.mipmapLevelCount = GetNumMipLevels();
+    mtlDesc.storageMode = MTLStorageModePrivate;
+
+    // Choose the correct MTLTextureType and paper over differences in how the array layer count
+    // is specified.
+    switch (GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            mtlDesc.arrayLength = 1;
+            mtlDesc.depth = 1;
+            ASSERT(mtlDesc.sampleCount == 1);
+            mtlDesc.textureType = MTLTextureType1D;
+            break;
+
+        case wgpu::TextureDimension::e2D:
+            mtlDesc.height = GetHeight();
+            mtlDesc.arrayLength = GetArrayLayers();
+            mtlDesc.depth = 1;
+            if (mtlDesc.arrayLength > 1) {
+                ASSERT(mtlDesc.sampleCount == 1);
+                mtlDesc.textureType = MTLTextureType2DArray;
+            } else if (mtlDesc.sampleCount > 1) {
+                mtlDesc.textureType = MTLTextureType2DMultisample;
+            } else {
+                mtlDesc.textureType = MTLTextureType2D;
+            }
+            break;
+        case wgpu::TextureDimension::e3D:
+            mtlDesc.height = GetHeight();
+            mtlDesc.depth = GetDepth();
+            mtlDesc.arrayLength = 1;
+            ASSERT(mtlDesc.sampleCount == 1);
+            mtlDesc.textureType = MTLTextureType3D;
+            break;
+    }
+
+    return mtlDescRef;
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device, const TextureDescriptor* descriptor) {
+    Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+    DAWN_TRY(texture->InitializeAsInternalTexture(descriptor));
+    return texture;
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::CreateFromIOSurface(Device* device,
+                                                         const ExternalImageDescriptor* descriptor,
+                                                         IOSurfaceRef ioSurface) {
+    const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+    Ref<Texture> texture =
+        AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedExternal));
+    DAWN_TRY(texture->InitializeFromIOSurface(descriptor, textureDescriptor, ioSurface));
+    return texture;
+}
+
+// static
+Ref<Texture> Texture::CreateWrapping(Device* device,
+                                     const TextureDescriptor* descriptor,
+                                     NSPRef<id<MTLTexture>> wrapped) {
+    Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+    texture->InitializeAsWrapping(descriptor, std::move(wrapped));
+    return texture;
+}
+
+MaybeError Texture::InitializeAsInternalTexture(const TextureDescriptor* descriptor) {
+    Device* device = ToBackend(GetDevice());
+
+    NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+    mMtlUsage = [*mtlDesc usage];
+    mMtlTexture = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()]);
+
+    if (mMtlTexture == nil) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Failed to allocate texture.");
+    }
+
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        DAWN_TRY(ClearTexture(device->GetPendingCommandContext(), GetAllSubresources(),
+                              TextureBase::ClearValue::NonZero));
+    }
+
+    return {};
+}
+
+void Texture::InitializeAsWrapping(const TextureDescriptor* descriptor,
+                                   NSPRef<id<MTLTexture>> wrapped) {
+    NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+    mMtlUsage = [*mtlDesc usage];
+    mMtlTexture = std::move(wrapped);
+}
+
+MaybeError Texture::InitializeFromIOSurface(const ExternalImageDescriptor* descriptor,
+                                            const TextureDescriptor* textureDescriptor,
+                                            IOSurfaceRef ioSurface) {
+    mIOSurface = ioSurface;
+
+    // A WGPUTexture that wraps a multiplanar ioSurface needs to create its
+    // texture views explicitly. Wrap the ioSurface and delay extracting the
+    // MTLTexture from its planes until a texture view is created.
+    // A WGPUTexture that wraps a non-multiplanar ioSurface needs to support
+    // ops that don't require creating a texture view (e.g. copy). Extract
+    // the MTLTexture from such an ioSurface here to support this.
+    if (!GetFormat().IsMultiPlanar()) {
+        Device* device = ToBackend(GetDevice());
+
+        NSRef<MTLTextureDescriptor> mtlDesc = CreateMetalTextureDescriptor();
+        [*mtlDesc setStorageMode:kIOSurfaceStorageMode];
+
+        mMtlUsage = [*mtlDesc usage];
+        mMtlTexture = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc.Get()
+                                                                           iosurface:ioSurface
+                                                                               plane:0]);
+    }
+    SetIsSubresourceContentInitialized(descriptor->isInitialized, GetAllSubresources());
+    return {};
+}
+
+Texture::~Texture() {}
+
+void Texture::DestroyImpl() {
+    TextureBase::DestroyImpl();
+    mMtlTexture = nullptr;
+    mIOSurface = nullptr;
+}
+
+id<MTLTexture> Texture::GetMTLTexture() const {
+    return mMtlTexture.Get();
+}
+
+IOSurfaceRef Texture::GetIOSurface() {
+    return mIOSurface.Get();
+}
+
+NSPRef<id<MTLTexture>> Texture::CreateFormatView(wgpu::TextureFormat format) {
+    if (GetFormat().format == format) {
+        return mMtlTexture;
+    }
+
+    ASSERT(AllowFormatReinterpretationWithoutFlag(MetalPixelFormat(GetFormat().format),
+                                                  MetalPixelFormat(format)));
+    return AcquireNSPRef(
+        [mMtlTexture.Get() newTextureViewWithPixelFormat:MetalPixelFormat(format)]);
+}
+
+MaybeError Texture::ClearTexture(CommandRecordingContext* commandContext,
+                                 const SubresourceRange& range,
+                                 TextureBase::ClearValue clearValue) {
+    Device* device = ToBackend(GetDevice());
+
+    const uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+    const double dClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.0 : 1.0;
+
+    if ((mMtlUsage & MTLTextureUsageRenderTarget) != 0) {
+        ASSERT(GetFormat().isRenderable);
+
+        // End the blit encoder if it is open.
+        commandContext->EndBlit();
+
+        if (GetFormat().HasDepthOrStencil()) {
+            // Create a render pass to clear each subresource.
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                for (uint32_t arrayLayer = range.baseArrayLayer;
+                     arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                            level, arrayLayer, range.aspects))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+
+                    // Note that this creates a descriptor that's autoreleased so we don't use
+                    // AcquireNSRef
+                    NSRef<MTLRenderPassDescriptor> descriptorRef =
+                        [MTLRenderPassDescriptor renderPassDescriptor];
+                    MTLRenderPassDescriptor* descriptor = descriptorRef.Get();
+
+                    // At least one aspect needs clearing. Iterate the aspects individually to
+                    // determine which to clear.
+                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
                         if (clearValue == TextureBase::ClearValue::Zero &&
                             IsSubresourceContentInitialized(
                                 SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
@@ -985,170 +842,296 @@
                             continue;
                         }
 
-                        MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
-                        [commandContext->EnsureBlit()
-                                 copyFromBuffer:uploadBuffer
-                                   sourceOffset:uploadHandle.startOffset
-                              sourceBytesPerRow:largestMipBytesPerRow
-                            sourceBytesPerImage:largestMipBytesPerImage
-                                     sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
-                                                            virtualSize.depthOrArrayLayers)
-                                      toTexture:GetMTLTexture()
-                               destinationSlice:arrayLayer
-                               destinationLevel:level
-                              destinationOrigin:MTLOriginMake(0, 0, 0)
-                                        options:blitOption];
+                        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+                        switch (aspect) {
+                            case Aspect::Depth:
+                                descriptor.depthAttachment.texture = GetMTLTexture();
+                                descriptor.depthAttachment.level = level;
+                                descriptor.depthAttachment.slice = arrayLayer;
+                                descriptor.depthAttachment.loadAction = MTLLoadActionClear;
+                                descriptor.depthAttachment.storeAction = MTLStoreActionStore;
+                                descriptor.depthAttachment.clearDepth = dClearColor;
+                                break;
+                            case Aspect::Stencil:
+                                descriptor.stencilAttachment.texture = GetMTLTexture();
+                                descriptor.stencilAttachment.level = level;
+                                descriptor.stencilAttachment.slice = arrayLayer;
+                                descriptor.stencilAttachment.loadAction = MTLLoadActionClear;
+                                descriptor.stencilAttachment.storeAction = MTLStoreActionStore;
+                                descriptor.stencilAttachment.clearStencil =
+                                    static_cast<uint32_t>(clearColor);
+                                break;
+                            default:
+                                UNREACHABLE();
+                        }
                     }
+
+                    DAWN_TRY(EncodeEmptyMetalRenderPass(device, commandContext, descriptor,
+                                                        GetMipLevelVirtualSize(level)));
                 }
             }
-        }
-
-        if (clearValue == TextureBase::ClearValue::Zero) {
-            SetIsSubresourceContentInitialized(true, range);
-            device->IncrementLazyClearCountForTesting();
-        }
-        return {};
-    }
-
-    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
-                                                      const SubresourceRange& range) {
-        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-            return;
-        }
-        if (!IsSubresourceContentInitialized(range)) {
-            // If subresource has not been initialized, clear it to black as it could
-            // contain dirty bits from recycled memory
-            GetDevice()->ConsumedError(
-                ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
-        }
-    }
-
-    // static
-    ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
-                                                        const TextureViewDescriptor* descriptor) {
-        Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
-        DAWN_TRY(view->Initialize(descriptor));
-        return view;
-    }
-
-    MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
-        Texture* texture = ToBackend(GetTexture());
-
-        // Texture could be destroyed by the time we make a view.
-        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
-            return {};
-        }
-
-        id<MTLTexture> mtlTexture = texture->GetMTLTexture();
-
-        if (!RequiresCreatingNewTextureView(texture, descriptor)) {
-            mMtlTextureView = mtlTexture;
-        } else if (texture->GetFormat().IsMultiPlanar()) {
-            NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
-            MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
-
-            mtlDesc.sampleCount = texture->GetSampleCount();
-            mtlDesc.usage = MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage(),
-                                              texture->GetSampleCount());
-            mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
-            mtlDesc.mipmapLevelCount = texture->GetNumMipLevels();
-            mtlDesc.storageMode = kIOSurfaceStorageMode;
-
-            uint32_t plane = GetIOSurfacePlane(descriptor->aspect);
-            mtlDesc.width = IOSurfaceGetWidthOfPlane(texture->GetIOSurface(), plane);
-            mtlDesc.height = IOSurfaceGetHeightOfPlane(texture->GetIOSurface(), plane);
-
-            // Multiplanar texture is validated to only have single layer, single mipLevel
-            // and 2d textures (depth == 1)
-            ASSERT(texture->GetArrayLayers() == 1 &&
-                   texture->GetDimension() == wgpu::TextureDimension::e2D &&
-                   texture->GetNumMipLevels() == 1);
-            mtlDesc.arrayLength = 1;
-            mtlDesc.depth = 1;
-
-            mMtlTextureView = AcquireNSPRef([ToBackend(GetDevice())->GetMTLDevice()
-                newTextureWithDescriptor:mtlDesc
-                               iosurface:texture->GetIOSurface()
-                                   plane:plane]);
-            if (mMtlTextureView == nil) {
-                return DAWN_INTERNAL_ERROR(
-                    "Failed to create MTLTexture view for external texture.");
-            }
         } else {
-            MTLPixelFormat viewFormat = MetalPixelFormat(descriptor->format);
-            MTLPixelFormat textureFormat = MetalPixelFormat(GetTexture()->GetFormat().format);
-            if (descriptor->aspect == wgpu::TextureAspect::StencilOnly &&
-                textureFormat != MTLPixelFormatStencil8) {
-                if (@available(macOS 10.12, iOS 10.0, *)) {
-                    if (textureFormat == MTLPixelFormatDepth32Float_Stencil8) {
-                        viewFormat = MTLPixelFormatX32_Stencil8;
+            ASSERT(GetFormat().IsColor());
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                // Create multiple render passes with each subresource as a color attachment to
+                // clear them all. Only do this for array layers to ensure all attachments have
+                // the same size.
+                NSRef<MTLRenderPassDescriptor> descriptor;
+                uint32_t attachment = 0;
+
+                uint32_t numZSlices = GetMipLevelVirtualSize(level).depthOrArrayLayers;
+
+                for (uint32_t arrayLayer = range.baseArrayLayer;
+                     arrayLayer < range.baseArrayLayer + range.layerCount; arrayLayer++) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(SubresourceRange::SingleMipAndLayer(
+                            level, arrayLayer, Aspect::Color))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
                     }
-#if defined(DAWN_PLATFORM_MACOS)
-                    else if (textureFormat == MTLPixelFormatDepth24Unorm_Stencil8) {
-                        viewFormat = MTLPixelFormatX24_Stencil8;
+
+                    for (uint32_t z = 0; z < numZSlices; ++z) {
+                        if (descriptor == nullptr) {
+                            // Note that this creates a descriptor that's autoreleased so we
+                            // don't use AcquireNSRef
+                            descriptor = [MTLRenderPassDescriptor renderPassDescriptor];
+                        }
+
+                        [*descriptor colorAttachments][attachment].texture = GetMTLTexture();
+                        [*descriptor colorAttachments][attachment].loadAction = MTLLoadActionClear;
+                        [*descriptor colorAttachments][attachment].storeAction =
+                            MTLStoreActionStore;
+                        [*descriptor colorAttachments][attachment].clearColor =
+                            MTLClearColorMake(dClearColor, dClearColor, dClearColor, dClearColor);
+                        [*descriptor colorAttachments][attachment].level = level;
+                        [*descriptor colorAttachments][attachment].slice = arrayLayer;
+                        [*descriptor colorAttachments][attachment].depthPlane = z;
+
+                        attachment++;
+
+                        if (attachment == kMaxColorAttachments) {
+                            attachment = 0;
+                            DAWN_TRY(EncodeEmptyMetalRenderPass(device, commandContext,
+                                                                descriptor.Get(),
+                                                                GetMipLevelVirtualSize(level)));
+                            descriptor = nullptr;
+                        }
                     }
-#endif
-                    else {
-                        UNREACHABLE();
-                    }
-                } else {
-                    // TODO(enga): Add a workaround to back combined depth/stencil textures
-                    // with Sampled usage using two separate textures.
-                    // Or, consider always using the workaround for D32S8.
-                    GetDevice()->ConsumedError(
-                        DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
-                                               "combined depth/stencil format."));
                 }
-            } else if (GetTexture()->GetFormat().HasDepth() &&
-                       GetTexture()->GetFormat().HasStencil()) {
-                // Depth-only views for depth/stencil textures in Metal simply use the original
-                // texture's format.
-                viewFormat = textureFormat;
-            }
 
-            MTLTextureType textureViewType =
-                MetalTextureViewType(descriptor->dimension, texture->GetSampleCount());
-            auto mipLevelRange = NSMakeRange(descriptor->baseMipLevel, descriptor->mipLevelCount);
-            auto arrayLayerRange =
-                NSMakeRange(descriptor->baseArrayLayer, descriptor->arrayLayerCount);
-
-            mMtlTextureView =
-                AcquireNSPRef([mtlTexture newTextureViewWithPixelFormat:viewFormat
-                                                            textureType:textureViewType
-                                                                 levels:mipLevelRange
-                                                                 slices:arrayLayerRange]);
-            if (mMtlTextureView == nil) {
-                return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view.");
+                if (descriptor != nullptr) {
+                    DAWN_TRY(EncodeEmptyMetalRenderPass(device, commandContext, descriptor.Get(),
+                                                        GetMipLevelVirtualSize(level)));
+                }
             }
         }
+    } else {
+        Extent3D largestMipSize = GetMipLevelVirtualSize(range.baseMipLevel);
 
+        // Encode a buffer to texture copy to clear each subresource.
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            // Compute the buffer size big enough to fill the largest mip.
+            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(aspect).block;
+
+            // Metal validation layers: sourceBytesPerRow must be at least 64.
+            uint32_t largestMipBytesPerRow =
+                std::max((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 64u);
+
+            // Metal validation layers: sourceBytesPerImage must be at least 512.
+            uint64_t largestMipBytesPerImage =
+                std::max(static_cast<uint64_t>(largestMipBytesPerRow) *
+                             (largestMipSize.height / blockInfo.height),
+                         512llu);
+
+            uint64_t bufferSize = largestMipBytesPerImage * largestMipSize.depthOrArrayLayers;
+
+            if (bufferSize > std::numeric_limits<NSUInteger>::max()) {
+                return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
+            }
+
+            DynamicUploader* uploader = device->GetDynamicUploader();
+            UploadHandle uploadHandle;
+            DAWN_TRY_ASSIGN(uploadHandle,
+                            uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
+                                               blockInfo.byteSize));
+            memset(uploadHandle.mappedBuffer, clearColor, bufferSize);
+
+            id<MTLBuffer> uploadBuffer = ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle();
+
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                Extent3D virtualSize = GetMipLevelVirtualSize(level);
+
+                for (uint32_t arrayLayer = range.baseArrayLayer;
+                     arrayLayer < range.baseArrayLayer + range.layerCount; ++arrayLayer) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(
+                            SubresourceRange::SingleMipAndLayer(level, arrayLayer, aspect))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+
+                    MTLBlitOption blitOption = ComputeMTLBlitOption(GetFormat(), aspect);
+                    [commandContext->EnsureBlit()
+                             copyFromBuffer:uploadBuffer
+                               sourceOffset:uploadHandle.startOffset
+                          sourceBytesPerRow:largestMipBytesPerRow
+                        sourceBytesPerImage:largestMipBytesPerImage
+                                 sourceSize:MTLSizeMake(virtualSize.width, virtualSize.height,
+                                                        virtualSize.depthOrArrayLayers)
+                                  toTexture:GetMTLTexture()
+                           destinationSlice:arrayLayer
+                           destinationLevel:level
+                          destinationOrigin:MTLOriginMake(0, 0, 0)
+                                    options:blitOption];
+                }
+            }
+        }
+    }
+
+    if (clearValue == TextureBase::ClearValue::Zero) {
+        SetIsSubresourceContentInitialized(true, range);
+        device->IncrementLazyClearCountForTesting();
+    }
+    return {};
+}
+
+void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* commandContext,
+                                                  const SubresourceRange& range) {
+    if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+        return;
+    }
+    if (!IsSubresourceContentInitialized(range)) {
+        // If subresource has not been initialized, clear it to black as it could
+        // contain dirty bits from recycled memory
+        GetDevice()->ConsumedError(
+            ClearTexture(commandContext, range, TextureBase::ClearValue::Zero));
+    }
+}
+
+// static
+ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+                                                    const TextureViewDescriptor* descriptor) {
+    Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+    DAWN_TRY(view->Initialize(descriptor));
+    return view;
+}
+
+MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+    Texture* texture = ToBackend(GetTexture());
+
+    // Texture could be destroyed by the time we make a view.
+    if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
         return {};
     }
 
-    id<MTLTexture> TextureView::GetMTLTexture() const {
-        ASSERT(mMtlTextureView != nullptr);
-        return mMtlTextureView.Get();
+    id<MTLTexture> mtlTexture = texture->GetMTLTexture();
+
+    if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+        mMtlTextureView = mtlTexture;
+    } else if (texture->GetFormat().IsMultiPlanar()) {
+        NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+        MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
+
+        mtlDesc.sampleCount = texture->GetSampleCount();
+        mtlDesc.usage = MetalTextureUsage(texture->GetFormat(), texture->GetInternalUsage(),
+                                          texture->GetSampleCount());
+        mtlDesc.pixelFormat = MetalPixelFormat(descriptor->format);
+        mtlDesc.mipmapLevelCount = texture->GetNumMipLevels();
+        mtlDesc.storageMode = kIOSurfaceStorageMode;
+
+        uint32_t plane = GetIOSurfacePlane(descriptor->aspect);
+        mtlDesc.width = IOSurfaceGetWidthOfPlane(texture->GetIOSurface(), plane);
+        mtlDesc.height = IOSurfaceGetHeightOfPlane(texture->GetIOSurface(), plane);
+
+        // Multiplanar texture is validated to only have single layer, single mipLevel
+        // and 2d textures (depth == 1)
+        ASSERT(texture->GetArrayLayers() == 1 &&
+               texture->GetDimension() == wgpu::TextureDimension::e2D &&
+               texture->GetNumMipLevels() == 1);
+        mtlDesc.arrayLength = 1;
+        mtlDesc.depth = 1;
+
+        mMtlTextureView = AcquireNSPRef([ToBackend(GetDevice())->GetMTLDevice()
+            newTextureWithDescriptor:mtlDesc
+                           iosurface:texture->GetIOSurface()
+                               plane:plane]);
+        if (mMtlTextureView == nil) {
+            return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view for external texture.");
+        }
+    } else {
+        MTLPixelFormat viewFormat = MetalPixelFormat(descriptor->format);
+        MTLPixelFormat textureFormat = MetalPixelFormat(GetTexture()->GetFormat().format);
+        if (descriptor->aspect == wgpu::TextureAspect::StencilOnly &&
+            textureFormat != MTLPixelFormatStencil8) {
+            if (@available(macOS 10.12, iOS 10.0, *)) {
+                if (textureFormat == MTLPixelFormatDepth32Float_Stencil8) {
+                    viewFormat = MTLPixelFormatX32_Stencil8;
+                }
+#if defined(DAWN_PLATFORM_MACOS)
+                else if (textureFormat == MTLPixelFormatDepth24Unorm_Stencil8) {
+                    viewFormat = MTLPixelFormatX24_Stencil8;
+                }
+#endif
+                else {
+                    UNREACHABLE();
+                }
+            } else {
+                // TODO(enga): Add a workaround to back combined depth/stencil textures
+                // with Sampled usage using two separate textures.
+                // Or, consider always using the workaround for D32S8.
+                GetDevice()->ConsumedError(
+                    DAWN_DEVICE_LOST_ERROR("Cannot create stencil-only texture view of "
+                                           "combined depth/stencil format."));
+            }
+        } else if (GetTexture()->GetFormat().HasDepth() && GetTexture()->GetFormat().HasStencil()) {
+            // Depth-only views for depth/stencil textures in Metal simply use the original
+            // texture's format.
+            viewFormat = textureFormat;
+        }
+
+        MTLTextureType textureViewType =
+            MetalTextureViewType(descriptor->dimension, texture->GetSampleCount());
+        auto mipLevelRange = NSMakeRange(descriptor->baseMipLevel, descriptor->mipLevelCount);
+        auto arrayLayerRange = NSMakeRange(descriptor->baseArrayLayer, descriptor->arrayLayerCount);
+
+        mMtlTextureView = AcquireNSPRef([mtlTexture newTextureViewWithPixelFormat:viewFormat
+                                                                      textureType:textureViewType
+                                                                           levels:mipLevelRange
+                                                                           slices:arrayLayerRange]);
+        if (mMtlTextureView == nil) {
+            return DAWN_INTERNAL_ERROR("Failed to create MTLTexture view.");
+        }
     }
 
-    TextureView::AttachmentInfo TextureView::GetAttachmentInfo() const {
-        ASSERT(GetTexture()->GetInternalUsage() & wgpu::TextureUsage::RenderAttachment);
-        // Use our own view if the formats do not match.
-        // If the formats do not match, format reinterpretation will be required.
-        // Note: Depth/stencil formats don't support reinterpretation.
-        // Also, we compute |useOwnView| here instead of relying on whether or not
-        // a view was created in Initialize, because rendering to a depth/stencil
-        // texture on Metal only works when using the original texture, not a view.
-        bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
-                          !GetTexture()->GetFormat().HasDepthOrStencil();
-        if (useOwnView) {
-            ASSERT(mMtlTextureView.Get());
-            return {mMtlTextureView, 0, 0};
-        }
-        AttachmentInfo info;
-        info.texture = ToBackend(GetTexture())->GetMTLTexture();
-        info.baseMipLevel = GetBaseMipLevel();
-        info.baseArrayLayer = GetBaseArrayLayer();
-        return info;
+    return {};
+}
+
+id<MTLTexture> TextureView::GetMTLTexture() const {
+    ASSERT(mMtlTextureView != nullptr);
+    return mMtlTextureView.Get();
+}
+
+TextureView::AttachmentInfo TextureView::GetAttachmentInfo() const {
+    ASSERT(GetTexture()->GetInternalUsage() & wgpu::TextureUsage::RenderAttachment);
+    // Use our own view if the formats do not match.
+    // If the formats do not match, format reinterpretation will be required.
+    // Note: Depth/stencil formats don't support reinterpretation.
+    // Also, we compute |useOwnView| here instead of relying on whether or not
+    // a view was created in Initialize, because rendering to a depth/stencil
+    // texture on Metal only works when using the original texture, not a view.
+    bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
+                      !GetTexture()->GetFormat().HasDepthOrStencil();
+    if (useOwnView) {
+        ASSERT(mMtlTextureView.Get());
+        return {mMtlTextureView, 0, 0};
     }
+    AttachmentInfo info;
+    info.texture = ToBackend(GetTexture())->GetMTLTexture();
+    info.baseMipLevel = GetBaseMipLevel();
+    info.baseArrayLayer = GetBaseArrayLayer();
+    return info;
+}
 
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/metal/UtilsMetal.h b/src/dawn/native/metal/UtilsMetal.h
index 438d848..9ee31bd 100644
--- a/src/dawn/native/metal/UtilsMetal.h
+++ b/src/dawn/native/metal/UtilsMetal.h
@@ -23,89 +23,85 @@
 #import <Metal/Metal.h>
 
 namespace dawn::native {
-    struct ProgrammableStage;
-    struct EntryPointMetadata;
-    enum class SingleShaderStage;
-}
+struct ProgrammableStage;
+struct EntryPointMetadata;
+enum class SingleShaderStage;
+}  // namespace dawn::native
 
 namespace dawn::native::metal {
 
-    MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
+MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction);
 
-    struct TextureBufferCopySplit {
-        static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
+struct TextureBufferCopySplit {
+    static constexpr uint32_t kMaxTextureBufferCopyRegions = 3;
 
-        struct CopyInfo {
-            NSUInteger bufferOffset;
-            NSUInteger bytesPerRow;
-            NSUInteger bytesPerImage;
-            Origin3D textureOrigin;
-            Extent3D copyExtent;
-        };
-
-        uint32_t count = 0;
-        std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
-
-        auto begin() const {
-            return copies.begin();
-        }
-
-        auto end() const {
-            return copies.begin() + count;
-        }
+    struct CopyInfo {
+        NSUInteger bufferOffset;
+        NSUInteger bytesPerRow;
+        NSUInteger bytesPerImage;
+        Origin3D textureOrigin;
+        Extent3D copyExtent;
     };
 
-    TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
-                                                         uint32_t mipLevel,
-                                                         Origin3D origin,
-                                                         Extent3D copyExtent,
-                                                         uint64_t bufferSize,
-                                                         uint64_t bufferOffset,
-                                                         uint32_t bytesPerRow,
-                                                         uint32_t rowsPerImage,
-                                                         Aspect aspect);
+    uint32_t count = 0;
+    std::array<CopyInfo, kMaxTextureBufferCopyRegions> copies;
 
-    void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
-                                             Texture* texture,
-                                             const TextureCopy& dst,
-                                             const Extent3D& size);
+    auto begin() const { return copies.begin(); }
 
-    MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
+    auto end() const { return copies.begin() + count; }
+};
 
-    // Helper function to create function with constant values wrapped in
-    // if available branch
-    MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
-                                 SingleShaderStage singleShaderStage,
-                                 PipelineLayout* pipelineLayout,
-                                 ShaderModule::MetalFunctionData* functionData,
-                                 uint32_t sampleMask = 0xFFFFFFFF,
-                                 const RenderPipeline* renderPipeline = nullptr);
+TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+                                                     uint32_t mipLevel,
+                                                     Origin3D origin,
+                                                     Extent3D copyExtent,
+                                                     uint64_t bufferSize,
+                                                     uint64_t bufferOffset,
+                                                     uint32_t bytesPerRow,
+                                                     uint32_t rowsPerImage,
+                                                     Aspect aspect);
 
-    // Allow use MTLStoreActionStoreAndMultismapleResolve because the logic in the backend is
-    // first to compute what the "best" Metal render pass descriptor is, then fix it up if we
-    // are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
+void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+                                         Texture* texture,
+                                         const TextureCopy& dst,
+                                         const Extent3D& size);
+
+MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect);
+
+// Helper function to create function with constant values wrapped in
+// if available branch
+MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+                             SingleShaderStage singleShaderStage,
+                             PipelineLayout* pipelineLayout,
+                             ShaderModule::MetalFunctionData* functionData,
+                             uint32_t sampleMask = 0xFFFFFFFF,
+                             const RenderPipeline* renderPipeline = nullptr);
+
+// Allow use MTLStoreActionStoreAndMultismapleResolve because the logic in the backend is
+// first to compute what the "best" Metal render pass descriptor is, then fix it up if we
+// are not on macOS 10.12 (i.e. the EmulateStoreAndMSAAResolve toggle is on).
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wunguarded-availability"
-    constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
-        MTLStoreActionStoreAndMultisampleResolve;
+constexpr MTLStoreAction kMTLStoreActionStoreAndMultisampleResolve =
+    MTLStoreActionStoreAndMultisampleResolve;
 #pragma clang diagnostic pop
 
-    // Helper functions to encode Metal render passes that take care of multiple workarounds that
-    // happen at the render pass start and end. Because workarounds wrap the encoding of the render
-    // pass, the encoding must be entirely done by the `encodeInside` callback.
-    // At the end of this function, `commandContext` will have no encoder open.
-    using EncodeInsideRenderPass = std::function<MaybeError(id<MTLRenderCommandEncoder>)>;
-    MaybeError EncodeMetalRenderPass(Device* device,
-                                     CommandRecordingContext* commandContext,
-                                     MTLRenderPassDescriptor* mtlRenderPass,
-                                     uint32_t width,
-                                     uint32_t height,
-                                     EncodeInsideRenderPass encodeInside);
+// Helper functions to encode Metal render passes that take care of multiple workarounds that
+// happen at the render pass start and end. Because workarounds wrap the encoding of the render
+// pass, the encoding must be entirely done by the `encodeInside` callback.
+// At the end of this function, `commandContext` will have no encoder open.
+using EncodeInsideRenderPass = std::function<MaybeError(id<MTLRenderCommandEncoder>)>;
+MaybeError EncodeMetalRenderPass(Device* device,
+                                 CommandRecordingContext* commandContext,
+                                 MTLRenderPassDescriptor* mtlRenderPass,
+                                 uint32_t width,
+                                 uint32_t height,
+                                 EncodeInsideRenderPass encodeInside);
 
-    MaybeError EncodeEmptyMetalRenderPass(Device* device,
-                                          CommandRecordingContext* commandContext,
-                                          MTLRenderPassDescriptor* mtlRenderPass,
-                                          Extent3D size);
+MaybeError EncodeEmptyMetalRenderPass(Device* device,
+                                      CommandRecordingContext* commandContext,
+                                      MTLRenderPassDescriptor* mtlRenderPass,
+                                      Extent3D size);
 
 }  // namespace dawn::native::metal
 
diff --git a/src/dawn/native/metal/UtilsMetal.mm b/src/dawn/native/metal/UtilsMetal.mm
index 5c35d35..339b8872 100644
--- a/src/dawn/native/metal/UtilsMetal.mm
+++ b/src/dawn/native/metal/UtilsMetal.mm
@@ -21,553 +21,542 @@
 
 namespace dawn::native::metal {
 
-    namespace {
-        // A helper struct to track state while doing workarounds for Metal render passes. It
-        // contains a temporary texture and information about the attachment it replaces.
-        // Helper methods encode copies between the two textures.
-        struct SavedMetalAttachment {
-            id<MTLTexture> texture = nil;
-            NSUInteger level;
-            NSUInteger slice;
+namespace {
+// A helper struct to track state while doing workarounds for Metal render passes. It
+// contains a temporary texture and information about the attachment it replaces.
+// Helper methods encode copies between the two textures.
+struct SavedMetalAttachment {
+    id<MTLTexture> texture = nil;
+    NSUInteger level;
+    NSUInteger slice;
 
-            NSPRef<id<MTLTexture>> temporary;
+    NSPRef<id<MTLTexture>> temporary;
 
-            void CopyFromTemporaryToAttachment(CommandRecordingContext* commandContext) {
-                [commandContext->EnsureBlit()
-                      copyFromTexture:temporary.Get()
-                          sourceSlice:0
-                          sourceLevel:0
-                         sourceOrigin:MTLOriginMake(0, 0, 0)
-                           sourceSize:MTLSizeMake([temporary.Get() width], [temporary.Get() height],
-                                                  1)
-                            toTexture:texture
-                     destinationSlice:slice
-                     destinationLevel:level
-                    destinationOrigin:MTLOriginMake(0, 0, 0)];
-            }
-
-            void CopyFromAttachmentToTemporary(CommandRecordingContext* commandContext) {
-                [commandContext->EnsureBlit()
-                      copyFromTexture:texture
-                          sourceSlice:slice
-                          sourceLevel:level
-                         sourceOrigin:MTLOriginMake(0, 0, 0)
-                           sourceSize:MTLSizeMake([temporary.Get() width], [temporary.Get() height],
-                                                  1)
-                            toTexture:temporary.Get()
-                     destinationSlice:0
-                     destinationLevel:0
-                    destinationOrigin:MTLOriginMake(0, 0, 0)];
-            }
-        };
-
-        // Common code between both kinds of attachments swaps.
-        ResultOrError<SavedMetalAttachment> SaveAttachmentCreateTemporary(
-            Device* device,
-            id<MTLTexture> attachmentTexture,
-            NSUInteger attachmentLevel,
-            NSUInteger attachmentSlice) {
-            // Save the attachment.
-            SavedMetalAttachment result;
-            result.texture = attachmentTexture;
-            result.level = attachmentLevel;
-            result.slice = attachmentSlice;
-
-            // Create the temporary texture.
-            NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
-            MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
-
-            mtlDesc.textureType = MTLTextureType2D;
-            mtlDesc.usage = MTLTextureUsageRenderTarget;
-            mtlDesc.pixelFormat = [result.texture pixelFormat];
-            mtlDesc.width = std::max([result.texture width] >> attachmentLevel, NSUInteger(1));
-            mtlDesc.height = std::max([result.texture height] >> attachmentLevel, NSUInteger(1));
-            mtlDesc.depth = 1;
-            mtlDesc.mipmapLevelCount = 1;
-            mtlDesc.arrayLength = 1;
-            mtlDesc.storageMode = MTLStorageModePrivate;
-            mtlDesc.sampleCount = [result.texture sampleCount];
-
-            result.temporary =
-                AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc]);
-            if (result.temporary == nil) {
-                return DAWN_OUT_OF_MEMORY_ERROR("Allocation of temporary texture failed.");
-            }
-
-            return result;
-        }
-
-        // Patches the render pass attachment to replace it with a temporary texture. Returns a
-        // SavedMetalAttachment that can be used to easily copy between the original attachment and
-        // the temporary.
-        ResultOrError<SavedMetalAttachment> PatchAttachmentWithTemporary(
-            Device* device,
-            MTLRenderPassAttachmentDescriptor* attachment) {
-            SavedMetalAttachment result;
-            DAWN_TRY_ASSIGN(
-                result, SaveAttachmentCreateTemporary(device, attachment.texture, attachment.level,
-                                                      attachment.slice));
-
-            // Replace the attachment with the temporary
-            attachment.texture = result.temporary.Get();
-            attachment.level = 0;
-            attachment.slice = 0;
-
-            return result;
-        }
-
-        // Same as PatchAttachmentWithTemporary but for the resolve attachment.
-        ResultOrError<SavedMetalAttachment> PatchResolveAttachmentWithTemporary(
-            Device* device,
-            MTLRenderPassAttachmentDescriptor* attachment) {
-            SavedMetalAttachment result;
-            DAWN_TRY_ASSIGN(result, SaveAttachmentCreateTemporary(device, attachment.resolveTexture,
-                                                                  attachment.resolveLevel,
-                                                                  attachment.resolveSlice));
-
-            // Replace the resolve attachment with the tempoary.
-            attachment.resolveTexture = result.temporary.Get();
-            attachment.resolveLevel = 0;
-            attachment.resolveSlice = 0;
-
-            return result;
-        }
-
-        // Helper function for Toggle EmulateStoreAndMSAAResolve
-        void ResolveInAnotherRenderPass(
-            CommandRecordingContext* commandContext,
-            const MTLRenderPassDescriptor* mtlRenderPass,
-            const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
-            // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
-            NSRef<MTLRenderPassDescriptor> mtlRenderPassForResolveRef =
-                [MTLRenderPassDescriptor renderPassDescriptor];
-            MTLRenderPassDescriptor* mtlRenderPassForResolve = mtlRenderPassForResolveRef.Get();
-
-            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-                if (resolveTextures[i] == nullptr) {
-                    continue;
-                }
-
-                mtlRenderPassForResolve.colorAttachments[i].texture =
-                    mtlRenderPass.colorAttachments[i].texture;
-                mtlRenderPassForResolve.colorAttachments[i].loadAction = MTLLoadActionLoad;
-                mtlRenderPassForResolve.colorAttachments[i].storeAction =
-                    MTLStoreActionMultisampleResolve;
-                mtlRenderPassForResolve.colorAttachments[i].resolveTexture = resolveTextures[i];
-                mtlRenderPassForResolve.colorAttachments[i].resolveLevel =
-                    mtlRenderPass.colorAttachments[i].resolveLevel;
-                mtlRenderPassForResolve.colorAttachments[i].resolveSlice =
-                    mtlRenderPass.colorAttachments[i].resolveSlice;
-            }
-
-            commandContext->BeginRender(mtlRenderPassForResolve);
-            commandContext->EndRender();
-        }
-    }  // anonymous namespace
-
-    MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
-        switch (compareFunction) {
-            case wgpu::CompareFunction::Never:
-                return MTLCompareFunctionNever;
-            case wgpu::CompareFunction::Less:
-                return MTLCompareFunctionLess;
-            case wgpu::CompareFunction::LessEqual:
-                return MTLCompareFunctionLessEqual;
-            case wgpu::CompareFunction::Greater:
-                return MTLCompareFunctionGreater;
-            case wgpu::CompareFunction::GreaterEqual:
-                return MTLCompareFunctionGreaterEqual;
-            case wgpu::CompareFunction::NotEqual:
-                return MTLCompareFunctionNotEqual;
-            case wgpu::CompareFunction::Equal:
-                return MTLCompareFunctionEqual;
-            case wgpu::CompareFunction::Always:
-                return MTLCompareFunctionAlways;
-
-            case wgpu::CompareFunction::Undefined:
-                UNREACHABLE();
-        }
+    void CopyFromTemporaryToAttachment(CommandRecordingContext* commandContext) {
+        [commandContext->EnsureBlit()
+              copyFromTexture:temporary.Get()
+                  sourceSlice:0
+                  sourceLevel:0
+                 sourceOrigin:MTLOriginMake(0, 0, 0)
+                   sourceSize:MTLSizeMake([temporary.Get() width], [temporary.Get() height], 1)
+                    toTexture:texture
+             destinationSlice:slice
+             destinationLevel:level
+            destinationOrigin:MTLOriginMake(0, 0, 0)];
     }
 
-    TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
-                                                         uint32_t mipLevel,
-                                                         Origin3D origin,
-                                                         Extent3D copyExtent,
-                                                         uint64_t bufferSize,
-                                                         uint64_t bufferOffset,
-                                                         uint32_t bytesPerRow,
-                                                         uint32_t rowsPerImage,
-                                                         Aspect aspect) {
-        TextureBufferCopySplit copy;
-        const Format textureFormat = texture->GetFormat();
-        const TexelBlockInfo& blockInfo = textureFormat.GetAspectInfo(aspect).block;
+    void CopyFromAttachmentToTemporary(CommandRecordingContext* commandContext) {
+        [commandContext->EnsureBlit()
+              copyFromTexture:texture
+                  sourceSlice:slice
+                  sourceLevel:level
+                 sourceOrigin:MTLOriginMake(0, 0, 0)
+                   sourceSize:MTLSizeMake([temporary.Get() width], [temporary.Get() height], 1)
+                    toTexture:temporary.Get()
+             destinationSlice:0
+             destinationLevel:0
+            destinationOrigin:MTLOriginMake(0, 0, 0)];
+    }
+};
 
-        // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
-        // compute the correct range when checking if the buffer is big enough to contain the
-        // data for the whole copy. Instead of looking at the position of the last texel in the
-        // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
-        // format.blockHeight) * copySize.depthOrArrayLayers. For example considering the pixel
-        // buffer below where in memory, each row data (D) of the texture is followed by some
-        // padding data (P):
-        //     |DDDDDDD|PP|
-        //     |DDDDDDD|PP|
-        //     |DDDDDDD|PP|
-        //     |DDDDDDD|PP|
-        //     |DDDDDDA|PP|
-        // The last pixel read will be A, but the driver will think it is the whole last padding
-        // row, causing it to generate an error when the pixel buffer is just big enough.
+// Common code between both kinds of attachment swaps.
+ResultOrError<SavedMetalAttachment> SaveAttachmentCreateTemporary(Device* device,
+                                                                  id<MTLTexture> attachmentTexture,
+                                                                  NSUInteger attachmentLevel,
+                                                                  NSUInteger attachmentSlice) {
+    // Save the attachment.
+    SavedMetalAttachment result;
+    result.texture = attachmentTexture;
+    result.level = attachmentLevel;
+    result.slice = attachmentSlice;
 
-        // We work around this limitation by detecting when Metal would complain and copy the
-        // last image and row separately using tight sourceBytesPerRow or sourceBytesPerImage.
-        uint32_t bytesPerImage = bytesPerRow * rowsPerImage;
+    // Create the temporary texture.
+    NSRef<MTLTextureDescriptor> mtlDescRef = AcquireNSRef([MTLTextureDescriptor new]);
+    MTLTextureDescriptor* mtlDesc = mtlDescRef.Get();
 
-        // Metal validation layer requires that if the texture's pixel format is a compressed
-        // format, the sourceSize must be a multiple of the pixel format's block size or be
-        // clamped to the edge of the texture if the block extends outside the bounds of a
-        // texture.
-        const Extent3D clampedCopyExtent =
-            texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
+    mtlDesc.textureType = MTLTextureType2D;
+    mtlDesc.usage = MTLTextureUsageRenderTarget;
+    mtlDesc.pixelFormat = [result.texture pixelFormat];
+    mtlDesc.width = std::max([result.texture width] >> attachmentLevel, NSUInteger(1));
+    mtlDesc.height = std::max([result.texture height] >> attachmentLevel, NSUInteger(1));
+    mtlDesc.depth = 1;
+    mtlDesc.mipmapLevelCount = 1;
+    mtlDesc.arrayLength = 1;
+    mtlDesc.storageMode = MTLStorageModePrivate;
+    mtlDesc.sampleCount = [result.texture sampleCount];
 
-        // Check whether buffer size is big enough.
-        bool needWorkaround =
-            bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
-        if (!needWorkaround) {
-            copy.count = 1;
-            copy.copies[0].bufferOffset = bufferOffset;
-            copy.copies[0].bytesPerRow = bytesPerRow;
-            copy.copies[0].bytesPerImage = bytesPerImage;
-            copy.copies[0].textureOrigin = origin;
-            copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
-                                         copyExtent.depthOrArrayLayers};
-            return copy;
+    result.temporary = AcquireNSPRef([device->GetMTLDevice() newTextureWithDescriptor:mtlDesc]);
+    if (result.temporary == nil) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Allocation of temporary texture failed.");
+    }
+
+    return result;
+}
+
+// Patches the render pass attachment to replace it with a temporary texture. Returns a
+// SavedMetalAttachment that can be used to easily copy between the original attachment and
+// the temporary.
+ResultOrError<SavedMetalAttachment> PatchAttachmentWithTemporary(
+    Device* device,
+    MTLRenderPassAttachmentDescriptor* attachment) {
+    SavedMetalAttachment result;
+    DAWN_TRY_ASSIGN(result, SaveAttachmentCreateTemporary(device, attachment.texture,
+                                                          attachment.level, attachment.slice));
+
+    // Replace the attachment with the temporary.
+    attachment.texture = result.temporary.Get();
+    attachment.level = 0;
+    attachment.slice = 0;
+
+    return result;
+}
+
+// Same as PatchAttachmentWithTemporary but for the resolve attachment.
+ResultOrError<SavedMetalAttachment> PatchResolveAttachmentWithTemporary(
+    Device* device,
+    MTLRenderPassAttachmentDescriptor* attachment) {
+    SavedMetalAttachment result;
+    DAWN_TRY_ASSIGN(
+        result, SaveAttachmentCreateTemporary(device, attachment.resolveTexture,
+                                              attachment.resolveLevel, attachment.resolveSlice));
+
+    // Replace the resolve attachment with the temporary.
+    attachment.resolveTexture = result.temporary.Get();
+    attachment.resolveLevel = 0;
+    attachment.resolveSlice = 0;
+
+    return result;
+}
+
+// Helper function for Toggle EmulateStoreAndMSAAResolve
+void ResolveInAnotherRenderPass(
+    CommandRecordingContext* commandContext,
+    const MTLRenderPassDescriptor* mtlRenderPass,
+    const std::array<id<MTLTexture>, kMaxColorAttachments>& resolveTextures) {
+    // Note that this creates a descriptor that's autoreleased so we don't use AcquireNSRef
+    NSRef<MTLRenderPassDescriptor> mtlRenderPassForResolveRef =
+        [MTLRenderPassDescriptor renderPassDescriptor];
+    MTLRenderPassDescriptor* mtlRenderPassForResolve = mtlRenderPassForResolveRef.Get();
+
+    for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+        if (resolveTextures[i] == nullptr) {
+            continue;
         }
 
-        uint64_t currentOffset = bufferOffset;
+        mtlRenderPassForResolve.colorAttachments[i].texture =
+            mtlRenderPass.colorAttachments[i].texture;
+        mtlRenderPassForResolve.colorAttachments[i].loadAction = MTLLoadActionLoad;
+        mtlRenderPassForResolve.colorAttachments[i].storeAction = MTLStoreActionMultisampleResolve;
+        mtlRenderPassForResolve.colorAttachments[i].resolveTexture = resolveTextures[i];
+        mtlRenderPassForResolve.colorAttachments[i].resolveLevel =
+            mtlRenderPass.colorAttachments[i].resolveLevel;
+        mtlRenderPassForResolve.colorAttachments[i].resolveSlice =
+            mtlRenderPass.colorAttachments[i].resolveSlice;
+    }
 
-        // Doing all the copy except the last image.
-        if (copyExtent.depthOrArrayLayers > 1) {
-            copy.copies[copy.count].bufferOffset = currentOffset;
-            copy.copies[copy.count].bytesPerRow = bytesPerRow;
-            copy.copies[copy.count].bytesPerImage = bytesPerImage;
-            copy.copies[copy.count].textureOrigin = origin;
-            copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
-                                                  copyExtent.depthOrArrayLayers - 1};
+    commandContext->BeginRender(mtlRenderPassForResolve);
+    commandContext->EndRender();
+}
+}  // anonymous namespace
 
-            ++copy.count;
+MTLCompareFunction ToMetalCompareFunction(wgpu::CompareFunction compareFunction) {
+    switch (compareFunction) {
+        case wgpu::CompareFunction::Never:
+            return MTLCompareFunctionNever;
+        case wgpu::CompareFunction::Less:
+            return MTLCompareFunctionLess;
+        case wgpu::CompareFunction::LessEqual:
+            return MTLCompareFunctionLessEqual;
+        case wgpu::CompareFunction::Greater:
+            return MTLCompareFunctionGreater;
+        case wgpu::CompareFunction::GreaterEqual:
+            return MTLCompareFunctionGreaterEqual;
+        case wgpu::CompareFunction::NotEqual:
+            return MTLCompareFunctionNotEqual;
+        case wgpu::CompareFunction::Equal:
+            return MTLCompareFunctionEqual;
+        case wgpu::CompareFunction::Always:
+            return MTLCompareFunctionAlways;
 
-            // Update offset to copy to the last image.
-            currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
-        }
+        case wgpu::CompareFunction::Undefined:
+            UNREACHABLE();
+    }
+}
 
-        // Doing all the copy in last image except the last row.
-        uint32_t copyBlockRowCount = copyExtent.height / blockInfo.height;
-        if (copyBlockRowCount > 1) {
-            copy.copies[copy.count].bufferOffset = currentOffset;
-            copy.copies[copy.count].bytesPerRow = bytesPerRow;
-            copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
-            copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
-                                                     origin.z + copyExtent.depthOrArrayLayers - 1};
+TextureBufferCopySplit ComputeTextureBufferCopySplit(const Texture* texture,
+                                                     uint32_t mipLevel,
+                                                     Origin3D origin,
+                                                     Extent3D copyExtent,
+                                                     uint64_t bufferSize,
+                                                     uint64_t bufferOffset,
+                                                     uint32_t bytesPerRow,
+                                                     uint32_t rowsPerImage,
+                                                     Aspect aspect) {
+    TextureBufferCopySplit copy;
+    const Format textureFormat = texture->GetFormat();
+    const TexelBlockInfo& blockInfo = textureFormat.GetAspectInfo(aspect).block;
 
-            ASSERT(copyExtent.height - blockInfo.height <
-                   texture->GetMipLevelVirtualSize(mipLevel).height);
-            copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
-                                                  copyExtent.height - blockInfo.height, 1};
+    // When copying textures from/to an unpacked buffer, the Metal validation layer doesn't
+    // compute the correct range when checking if the buffer is big enough to contain the
+    // data for the whole copy. Instead of looking at the position of the last texel in the
+    // buffer, it computes the volume of the 3D box with bytesPerRow * (rowsPerImage /
+    // format.blockHeight) * copySize.depthOrArrayLayers. For example considering the pixel
+    // buffer below where in memory, each row data (D) of the texture is followed by some
+    // padding data (P):
+    //     |DDDDDDD|PP|
+    //     |DDDDDDD|PP|
+    //     |DDDDDDD|PP|
+    //     |DDDDDDD|PP|
+    //     |DDDDDDA|PP|
+    // The last pixel read will be A, but the driver will think it is the whole last padding
+    // row, causing it to generate an error when the pixel buffer is just big enough.
 
-            ++copy.count;
+    // We work around this limitation by detecting when Metal would complain and copy the
+    // last image and row separately using tight sourceBytesPerRow or sourceBytesPerImage.
+    uint32_t bytesPerImage = bytesPerRow * rowsPerImage;
 
-            // Update offset to copy to the last row.
-            currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
-        }
+    // Metal validation layer requires that if the texture's pixel format is a compressed
+    // format, the sourceSize must be a multiple of the pixel format's block size or be
+    // clamped to the edge of the texture if the block extends outside the bounds of a
+    // texture.
+    const Extent3D clampedCopyExtent =
+        texture->ClampToMipLevelVirtualSize(mipLevel, origin, copyExtent);
 
-        // Doing the last row copy with the exact number of bytes in last row.
-        // Workaround this issue in a way just like the copy to a 1D texture.
-        uint32_t lastRowDataSize = (copyExtent.width / blockInfo.width) * blockInfo.byteSize;
-        uint32_t lastRowCopyExtentHeight =
-            blockInfo.height + clampedCopyExtent.height - copyExtent.height;
-        ASSERT(lastRowCopyExtentHeight <= blockInfo.height);
-
-        copy.copies[copy.count].bufferOffset = currentOffset;
-        copy.copies[copy.count].bytesPerRow = lastRowDataSize;
-        copy.copies[copy.count].bytesPerImage = lastRowDataSize;
-        copy.copies[copy.count].textureOrigin = {origin.x,
-                                                 origin.y + copyExtent.height - blockInfo.height,
-                                                 origin.z + copyExtent.depthOrArrayLayers - 1};
-        copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
-        ++copy.count;
-
+    // Check whether buffer size is big enough.
+    bool needWorkaround = bufferSize - bufferOffset < bytesPerImage * copyExtent.depthOrArrayLayers;
+    if (!needWorkaround) {
+        copy.count = 1;
+        copy.copies[0].bufferOffset = bufferOffset;
+        copy.copies[0].bytesPerRow = bytesPerRow;
+        copy.copies[0].bytesPerImage = bytesPerImage;
+        copy.copies[0].textureOrigin = origin;
+        copy.copies[0].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+                                     copyExtent.depthOrArrayLayers};
         return copy;
     }
 
-    void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
-                                             Texture* texture,
-                                             const TextureCopy& dst,
-                                             const Extent3D& size) {
-        ASSERT(texture == dst.texture.Get());
-        SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
-        if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
-            texture->SetIsSubresourceContentInitialized(true, range);
-        } else {
-            texture->EnsureSubresourceContentInitialized(commandContext, range);
-        }
+    uint64_t currentOffset = bufferOffset;
+
+    // Doing all the copy except the last image.
+    if (copyExtent.depthOrArrayLayers > 1) {
+        copy.copies[copy.count].bufferOffset = currentOffset;
+        copy.copies[copy.count].bytesPerRow = bytesPerRow;
+        copy.copies[copy.count].bytesPerImage = bytesPerImage;
+        copy.copies[copy.count].textureOrigin = origin;
+        copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, clampedCopyExtent.height,
+                                              copyExtent.depthOrArrayLayers - 1};
+
+        ++copy.count;
+
+        // Update offset to copy to the last image.
+        currentOffset += (copyExtent.depthOrArrayLayers - 1) * bytesPerImage;
     }
 
-    MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
-        ASSERT(HasOneBit(aspect));
-        ASSERT(format.aspects & aspect);
+    // Doing all the copy in last image except the last row.
+    uint32_t copyBlockRowCount = copyExtent.height / blockInfo.height;
+    if (copyBlockRowCount > 1) {
+        copy.copies[copy.count].bufferOffset = currentOffset;
+        copy.copies[copy.count].bytesPerRow = bytesPerRow;
+        copy.copies[copy.count].bytesPerImage = bytesPerRow * (copyBlockRowCount - 1);
+        copy.copies[copy.count].textureOrigin = {origin.x, origin.y,
+                                                 origin.z + copyExtent.depthOrArrayLayers - 1};
 
-        if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
-            // We only provide a blit option if the format has both depth and stencil.
-            // It is invalid to provide a blit option otherwise.
-            switch (aspect) {
-                case Aspect::Depth:
-                    return MTLBlitOptionDepthFromDepthStencil;
-                case Aspect::Stencil:
-                    return MTLBlitOptionStencilFromDepthStencil;
+        ASSERT(copyExtent.height - blockInfo.height <
+               texture->GetMipLevelVirtualSize(mipLevel).height);
+        copy.copies[copy.count].copyExtent = {clampedCopyExtent.width,
+                                              copyExtent.height - blockInfo.height, 1};
+
+        ++copy.count;
+
+        // Update offset to copy to the last row.
+        currentOffset += (copyBlockRowCount - 1) * bytesPerRow;
+    }
+
+    // Doing the last row copy with the exact number of bytes in last row.
+    // Workaround this issue in a way just like the copy to a 1D texture.
+    uint32_t lastRowDataSize = (copyExtent.width / blockInfo.width) * blockInfo.byteSize;
+    uint32_t lastRowCopyExtentHeight =
+        blockInfo.height + clampedCopyExtent.height - copyExtent.height;
+    ASSERT(lastRowCopyExtentHeight <= blockInfo.height);
+
+    copy.copies[copy.count].bufferOffset = currentOffset;
+    copy.copies[copy.count].bytesPerRow = lastRowDataSize;
+    copy.copies[copy.count].bytesPerImage = lastRowDataSize;
+    copy.copies[copy.count].textureOrigin = {origin.x,
+                                             origin.y + copyExtent.height - blockInfo.height,
+                                             origin.z + copyExtent.depthOrArrayLayers - 1};
+    copy.copies[copy.count].copyExtent = {clampedCopyExtent.width, lastRowCopyExtentHeight, 1};
+    ++copy.count;
+
+    return copy;
+}
+
+void EnsureDestinationTextureInitialized(CommandRecordingContext* commandContext,
+                                         Texture* texture,
+                                         const TextureCopy& dst,
+                                         const Extent3D& size) {
+    ASSERT(texture == dst.texture.Get());
+    SubresourceRange range = GetSubresourcesAffectedByCopy(dst, size);
+    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), size, dst.mipLevel)) {
+        texture->SetIsSubresourceContentInitialized(true, range);
+    } else {
+        texture->EnsureSubresourceContentInitialized(commandContext, range);
+    }
+}
+
+MTLBlitOption ComputeMTLBlitOption(const Format& format, Aspect aspect) {
+    ASSERT(HasOneBit(aspect));
+    ASSERT(format.aspects & aspect);
+
+    if (IsSubset(Aspect::Depth | Aspect::Stencil, format.aspects)) {
+        // We only provide a blit option if the format has both depth and stencil.
+        // It is invalid to provide a blit option otherwise.
+        switch (aspect) {
+            case Aspect::Depth:
+                return MTLBlitOptionDepthFromDepthStencil;
+            case Aspect::Stencil:
+                return MTLBlitOptionStencilFromDepthStencil;
+            default:
+                UNREACHABLE();
+        }
+    }
+    return MTLBlitOptionNone;
+}
+
+MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
+                             SingleShaderStage singleShaderStage,
+                             PipelineLayout* pipelineLayout,
+                             ShaderModule::MetalFunctionData* functionData,
+                             uint32_t sampleMask,
+                             const RenderPipeline* renderPipeline) {
+    ShaderModule* shaderModule = ToBackend(programmableStage.module.Get());
+    const char* shaderEntryPoint = programmableStage.entryPoint.c_str();
+    const auto& entryPointMetadata = programmableStage.module->GetEntryPoint(shaderEntryPoint);
+    if (entryPointMetadata.overridableConstants.size() == 0) {
+        DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage, pipelineLayout,
+                                              functionData, nil, sampleMask, renderPipeline));
+        return {};
+    }
+
+    if (@available(macOS 10.12, *)) {
+        // MTLFunctionConstantValues can only be created within the if available branch
+        NSRef<MTLFunctionConstantValues> constantValues =
+            AcquireNSRef([MTLFunctionConstantValues new]);
+
+        std::unordered_set<std::string> overriddenConstants;
+
+        auto switchType = [&](EntryPointMetadata::OverridableConstant::Type dawnType,
+                              MTLDataType* type, OverridableConstantScalar* entry,
+                              double value = 0) {
+            switch (dawnType) {
+                case EntryPointMetadata::OverridableConstant::Type::Boolean:
+                    *type = MTLDataTypeBool;
+                    if (entry) {
+                        entry->b = static_cast<int32_t>(value);
+                    }
+                    break;
+                case EntryPointMetadata::OverridableConstant::Type::Float32:
+                    *type = MTLDataTypeFloat;
+                    if (entry) {
+                        entry->f32 = static_cast<float>(value);
+                    }
+                    break;
+                case EntryPointMetadata::OverridableConstant::Type::Int32:
+                    *type = MTLDataTypeInt;
+                    if (entry) {
+                        entry->i32 = static_cast<int32_t>(value);
+                    }
+                    break;
+                case EntryPointMetadata::OverridableConstant::Type::Uint32:
+                    *type = MTLDataTypeUInt;
+                    if (entry) {
+                        entry->u32 = static_cast<uint32_t>(value);
+                    }
+                    break;
                 default:
                     UNREACHABLE();
             }
-        }
-        return MTLBlitOptionNone;
-    }
+        };
 
-    MaybeError CreateMTLFunction(const ProgrammableStage& programmableStage,
-                                 SingleShaderStage singleShaderStage,
-                                 PipelineLayout* pipelineLayout,
-                                 ShaderModule::MetalFunctionData* functionData,
-                                 uint32_t sampleMask,
-                                 const RenderPipeline* renderPipeline) {
-        ShaderModule* shaderModule = ToBackend(programmableStage.module.Get());
-        const char* shaderEntryPoint = programmableStage.entryPoint.c_str();
-        const auto& entryPointMetadata = programmableStage.module->GetEntryPoint(shaderEntryPoint);
-        if (entryPointMetadata.overridableConstants.size() == 0) {
-            DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage,
-                                                  pipelineLayout, functionData, nil, sampleMask,
-                                                  renderPipeline));
+        for (const auto& [name, value] : programmableStage.constants) {
+            overriddenConstants.insert(name);
+
+            // This is already validated so `name` must exist
+            const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+
+            MTLDataType type;
+            OverridableConstantScalar entry{};
+
+            switchType(moduleConstant.type, &type, &entry, value);
+
+            [constantValues.Get() setConstantValue:&entry type:type atIndex:moduleConstant.id];
+        }
+
+        // Set shader initialized default values because MSL function_constant
+        // has no default value
+        for (const std::string& name : entryPointMetadata.initializedOverridableConstants) {
+            if (overriddenConstants.count(name) != 0) {
+                // This constant already has overridden value
+                continue;
+            }
+
+            // Must exist because it is validated
+            const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
+            ASSERT(moduleConstant.isInitialized);
+            MTLDataType type;
+
+            switchType(moduleConstant.type, &type, nullptr);
+
+            [constantValues.Get() setConstantValue:&moduleConstant.defaultValue
+                                              type:type
+                                           atIndex:moduleConstant.id];
+        }
+
+        DAWN_TRY(shaderModule->CreateFunction(shaderEntryPoint, singleShaderStage, pipelineLayout,
+                                              functionData, constantValues.Get(), sampleMask,
+                                              renderPipeline));
+    } else {
+        UNREACHABLE();
+    }
+    return {};
+}
+
+MaybeError EncodeMetalRenderPass(Device* device,
+                                 CommandRecordingContext* commandContext,
+                                 MTLRenderPassDescriptor* mtlRenderPass,
+                                 uint32_t width,
+                                 uint32_t height,
+                                 EncodeInsideRenderPass encodeInside) {
+    // This function handles multiple workarounds. Because some cases require multiple
+    // workarounds to happen at the same time, it handles workarounds one by one and calls
+    // itself recursively to handle the next workaround if needed.
+
+    // Handle Toggle AlwaysResolveIntoZeroLevelAndLayer. We must handle this before applying
+    // the store + MSAA resolve workaround, otherwise this toggle will never be handled because
+    // the resolve texture is removed when applying the store + MSAA resolve workaround.
+    if (device->IsToggleEnabled(Toggle::AlwaysResolveIntoZeroLevelAndLayer)) {
+        std::array<SavedMetalAttachment, kMaxColorAttachments> trueResolveAttachments = {};
+        bool workaroundUsed = false;
+        for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+            if (mtlRenderPass.colorAttachments[i].resolveTexture == nullptr) {
+                continue;
+            }
+
+            if (mtlRenderPass.colorAttachments[i].resolveLevel == 0 &&
+                mtlRenderPass.colorAttachments[i].resolveSlice == 0) {
+                continue;
+            }
+
+            DAWN_TRY_ASSIGN(
+                trueResolveAttachments[i],
+                PatchResolveAttachmentWithTemporary(device, mtlRenderPass.colorAttachments[i]));
+            workaroundUsed = true;
+        }
+
+        // If we need to use a temporary resolve texture we need to copy the result of MSAA
+        // resolve back to the true resolve targets.
+        if (workaroundUsed) {
+            DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
+                                           std::move(encodeInside)));
+
+            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+                if (trueResolveAttachments[i].texture == nullptr) {
+                    continue;
+                }
+
+                trueResolveAttachments[i].CopyFromTemporaryToAttachment(commandContext);
+            }
             return {};
         }
-
-        if (@available(macOS 10.12, *)) {
-            // MTLFunctionConstantValues can only be created within the if available branch
-            NSRef<MTLFunctionConstantValues> constantValues =
-                AcquireNSRef([MTLFunctionConstantValues new]);
-
-            std::unordered_set<std::string> overriddenConstants;
-
-            auto switchType = [&](EntryPointMetadata::OverridableConstant::Type dawnType,
-                                  MTLDataType* type, OverridableConstantScalar* entry,
-                                  double value = 0) {
-                switch (dawnType) {
-                    case EntryPointMetadata::OverridableConstant::Type::Boolean:
-                        *type = MTLDataTypeBool;
-                        if (entry) {
-                            entry->b = static_cast<int32_t>(value);
-                        }
-                        break;
-                    case EntryPointMetadata::OverridableConstant::Type::Float32:
-                        *type = MTLDataTypeFloat;
-                        if (entry) {
-                            entry->f32 = static_cast<float>(value);
-                        }
-                        break;
-                    case EntryPointMetadata::OverridableConstant::Type::Int32:
-                        *type = MTLDataTypeInt;
-                        if (entry) {
-                            entry->i32 = static_cast<int32_t>(value);
-                        }
-                        break;
-                    case EntryPointMetadata::OverridableConstant::Type::Uint32:
-                        *type = MTLDataTypeUInt;
-                        if (entry) {
-                            entry->u32 = static_cast<uint32_t>(value);
-                        }
-                        break;
-                    default:
-                        UNREACHABLE();
-                }
-            };
-
-            for (const auto& [name, value] : programmableStage.constants) {
-                overriddenConstants.insert(name);
-
-                // This is already validated so `name` must exist
-                const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
-
-                MTLDataType type;
-                OverridableConstantScalar entry{};
-
-                switchType(moduleConstant.type, &type, &entry, value);
-
-                [constantValues.Get() setConstantValue:&entry type:type atIndex:moduleConstant.id];
-            }
-
-            // Set shader initialized default values because MSL function_constant
-            // has no default value
-            for (const std::string& name : entryPointMetadata.initializedOverridableConstants) {
-                if (overriddenConstants.count(name) != 0) {
-                    // This constant already has overridden value
-                    continue;
-                }
-
-                // Must exist because it is validated
-                const auto& moduleConstant = entryPointMetadata.overridableConstants.at(name);
-                ASSERT(moduleConstant.isInitialized);
-                MTLDataType type;
-
-                switchType(moduleConstant.type, &type, nullptr);
-
-                [constantValues.Get() setConstantValue:&moduleConstant.defaultValue
-                                                  type:type
-                                               atIndex:moduleConstant.id];
-            }
-
-            DAWN_TRY(shaderModule->CreateFunction(
-                shaderEntryPoint, singleShaderStage, pipelineLayout, functionData,
-                constantValues.Get(), sampleMask, renderPipeline));
-        } else {
-            UNREACHABLE();
-        }
-        return {};
     }
 
-    MaybeError EncodeMetalRenderPass(Device* device,
-                                     CommandRecordingContext* commandContext,
-                                     MTLRenderPassDescriptor* mtlRenderPass,
-                                     uint32_t width,
-                                     uint32_t height,
-                                     EncodeInsideRenderPass encodeInside) {
-        // This function handles multiple workarounds. Because some cases requires multiple
-        // workarounds to happen at the same time, it handles workarounds one by one and calls
-        // itself recursively to handle the next workaround if needed.
+    // Handles the workaround for r8unorm rg8unorm mipmap rendering being broken on some
+    // devices. Render to a temporary texture instead and then copy back to the attachment.
+    if (device->IsToggleEnabled(Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture)) {
+        std::array<SavedMetalAttachment, kMaxColorAttachments> originalAttachments;
+        bool workaroundUsed = false;
 
-        // Handle Toggle AlwaysResolveIntoZeroLevelAndLayer. We must handle this before applying
-        // the store + MSAA resolve workaround, otherwise this toggle will never be handled because
-        // the resolve texture is removed when applying the store + MSAA resolve workaround.
-        if (device->IsToggleEnabled(Toggle::AlwaysResolveIntoZeroLevelAndLayer)) {
-            std::array<SavedMetalAttachment, kMaxColorAttachments> trueResolveAttachments = {};
-            bool workaroundUsed = false;
-            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-                if (mtlRenderPass.colorAttachments[i].resolveTexture == nullptr) {
-                    continue;
-                }
-
-                if (mtlRenderPass.colorAttachments[i].resolveLevel == 0 &&
-                    mtlRenderPass.colorAttachments[i].resolveSlice == 0) {
-                    continue;
-                }
-
-                DAWN_TRY_ASSIGN(
-                    trueResolveAttachments[i],
-                    PatchResolveAttachmentWithTemporary(device, mtlRenderPass.colorAttachments[i]));
-                workaroundUsed = true;
+        for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+            if (mtlRenderPass.colorAttachments[i].texture == nullptr) {
+                continue;
             }
 
-            // If we need to use a temporary resolve texture we need to copy the result of MSAA
-            // resolve back to the true resolve targets.
-            if (workaroundUsed) {
-                DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
-                                               std::move(encodeInside)));
+            if ([mtlRenderPass.colorAttachments[i].texture pixelFormat] != MTLPixelFormatR8Unorm &&
+                [mtlRenderPass.colorAttachments[i].texture pixelFormat] != MTLPixelFormatRG8Unorm) {
+                continue;
+            }
 
-                for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-                    if (trueResolveAttachments[i].texture == nullptr) {
-                        continue;
-                    }
+            if (mtlRenderPass.colorAttachments[i].level < 2) {
+                continue;
+            }
 
-                    trueResolveAttachments[i].CopyFromTemporaryToAttachment(commandContext);
-                }
-                return {};
+            DAWN_TRY_ASSIGN(originalAttachments[i], PatchAttachmentWithTemporary(
+                                                        device, mtlRenderPass.colorAttachments[i]));
+            workaroundUsed = true;
+
+            if (mtlRenderPass.colorAttachments[i].loadAction == MTLLoadActionLoad) {
+                originalAttachments[i].CopyFromAttachmentToTemporary(commandContext);
             }
         }
 
-        // Handles the workaround for r8unorm rg8unorm mipmap rendering being broken on some
-        // devices. Render to a temporary texture instead and then copy back to the attachment.
-        if (device->IsToggleEnabled(Toggle::MetalRenderR8RG8UnormSmallMipToTempTexture)) {
-            std::array<SavedMetalAttachment, kMaxColorAttachments> originalAttachments;
-            bool workaroundUsed = false;
+        if (workaroundUsed) {
+            DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
+                                           std::move(encodeInside)));
 
             for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-                if (mtlRenderPass.colorAttachments[i].texture == nullptr) {
+                if (originalAttachments[i].texture == nullptr) {
                     continue;
                 }
 
-                if ([mtlRenderPass.colorAttachments[i].texture pixelFormat] !=
-                        MTLPixelFormatR8Unorm &&
-                    [mtlRenderPass.colorAttachments[i].texture pixelFormat] !=
-                        MTLPixelFormatRG8Unorm) {
-                    continue;
-                }
-
-                if (mtlRenderPass.colorAttachments[i].level < 2) {
-                    continue;
-                }
-
-                DAWN_TRY_ASSIGN(
-                    originalAttachments[i],
-                    PatchAttachmentWithTemporary(device, mtlRenderPass.colorAttachments[i]));
-                workaroundUsed = true;
-
-                if (mtlRenderPass.colorAttachments[i].loadAction == MTLLoadActionLoad) {
-                    originalAttachments[i].CopyFromAttachmentToTemporary(commandContext);
-                }
+                originalAttachments[i].CopyFromTemporaryToAttachment(commandContext);
             }
-
-            if (workaroundUsed) {
-                DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
-                                               std::move(encodeInside)));
-
-                for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-                    if (originalAttachments[i].texture == nullptr) {
-                        continue;
-                    }
-
-                    originalAttachments[i].CopyFromTemporaryToAttachment(commandContext);
-                }
-                return {};
-            }
+            return {};
         }
-
-        // Handle Store + MSAA resolve workaround (Toggle EmulateStoreAndMSAAResolve).
-        // Done after the workarounds that modify the non-resolve attachments so that
-        // ResolveInAnotherRenderPass uses the temporary attachments if needed instead of the
-        // original ones.
-        if (device->IsToggleEnabled(Toggle::EmulateStoreAndMSAAResolve)) {
-            bool hasStoreAndMSAAResolve = false;
-
-            // Remove any store + MSAA resolve and remember them.
-            std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
-            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-                if (mtlRenderPass.colorAttachments[i].storeAction ==
-                    kMTLStoreActionStoreAndMultisampleResolve) {
-                    hasStoreAndMSAAResolve = true;
-                    resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
-
-                    mtlRenderPass.colorAttachments[i].storeAction = MTLStoreActionStore;
-                    mtlRenderPass.colorAttachments[i].resolveTexture = nullptr;
-                }
-            }
-
-            // If we found a store + MSAA resolve we need to resolve in a different render pass.
-            if (hasStoreAndMSAAResolve) {
-                DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
-                                               std::move(encodeInside)));
-
-                ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
-                return {};
-            }
-        }
-
-        // No (more) workarounds needed! We can finally encode the actual render pass.
-        commandContext->EndBlit();
-        DAWN_TRY(encodeInside(commandContext->BeginRender(mtlRenderPass)));
-        commandContext->EndRender();
-        return {};
     }
 
-    MaybeError EncodeEmptyMetalRenderPass(Device* device,
-                                          CommandRecordingContext* commandContext,
-                                          MTLRenderPassDescriptor* mtlRenderPass,
-                                          Extent3D size) {
-        return EncodeMetalRenderPass(device, commandContext, mtlRenderPass, size.width, size.height,
-                                     [&](id<MTLRenderCommandEncoder>) -> MaybeError { return {}; });
+    // Handle Store + MSAA resolve workaround (Toggle EmulateStoreAndMSAAResolve).
+    // Done after the workarounds that modify the non-resolve attachments so that
+    // ResolveInAnotherRenderPass uses the temporary attachments if needed instead of the
+    // original ones.
+    if (device->IsToggleEnabled(Toggle::EmulateStoreAndMSAAResolve)) {
+        bool hasStoreAndMSAAResolve = false;
+
+        // Remove any store + MSAA resolve and remember them.
+        std::array<id<MTLTexture>, kMaxColorAttachments> resolveTextures = {};
+        for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+            if (mtlRenderPass.colorAttachments[i].storeAction ==
+                kMTLStoreActionStoreAndMultisampleResolve) {
+                hasStoreAndMSAAResolve = true;
+                resolveTextures[i] = mtlRenderPass.colorAttachments[i].resolveTexture;
+
+                mtlRenderPass.colorAttachments[i].storeAction = MTLStoreActionStore;
+                mtlRenderPass.colorAttachments[i].resolveTexture = nullptr;
+            }
+        }
+
+        // If we found a store + MSAA resolve we need to resolve in a different render pass.
+        if (hasStoreAndMSAAResolve) {
+            DAWN_TRY(EncodeMetalRenderPass(device, commandContext, mtlRenderPass, width, height,
+                                           std::move(encodeInside)));
+
+            ResolveInAnotherRenderPass(commandContext, mtlRenderPass, resolveTextures);
+            return {};
+        }
     }
 
+    // No (more) workarounds needed! We can finally encode the actual render pass.
+    commandContext->EndBlit();
+    DAWN_TRY(encodeInside(commandContext->BeginRender(mtlRenderPass)));
+    commandContext->EndRender();
+    return {};
+}
+
+MaybeError EncodeEmptyMetalRenderPass(Device* device,
+                                      CommandRecordingContext* commandContext,
+                                      MTLRenderPassDescriptor* mtlRenderPass,
+                                      Extent3D size) {
+    return EncodeMetalRenderPass(device, commandContext, mtlRenderPass, size.width, size.height,
+                                 [&](id<MTLRenderCommandEncoder>) -> MaybeError { return {}; });
+}
+
 }  // namespace dawn::native::metal
diff --git a/src/dawn/native/null/DeviceNull.cpp b/src/dawn/native/null/DeviceNull.cpp
index 9ac02bb..bcd9479 100644
--- a/src/dawn/native/null/DeviceNull.cpp
+++ b/src/dawn/native/null/DeviceNull.cpp
@@ -25,499 +25,483 @@
 
 namespace dawn::native::null {
 
-    // Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
+// Implementation of pre-Device objects: the null adapter, null backend connection and Connect()
 
-    Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
-        mVendorId = 0;
-        mDeviceId = 0;
-        mName = "Null backend";
-        mAdapterType = wgpu::AdapterType::CPU;
-        MaybeError err = Initialize();
-        ASSERT(err.IsSuccess());
+Adapter::Adapter(InstanceBase* instance) : AdapterBase(instance, wgpu::BackendType::Null) {
+    mVendorId = 0;
+    mDeviceId = 0;
+    mName = "Null backend";
+    mAdapterType = wgpu::AdapterType::CPU;
+    MaybeError err = Initialize();
+    ASSERT(err.IsSuccess());
+}
+
+Adapter::~Adapter() = default;
+
+bool Adapter::SupportsExternalImages() const {
+    return false;
+}
+
+// Used for the tests that intend to use an adapter without all features enabled.
+void Adapter::SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures) {
+    mSupportedFeatures = {};
+    for (wgpu::FeatureName f : requiredFeatures) {
+        mSupportedFeatures.EnableFeature(f);
+    }
+}
+
+MaybeError Adapter::InitializeImpl() {
+    return {};
+}
+
+MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+    // Enable all features by default for the convenience of tests.
+    mSupportedFeatures.featuresBitSet.set();
+    return {};
+}
+
+MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+    GetDefaultLimits(&limits->v1);
+    return {};
+}
+
+ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+    return Device::Create(this, descriptor);
+}
+
+class Backend : public BackendConnection {
+  public:
+    explicit Backend(InstanceBase* instance)
+        : BackendConnection(instance, wgpu::BackendType::Null) {}
+
+    std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override {
+        // There is always a single Null adapter because it is purely CPU based and doesn't
+        // depend on the system.
+        std::vector<Ref<AdapterBase>> adapters;
+        Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance()));
+        adapters.push_back(std::move(adapter));
+        return adapters;
+    }
+};
+
+BackendConnection* Connect(InstanceBase* instance) {
+    return new Backend(instance);
+}
+
+struct CopyFromStagingToBufferOperation : PendingOperation {
+    virtual void Execute() {
+        destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
     }
 
-    Adapter::~Adapter() = default;
+    StagingBufferBase* staging;
+    Ref<Buffer> destination;
+    uint64_t sourceOffset;
+    uint64_t destinationOffset;
+    uint64_t size;
+};
 
-    bool Adapter::SupportsExternalImages() const {
-        return false;
+// Device
+
+// static
+ResultOrError<Ref<Device>> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
+    Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+    DAWN_TRY(device->Initialize(descriptor));
+    return device;
+}
+
+Device::~Device() {
+    Destroy();
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+    return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+    const BindGroupDescriptor* descriptor) {
+    return AcquireRef(new BindGroup(this, descriptor));
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+    DAWN_TRY(IncrementMemoryUsage(descriptor->size));
+    return AcquireRef(new Buffer(this, descriptor));
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+    CommandEncoder* encoder,
+    const CommandBufferDescriptor* descriptor) {
+    return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+    const ComputePipelineDescriptor* descriptor) {
+    return AcquireRef(new ComputePipeline(this, descriptor));
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+    const PipelineLayoutDescriptor* descriptor) {
+    return AcquireRef(new PipelineLayout(this, descriptor));
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+    return AcquireRef(new QuerySet(this, descriptor));
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+    const RenderPipelineDescriptor* descriptor) {
+    return AcquireRef(new RenderPipeline(this, descriptor));
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+    return AcquireRef(new Sampler(this, descriptor));
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+    const ShaderModuleDescriptor* descriptor,
+    ShaderModuleParseResult* parseResult) {
+    Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
+    DAWN_TRY(module->Initialize(parseResult));
+    return module;
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+    const SwapChainDescriptor* descriptor) {
+    return AcquireRef(new OldSwapChain(this, descriptor));
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+    Surface* surface,
+    NewSwapChainBase* previousSwapChain,
+    const SwapChainDescriptor* descriptor) {
+    return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+    return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+    TextureBase* texture,
+    const TextureViewDescriptor* descriptor) {
+    return AcquireRef(new TextureView(texture, descriptor));
+}
+
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+    std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+    DAWN_TRY(stagingBuffer->Initialize());
+    return std::move(stagingBuffer);
+}
+
+void Device::DestroyImpl() {
+    ASSERT(GetState() == State::Disconnected);
+
+    // Clear pending operations before checking mMemoryUsage because some operations keep a
+    // reference to Buffers.
+    mPendingOperations.clear();
+    ASSERT(mMemoryUsage == 0);
+}
+
+MaybeError Device::WaitForIdleForDestruction() {
+    mPendingOperations.clear();
+    return {};
+}
+
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) {
+    if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+        destination->SetIsDataInitialized();
     }
 
-    // Used for the tests that intend to use an adapter without all features enabled.
-    void Adapter::SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures) {
-        mSupportedFeatures = {};
-        for (wgpu::FeatureName f : requiredFeatures) {
-            mSupportedFeatures.EnableFeature(f);
+    auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
+    operation->staging = source;
+    operation->destination = ToBackend(destination);
+    operation->sourceOffset = sourceOffset;
+    operation->destinationOffset = destinationOffset;
+    operation->size = size;
+
+    AddPendingOperation(std::move(operation));
+
+    return {};
+}
+
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) {
+    return {};
+}
+
+MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
+    static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max());
+    if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
+    }
+    mMemoryUsage += bytes;
+    return {};
+}
+
+void Device::DecrementMemoryUsage(uint64_t bytes) {
+    ASSERT(mMemoryUsage >= bytes);
+    mMemoryUsage -= bytes;
+}
+
+MaybeError Device::TickImpl() {
+    return SubmitPendingOperations();
+}
+
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+    return GetLastSubmittedCommandSerial();
+}
+
+void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
+    mPendingOperations.emplace_back(std::move(operation));
+}
+
+MaybeError Device::SubmitPendingOperations() {
+    for (auto& operation : mPendingOperations) {
+        operation->Execute();
+    }
+    mPendingOperations.clear();
+
+    DAWN_TRY(CheckPassedSerials());
+    IncrementLastSubmittedCommandSerial();
+
+    return {};
+}
+
+// BindGroupDataHolder
+
+BindGroupDataHolder::BindGroupDataHolder(size_t size)
+    : mBindingDataAllocation(malloc(size))  // malloc is guaranteed to return a
+                                            // pointer aligned enough for the allocation
+{}
+
+BindGroupDataHolder::~BindGroupDataHolder() {
+    free(mBindingDataAllocation);
+}
+
+// BindGroup
+
+BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
+    : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
+      BindGroupBase(device, descriptor, mBindingDataAllocation) {}
+
+// BindGroupLayout
+
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                 const BindGroupLayoutDescriptor* descriptor,
+                                 PipelineCompatibilityToken pipelineCompatibilityToken)
+    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {}
+
+// Buffer
+
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+    : BufferBase(device, descriptor) {
+    mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
+    mAllocatedSize = GetSize();
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+    // Only return true for mappable buffers so we can test cases that need / don't need a
+    // staging buffer.
+    return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+    return {};
+}
+
+void Buffer::CopyFromStaging(StagingBufferBase* staging,
+                             uint64_t sourceOffset,
+                             uint64_t destinationOffset,
+                             uint64_t size) {
+    uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
+    memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
+}
+
+void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
+    ASSERT(bufferOffset + size <= GetSize());
+    ASSERT(mBackingData);
+    memcpy(mBackingData.get() + bufferOffset, data, size);
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+    return {};
+}
+
+void* Buffer::GetMappedPointerImpl() {
+    return mBackingData.get();
+}
+
+void Buffer::UnmapImpl() {}
+
+void Buffer::DestroyImpl() {
+    BufferBase::DestroyImpl();
+    ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
+}
+
+// CommandBuffer
+
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+    : CommandBufferBase(encoder, descriptor) {}
+
+// QuerySet
+
+QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+    : QuerySetBase(device, descriptor) {}
+
+// Queue
+
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
+
+Queue::~Queue() {}
+
+MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
+    Device* device = ToBackend(GetDevice());
+
+    // The Vulkan, D3D12 and Metal implementation all tick the device here,
+    // for testing purposes we should also tick in the null implementation.
+    DAWN_TRY(device->Tick());
+
+    return device->SubmitPendingOperations();
+}
+
+MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+                                  uint64_t bufferOffset,
+                                  const void* data,
+                                  size_t size) {
+    ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
+    return {};
+}
+
+// ComputePipeline
+MaybeError ComputePipeline::Initialize() {
+    return {};
+}
+
+// RenderPipeline
+MaybeError RenderPipeline::Initialize() {
+    return {};
+}
+
+// SwapChain
+
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor) {
+    Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+    DAWN_TRY(swapchain->Initialize(previousSwapChain));
+    return swapchain;
+}
+
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+    if (previousSwapChain != nullptr) {
+        // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+        // multiple backends one after the other. It probably needs to block until the backend
+        // and GPU are completely finished with the previous swapchain.
+        if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
+            return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
         }
     }
 
-    MaybeError Adapter::InitializeImpl() {
-        return {};
-    }
+    return {};
+}
 
-    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
-        // Enable all features by default for the convenience of tests.
-        mSupportedFeatures.featuresBitSet.set();
-        return {};
-    }
+SwapChain::~SwapChain() = default;
 
-    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
-        GetDefaultLimits(&limits->v1);
-        return {};
-    }
+MaybeError SwapChain::PresentImpl() {
+    mTexture->APIDestroy();
+    mTexture = nullptr;
+    return {};
+}
 
-    ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
-        return Device::Create(this, descriptor);
-    }
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+    TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
+    mTexture = AcquireRef(
+        new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
+    return mTexture->CreateView();
+}
 
-    class Backend : public BackendConnection {
-      public:
-        explicit Backend(InstanceBase* instance)
-            : BackendConnection(instance, wgpu::BackendType::Null) {
-        }
-
-        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override {
-            // There is always a single Null adapter because it is purely CPU based and doesn't
-            // depend on the system.
-            std::vector<Ref<AdapterBase>> adapters;
-            Ref<Adapter> adapter = AcquireRef(new Adapter(GetInstance()));
-            adapters.push_back(std::move(adapter));
-            return adapters;
-        }
-    };
-
-    BackendConnection* Connect(InstanceBase* instance) {
-        return new Backend(instance);
-    }
-
-    struct CopyFromStagingToBufferOperation : PendingOperation {
-        virtual void Execute() {
-            destination->CopyFromStaging(staging, sourceOffset, destinationOffset, size);
-        }
-
-        StagingBufferBase* staging;
-        Ref<Buffer> destination;
-        uint64_t sourceOffset;
-        uint64_t destinationOffset;
-        uint64_t size;
-    };
-
-    // Device
-
-    // static
-    ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
-                                              const DeviceDescriptor* descriptor) {
-        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
-        DAWN_TRY(device->Initialize(descriptor));
-        return device;
-    }
-
-    Device::~Device() {
-        Destroy();
-    }
-
-    MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
-        return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
-    }
-
-    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
-        const BindGroupDescriptor* descriptor) {
-        return AcquireRef(new BindGroup(this, descriptor));
-    }
-    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
-    }
-    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
-        DAWN_TRY(IncrementMemoryUsage(descriptor->size));
-        return AcquireRef(new Buffer(this, descriptor));
-    }
-    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
-        CommandEncoder* encoder,
-        const CommandBufferDescriptor* descriptor) {
-        return AcquireRef(new CommandBuffer(encoder, descriptor));
-    }
-    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
-        const ComputePipelineDescriptor* descriptor) {
-        return AcquireRef(new ComputePipeline(this, descriptor));
-    }
-    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
-        const PipelineLayoutDescriptor* descriptor) {
-        return AcquireRef(new PipelineLayout(this, descriptor));
-    }
-    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
-        const QuerySetDescriptor* descriptor) {
-        return AcquireRef(new QuerySet(this, descriptor));
-    }
-    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
-        const RenderPipelineDescriptor* descriptor) {
-        return AcquireRef(new RenderPipeline(this, descriptor));
-    }
-    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
-        return AcquireRef(new Sampler(this, descriptor));
-    }
-    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
-        const ShaderModuleDescriptor* descriptor,
-        ShaderModuleParseResult* parseResult) {
-        Ref<ShaderModule> module = AcquireRef(new ShaderModule(this, descriptor));
-        DAWN_TRY(module->Initialize(parseResult));
-        return module;
-    }
-    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
-        const SwapChainDescriptor* descriptor) {
-        return AcquireRef(new OldSwapChain(this, descriptor));
-    }
-    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
-        Surface* surface,
-        NewSwapChainBase* previousSwapChain,
-        const SwapChainDescriptor* descriptor) {
-        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
-    }
-    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
-        return AcquireRef(new Texture(this, descriptor, TextureBase::TextureState::OwnedInternal));
-    }
-    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
-        TextureBase* texture,
-        const TextureViewDescriptor* descriptor) {
-        return AcquireRef(new TextureView(texture, descriptor));
-    }
-
-    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
-        std::unique_ptr<StagingBufferBase> stagingBuffer =
-            std::make_unique<StagingBuffer>(size, this);
-        DAWN_TRY(stagingBuffer->Initialize());
-        return std::move(stagingBuffer);
-    }
-
-    void Device::DestroyImpl() {
-        ASSERT(GetState() == State::Disconnected);
-
-        // Clear pending operations before checking mMemoryUsage because some operations keep a
-        // reference to Buffers.
-        mPendingOperations.clear();
-        ASSERT(mMemoryUsage == 0);
-    }
-
-    MaybeError Device::WaitForIdleForDestruction() {
-        mPendingOperations.clear();
-        return {};
-    }
-
-    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
-                                               uint64_t sourceOffset,
-                                               BufferBase* destination,
-                                               uint64_t destinationOffset,
-                                               uint64_t size) {
-        if (IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-            destination->SetIsDataInitialized();
-        }
-
-        auto operation = std::make_unique<CopyFromStagingToBufferOperation>();
-        operation->staging = source;
-        operation->destination = ToBackend(destination);
-        operation->sourceOffset = sourceOffset;
-        operation->destinationOffset = destinationOffset;
-        operation->size = size;
-
-        AddPendingOperation(std::move(operation));
-
-        return {};
-    }
-
-    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
-                                                const TextureDataLayout& src,
-                                                TextureCopy* dst,
-                                                const Extent3D& copySizePixels) {
-        return {};
-    }
-
-    MaybeError Device::IncrementMemoryUsage(uint64_t bytes) {
-        static_assert(kMaxMemoryUsage <= std::numeric_limits<size_t>::max());
-        if (bytes > kMaxMemoryUsage || mMemoryUsage > kMaxMemoryUsage - bytes) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Out of memory.");
-        }
-        mMemoryUsage += bytes;
-        return {};
-    }
-
-    void Device::DecrementMemoryUsage(uint64_t bytes) {
-        ASSERT(mMemoryUsage >= bytes);
-        mMemoryUsage -= bytes;
-    }
-
-    MaybeError Device::TickImpl() {
-        return SubmitPendingOperations();
-    }
-
-    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
-        return GetLastSubmittedCommandSerial();
-    }
-
-    void Device::AddPendingOperation(std::unique_ptr<PendingOperation> operation) {
-        mPendingOperations.emplace_back(std::move(operation));
-    }
-
-    MaybeError Device::SubmitPendingOperations() {
-        for (auto& operation : mPendingOperations) {
-            operation->Execute();
-        }
-        mPendingOperations.clear();
-
-        DAWN_TRY(CheckPassedSerials());
-        IncrementLastSubmittedCommandSerial();
-
-        return {};
-    }
-
-    // BindGroupDataHolder
-
-    BindGroupDataHolder::BindGroupDataHolder(size_t size)
-        : mBindingDataAllocation(malloc(size))  // malloc is guaranteed to return a
-                                                // pointer aligned enough for the allocation
-    {
-    }
-
-    BindGroupDataHolder::~BindGroupDataHolder() {
-        free(mBindingDataAllocation);
-    }
-
-    // BindGroup
-
-    BindGroup::BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor)
-        : BindGroupDataHolder(descriptor->layout->GetBindingDataSize()),
-          BindGroupBase(device, descriptor, mBindingDataAllocation) {
-    }
-
-    // BindGroupLayout
-
-    BindGroupLayout::BindGroupLayout(DeviceBase* device,
-                                     const BindGroupLayoutDescriptor* descriptor,
-                                     PipelineCompatibilityToken pipelineCompatibilityToken)
-        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken) {
-    }
-
-    // Buffer
-
-    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
-        : BufferBase(device, descriptor) {
-        mBackingData = std::unique_ptr<uint8_t[]>(new uint8_t[GetSize()]);
-        mAllocatedSize = GetSize();
-    }
-
-    bool Buffer::IsCPUWritableAtCreation() const {
-        // Only return true for mappable buffers so we can test cases that need / don't need a
-        // staging buffer.
-        return (GetUsage() & (wgpu::BufferUsage::MapRead | wgpu::BufferUsage::MapWrite)) != 0;
-    }
-
-    MaybeError Buffer::MapAtCreationImpl() {
-        return {};
-    }
-
-    void Buffer::CopyFromStaging(StagingBufferBase* staging,
-                                 uint64_t sourceOffset,
-                                 uint64_t destinationOffset,
-                                 uint64_t size) {
-        uint8_t* ptr = reinterpret_cast<uint8_t*>(staging->GetMappedPointer());
-        memcpy(mBackingData.get() + destinationOffset, ptr + sourceOffset, size);
-    }
-
-    void Buffer::DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size) {
-        ASSERT(bufferOffset + size <= GetSize());
-        ASSERT(mBackingData);
-        memcpy(mBackingData.get() + bufferOffset, data, size);
-    }
-
-    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
-        return {};
-    }
-
-    void* Buffer::GetMappedPointerImpl() {
-        return mBackingData.get();
-    }
-
-    void Buffer::UnmapImpl() {
-    }
-
-    void Buffer::DestroyImpl() {
-        BufferBase::DestroyImpl();
-        ToBackend(GetDevice())->DecrementMemoryUsage(GetSize());
-    }
-
-    // CommandBuffer
-
-    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
-        : CommandBufferBase(encoder, descriptor) {
-    }
-
-    // QuerySet
-
-    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
-        : QuerySetBase(device, descriptor) {
-    }
-
-    // Queue
-
-    Queue::Queue(Device* device, const QueueDescriptor* descriptor)
-        : QueueBase(device, descriptor) {
-    }
-
-    Queue::~Queue() {
-    }
-
-    MaybeError Queue::SubmitImpl(uint32_t, CommandBufferBase* const*) {
-        Device* device = ToBackend(GetDevice());
-
-        // The Vulkan, D3D12 and Metal implementation all tick the device here,
-        // for testing purposes we should also tick in the null implementation.
-        DAWN_TRY(device->Tick());
-
-        return device->SubmitPendingOperations();
-    }
-
-    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
-                                      uint64_t bufferOffset,
-                                      const void* data,
-                                      size_t size) {
-        ToBackend(buffer)->DoWriteBuffer(bufferOffset, data, size);
-        return {};
-    }
-
-    // ComputePipeline
-    MaybeError ComputePipeline::Initialize() {
-        return {};
-    }
-
-    // RenderPipeline
-    MaybeError RenderPipeline::Initialize() {
-        return {};
-    }
-
-    // SwapChain
-
-    // static
-    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor) {
-        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
-        DAWN_TRY(swapchain->Initialize(previousSwapChain));
-        return swapchain;
-    }
-
-    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
-        if (previousSwapChain != nullptr) {
-            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
-            // multiple backends one after the other. It probably needs to block until the backend
-            // and GPU are completely finished with the previous swapchain.
-            if (previousSwapChain->GetBackendType() != wgpu::BackendType::Null) {
-                return DAWN_VALIDATION_ERROR("null::SwapChain cannot switch between APIs");
-            }
-        }
-
-        return {};
-    }
-
-    SwapChain::~SwapChain() = default;
-
-    MaybeError SwapChain::PresentImpl() {
+void SwapChain::DetachFromSurfaceImpl() {
+    if (mTexture != nullptr) {
         mTexture->APIDestroy();
         mTexture = nullptr;
-        return {};
     }
+}
 
-    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
-        TextureDescriptor textureDesc = GetSwapChainBaseTextureDescriptor(this);
-        mTexture = AcquireRef(
-            new Texture(GetDevice(), &textureDesc, TextureBase::TextureState::OwnedInternal));
-        return mTexture->CreateView();
+// ShaderModule
+
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+    return InitializeBase(parseResult);
+}
+
+// OldSwapChain
+
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+    : OldSwapChainBase(device, descriptor) {
+    const auto& im = GetImplementation();
+    im.Init(im.userData, nullptr);
+}
+
+OldSwapChain::~OldSwapChain() {}
+
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+    return GetDevice()->APICreateTexture(descriptor);
+}
+
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
+    return {};
+}
+
+// NativeSwapChainImpl
+
+void NativeSwapChainImpl::Init(WSIContext* context) {}
+
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                  WGPUTextureUsage,
+                                                  uint32_t width,
+                                                  uint32_t height) {
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::Present() {
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+    return wgpu::TextureFormat::RGBA8Unorm;
+}
+
+// StagingBuffer
+
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+    : StagingBufferBase(size), mDevice(device) {}
+
+StagingBuffer::~StagingBuffer() {
+    if (mBuffer) {
+        mDevice->DecrementMemoryUsage(GetSize());
     }
+}
 
-    void SwapChain::DetachFromSurfaceImpl() {
-        if (mTexture != nullptr) {
-            mTexture->APIDestroy();
-            mTexture = nullptr;
-        }
-    }
+MaybeError StagingBuffer::Initialize() {
+    DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
+    mBuffer = std::make_unique<uint8_t[]>(GetSize());
+    mMappedPointer = mBuffer.get();
+    return {};
+}
 
-    // ShaderModule
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+    return 1;
+}
 
-    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
-        return InitializeBase(parseResult);
-    }
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+    return 1;
+}
 
-    // OldSwapChain
-
-    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
-        : OldSwapChainBase(device, descriptor) {
-        const auto& im = GetImplementation();
-        im.Init(im.userData, nullptr);
-    }
-
-    OldSwapChain::~OldSwapChain() {
-    }
-
-    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
-        return GetDevice()->APICreateTexture(descriptor);
-    }
-
-    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase*) {
-        return {};
-    }
-
-    // NativeSwapChainImpl
-
-    void NativeSwapChainImpl::Init(WSIContext* context) {
-    }
-
-    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
-                                                      WGPUTextureUsage,
-                                                      uint32_t width,
-                                                      uint32_t height) {
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    DawnSwapChainError NativeSwapChainImpl::Present() {
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
-        return wgpu::TextureFormat::RGBA8Unorm;
-    }
-
-    // StagingBuffer
-
-    StagingBuffer::StagingBuffer(size_t size, Device* device)
-        : StagingBufferBase(size), mDevice(device) {
-    }
-
-    StagingBuffer::~StagingBuffer() {
-        if (mBuffer) {
-            mDevice->DecrementMemoryUsage(GetSize());
-        }
-    }
-
-    MaybeError StagingBuffer::Initialize() {
-        DAWN_TRY(mDevice->IncrementMemoryUsage(GetSize()));
-        mBuffer = std::make_unique<uint8_t[]>(GetSize());
-        mMappedPointer = mBuffer.get();
-        return {};
-    }
-
-    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
-        return 1;
-    }
-
-    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
-        return 1;
-    }
-
-    float Device::GetTimestampPeriodInNS() const {
-        return 1.0f;
-    }
+float Device::GetTimestampPeriodInNS() const {
+    return 1.0f;
+}
 
 }  // namespace dawn::native::null
diff --git a/src/dawn/native/null/DeviceNull.h b/src/dawn/native/null/DeviceNull.h
index 1a7c1d7..36003b8 100644
--- a/src/dawn/native/null/DeviceNull.h
+++ b/src/dawn/native/null/DeviceNull.h
@@ -41,302 +41,297 @@
 
 namespace dawn::native::null {
 
-    class Adapter;
-    class BindGroup;
-    class BindGroupLayout;
-    class Buffer;
-    class CommandBuffer;
-    class ComputePipeline;
-    class Device;
-    using PipelineLayout = PipelineLayoutBase;
-    class QuerySet;
-    class Queue;
-    class RenderPipeline;
-    using Sampler = SamplerBase;
-    class ShaderModule;
-    class SwapChain;
-    using Texture = TextureBase;
-    using TextureView = TextureViewBase;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+using PipelineLayout = PipelineLayoutBase;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+using Sampler = SamplerBase;
+class ShaderModule;
+class SwapChain;
+using Texture = TextureBase;
+using TextureView = TextureViewBase;
 
-    struct NullBackendTraits {
-        using AdapterType = Adapter;
-        using BindGroupType = BindGroup;
-        using BindGroupLayoutType = BindGroupLayout;
-        using BufferType = Buffer;
-        using CommandBufferType = CommandBuffer;
-        using ComputePipelineType = ComputePipeline;
-        using DeviceType = Device;
-        using PipelineLayoutType = PipelineLayout;
-        using QuerySetType = QuerySet;
-        using QueueType = Queue;
-        using RenderPipelineType = RenderPipeline;
-        using SamplerType = Sampler;
-        using ShaderModuleType = ShaderModule;
-        using SwapChainType = SwapChain;
-        using TextureType = Texture;
-        using TextureViewType = TextureView;
-    };
+struct NullBackendTraits {
+    using AdapterType = Adapter;
+    using BindGroupType = BindGroup;
+    using BindGroupLayoutType = BindGroupLayout;
+    using BufferType = Buffer;
+    using CommandBufferType = CommandBuffer;
+    using ComputePipelineType = ComputePipeline;
+    using DeviceType = Device;
+    using PipelineLayoutType = PipelineLayout;
+    using QuerySetType = QuerySet;
+    using QueueType = Queue;
+    using RenderPipelineType = RenderPipeline;
+    using SamplerType = Sampler;
+    using ShaderModuleType = ShaderModule;
+    using SwapChainType = SwapChain;
+    using TextureType = Texture;
+    using TextureViewType = TextureView;
+};
 
-    template <typename T>
-    auto ToBackend(T&& common) -> decltype(ToBackendBase<NullBackendTraits>(common)) {
-        return ToBackendBase<NullBackendTraits>(common);
-    }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<NullBackendTraits>(common)) {
+    return ToBackendBase<NullBackendTraits>(common);
+}
 
-    struct PendingOperation {
-        virtual ~PendingOperation() = default;
-        virtual void Execute() = 0;
-    };
+struct PendingOperation {
+    virtual ~PendingOperation() = default;
+    virtual void Execute() = 0;
+};
 
-    class Device final : public DeviceBase {
-      public:
-        static ResultOrError<Ref<Device>> Create(Adapter* adapter,
-                                                 const DeviceDescriptor* descriptor);
-        ~Device() override;
+class Device final : public DeviceBase {
+  public:
+    static ResultOrError<Ref<Device>> Create(Adapter* adapter, const DeviceDescriptor* descriptor);
+    ~Device() override;
 
-        MaybeError Initialize(const DeviceDescriptor* descriptor);
+    MaybeError Initialize(const DeviceDescriptor* descriptor);
 
-        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
-            CommandEncoder* encoder,
-            const CommandBufferDescriptor* descriptor) override;
+    ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) override;
 
-        MaybeError TickImpl() override;
+    MaybeError TickImpl() override;
 
-        void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
-        MaybeError SubmitPendingOperations();
+    void AddPendingOperation(std::unique_ptr<PendingOperation> operation);
+    MaybeError SubmitPendingOperations();
 
-        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
-        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
-                                           uint64_t sourceOffset,
-                                           BufferBase* destination,
-                                           uint64_t destinationOffset,
-                                           uint64_t size) override;
-        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
-                                            const TextureDataLayout& src,
-                                            TextureCopy* dst,
-                                            const Extent3D& copySizePixels) override;
+    ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+    MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                       uint64_t sourceOffset,
+                                       BufferBase* destination,
+                                       uint64_t destinationOffset,
+                                       uint64_t size) override;
+    MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                        const TextureDataLayout& src,
+                                        TextureCopy* dst,
+                                        const Extent3D& copySizePixels) override;
 
-        MaybeError IncrementMemoryUsage(uint64_t bytes);
-        void DecrementMemoryUsage(uint64_t bytes);
+    MaybeError IncrementMemoryUsage(uint64_t bytes);
+    void DecrementMemoryUsage(uint64_t bytes);
 
-        uint32_t GetOptimalBytesPerRowAlignment() const override;
-        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+    uint32_t GetOptimalBytesPerRowAlignment() const override;
+    uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
 
-        float GetTimestampPeriodInNS() const override;
+    float GetTimestampPeriodInNS() const override;
 
-      private:
-        using DeviceBase::DeviceBase;
+  private:
+    using DeviceBase::DeviceBase;
 
-        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
-            const BindGroupDescriptor* descriptor) override;
-        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken) override;
-        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-            const BufferDescriptor* descriptor) override;
-        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
-            const ComputePipelineDescriptor* descriptor) override;
-        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
-            const PipelineLayoutDescriptor* descriptor) override;
-        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
-            const QuerySetDescriptor* descriptor) override;
-        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
-            const RenderPipelineDescriptor* descriptor) override;
-        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
-            const SamplerDescriptor* descriptor) override;
-        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
-            const ShaderModuleDescriptor* descriptor,
-            ShaderModuleParseResult* parseResult) override;
-        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
-            Surface* surface,
-            NewSwapChainBase* previousSwapChain,
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
-            const TextureDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
-            TextureBase* texture,
-            const TextureViewDescriptor* descriptor) override;
+    ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) override;
+    ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) override;
+    ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+    Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) override;
+    ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) override;
+    ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) override;
+    Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) override;
+    ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) override;
+    ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) override;
 
-        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+    ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
 
-        void DestroyImpl() override;
-        MaybeError WaitForIdleForDestruction() override;
+    void DestroyImpl() override;
+    MaybeError WaitForIdleForDestruction() override;
 
-        std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
+    std::vector<std::unique_ptr<PendingOperation>> mPendingOperations;
 
-        static constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;
-        size_t mMemoryUsage = 0;
-    };
+    static constexpr uint64_t kMaxMemoryUsage = 512 * 1024 * 1024;
+    size_t mMemoryUsage = 0;
+};
 
-    class Adapter : public AdapterBase {
-      public:
-        explicit Adapter(InstanceBase* instance);
-        ~Adapter() override;
+class Adapter : public AdapterBase {
+  public:
+    explicit Adapter(InstanceBase* instance);
+    ~Adapter() override;
 
-        // AdapterBase Implementation
-        bool SupportsExternalImages() const override;
+    // AdapterBase Implementation
+    bool SupportsExternalImages() const override;
 
-        // Used for the tests that intend to use an adapter without all features enabled.
-        void SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures);
+    // Used for the tests that intend to use an adapter without all features enabled.
+    void SetSupportedFeatures(const std::vector<wgpu::FeatureName>& requiredFeatures);
 
-      private:
-        MaybeError InitializeImpl() override;
-        MaybeError InitializeSupportedFeaturesImpl() override;
-        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+  private:
+    MaybeError InitializeImpl() override;
+    MaybeError InitializeSupportedFeaturesImpl() override;
+    MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
 
-        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-            const DeviceDescriptor* descriptor) override;
-    };
+    ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
+};
 
-    // Helper class so |BindGroup| can allocate memory for its binding data,
-    // before calling the BindGroupBase base class constructor.
-    class BindGroupDataHolder {
-      protected:
-        explicit BindGroupDataHolder(size_t size);
-        ~BindGroupDataHolder();
+// Helper class so |BindGroup| can allocate memory for its binding data,
+// before calling the BindGroupBase base class constructor.
+class BindGroupDataHolder {
+  protected:
+    explicit BindGroupDataHolder(size_t size);
+    ~BindGroupDataHolder();
 
-        void* mBindingDataAllocation;
-    };
+    void* mBindingDataAllocation;
+};
 
-    // We don't have the complexity of placement-allocation of bind group data in
-    // the Null backend. This class, keeps the binding data in a separate allocation for simplicity.
-    class BindGroup final : private BindGroupDataHolder, public BindGroupBase {
-      public:
-        BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
+// We don't have the complexity of placement-allocation of bind group data in
+// the Null backend. This class keeps the binding data in a separate allocation for simplicity.
+class BindGroup final : private BindGroupDataHolder, public BindGroupBase {
+  public:
+    BindGroup(DeviceBase* device, const BindGroupDescriptor* descriptor);
 
-      private:
-        ~BindGroup() override = default;
-    };
+  private:
+    ~BindGroup() override = default;
+};
 
-    class BindGroupLayout final : public BindGroupLayoutBase {
-      public:
-        BindGroupLayout(DeviceBase* device,
-                        const BindGroupLayoutDescriptor* descriptor,
-                        PipelineCompatibilityToken pipelineCompatibilityToken);
+class BindGroupLayout final : public BindGroupLayoutBase {
+  public:
+    BindGroupLayout(DeviceBase* device,
+                    const BindGroupLayoutDescriptor* descriptor,
+                    PipelineCompatibilityToken pipelineCompatibilityToken);
 
-      private:
-        ~BindGroupLayout() override = default;
-    };
+  private:
+    ~BindGroupLayout() override = default;
+};
 
-    class Buffer final : public BufferBase {
-      public:
-        Buffer(Device* device, const BufferDescriptor* descriptor);
+class Buffer final : public BufferBase {
+  public:
+    Buffer(Device* device, const BufferDescriptor* descriptor);
 
-        void CopyFromStaging(StagingBufferBase* staging,
-                             uint64_t sourceOffset,
-                             uint64_t destinationOffset,
-                             uint64_t size);
+    void CopyFromStaging(StagingBufferBase* staging,
+                         uint64_t sourceOffset,
+                         uint64_t destinationOffset,
+                         uint64_t size);
 
-        void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
+    void DoWriteBuffer(uint64_t bufferOffset, const void* data, size_t size);
 
-      private:
-        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
-        void UnmapImpl() override;
-        void DestroyImpl() override;
-        bool IsCPUWritableAtCreation() const override;
-        MaybeError MapAtCreationImpl() override;
-        void* GetMappedPointerImpl() override;
+  private:
+    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+    void UnmapImpl() override;
+    void DestroyImpl() override;
+    bool IsCPUWritableAtCreation() const override;
+    MaybeError MapAtCreationImpl() override;
+    void* GetMappedPointerImpl() override;
 
-        std::unique_ptr<uint8_t[]> mBackingData;
-    };
+    std::unique_ptr<uint8_t[]> mBackingData;
+};
 
-    class CommandBuffer final : public CommandBufferBase {
-      public:
-        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
-    };
+class CommandBuffer final : public CommandBufferBase {
+  public:
+    CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+};
 
-    class QuerySet final : public QuerySetBase {
-      public:
-        QuerySet(Device* device, const QuerySetDescriptor* descriptor);
-    };
+class QuerySet final : public QuerySetBase {
+  public:
+    QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+};
 
-    class Queue final : public QueueBase {
-      public:
-        Queue(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+  public:
+    Queue(Device* device, const QueueDescriptor* descriptor);
 
-      private:
-        ~Queue() override;
-        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-        MaybeError WriteBufferImpl(BufferBase* buffer,
-                                   uint64_t bufferOffset,
-                                   const void* data,
-                                   size_t size) override;
-    };
+  private:
+    ~Queue() override;
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    MaybeError WriteBufferImpl(BufferBase* buffer,
+                               uint64_t bufferOffset,
+                               const void* data,
+                               size_t size) override;
+};
 
-    class ComputePipeline final : public ComputePipelineBase {
-      public:
-        using ComputePipelineBase::ComputePipelineBase;
+class ComputePipeline final : public ComputePipelineBase {
+  public:
+    using ComputePipelineBase::ComputePipelineBase;
 
-        MaybeError Initialize() override;
-    };
+    MaybeError Initialize() override;
+};
 
-    class RenderPipeline final : public RenderPipelineBase {
-      public:
-        using RenderPipelineBase::RenderPipelineBase;
+class RenderPipeline final : public RenderPipelineBase {
+  public:
+    using RenderPipelineBase::RenderPipelineBase;
 
-        MaybeError Initialize() override;
-    };
+    MaybeError Initialize() override;
+};
 
-    class ShaderModule final : public ShaderModuleBase {
-      public:
-        using ShaderModuleBase::ShaderModuleBase;
+class ShaderModule final : public ShaderModuleBase {
+  public:
+    using ShaderModuleBase::ShaderModuleBase;
 
-        MaybeError Initialize(ShaderModuleParseResult* parseResult);
-    };
+    MaybeError Initialize(ShaderModuleParseResult* parseResult);
+};
 
-    class SwapChain final : public NewSwapChainBase {
-      public:
-        static ResultOrError<Ref<SwapChain>> Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor);
-        ~SwapChain() override;
+class SwapChain final : public NewSwapChainBase {
+  public:
+    static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor);
+    ~SwapChain() override;
 
-      private:
-        using NewSwapChainBase::NewSwapChainBase;
-        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+  private:
+    using NewSwapChainBase::NewSwapChainBase;
+    MaybeError Initialize(NewSwapChainBase* previousSwapChain);
 
-        Ref<Texture> mTexture;
+    Ref<Texture> mTexture;
 
-        MaybeError PresentImpl() override;
-        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
-        void DetachFromSurfaceImpl() override;
-    };
+    MaybeError PresentImpl() override;
+    ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+    void DetachFromSurfaceImpl() override;
+};
 
-    class OldSwapChain final : public OldSwapChainBase {
-      public:
-        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+class OldSwapChain final : public OldSwapChainBase {
+  public:
+    OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
 
-      protected:
-        ~OldSwapChain() override;
-        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
-        MaybeError OnBeforePresent(TextureViewBase*) override;
-    };
+  protected:
+    ~OldSwapChain() override;
+    TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+    MaybeError OnBeforePresent(TextureViewBase*) override;
+};
 
-    class NativeSwapChainImpl {
-      public:
-        using WSIContext = struct {};
-        void Init(WSIContext* context);
-        DawnSwapChainError Configure(WGPUTextureFormat format,
-                                     WGPUTextureUsage,
-                                     uint32_t width,
-                                     uint32_t height);
-        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
-        DawnSwapChainError Present();
-        wgpu::TextureFormat GetPreferredFormat() const;
-    };
+class NativeSwapChainImpl {
+  public:
+    using WSIContext = struct {};
+    void Init(WSIContext* context);
+    DawnSwapChainError Configure(WGPUTextureFormat format,
+                                 WGPUTextureUsage,
+                                 uint32_t width,
+                                 uint32_t height);
+    DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+    DawnSwapChainError Present();
+    wgpu::TextureFormat GetPreferredFormat() const;
+};
 
-    class StagingBuffer : public StagingBufferBase {
-      public:
-        StagingBuffer(size_t size, Device* device);
-        ~StagingBuffer() override;
-        MaybeError Initialize() override;
+class StagingBuffer : public StagingBufferBase {
+  public:
+    StagingBuffer(size_t size, Device* device);
+    ~StagingBuffer() override;
+    MaybeError Initialize() override;
 
-      private:
-        Device* mDevice;
-        std::unique_ptr<uint8_t[]> mBuffer;
-    };
+  private:
+    Device* mDevice;
+    std::unique_ptr<uint8_t[]> mBuffer;
+};
 
 }  // namespace dawn::native::null
 
diff --git a/src/dawn/native/null/NullBackend.cpp b/src/dawn/native/null/NullBackend.cpp
index 43637cd..7e4ce53 100644
--- a/src/dawn/native/null/NullBackend.cpp
+++ b/src/dawn/native/null/NullBackend.cpp
@@ -22,11 +22,11 @@
 
 namespace dawn::native::null {
 
-    DawnSwapChainImplementation CreateNativeSwapChainImpl() {
-        DawnSwapChainImplementation impl;
-        impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
-        impl.textureUsage = WGPUTextureUsage_Present;
-        return impl;
-    }
+DawnSwapChainImplementation CreateNativeSwapChainImpl() {
+    DawnSwapChainImplementation impl;
+    impl = CreateSwapChainImplementation(new NativeSwapChainImpl());
+    impl.textureUsage = WGPUTextureUsage_Present;
+    return impl;
+}
 
 }  // namespace dawn::native::null
diff --git a/src/dawn/native/opengl/BackendGL.cpp b/src/dawn/native/opengl/BackendGL.cpp
index b440bba..3164eda 100644
--- a/src/dawn/native/opengl/BackendGL.cpp
+++ b/src/dawn/native/opengl/BackendGL.cpp
@@ -26,283 +26,278 @@
 
 namespace dawn::native::opengl {
 
-    namespace {
+namespace {
 
-        struct Vendor {
-            const char* vendorName;
-            uint32_t vendorId;
-        };
+struct Vendor {
+    const char* vendorName;
+    uint32_t vendorId;
+};
 
-        const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
-                                   {"ARM", gpu_info::kVendorID_ARM},
-                                   {"Imagination", gpu_info::kVendorID_ImgTec},
-                                   {"Intel", gpu_info::kVendorID_Intel},
-                                   {"NVIDIA", gpu_info::kVendorID_Nvidia},
-                                   {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
+const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD},
+                           {"ARM", gpu_info::kVendorID_ARM},
+                           {"Imagination", gpu_info::kVendorID_ImgTec},
+                           {"Intel", gpu_info::kVendorID_Intel},
+                           {"NVIDIA", gpu_info::kVendorID_Nvidia},
+                           {"Qualcomm", gpu_info::kVendorID_Qualcomm}};
 
-        uint32_t GetVendorIdFromVendors(const char* vendor) {
-            uint32_t vendorId = 0;
-            for (const auto& it : kVendors) {
-                // Matching vendor name with vendor string
-                if (strstr(vendor, it.vendorName) != nullptr) {
-                    vendorId = it.vendorId;
-                    break;
-                }
-            }
-            return vendorId;
+uint32_t GetVendorIdFromVendors(const char* vendor) {
+    uint32_t vendorId = 0;
+    for (const auto& it : kVendors) {
+        // Matching vendor name with vendor string
+        if (strstr(vendor, it.vendorName) != nullptr) {
+            vendorId = it.vendorId;
+            break;
         }
+    }
+    return vendorId;
+}
 
-        void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
-                                               GLenum type,
-                                               GLuint id,
-                                               GLenum severity,
-                                               GLsizei length,
-                                               const GLchar* message,
-                                               const void* userParam) {
-            const char* sourceText;
-            switch (source) {
-                case GL_DEBUG_SOURCE_API:
-                    sourceText = "OpenGL";
-                    break;
-                case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
-                    sourceText = "Window System";
-                    break;
-                case GL_DEBUG_SOURCE_SHADER_COMPILER:
-                    sourceText = "Shader Compiler";
-                    break;
-                case GL_DEBUG_SOURCE_THIRD_PARTY:
-                    sourceText = "Third Party";
-                    break;
-                case GL_DEBUG_SOURCE_APPLICATION:
-                    sourceText = "Application";
-                    break;
-                case GL_DEBUG_SOURCE_OTHER:
-                    sourceText = "Other";
-                    break;
-                default:
-                    sourceText = "UNKNOWN";
-                    break;
-            }
-
-            const char* severityText;
-            switch (severity) {
-                case GL_DEBUG_SEVERITY_HIGH:
-                    severityText = "High";
-                    break;
-                case GL_DEBUG_SEVERITY_MEDIUM:
-                    severityText = "Medium";
-                    break;
-                case GL_DEBUG_SEVERITY_LOW:
-                    severityText = "Low";
-                    break;
-                case GL_DEBUG_SEVERITY_NOTIFICATION:
-                    severityText = "Notification";
-                    break;
-                default:
-                    severityText = "UNKNOWN";
-                    break;
-            }
-
-            if (type == GL_DEBUG_TYPE_ERROR) {
-                dawn::WarningLog() << "OpenGL error:"
-                                   << "\n    Source: " << sourceText      //
-                                   << "\n    ID: " << id                  //
-                                   << "\n    Severity: " << severityText  //
-                                   << "\n    Message: " << message;
-
-                // Abort on an error when in Debug mode.
-                UNREACHABLE();
-            }
-        }
-
-    }  // anonymous namespace
-
-    // The OpenGL backend's Adapter.
-
-    class Adapter : public AdapterBase {
-      public:
-        Adapter(InstanceBase* instance, wgpu::BackendType backendType)
-            : AdapterBase(instance, backendType) {
-        }
-
-        MaybeError InitializeGLFunctions(void* (*getProc)(const char*)) {
-            // Use getProc to populate the dispatch table
-            return mFunctions.Initialize(getProc);
-        }
-
-        ~Adapter() override = default;
-
-        // AdapterBase Implementation
-        bool SupportsExternalImages() const override {
-            // Via dawn::native::opengl::WrapExternalEGLImage
-            return GetBackendType() == wgpu::BackendType::OpenGLES;
-        }
-
-      private:
-        MaybeError InitializeImpl() override {
-            if (mFunctions.GetVersion().IsES()) {
-                ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
-            } else {
-                ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
-            }
-
-            // Use the debug output functionality to get notified about GL errors
-            // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
-            // extensions
-            bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);
-
-            if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
-                mFunctions.Enable(GL_DEBUG_OUTPUT);
-                mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
-
-                // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
-                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH,
-                                               0, nullptr, GL_TRUE);
-
-                // Severe performance warnings; GLSL or other shader compiler and linker warnings;
-                // use of currently deprecated behavior
-                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM,
-                                               0, nullptr, GL_TRUE);
-
-                // Performance warnings from redundant state changes; trivial undefined behavior
-                // This is disabled because we do an incredible amount of redundant state changes.
-                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
-                                               nullptr, GL_FALSE);
-
-                // Any message which is not an error or performance concern
-                mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,
-                                               GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr,
-                                               GL_FALSE);
-                mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
-            }
-
-            // Set state that never changes between devices.
-            mFunctions.Enable(GL_DEPTH_TEST);
-            mFunctions.Enable(GL_SCISSOR_TEST);
-            mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
-            if (mFunctions.GetVersion().IsDesktop()) {
-                // These are not necessary on GLES. The functionality is enabled by default, and
-                // works by specifying sample counts and SRGB textures, respectively.
-                mFunctions.Enable(GL_MULTISAMPLE);
-                mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
-            }
-            mFunctions.Enable(GL_SAMPLE_MASK);
-
-            mName = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
-
-            // Workaroud to find vendor id from vendor name
-            const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
-            mVendorId = GetVendorIdFromVendors(vendor);
-
-            mDriverDescription = std::string("OpenGL version ") +
-                                 reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));
-
-            if (mName.find("SwiftShader") != std::string::npos) {
-                mAdapterType = wgpu::AdapterType::CPU;
-            }
-
-            return {};
-        }
-
-        MaybeError InitializeSupportedFeaturesImpl() override {
-            // TextureCompressionBC
-            {
-                // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
-                bool supportsS3TC =
-                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
-                    (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
-                     mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
-                     mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));
-
-                // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
-                // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
-                // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
-                // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
-                bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");
-
-                // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
-                // NVidia GLES drivers don't support this extension, but they do support
-                // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
-                // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
-                // SRGB support even if S3TC is supported; see
-                // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
-                bool supportsS3TCSRGB =
-                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
-                    mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");
-
-                // BC4 and BC5
-                bool supportsRGTC =
-                    mFunctions.IsAtLeastGL(3, 0) ||
-                    mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
-                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");
-
-                // BC6 and BC7
-                bool supportsBPTC =
-                    mFunctions.IsAtLeastGL(4, 2) ||
-                    mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
-                    mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");
-
-                if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
-                    supportsBPTC) {
-                    mSupportedFeatures.EnableFeature(dawn::native::Feature::TextureCompressionBC);
-                }
-                mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
-            }
-
-            return {};
-        }
-
-        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
-            GetDefaultLimits(&limits->v1);
-            return {};
-        }
-
-        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-            const DeviceDescriptor* descriptor) override {
-            // There is no limit on the number of devices created from this adapter because they can
-            // all share the same backing OpenGL context.
-            return Device::Create(this, descriptor, mFunctions);
-        }
-
-        OpenGLFunctions mFunctions;
-    };
-
-    // Implementation of the OpenGL backend's BackendConnection
-
-    Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
-        : BackendConnection(instance, backendType) {
+void KHRONOS_APIENTRY OnGLDebugMessage(GLenum source,
+                                       GLenum type,
+                                       GLuint id,
+                                       GLenum severity,
+                                       GLsizei length,
+                                       const GLchar* message,
+                                       const void* userParam) {
+    const char* sourceText;
+    switch (source) {
+        case GL_DEBUG_SOURCE_API:
+            sourceText = "OpenGL";
+            break;
+        case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
+            sourceText = "Window System";
+            break;
+        case GL_DEBUG_SOURCE_SHADER_COMPILER:
+            sourceText = "Shader Compiler";
+            break;
+        case GL_DEBUG_SOURCE_THIRD_PARTY:
+            sourceText = "Third Party";
+            break;
+        case GL_DEBUG_SOURCE_APPLICATION:
+            sourceText = "Application";
+            break;
+        case GL_DEBUG_SOURCE_OTHER:
+            sourceText = "Other";
+            break;
+        default:
+            sourceText = "UNKNOWN";
+            break;
     }
 
-    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
-        // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
+    const char* severityText;
+    switch (severity) {
+        case GL_DEBUG_SEVERITY_HIGH:
+            severityText = "High";
+            break;
+        case GL_DEBUG_SEVERITY_MEDIUM:
+            severityText = "Medium";
+            break;
+        case GL_DEBUG_SEVERITY_LOW:
+            severityText = "Low";
+            break;
+        case GL_DEBUG_SEVERITY_NOTIFICATION:
+            severityText = "Notification";
+            break;
+        default:
+            severityText = "UNKNOWN";
+            break;
+    }
+
+    if (type == GL_DEBUG_TYPE_ERROR) {
+        dawn::WarningLog() << "OpenGL error:"
+                           << "\n    Source: " << sourceText      //
+                           << "\n    ID: " << id                  //
+                           << "\n    Severity: " << severityText  //
+                           << "\n    Message: " << message;
+
+        // Abort on an error when in Debug mode.
+        UNREACHABLE();
+    }
+}
+
+}  // anonymous namespace
+
+// The OpenGL backend's Adapter.
+
+class Adapter : public AdapterBase {
+  public:
+    Adapter(InstanceBase* instance, wgpu::BackendType backendType)
+        : AdapterBase(instance, backendType) {}
+
+    MaybeError InitializeGLFunctions(void* (*getProc)(const char*)) {
+        // Use getProc to populate the dispatch table
+        return mFunctions.Initialize(getProc);
+    }
+
+    ~Adapter() override = default;
+
+    // AdapterBase Implementation
+    bool SupportsExternalImages() const override {
+        // Via dawn::native::opengl::WrapExternalEGLImage
+        return GetBackendType() == wgpu::BackendType::OpenGLES;
+    }
+
+  private:
+    MaybeError InitializeImpl() override {
+        if (mFunctions.GetVersion().IsES()) {
+            ASSERT(GetBackendType() == wgpu::BackendType::OpenGLES);
+        } else {
+            ASSERT(GetBackendType() == wgpu::BackendType::OpenGL);
+        }
+
+        // Use the debug output functionality to get notified about GL errors
+        // TODO(cwallez@chromium.org): add support for the KHR_debug and ARB_debug_output
+        // extensions
+        bool hasDebugOutput = mFunctions.IsAtLeastGL(4, 3) || mFunctions.IsAtLeastGLES(3, 2);
+
+        if (GetInstance()->IsBackendValidationEnabled() && hasDebugOutput) {
+            mFunctions.Enable(GL_DEBUG_OUTPUT);
+            mFunctions.Enable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
+
+            // Any GL error; dangerous undefined behavior; any shader compiler and linker errors
+            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_HIGH, 0,
+                                           nullptr, GL_TRUE);
+
+            // Severe performance warnings; GLSL or other shader compiler and linker warnings;
+            // use of currently deprecated behavior
+            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_MEDIUM, 0,
+                                           nullptr, GL_TRUE);
+
+            // Performance warnings from redundant state changes; trivial undefined behavior
+            // This is disabled because we do an incredible amount of redundant state changes.
+            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DEBUG_SEVERITY_LOW, 0,
+                                           nullptr, GL_FALSE);
+
+            // Any message which is not an error or performance concern
+            mFunctions.DebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,
+                                           GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr, GL_FALSE);
+            mFunctions.DebugMessageCallback(&OnGLDebugMessage, nullptr);
+        }
+
+        // Set state that never changes between devices.
+        mFunctions.Enable(GL_DEPTH_TEST);
+        mFunctions.Enable(GL_SCISSOR_TEST);
+        mFunctions.Enable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
+        if (mFunctions.GetVersion().IsDesktop()) {
+            // These are not necessary on GLES. The functionality is enabled by default, and
+            // works by specifying sample counts and SRGB textures, respectively.
+            mFunctions.Enable(GL_MULTISAMPLE);
+            mFunctions.Enable(GL_FRAMEBUFFER_SRGB);
+        }
+        mFunctions.Enable(GL_SAMPLE_MASK);
+
+        mName = reinterpret_cast<const char*>(mFunctions.GetString(GL_RENDERER));
+
+        // Workaround to find vendor id from vendor name
+        const char* vendor = reinterpret_cast<const char*>(mFunctions.GetString(GL_VENDOR));
+        mVendorId = GetVendorIdFromVendors(vendor);
+
+        mDriverDescription = std::string("OpenGL version ") +
+                             reinterpret_cast<const char*>(mFunctions.GetString(GL_VERSION));
+
+        if (mName.find("SwiftShader") != std::string::npos) {
+            mAdapterType = wgpu::AdapterType::CPU;
+        }
+
         return {};
     }
 
-    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
-        const AdapterDiscoveryOptionsBase* optionsBase) {
-        // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because don't
-        // know how to handle MakeCurrent.
-        DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");
+    MaybeError InitializeSupportedFeaturesImpl() override {
+        // TextureCompressionBC
+        {
+            // BC1, BC2 and BC3 are not supported in OpenGL or OpenGL ES core features.
+            bool supportsS3TC =
+                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc") ||
+                (mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_dxt1") &&
+                 mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt3") &&
+                 mFunctions.IsGLExtensionSupported("GL_ANGLE_texture_compression_dxt5"));
 
-        ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
-        const AdapterDiscoveryOptions* options =
-            static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+            // COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT and
+            // COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT requires both GL_EXT_texture_sRGB and
+            // GL_EXT_texture_compression_s3tc on desktop OpenGL drivers.
+            // (https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_sRGB.txt)
+            bool supportsTextureSRGB = mFunctions.IsGLExtensionSupported("GL_EXT_texture_sRGB");
 
-        DAWN_INVALID_IF(options->getProc == nullptr,
-                        "AdapterDiscoveryOptions::getProc must be set");
+            // GL_EXT_texture_compression_s3tc_srgb is an extension in OpenGL ES.
+            // NVidia GLES drivers don't support this extension, but they do support
+            // GL_NV_sRGB_formats. (Note that GL_EXT_texture_sRGB does not exist on ES.
+            // GL_EXT_sRGB does (core in ES 3.0), but it does not automatically provide S3TC
+            // SRGB support even if S3TC is supported; see
+            // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_sRGB.txt.)
+            bool supportsS3TCSRGB =
+                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_s3tc_srgb") ||
+                mFunctions.IsGLExtensionSupported("GL_NV_sRGB_formats");
 
-        Ref<Adapter> adapter = AcquireRef(
-            new Adapter(GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType)));
-        DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
-        DAWN_TRY(adapter->Initialize());
+            // BC4 and BC5
+            bool supportsRGTC =
+                mFunctions.IsAtLeastGL(3, 0) ||
+                mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_rgtc") ||
+                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_rgtc");
 
-        mCreatedAdapter = true;
-        std::vector<Ref<AdapterBase>> adapters{std::move(adapter)};
-        return std::move(adapters);
+            // BC6 and BC7
+            bool supportsBPTC =
+                mFunctions.IsAtLeastGL(4, 2) ||
+                mFunctions.IsGLExtensionSupported("GL_ARB_texture_compression_bptc") ||
+                mFunctions.IsGLExtensionSupported("GL_EXT_texture_compression_bptc");
+
+            if (supportsS3TC && (supportsTextureSRGB || supportsS3TCSRGB) && supportsRGTC &&
+                supportsBPTC) {
+                mSupportedFeatures.EnableFeature(dawn::native::Feature::TextureCompressionBC);
+            }
+            mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+        }
+
+        return {};
     }
 
-    BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
-        return new Backend(instance, backendType);
+    MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override {
+        GetDefaultLimits(&limits->v1);
+        return {};
     }
 
+    ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override {
+        // There is no limit on the number of devices created from this adapter because they can
+        // all share the same backing OpenGL context.
+        return Device::Create(this, descriptor, mFunctions);
+    }
+
+    OpenGLFunctions mFunctions;
+};
+
+// Implementation of the OpenGL backend's BackendConnection
+
+Backend::Backend(InstanceBase* instance, wgpu::BackendType backendType)
+    : BackendConnection(instance, backendType) {}
+
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+    // The OpenGL backend needs at least "getProcAddress" to discover an adapter.
+    return {};
+}
+
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+    const AdapterDiscoveryOptionsBase* optionsBase) {
+    // TODO(cwallez@chromium.org): For now only create a single OpenGL adapter because we don't
+    // know how to handle MakeCurrent.
+    DAWN_INVALID_IF(mCreatedAdapter, "The OpenGL backend can only create a single adapter.");
+
+    ASSERT(static_cast<wgpu::BackendType>(optionsBase->backendType) == GetType());
+    const AdapterDiscoveryOptions* options =
+        static_cast<const AdapterDiscoveryOptions*>(optionsBase);
+
+    DAWN_INVALID_IF(options->getProc == nullptr, "AdapterDiscoveryOptions::getProc must be set");
+
+    Ref<Adapter> adapter = AcquireRef(
+        new Adapter(GetInstance(), static_cast<wgpu::BackendType>(optionsBase->backendType)));
+    DAWN_TRY(adapter->InitializeGLFunctions(options->getProc));
+    DAWN_TRY(adapter->Initialize());
+
+    mCreatedAdapter = true;
+    std::vector<Ref<AdapterBase>> adapters{std::move(adapter)};
+    return std::move(adapters);
+}
+
+BackendConnection* Connect(InstanceBase* instance, wgpu::BackendType backendType) {
+    return new Backend(instance, backendType);
+}
+
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BackendGL.h b/src/dawn/native/opengl/BackendGL.h
index def1201..5916728 100644
--- a/src/dawn/native/opengl/BackendGL.h
+++ b/src/dawn/native/opengl/BackendGL.h
@@ -21,17 +21,17 @@
 
 namespace dawn::native::opengl {
 
-    class Backend : public BackendConnection {
-      public:
-        Backend(InstanceBase* instance, wgpu::BackendType backendType);
+class Backend : public BackendConnection {
+  public:
+    Backend(InstanceBase* instance, wgpu::BackendType backendType);
 
-        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
-        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
-            const AdapterDiscoveryOptionsBase* options) override;
+    std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+    ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* options) override;
 
-      private:
-        bool mCreatedAdapter = false;
-    };
+  private:
+    bool mCreatedAdapter = false;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/BindGroupGL.cpp b/src/dawn/native/opengl/BindGroupGL.cpp
index 6573a92..a688c2e 100644
--- a/src/dawn/native/opengl/BindGroupGL.cpp
+++ b/src/dawn/native/opengl/BindGroupGL.cpp
@@ -20,46 +20,45 @@
 
 namespace dawn::native::opengl {
 
-    MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
-        const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
-        for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
-            const BindGroupEntry& entry = descriptor->entries[i];
+MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor) {
+    const BindGroupLayoutBase::BindingMap& bindingMap = descriptor->layout->GetBindingMap();
+    for (uint32_t i = 0; i < descriptor->entryCount; ++i) {
+        const BindGroupEntry& entry = descriptor->entries[i];
 
-            const auto& it = bindingMap.find(BindingNumber(entry.binding));
-            BindingIndex bindingIndex = it->second;
-            ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
+        const auto& it = bindingMap.find(BindingNumber(entry.binding));
+        BindingIndex bindingIndex = it->second;
+        ASSERT(bindingIndex < descriptor->layout->GetBindingCount());
 
-            const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
-            if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
-                ASSERT(entry.textureView != nullptr);
-                const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
-                DAWN_INVALID_IF(
-                    textureViewLayerCount != 1 &&
-                        textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
-                    "%s binds %u layers. Currently the OpenGL backend only supports either binding "
-                    "1 layer or the all layers (%u) for storage texture.",
-                    entry.textureView, textureViewLayerCount,
-                    entry.textureView->GetTexture()->GetArrayLayers());
-            }
+        const BindingInfo& bindingInfo = descriptor->layout->GetBindingInfo(bindingIndex);
+        if (bindingInfo.bindingType == BindingInfoType::StorageTexture) {
+            ASSERT(entry.textureView != nullptr);
+            const uint32_t textureViewLayerCount = entry.textureView->GetLayerCount();
+            DAWN_INVALID_IF(
+                textureViewLayerCount != 1 &&
+                    textureViewLayerCount != entry.textureView->GetTexture()->GetArrayLayers(),
+                "%s binds %u layers. Currently the OpenGL backend only supports either binding "
+                "1 layer or the all layers (%u) for storage texture.",
+                entry.textureView, textureViewLayerCount,
+                entry.textureView->GetTexture()->GetArrayLayers());
         }
-
-        return {};
     }
 
-    BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
-        : BindGroupBase(this, device, descriptor) {
-    }
+    return {};
+}
 
-    BindGroup::~BindGroup() = default;
+BindGroup::BindGroup(Device* device, const BindGroupDescriptor* descriptor)
+    : BindGroupBase(this, device, descriptor) {}
 
-    void BindGroup::DestroyImpl() {
-        BindGroupBase::DestroyImpl();
-        ToBackend(GetLayout())->DeallocateBindGroup(this);
-    }
+BindGroup::~BindGroup() = default;
 
-    // static
-    Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
-        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
-    }
+void BindGroup::DestroyImpl() {
+    BindGroupBase::DestroyImpl();
+    ToBackend(GetLayout())->DeallocateBindGroup(this);
+}
+
+// static
+Ref<BindGroup> BindGroup::Create(Device* device, const BindGroupDescriptor* descriptor) {
+    return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BindGroupGL.h b/src/dawn/native/opengl/BindGroupGL.h
index fb6e2b5..a33b03b 100644
--- a/src/dawn/native/opengl/BindGroupGL.h
+++ b/src/dawn/native/opengl/BindGroupGL.h
@@ -20,21 +20,21 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
+MaybeError ValidateGLBindGroupDescriptor(const BindGroupDescriptor* descriptor);
 
-    class BindGroup final : public BindGroupBase, public PlacementAllocated {
-      public:
-        static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+  public:
+    static Ref<BindGroup> Create(Device* device, const BindGroupDescriptor* descriptor);
 
-        BindGroup(Device* device, const BindGroupDescriptor* descriptor);
+    BindGroup(Device* device, const BindGroupDescriptor* descriptor);
 
-      private:
-        ~BindGroup() override;
+  private:
+    ~BindGroup() override;
 
-        void DestroyImpl() override;
-    };
+    void DestroyImpl() override;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/BindGroupLayoutGL.cpp b/src/dawn/native/opengl/BindGroupLayoutGL.cpp
index ca23169..b665d17 100644
--- a/src/dawn/native/opengl/BindGroupLayoutGL.cpp
+++ b/src/dawn/native/opengl/BindGroupLayoutGL.cpp
@@ -16,20 +16,19 @@
 
 namespace dawn::native::opengl {
 
-    BindGroupLayout::BindGroupLayout(DeviceBase* device,
-                                     const BindGroupLayoutDescriptor* descriptor,
-                                     PipelineCompatibilityToken pipelineCompatibilityToken)
-        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
-          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
-    }
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                 const BindGroupLayoutDescriptor* descriptor,
+                                 PipelineCompatibilityToken pipelineCompatibilityToken)
+    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+      mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {}
 
-    Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
-                                                      const BindGroupDescriptor* descriptor) {
-        return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
-    }
+Ref<BindGroup> BindGroupLayout::AllocateBindGroup(Device* device,
+                                                  const BindGroupDescriptor* descriptor) {
+    return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor));
+}
 
-    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
-        mBindGroupAllocator.Deallocate(bindGroup);
-    }
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup) {
+    mBindGroupAllocator.Deallocate(bindGroup);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BindGroupLayoutGL.h b/src/dawn/native/opengl/BindGroupLayoutGL.h
index c774c7d..136b16f 100644
--- a/src/dawn/native/opengl/BindGroupLayoutGL.h
+++ b/src/dawn/native/opengl/BindGroupLayoutGL.h
@@ -21,21 +21,21 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class BindGroupLayout final : public BindGroupLayoutBase {
-      public:
-        BindGroupLayout(DeviceBase* device,
-                        const BindGroupLayoutDescriptor* descriptor,
-                        PipelineCompatibilityToken pipelineCompatibilityToken);
+class BindGroupLayout final : public BindGroupLayoutBase {
+  public:
+    BindGroupLayout(DeviceBase* device,
+                    const BindGroupLayoutDescriptor* descriptor,
+                    PipelineCompatibilityToken pipelineCompatibilityToken);
 
-        Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
-        void DeallocateBindGroup(BindGroup* bindGroup);
+    Ref<BindGroup> AllocateBindGroup(Device* device, const BindGroupDescriptor* descriptor);
+    void DeallocateBindGroup(BindGroup* bindGroup);
 
-      private:
-        ~BindGroupLayout() override = default;
-        SlabAllocator<BindGroup> mBindGroupAllocator;
-    };
+  private:
+    ~BindGroupLayout() override = default;
+    SlabAllocator<BindGroup> mBindGroupAllocator;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/BufferGL.cpp b/src/dawn/native/opengl/BufferGL.cpp
index 2c97e07..c05730bb 100644
--- a/src/dawn/native/opengl/BufferGL.cpp
+++ b/src/dawn/native/opengl/BufferGL.cpp
@@ -23,166 +23,165 @@
 
 namespace dawn::native::opengl {
 
-    // Buffer
+// Buffer
 
-    // static
-    ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
-                                                            const BufferDescriptor* descriptor,
-                                                            bool shouldLazyClear) {
-        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
-        if (descriptor->mappedAtCreation) {
-            DAWN_TRY(buffer->MapAtCreationInternal());
-        }
-
-        return std::move(buffer);
+// static
+ResultOrError<Ref<Buffer>> Buffer::CreateInternalBuffer(Device* device,
+                                                        const BufferDescriptor* descriptor,
+                                                        bool shouldLazyClear) {
+    Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor, shouldLazyClear));
+    if (descriptor->mappedAtCreation) {
+        DAWN_TRY(buffer->MapAtCreationInternal());
     }
 
-    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
-        : BufferBase(device, descriptor) {
-        // Allocate at least 4 bytes so clamped accesses are always in bounds.
-        mAllocatedSize = std::max(GetSize(), uint64_t(4u));
+    return std::move(buffer);
+}
 
-        device->gl.GenBuffers(1, &mBuffer);
-        device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor)
+    : BufferBase(device, descriptor) {
+    // Allocate at least 4 bytes so clamped accesses are always in bounds.
+    mAllocatedSize = std::max(GetSize(), uint64_t(4u));
 
-        // The buffers with mappedAtCreation == true will be initialized in
-        // BufferBase::MapAtCreation().
-        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
-            !descriptor->mappedAtCreation) {
-            std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
-            device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(),
-                                  GL_STATIC_DRAW);
-        } else {
-            // Buffers start zeroed if you pass nullptr to glBufferData.
-            device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
-        }
+    device->gl.GenBuffers(1, &mBuffer);
+    device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+
+    // The buffers with mappedAtCreation == true will be initialized in
+    // BufferBase::MapAtCreation().
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+        !descriptor->mappedAtCreation) {
+        std::vector<uint8_t> clearValues(mAllocatedSize, 1u);
+        device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, clearValues.data(), GL_STATIC_DRAW);
+    } else {
+        // Buffers start zeroed if you pass nullptr to glBufferData.
+        device->gl.BufferData(GL_ARRAY_BUFFER, mAllocatedSize, nullptr, GL_STATIC_DRAW);
     }
+}
 
-    Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
-        : Buffer(device, descriptor) {
-        if (!shouldLazyClear) {
-            SetIsDataInitialized();
-        }
-    }
-
-    Buffer::~Buffer() = default;
-
-    GLuint Buffer::GetHandle() const {
-        return mBuffer;
-    }
-
-    bool Buffer::EnsureDataInitialized() {
-        if (!NeedsInitialization()) {
-            return false;
-        }
-
-        InitializeToZero();
-        return true;
-    }
-
-    bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
-        if (!NeedsInitialization()) {
-            return false;
-        }
-
-        if (IsFullBufferRange(offset, size)) {
-            SetIsDataInitialized();
-            return false;
-        }
-
-        InitializeToZero();
-        return true;
-    }
-
-    bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
-        if (!NeedsInitialization()) {
-            return false;
-        }
-
-        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
-            SetIsDataInitialized();
-            return false;
-        }
-
-        InitializeToZero();
-        return true;
-    }
-
-    void Buffer::InitializeToZero() {
-        ASSERT(NeedsInitialization());
-
-        const uint64_t size = GetAllocatedSize();
-        Device* device = ToBackend(GetDevice());
-
-        const std::vector<uint8_t> clearValues(size, 0u);
-        device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
-        device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
-        device->IncrementLazyClearCountForTesting();
-
+Buffer::Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear)
+    : Buffer(device, descriptor) {
+    if (!shouldLazyClear) {
         SetIsDataInitialized();
     }
+}
 
-    bool Buffer::IsCPUWritableAtCreation() const {
-        // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
-        // driver to migrate it to shared memory.
-        return true;
+Buffer::~Buffer() = default;
+
+GLuint Buffer::GetHandle() const {
+    return mBuffer;
+}
+
+bool Buffer::EnsureDataInitialized() {
+    if (!NeedsInitialization()) {
+        return false;
     }
 
-    MaybeError Buffer::MapAtCreationImpl() {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-        gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
-        mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
-        return {};
+    InitializeToZero();
+    return true;
+}
+
+bool Buffer::EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size) {
+    if (!NeedsInitialization()) {
+        return false;
     }
 
-    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    if (IsFullBufferRange(offset, size)) {
+        SetIsDataInitialized();
+        return false;
+    }
 
-        // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
-        // so we extend the range to be 4 bytes.
-        if (size == 0) {
-            if (offset != 0) {
-                offset -= 4;
-            }
-            size = 4;
+    InitializeToZero();
+    return true;
+}
+
+bool Buffer::EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy) {
+    if (!NeedsInitialization()) {
+        return false;
+    }
+
+    if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+        SetIsDataInitialized();
+        return false;
+    }
+
+    InitializeToZero();
+    return true;
+}
+
+void Buffer::InitializeToZero() {
+    ASSERT(NeedsInitialization());
+
+    const uint64_t size = GetAllocatedSize();
+    Device* device = ToBackend(GetDevice());
+
+    const std::vector<uint8_t> clearValues(size, 0u);
+    device->gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+    device->gl.BufferSubData(GL_ARRAY_BUFFER, 0, size, clearValues.data());
+    device->IncrementLazyClearCountForTesting();
+
+    SetIsDataInitialized();
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+    // TODO(enga): All buffers in GL can be mapped. Investigate if mapping them will cause the
+    // driver to migrate it to shared memory.
+    return true;
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+    mMappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, 0, GetSize(), GL_MAP_WRITE_BIT);
+    return {};
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+    // It is an error to map an empty range in OpenGL. We always have at least a 4-byte buffer
+    // so we extend the range to be 4 bytes.
+    if (size == 0) {
+        if (offset != 0) {
+            offset -= 4;
         }
-
-        EnsureDataInitialized();
-
-        // This does GPU->CPU synchronization, we could require a high
-        // version of OpenGL that would let us map the buffer unsynchronized.
-        gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
-        void* mappedData = nullptr;
-        if (mode & wgpu::MapMode::Read) {
-            mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
-        } else {
-            ASSERT(mode & wgpu::MapMode::Write);
-            mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
-        }
-
-        // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
-        // the resource but OpenGL gives us the pointer at offset. Remove the offset.
-        mMappedData = static_cast<uint8_t*>(mappedData) - offset;
-        return {};
+        size = 4;
     }
 
-    void* Buffer::GetMappedPointerImpl() {
-        // The mapping offset has already been removed.
-        return mMappedData;
+    EnsureDataInitialized();
+
+    // This does GPU->CPU synchronization, we could require a high
+    // version of OpenGL that would let us map the buffer unsynchronized.
+    gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+    void* mappedData = nullptr;
+    if (mode & wgpu::MapMode::Read) {
+        mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_READ_BIT);
+    } else {
+        ASSERT(mode & wgpu::MapMode::Write);
+        mappedData = gl.MapBufferRange(GL_ARRAY_BUFFER, offset, size, GL_MAP_WRITE_BIT);
     }
 
-    void Buffer::UnmapImpl() {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    // The frontend asks that the pointer returned by GetMappedPointerImpl is from the start of
+    // the resource but OpenGL gives us the pointer at offset. Remove the offset.
+    mMappedData = static_cast<uint8_t*>(mappedData) - offset;
+    return {};
+}
 
-        gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
-        gl.UnmapBuffer(GL_ARRAY_BUFFER);
-        mMappedData = nullptr;
-    }
+void* Buffer::GetMappedPointerImpl() {
+    // The mapping offset has already been removed.
+    return mMappedData;
+}
 
-    void Buffer::DestroyImpl() {
-        BufferBase::DestroyImpl();
-        ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
-        mBuffer = 0;
-    }
+void Buffer::UnmapImpl() {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+    gl.BindBuffer(GL_ARRAY_BUFFER, mBuffer);
+    gl.UnmapBuffer(GL_ARRAY_BUFFER);
+    mMappedData = nullptr;
+}
+
+void Buffer::DestroyImpl() {
+    BufferBase::DestroyImpl();
+    ToBackend(GetDevice())->gl.DeleteBuffers(1, &mBuffer);
+    mBuffer = 0;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/BufferGL.h b/src/dawn/native/opengl/BufferGL.h
index 910e5db..a86c841 100644
--- a/src/dawn/native/opengl/BufferGL.h
+++ b/src/dawn/native/opengl/BufferGL.h
@@ -21,37 +21,37 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class Buffer final : public BufferBase {
-      public:
-        static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
-                                                               const BufferDescriptor* descriptor,
-                                                               bool shouldLazyClear);
+class Buffer final : public BufferBase {
+  public:
+    static ResultOrError<Ref<Buffer>> CreateInternalBuffer(Device* device,
+                                                           const BufferDescriptor* descriptor,
+                                                           bool shouldLazyClear);
 
-        Buffer(Device* device, const BufferDescriptor* descriptor);
+    Buffer(Device* device, const BufferDescriptor* descriptor);
 
-        GLuint GetHandle() const;
+    GLuint GetHandle() const;
 
-        bool EnsureDataInitialized();
-        bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
-        bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
+    bool EnsureDataInitialized();
+    bool EnsureDataInitializedAsDestination(uint64_t offset, uint64_t size);
+    bool EnsureDataInitializedAsDestination(const CopyTextureToBufferCmd* copy);
 
-      private:
-        Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
-        ~Buffer() override;
-        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
-        void UnmapImpl() override;
-        void DestroyImpl() override;
-        bool IsCPUWritableAtCreation() const override;
-        MaybeError MapAtCreationImpl() override;
-        void* GetMappedPointerImpl() override;
+  private:
+    Buffer(Device* device, const BufferDescriptor* descriptor, bool shouldLazyClear);
+    ~Buffer() override;
+    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+    void UnmapImpl() override;
+    void DestroyImpl() override;
+    bool IsCPUWritableAtCreation() const override;
+    MaybeError MapAtCreationImpl() override;
+    void* GetMappedPointerImpl() override;
 
-        void InitializeToZero();
+    void InitializeToZero();
 
-        GLuint mBuffer = 0;
-        void* mMappedData = nullptr;
-    };
+    GLuint mBuffer = 0;
+    void* mMappedData = nullptr;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/CommandBufferGL.cpp b/src/dawn/native/opengl/CommandBufferGL.cpp
index eeeeff1..9931163 100644
--- a/src/dawn/native/opengl/CommandBufferGL.cpp
+++ b/src/dawn/native/opengl/CommandBufferGL.cpp
@@ -38,1339 +38,1316 @@
 
 namespace dawn::native::opengl {
 
-    namespace {
+namespace {
 
-        GLenum IndexFormatType(wgpu::IndexFormat format) {
-            switch (format) {
-                case wgpu::IndexFormat::Uint16:
-                    return GL_UNSIGNED_SHORT;
-                case wgpu::IndexFormat::Uint32:
-                    return GL_UNSIGNED_INT;
-                case wgpu::IndexFormat::Undefined:
-                    break;
-            }
+GLenum IndexFormatType(wgpu::IndexFormat format) {
+    switch (format) {
+        case wgpu::IndexFormat::Uint16:
+            return GL_UNSIGNED_SHORT;
+        case wgpu::IndexFormat::Uint32:
+            return GL_UNSIGNED_INT;
+        case wgpu::IndexFormat::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+GLenum VertexFormatType(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Uint8x2:
+        case wgpu::VertexFormat::Uint8x4:
+        case wgpu::VertexFormat::Unorm8x2:
+        case wgpu::VertexFormat::Unorm8x4:
+            return GL_UNSIGNED_BYTE;
+        case wgpu::VertexFormat::Sint8x2:
+        case wgpu::VertexFormat::Sint8x4:
+        case wgpu::VertexFormat::Snorm8x2:
+        case wgpu::VertexFormat::Snorm8x4:
+            return GL_BYTE;
+        case wgpu::VertexFormat::Uint16x2:
+        case wgpu::VertexFormat::Uint16x4:
+        case wgpu::VertexFormat::Unorm16x2:
+        case wgpu::VertexFormat::Unorm16x4:
+            return GL_UNSIGNED_SHORT;
+        case wgpu::VertexFormat::Sint16x2:
+        case wgpu::VertexFormat::Sint16x4:
+        case wgpu::VertexFormat::Snorm16x2:
+        case wgpu::VertexFormat::Snorm16x4:
+            return GL_SHORT;
+        case wgpu::VertexFormat::Float16x2:
+        case wgpu::VertexFormat::Float16x4:
+            return GL_HALF_FLOAT;
+        case wgpu::VertexFormat::Float32:
+        case wgpu::VertexFormat::Float32x2:
+        case wgpu::VertexFormat::Float32x3:
+        case wgpu::VertexFormat::Float32x4:
+            return GL_FLOAT;
+        case wgpu::VertexFormat::Uint32:
+        case wgpu::VertexFormat::Uint32x2:
+        case wgpu::VertexFormat::Uint32x3:
+        case wgpu::VertexFormat::Uint32x4:
+            return GL_UNSIGNED_INT;
+        case wgpu::VertexFormat::Sint32:
+        case wgpu::VertexFormat::Sint32x2:
+        case wgpu::VertexFormat::Sint32x3:
+        case wgpu::VertexFormat::Sint32x4:
+            return GL_INT;
+        default:
             UNREACHABLE();
-        }
+    }
+}
 
-        GLenum VertexFormatType(wgpu::VertexFormat format) {
-            switch (format) {
-                case wgpu::VertexFormat::Uint8x2:
-                case wgpu::VertexFormat::Uint8x4:
-                case wgpu::VertexFormat::Unorm8x2:
-                case wgpu::VertexFormat::Unorm8x4:
-                    return GL_UNSIGNED_BYTE;
-                case wgpu::VertexFormat::Sint8x2:
-                case wgpu::VertexFormat::Sint8x4:
-                case wgpu::VertexFormat::Snorm8x2:
-                case wgpu::VertexFormat::Snorm8x4:
-                    return GL_BYTE;
-                case wgpu::VertexFormat::Uint16x2:
-                case wgpu::VertexFormat::Uint16x4:
-                case wgpu::VertexFormat::Unorm16x2:
-                case wgpu::VertexFormat::Unorm16x4:
-                    return GL_UNSIGNED_SHORT;
-                case wgpu::VertexFormat::Sint16x2:
-                case wgpu::VertexFormat::Sint16x4:
-                case wgpu::VertexFormat::Snorm16x2:
-                case wgpu::VertexFormat::Snorm16x4:
-                    return GL_SHORT;
-                case wgpu::VertexFormat::Float16x2:
-                case wgpu::VertexFormat::Float16x4:
-                    return GL_HALF_FLOAT;
-                case wgpu::VertexFormat::Float32:
-                case wgpu::VertexFormat::Float32x2:
-                case wgpu::VertexFormat::Float32x3:
-                case wgpu::VertexFormat::Float32x4:
-                    return GL_FLOAT;
-                case wgpu::VertexFormat::Uint32:
-                case wgpu::VertexFormat::Uint32x2:
-                case wgpu::VertexFormat::Uint32x3:
-                case wgpu::VertexFormat::Uint32x4:
-                    return GL_UNSIGNED_INT;
-                case wgpu::VertexFormat::Sint32:
-                case wgpu::VertexFormat::Sint32x2:
-                case wgpu::VertexFormat::Sint32x3:
-                case wgpu::VertexFormat::Sint32x4:
-                    return GL_INT;
-                default:
-                    UNREACHABLE();
-            }
-        }
+GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Unorm8x2:
+        case wgpu::VertexFormat::Unorm8x4:
+        case wgpu::VertexFormat::Snorm8x2:
+        case wgpu::VertexFormat::Snorm8x4:
+        case wgpu::VertexFormat::Unorm16x2:
+        case wgpu::VertexFormat::Unorm16x4:
+        case wgpu::VertexFormat::Snorm16x2:
+        case wgpu::VertexFormat::Snorm16x4:
+            return GL_TRUE;
+        default:
+            return GL_FALSE;
+    }
+}
 
-        GLboolean VertexFormatIsNormalized(wgpu::VertexFormat format) {
-            switch (format) {
-                case wgpu::VertexFormat::Unorm8x2:
-                case wgpu::VertexFormat::Unorm8x4:
-                case wgpu::VertexFormat::Snorm8x2:
-                case wgpu::VertexFormat::Snorm8x4:
-                case wgpu::VertexFormat::Unorm16x2:
-                case wgpu::VertexFormat::Unorm16x4:
-                case wgpu::VertexFormat::Snorm16x2:
-                case wgpu::VertexFormat::Snorm16x4:
-                    return GL_TRUE;
-                default:
-                    return GL_FALSE;
-            }
-        }
+bool VertexFormatIsInt(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Uint8x2:
+        case wgpu::VertexFormat::Uint8x4:
+        case wgpu::VertexFormat::Sint8x2:
+        case wgpu::VertexFormat::Sint8x4:
+        case wgpu::VertexFormat::Uint16x2:
+        case wgpu::VertexFormat::Uint16x4:
+        case wgpu::VertexFormat::Sint16x2:
+        case wgpu::VertexFormat::Sint16x4:
+        case wgpu::VertexFormat::Uint32:
+        case wgpu::VertexFormat::Uint32x2:
+        case wgpu::VertexFormat::Uint32x3:
+        case wgpu::VertexFormat::Uint32x4:
+        case wgpu::VertexFormat::Sint32:
+        case wgpu::VertexFormat::Sint32x2:
+        case wgpu::VertexFormat::Sint32x3:
+        case wgpu::VertexFormat::Sint32x4:
+            return true;
+        default:
+            return false;
+    }
+}
 
-        bool VertexFormatIsInt(wgpu::VertexFormat format) {
-            switch (format) {
-                case wgpu::VertexFormat::Uint8x2:
-                case wgpu::VertexFormat::Uint8x4:
-                case wgpu::VertexFormat::Sint8x2:
-                case wgpu::VertexFormat::Sint8x4:
-                case wgpu::VertexFormat::Uint16x2:
-                case wgpu::VertexFormat::Uint16x4:
-                case wgpu::VertexFormat::Sint16x2:
-                case wgpu::VertexFormat::Sint16x4:
-                case wgpu::VertexFormat::Uint32:
-                case wgpu::VertexFormat::Uint32x2:
-                case wgpu::VertexFormat::Uint32x3:
-                case wgpu::VertexFormat::Uint32x4:
-                case wgpu::VertexFormat::Sint32:
-                case wgpu::VertexFormat::Sint32x2:
-                case wgpu::VertexFormat::Sint32x3:
-                case wgpu::VertexFormat::Sint32x4:
-                    return true;
-                default:
-                    return false;
-            }
-        }
-
-        // Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
-        // corresponds to a VertexState. On the contrary in Dawn they are part of the global state.
-        // This means that we have to re-apply these buffers on a VertexState change.
-        class VertexStateBufferBindingTracker {
-          public:
-            void OnSetIndexBuffer(BufferBase* buffer) {
-                mIndexBufferDirty = true;
-                mIndexBuffer = ToBackend(buffer);
-            }
-
-            void OnSetVertexBuffer(VertexBufferSlot slot, BufferBase* buffer, uint64_t offset) {
-                mVertexBuffers[slot] = ToBackend(buffer);
-                mVertexBufferOffsets[slot] = offset;
-                mDirtyVertexBuffers.set(slot);
-            }
-
-            void OnSetPipeline(RenderPipelineBase* pipeline) {
-                if (mLastPipeline == pipeline) {
-                    return;
-                }
-
-                mIndexBufferDirty = true;
-                mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
-
-                mLastPipeline = pipeline;
-            }
-
-            void Apply(const OpenGLFunctions& gl) {
-                if (mIndexBufferDirty && mIndexBuffer != nullptr) {
-                    gl.BindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer->GetHandle());
-                    mIndexBufferDirty = false;
-                }
-
-                for (VertexBufferSlot slot : IterateBitSet(
-                         mDirtyVertexBuffers & mLastPipeline->GetVertexBufferSlotsUsed())) {
-                    for (VertexAttributeLocation location : IterateBitSet(
-                             ToBackend(mLastPipeline)->GetAttributesUsingVertexBuffer(slot))) {
-                        const VertexAttributeInfo& attribute =
-                            mLastPipeline->GetAttribute(location);
-
-                        GLuint attribIndex = static_cast<GLuint>(static_cast<uint8_t>(location));
-                        GLuint buffer = mVertexBuffers[slot]->GetHandle();
-                        uint64_t offset = mVertexBufferOffsets[slot];
-
-                        const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
-                        uint32_t components = GetVertexFormatInfo(attribute.format).componentCount;
-                        GLenum formatType = VertexFormatType(attribute.format);
-
-                        GLboolean normalized = VertexFormatIsNormalized(attribute.format);
-                        gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
-                        if (VertexFormatIsInt(attribute.format)) {
-                            gl.VertexAttribIPointer(
-                                attribIndex, components, formatType, vertexBuffer.arrayStride,
-                                reinterpret_cast<void*>(
-                                    static_cast<intptr_t>(offset + attribute.offset)));
-                        } else {
-                            gl.VertexAttribPointer(attribIndex, components, formatType, normalized,
-                                                   vertexBuffer.arrayStride,
-                                                   reinterpret_cast<void*>(static_cast<intptr_t>(
-                                                       offset + attribute.offset)));
-                        }
-                    }
-                }
-
-                mDirtyVertexBuffers.reset();
-            }
-
-          private:
-            bool mIndexBufferDirty = false;
-            Buffer* mIndexBuffer = nullptr;
-
-            ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
-            ityp::array<VertexBufferSlot, Buffer*, kMaxVertexBuffers> mVertexBuffers;
-            ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferOffsets;
-
-            RenderPipelineBase* mLastPipeline = nullptr;
-        };
-
-        class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
-          public:
-            void OnSetPipeline(RenderPipeline* pipeline) {
-                BindGroupTrackerBase::OnSetPipeline(pipeline);
-                mPipeline = pipeline;
-            }
-
-            void OnSetPipeline(ComputePipeline* pipeline) {
-                BindGroupTrackerBase::OnSetPipeline(pipeline);
-                mPipeline = pipeline;
-            }
-
-            void Apply(const OpenGLFunctions& gl) {
-                BeforeApply();
-                for (BindGroupIndex index :
-                     IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
-                    ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
-                                   mDynamicOffsets[index].data());
-                }
-                AfterApply();
-            }
-
-          private:
-            void ApplyBindGroup(const OpenGLFunctions& gl,
-                                BindGroupIndex index,
-                                BindGroupBase* group,
-                                uint32_t dynamicOffsetCount,
-                                uint64_t* dynamicOffsets) {
-                const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
-                uint32_t currentDynamicOffsetIndex = 0;
-
-                for (BindingIndex bindingIndex{0};
-                     bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
-                    const BindingInfo& bindingInfo =
-                        group->GetLayout()->GetBindingInfo(bindingIndex);
-
-                    if (bindingInfo.bindingType == BindingInfoType::Texture) {
-                        TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
-                        view->CopyIfNeeded();
-                    }
-                }
-
-                for (BindingIndex bindingIndex{0};
-                     bindingIndex < group->GetLayout()->GetBindingCount(); ++bindingIndex) {
-                    const BindingInfo& bindingInfo =
-                        group->GetLayout()->GetBindingInfo(bindingIndex);
-
-                    switch (bindingInfo.bindingType) {
-                        case BindingInfoType::Buffer: {
-                            BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
-                            GLuint buffer = ToBackend(binding.buffer)->GetHandle();
-                            GLuint index = indices[bindingIndex];
-                            GLuint offset = binding.offset;
-
-                            if (bindingInfo.buffer.hasDynamicOffset) {
-                                offset += dynamicOffsets[currentDynamicOffsetIndex];
-                                ++currentDynamicOffsetIndex;
-                            }
-
-                            GLenum target;
-                            switch (bindingInfo.buffer.type) {
-                                case wgpu::BufferBindingType::Uniform:
-                                    target = GL_UNIFORM_BUFFER;
-                                    break;
-                                case wgpu::BufferBindingType::Storage:
-                                case kInternalStorageBufferBinding:
-                                case wgpu::BufferBindingType::ReadOnlyStorage:
-                                    target = GL_SHADER_STORAGE_BUFFER;
-                                    break;
-                                case wgpu::BufferBindingType::Undefined:
-                                    UNREACHABLE();
-                            }
-
-                            gl.BindBufferRange(target, index, buffer, offset, binding.size);
-                            break;
-                        }
-
-                        case BindingInfoType::Sampler: {
-                            Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
-                            GLuint samplerIndex = indices[bindingIndex];
-
-                            for (PipelineGL::SamplerUnit unit :
-                                 mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
-                                // Only use filtering for certain texture units, because int
-                                // and uint texture are only complete without filtering
-                                if (unit.shouldUseFiltering) {
-                                    gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
-                                } else {
-                                    gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
-                                }
-                            }
-                            break;
-                        }
-
-                        case BindingInfoType::Texture: {
-                            TextureView* view =
-                                ToBackend(group->GetBindingAsTextureView(bindingIndex));
-                            GLuint handle = view->GetHandle();
-                            GLenum target = view->GetGLTarget();
-                            GLuint viewIndex = indices[bindingIndex];
-
-                            for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
-                                gl.ActiveTexture(GL_TEXTURE0 + unit);
-                                gl.BindTexture(target, handle);
-                                if (ToBackend(view->GetTexture())->GetGLFormat().format ==
-                                    GL_DEPTH_STENCIL) {
-                                    Aspect aspect = view->GetAspects();
-                                    ASSERT(HasOneBit(aspect));
-                                    switch (aspect) {
-                                        case Aspect::None:
-                                        case Aspect::Color:
-                                        case Aspect::CombinedDepthStencil:
-                                        case Aspect::Plane0:
-                                        case Aspect::Plane1:
-                                            UNREACHABLE();
-                                        case Aspect::Depth:
-                                            gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
-                                                             GL_DEPTH_COMPONENT);
-                                            break;
-                                        case Aspect::Stencil:
-                                            gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
-                                                             GL_STENCIL_INDEX);
-                                            break;
-                                    }
-                                }
-                            }
-                            break;
-                        }
-
-                        case BindingInfoType::StorageTexture: {
-                            TextureView* view =
-                                ToBackend(group->GetBindingAsTextureView(bindingIndex));
-                            Texture* texture = ToBackend(view->GetTexture());
-                            GLuint handle = texture->GetHandle();
-                            GLuint imageIndex = indices[bindingIndex];
-
-                            GLenum access;
-                            switch (bindingInfo.storageTexture.access) {
-                                case wgpu::StorageTextureAccess::WriteOnly:
-                                    access = GL_WRITE_ONLY;
-                                    break;
-                                case wgpu::StorageTextureAccess::Undefined:
-                                    UNREACHABLE();
-                            }
-
-                            // OpenGL ES only supports either binding a layer or the entire
-                            // texture in glBindImageTexture().
-                            GLboolean isLayered;
-                            if (view->GetLayerCount() == 1) {
-                                isLayered = GL_FALSE;
-                            } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
-                                isLayered = GL_TRUE;
-                            } else {
-                                UNREACHABLE();
-                            }
-
-                            gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(),
-                                                isLayered, view->GetBaseArrayLayer(), access,
-                                                texture->GetGLFormat().internalFormat);
-                            texture->Touch();
-                            break;
-                        }
-
-                        case BindingInfoType::ExternalTexture:
-                            UNREACHABLE();
-                            break;
-                    }
-                }
-            }
-
-            PipelineGL* mPipeline = nullptr;
-        };
-
-        void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
-                                              const BeginRenderPassCmd* renderPass) {
-            ASSERT(renderPass != nullptr);
-
-            GLuint readFbo = 0;
-            GLuint writeFbo = 0;
-
-            for (ColorAttachmentIndex i :
-                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
-                    if (readFbo == 0) {
-                        ASSERT(writeFbo == 0);
-                        gl.GenFramebuffers(1, &readFbo);
-                        gl.GenFramebuffers(1, &writeFbo);
-                    }
-
-                    TextureView* colorView = ToBackend(renderPass->colorAttachments[i].view.Get());
-
-                    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
-                    colorView->BindToFramebuffer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
-
-                    TextureView* resolveView =
-                        ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
-                    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, writeFbo);
-                    resolveView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
-                    gl.BlitFramebuffer(0, 0, renderPass->width, renderPass->height, 0, 0,
-                                       renderPass->width, renderPass->height, GL_COLOR_BUFFER_BIT,
-                                       GL_NEAREST);
-                    ToBackend(resolveView->GetTexture())->Touch();
-                }
-            }
-
-            gl.DeleteFramebuffers(1, &readFbo);
-            gl.DeleteFramebuffers(1, &writeFbo);
-        }
-
-        // OpenGL SPEC requires the source/destination region must be a region that is contained
-        // within srcImage/dstImage. Here the size of the image refers to the virtual size, while
-        // Dawn validates texture copy extent with the physical size, so we need to re-calculate the
-        // texture copy extent to ensure it should fit in the virtual size of the subresource.
-        Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy,
-                                          const Extent3D& copySize) {
-            Extent3D validTextureCopyExtent = copySize;
-            const TextureBase* texture = textureCopy.texture.Get();
-            Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
-            ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
-            ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
-            if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
-                ASSERT(texture->GetFormat().isCompressed);
-                validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
-            }
-            if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
-                ASSERT(texture->GetFormat().isCompressed);
-                validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
-            }
-
-            return validTextureCopyExtent;
-        }
-
-        bool TextureFormatIsSnorm(wgpu::TextureFormat format) {
-            return format == wgpu::TextureFormat::RGBA8Snorm ||
-                   format == wgpu::TextureFormat::RG8Snorm ||
-                   format == wgpu::TextureFormat::R8Snorm;
-        }
-    }  // namespace
-
-    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
-        : CommandBufferBase(encoder, descriptor) {
+// Vertex buffers and index buffers are implemented as part of an OpenGL VAO that
+// corresponds to a VertexState. On the contrary in Dawn they are part of the global state.
+// This means that we have to re-apply these buffers on a VertexState change.
+class VertexStateBufferBindingTracker {
+  public:
+    void OnSetIndexBuffer(BufferBase* buffer) {
+        mIndexBufferDirty = true;
+        mIndexBuffer = ToBackend(buffer);
     }
 
-    MaybeError CommandBuffer::Execute() {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    void OnSetVertexBuffer(VertexBufferSlot slot, BufferBase* buffer, uint64_t offset) {
+        mVertexBuffers[slot] = ToBackend(buffer);
+        mVertexBufferOffsets[slot] = offset;
+        mDirtyVertexBuffers.set(slot);
+    }
 
-        auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
-            for (size_t i = 0; i < scope.textures.size(); i++) {
-                Texture* texture = ToBackend(scope.textures[i]);
+    void OnSetPipeline(RenderPipelineBase* pipeline) {
+        if (mLastPipeline == pipeline) {
+            return;
+        }
 
-                // Clear subresources that are not render attachments. Render attachments will be
-                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
-                // subresource has not been initialized before the render pass.
-                scope.textureUsages[i].Iterate(
-                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
-                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                            texture->EnsureSubresourceContentInitialized(range);
+        mIndexBufferDirty = true;
+        mDirtyVertexBuffers |= pipeline->GetVertexBufferSlotsUsed();
+
+        mLastPipeline = pipeline;
+    }
+
+    void Apply(const OpenGLFunctions& gl) {
+        if (mIndexBufferDirty && mIndexBuffer != nullptr) {
+            gl.BindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer->GetHandle());
+            mIndexBufferDirty = false;
+        }
+
+        for (VertexBufferSlot slot :
+             IterateBitSet(mDirtyVertexBuffers & mLastPipeline->GetVertexBufferSlotsUsed())) {
+            for (VertexAttributeLocation location :
+                 IterateBitSet(ToBackend(mLastPipeline)->GetAttributesUsingVertexBuffer(slot))) {
+                const VertexAttributeInfo& attribute = mLastPipeline->GetAttribute(location);
+
+                GLuint attribIndex = static_cast<GLuint>(static_cast<uint8_t>(location));
+                GLuint buffer = mVertexBuffers[slot]->GetHandle();
+                uint64_t offset = mVertexBufferOffsets[slot];
+
+                const VertexBufferInfo& vertexBuffer = mLastPipeline->GetVertexBuffer(slot);
+                uint32_t components = GetVertexFormatInfo(attribute.format).componentCount;
+                GLenum formatType = VertexFormatType(attribute.format);
+
+                GLboolean normalized = VertexFormatIsNormalized(attribute.format);
+                gl.BindBuffer(GL_ARRAY_BUFFER, buffer);
+                if (VertexFormatIsInt(attribute.format)) {
+                    gl.VertexAttribIPointer(
+                        attribIndex, components, formatType, vertexBuffer.arrayStride,
+                        reinterpret_cast<void*>(static_cast<intptr_t>(offset + attribute.offset)));
+                } else {
+                    gl.VertexAttribPointer(
+                        attribIndex, components, formatType, normalized, vertexBuffer.arrayStride,
+                        reinterpret_cast<void*>(static_cast<intptr_t>(offset + attribute.offset)));
+                }
+            }
+        }
+
+        mDirtyVertexBuffers.reset();
+    }
+
+  private:
+    bool mIndexBufferDirty = false;
+    Buffer* mIndexBuffer = nullptr;
+
+    ityp::bitset<VertexBufferSlot, kMaxVertexBuffers> mDirtyVertexBuffers;
+    ityp::array<VertexBufferSlot, Buffer*, kMaxVertexBuffers> mVertexBuffers;
+    ityp::array<VertexBufferSlot, uint64_t, kMaxVertexBuffers> mVertexBufferOffsets;
+
+    RenderPipelineBase* mLastPipeline = nullptr;
+};
+
+class BindGroupTracker : public BindGroupTrackerBase<false, uint64_t> {
+  public:
+    void OnSetPipeline(RenderPipeline* pipeline) {
+        BindGroupTrackerBase::OnSetPipeline(pipeline);
+        mPipeline = pipeline;
+    }
+
+    void OnSetPipeline(ComputePipeline* pipeline) {
+        BindGroupTrackerBase::OnSetPipeline(pipeline);
+        mPipeline = pipeline;
+    }
+
+    void Apply(const OpenGLFunctions& gl) {
+        BeforeApply();
+        for (BindGroupIndex index : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+            ApplyBindGroup(gl, index, mBindGroups[index], mDynamicOffsetCounts[index],
+                           mDynamicOffsets[index].data());
+        }
+        AfterApply();
+    }
+
+  private:
+    void ApplyBindGroup(const OpenGLFunctions& gl,
+                        BindGroupIndex index,
+                        BindGroupBase* group,
+                        uint32_t dynamicOffsetCount,
+                        uint64_t* dynamicOffsets) {
+        const auto& indices = ToBackend(mPipelineLayout)->GetBindingIndexInfo()[index];
+        uint32_t currentDynamicOffsetIndex = 0;
+
+        for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+             ++bindingIndex) {
+            const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+            if (bindingInfo.bindingType == BindingInfoType::Texture) {
+                TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+                view->CopyIfNeeded();
+            }
+        }
+
+        for (BindingIndex bindingIndex{0}; bindingIndex < group->GetLayout()->GetBindingCount();
+             ++bindingIndex) {
+            const BindingInfo& bindingInfo = group->GetLayout()->GetBindingInfo(bindingIndex);
+
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer: {
+                    BufferBinding binding = group->GetBindingAsBufferBinding(bindingIndex);
+                    GLuint buffer = ToBackend(binding.buffer)->GetHandle();
+                    GLuint index = indices[bindingIndex];
+                    GLuint offset = binding.offset;
+
+                    if (bindingInfo.buffer.hasDynamicOffset) {
+                        offset += dynamicOffsets[currentDynamicOffsetIndex];
+                        ++currentDynamicOffsetIndex;
+                    }
+
+                    GLenum target;
+                    switch (bindingInfo.buffer.type) {
+                        case wgpu::BufferBindingType::Uniform:
+                            target = GL_UNIFORM_BUFFER;
+                            break;
+                        case wgpu::BufferBindingType::Storage:
+                        case kInternalStorageBufferBinding:
+                        case wgpu::BufferBindingType::ReadOnlyStorage:
+                            target = GL_SHADER_STORAGE_BUFFER;
+                            break;
+                        case wgpu::BufferBindingType::Undefined:
+                            UNREACHABLE();
+                    }
+
+                    gl.BindBufferRange(target, index, buffer, offset, binding.size);
+                    break;
+                }
+
+                case BindingInfoType::Sampler: {
+                    Sampler* sampler = ToBackend(group->GetBindingAsSampler(bindingIndex));
+                    GLuint samplerIndex = indices[bindingIndex];
+
+                    for (PipelineGL::SamplerUnit unit :
+                         mPipeline->GetTextureUnitsForSampler(samplerIndex)) {
+                        // Only use filtering for certain texture units, because int
+                        // and uint textures are only complete without filtering
+                        if (unit.shouldUseFiltering) {
+                            gl.BindSampler(unit.unit, sampler->GetFilteringHandle());
+                        } else {
+                            gl.BindSampler(unit.unit, sampler->GetNonFilteringHandle());
                         }
-                    });
-            }
-
-            for (BufferBase* bufferBase : scope.buffers) {
-                ToBackend(bufferBase)->EnsureDataInitialized();
-            }
-        };
-
-        size_t nextComputePassNumber = 0;
-        size_t nextRenderPassNumber = 0;
-
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::BeginComputePass: {
-                    mCommands.NextCommand<BeginComputePassCmd>();
-                    for (const SyncScopeResourceUsage& scope :
-                         GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
-                        LazyClearSyncScope(scope);
                     }
-                    DAWN_TRY(ExecuteComputePass());
-
-                    nextComputePassNumber++;
                     break;
                 }
 
-                case Command::BeginRenderPass: {
-                    auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
-                    LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
-                    LazyClearRenderPassAttachments(cmd);
-                    DAWN_TRY(ExecuteRenderPass(cmd));
+                case BindingInfoType::Texture: {
+                    TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+                    GLuint handle = view->GetHandle();
+                    GLenum target = view->GetGLTarget();
+                    GLuint viewIndex = indices[bindingIndex];
 
-                    nextRenderPassNumber++;
+                    for (auto unit : mPipeline->GetTextureUnitsForTextureView(viewIndex)) {
+                        gl.ActiveTexture(GL_TEXTURE0 + unit);
+                        gl.BindTexture(target, handle);
+                        if (ToBackend(view->GetTexture())->GetGLFormat().format ==
+                            GL_DEPTH_STENCIL) {
+                            Aspect aspect = view->GetAspects();
+                            ASSERT(HasOneBit(aspect));
+                            switch (aspect) {
+                                case Aspect::None:
+                                case Aspect::Color:
+                                case Aspect::CombinedDepthStencil:
+                                case Aspect::Plane0:
+                                case Aspect::Plane1:
+                                    UNREACHABLE();
+                                case Aspect::Depth:
+                                    gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+                                                     GL_DEPTH_COMPONENT);
+                                    break;
+                                case Aspect::Stencil:
+                                    gl.TexParameteri(target, GL_DEPTH_STENCIL_TEXTURE_MODE,
+                                                     GL_STENCIL_INDEX);
+                                    break;
+                            }
+                        }
+                    }
                     break;
                 }
 
-                case Command::CopyBufferToBuffer: {
-                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
-                    if (copy->size == 0) {
-                        // Skip no-op copies.
-                        break;
+                case BindingInfoType::StorageTexture: {
+                    TextureView* view = ToBackend(group->GetBindingAsTextureView(bindingIndex));
+                    Texture* texture = ToBackend(view->GetTexture());
+                    GLuint handle = texture->GetHandle();
+                    GLuint imageIndex = indices[bindingIndex];
+
+                    GLenum access;
+                    switch (bindingInfo.storageTexture.access) {
+                        case wgpu::StorageTextureAccess::WriteOnly:
+                            access = GL_WRITE_ONLY;
+                            break;
+                        case wgpu::StorageTextureAccess::Undefined:
+                            UNREACHABLE();
                     }
 
-                    ToBackend(copy->source)->EnsureDataInitialized();
-                    ToBackend(copy->destination)
-                        ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
-
-                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
-                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER,
-                                  ToBackend(copy->destination)->GetHandle());
-                    gl.CopyBufferSubData(GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
-                                         copy->sourceOffset, copy->destinationOffset, copy->size);
-
-                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
-                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
-                    break;
-                }
-
-                case Command::CopyBufferToTexture: {
-                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    auto& src = copy->source;
-                    auto& dst = copy->destination;
-                    Buffer* buffer = ToBackend(src.buffer.Get());
-
-                    DAWN_INVALID_IF(
-                        dst.aspect == Aspect::Stencil,
-                        "Copies to stencil textures are unsupported on the OpenGL backend.");
-
-                    ASSERT(dst.aspect == Aspect::Color);
-
-                    buffer->EnsureDataInitialized();
-                    SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
-                    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
-                                                      dst.mipLevel)) {
-                        dst.texture->SetIsSubresourceContentInitialized(true, range);
+                    // OpenGL ES only supports either binding a layer or the entire
+                    // texture in glBindImageTexture().
+                    GLboolean isLayered;
+                    if (view->GetLayerCount() == 1) {
+                        isLayered = GL_FALSE;
+                    } else if (texture->GetArrayLayers() == view->GetLayerCount()) {
+                        isLayered = GL_TRUE;
                     } else {
-                        ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
-                    }
-
-                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
-
-                    TextureDataLayout dataLayout;
-                    dataLayout.offset = 0;
-                    dataLayout.bytesPerRow = src.bytesPerRow;
-                    dataLayout.rowsPerImage = src.rowsPerImage;
-
-                    DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
-                                  copy->copySize);
-                    gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
-                    ToBackend(dst.texture)->Touch();
-                    break;
-                }
-
-                case Command::CopyTextureToBuffer: {
-                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    auto& src = copy->source;
-                    auto& dst = copy->destination;
-                    auto& copySize = copy->copySize;
-                    Texture* texture = ToBackend(src.texture.Get());
-                    Buffer* buffer = ToBackend(dst.buffer.Get());
-                    const Format& formatInfo = texture->GetFormat();
-                    const GLFormat& format = texture->GetGLFormat();
-                    GLenum target = texture->GetGLTarget();
-
-                    // TODO(crbug.com/dawn/667): Implement validation in WebGPU/Compat to
-                    // avoid this codepath. OpenGL does not support readback from non-renderable
-                    // texture formats.
-                    if (formatInfo.isCompressed ||
-                        (TextureFormatIsSnorm(formatInfo.format) &&
-                         GetDevice()->IsToggleEnabled(Toggle::DisableSnormRead))) {
                         UNREACHABLE();
                     }
 
-                    buffer->EnsureDataInitializedAsDestination(copy);
-
-                    ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
-                    SubresourceRange subresources =
-                        GetSubresourcesAffectedByCopy(src, copy->copySize);
-                    texture->EnsureSubresourceContentInitialized(subresources);
-                    // The only way to move data from a texture to a buffer in GL is via
-                    // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
-                    gl.BindTexture(target, texture->GetHandle());
-
-                    GLuint readFBO = 0;
-                    gl.GenFramebuffers(1, &readFBO);
-                    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
-
-                    const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(src.aspect).block;
-
-                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
-                    gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.byteSize);
-
-                    GLenum glAttachment;
-                    GLenum glFormat;
-                    GLenum glType;
-                    switch (src.aspect) {
-                        case Aspect::Color:
-                            glAttachment = GL_COLOR_ATTACHMENT0;
-                            glFormat = format.format;
-                            glType = format.type;
-                            break;
-                        case Aspect::Depth:
-                            glAttachment = GL_DEPTH_ATTACHMENT;
-                            glFormat = GL_DEPTH_COMPONENT;
-                            glType = GL_FLOAT;
-                            break;
-                        case Aspect::Stencil:
-                            glAttachment = GL_STENCIL_ATTACHMENT;
-                            glFormat = GL_STENCIL_INDEX;
-                            glType = GL_UNSIGNED_BYTE;
-                            break;
-
-                        case Aspect::CombinedDepthStencil:
-                        case Aspect::None:
-                        case Aspect::Plane0:
-                        case Aspect::Plane1:
-                            UNREACHABLE();
-                    }
-
-                    uint8_t* offset =
-                        reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
-                    switch (texture->GetDimension()) {
-                        case wgpu::TextureDimension::e2D: {
-                            if (texture->GetArrayLayers() == 1) {
-                                gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
-                                                        texture->GetHandle(), src.mipLevel);
-                                gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
-                                              copySize.height, glFormat, glType, offset);
-                                break;
-                            }
-                            // Implementation for 2D array is the same as 3D.
-                            [[fallthrough]];
-                        }
-
-                        case wgpu::TextureDimension::e3D: {
-                            const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
-                            for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
-                                gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
-                                                           texture->GetHandle(), src.mipLevel,
-                                                           src.origin.z + z);
-                                gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
-                                              copySize.height, glFormat, glType, offset);
-
-                                offset += bytesPerImage;
-                            }
-                            break;
-                        }
-
-                        case wgpu::TextureDimension::e1D:
-                            UNREACHABLE();
-                    }
-
-                    gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
-
-                    gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
-                    gl.DeleteFramebuffers(1, &readFBO);
+                    gl.BindImageTexture(imageIndex, handle, view->GetBaseMipLevel(), isLayered,
+                                        view->GetBaseArrayLayer(), access,
+                                        texture->GetGLFormat().internalFormat);
+                    texture->Touch();
                     break;
                 }
 
-                case Command::CopyTextureToTexture: {
-                    CopyTextureToTextureCmd* copy =
-                        mCommands.NextCommand<CopyTextureToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
+                case BindingInfoType::ExternalTexture:
+                    UNREACHABLE();
+                    break;
+            }
+        }
+    }
+
+    PipelineGL* mPipeline = nullptr;
+};
+
+void ResolveMultisampledRenderTargets(const OpenGLFunctions& gl,
+                                      const BeginRenderPassCmd* renderPass) {
+    ASSERT(renderPass != nullptr);
+
+    GLuint readFbo = 0;
+    GLuint writeFbo = 0;
+
+    for (ColorAttachmentIndex i :
+         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+        if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+            if (readFbo == 0) {
+                ASSERT(writeFbo == 0);
+                gl.GenFramebuffers(1, &readFbo);
+                gl.GenFramebuffers(1, &writeFbo);
+            }
+
+            TextureView* colorView = ToBackend(renderPass->colorAttachments[i].view.Get());
+
+            gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFbo);
+            colorView->BindToFramebuffer(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
+
+            TextureView* resolveView =
+                ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
+            gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, writeFbo);
+            resolveView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0);
+            gl.BlitFramebuffer(0, 0, renderPass->width, renderPass->height, 0, 0, renderPass->width,
+                               renderPass->height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
+            ToBackend(resolveView->GetTexture())->Touch();
+        }
+    }
+
+    gl.DeleteFramebuffers(1, &readFbo);
+    gl.DeleteFramebuffers(1, &writeFbo);
+}
+
+// OpenGL SPEC requires the source/destination region must be a region that is contained
+// within srcImage/dstImage. Here the size of the image refers to the virtual size, while
+// Dawn validates texture copy extent with the physical size, so we need to re-calculate the
+// texture copy extent to ensure it should fit in the virtual size of the subresource.
+Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
+    Extent3D validTextureCopyExtent = copySize;
+    const TextureBase* texture = textureCopy.texture.Get();
+    Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
+    ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+    ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+    if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+        ASSERT(texture->GetFormat().isCompressed);
+        validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+    }
+    if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+        ASSERT(texture->GetFormat().isCompressed);
+        validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
+    }
+
+    return validTextureCopyExtent;
+}
+
+bool TextureFormatIsSnorm(wgpu::TextureFormat format) {
+    return format == wgpu::TextureFormat::RGBA8Snorm || format == wgpu::TextureFormat::RG8Snorm ||
+           format == wgpu::TextureFormat::R8Snorm;
+}
+}  // namespace
+
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+    : CommandBufferBase(encoder, descriptor) {}
+
+MaybeError CommandBuffer::Execute() {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+    auto LazyClearSyncScope = [](const SyncScopeResourceUsage& scope) {
+        for (size_t i = 0; i < scope.textures.size(); i++) {
+            Texture* texture = ToBackend(scope.textures[i]);
+
+            // Clear subresources that are not render attachments. Render attachments will be
+            // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+            // subresource has not been initialized before the render pass.
+            scope.textureUsages[i].Iterate(
+                [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                    if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                        texture->EnsureSubresourceContentInitialized(range);
                     }
-                    auto& src = copy->source;
-                    auto& dst = copy->destination;
+                });
+        }
 
-                    // TODO(crbug.com/dawn/817): add workaround for the case that imageExtentSrc
-                    // is not equal to imageExtentDst. For example when copySize fits in the virtual
-                    // size of the source image but does not fit in the one of the destination
-                    // image.
-                    Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
-                    Texture* srcTexture = ToBackend(src.texture.Get());
-                    Texture* dstTexture = ToBackend(dst.texture.Get());
+        for (BufferBase* bufferBase : scope.buffers) {
+            ToBackend(bufferBase)->EnsureDataInitialized();
+        }
+    };
 
-                    SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
-                    SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+    size_t nextComputePassNumber = 0;
+    size_t nextRenderPassNumber = 0;
 
-                    srcTexture->EnsureSubresourceContentInitialized(srcRange);
-                    if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
-                        dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
-                    } else {
-                        dstTexture->EnsureSubresourceContentInitialized(dstRange);
-                    }
-                    CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(),
-                                     srcTexture->GetGLTarget(), src.mipLevel, src.origin,
-                                     dstTexture->GetHandle(), dstTexture->GetGLTarget(),
-                                     dst.mipLevel, dst.origin, copySize);
-                    ToBackend(dst.texture)->Touch();
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::BeginComputePass: {
+                mCommands.NextCommand<BeginComputePassCmd>();
+                for (const SyncScopeResourceUsage& scope :
+                     GetResourceUsages().computePasses[nextComputePassNumber].dispatchUsages) {
+                    LazyClearSyncScope(scope);
+                }
+                DAWN_TRY(ExecuteComputePass());
+
+                nextComputePassNumber++;
+                break;
+            }
+
+            case Command::BeginRenderPass: {
+                auto* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+                LazyClearSyncScope(GetResourceUsages().renderPasses[nextRenderPassNumber]);
+                LazyClearRenderPassAttachments(cmd);
+                DAWN_TRY(ExecuteRenderPass(cmd));
+
+                nextRenderPassNumber++;
+                break;
+            }
+
+            case Command::CopyBufferToBuffer: {
+                CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                if (copy->size == 0) {
+                    // Skip no-op copies.
                     break;
                 }
 
-                case Command::ClearBuffer: {
-                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
-                    if (cmd->size == 0) {
-                        // Skip no-op fills.
+                ToBackend(copy->source)->EnsureDataInitialized();
+                ToBackend(copy->destination)
+                    ->EnsureDataInitializedAsDestination(copy->destinationOffset, copy->size);
+
+                gl.BindBuffer(GL_PIXEL_PACK_BUFFER, ToBackend(copy->source)->GetHandle());
+                gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, ToBackend(copy->destination)->GetHandle());
+                gl.CopyBufferSubData(GL_PIXEL_PACK_BUFFER, GL_PIXEL_UNPACK_BUFFER,
+                                     copy->sourceOffset, copy->destinationOffset, copy->size);
+
+                gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+                gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+                break;
+            }
+
+            case Command::CopyBufferToTexture: {
+                CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                auto& src = copy->source;
+                auto& dst = copy->destination;
+                Buffer* buffer = ToBackend(src.buffer.Get());
+
+                DAWN_INVALID_IF(
+                    dst.aspect == Aspect::Stencil,
+                    "Copies to stencil textures are unsupported on the OpenGL backend.");
+
+                ASSERT(dst.aspect == Aspect::Color);
+
+                buffer->EnsureDataInitialized();
+                SubresourceRange range = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+                if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                  dst.mipLevel)) {
+                    dst.texture->SetIsSubresourceContentInitialized(true, range);
+                } else {
+                    ToBackend(dst.texture)->EnsureSubresourceContentInitialized(range);
+                }
+
+                gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, buffer->GetHandle());
+
+                TextureDataLayout dataLayout;
+                dataLayout.offset = 0;
+                dataLayout.bytesPerRow = src.bytesPerRow;
+                dataLayout.rowsPerImage = src.rowsPerImage;
+
+                DoTexSubImage(gl, dst, reinterpret_cast<void*>(src.offset), dataLayout,
+                              copy->copySize);
+                gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+                ToBackend(dst.texture)->Touch();
+                break;
+            }
+
+            case Command::CopyTextureToBuffer: {
+                CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                auto& src = copy->source;
+                auto& dst = copy->destination;
+                auto& copySize = copy->copySize;
+                Texture* texture = ToBackend(src.texture.Get());
+                Buffer* buffer = ToBackend(dst.buffer.Get());
+                const Format& formatInfo = texture->GetFormat();
+                const GLFormat& format = texture->GetGLFormat();
+                GLenum target = texture->GetGLTarget();
+
+                // TODO(crbug.com/dawn/667): Implement validation in WebGPU/Compat to
+                // avoid this codepath. OpenGL does not support readback from non-renderable
+                // texture formats.
+                if (formatInfo.isCompressed ||
+                    (TextureFormatIsSnorm(formatInfo.format) &&
+                     GetDevice()->IsToggleEnabled(Toggle::DisableSnormRead))) {
+                    UNREACHABLE();
+                }
+
+                buffer->EnsureDataInitializedAsDestination(copy);
+
+                ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+                SubresourceRange subresources = GetSubresourcesAffectedByCopy(src, copy->copySize);
+                texture->EnsureSubresourceContentInitialized(subresources);
+                // The only way to move data from a texture to a buffer in GL is via
+                // glReadPixels with a pack buffer. Create a temporary FBO for the copy.
+                gl.BindTexture(target, texture->GetHandle());
+
+                GLuint readFBO = 0;
+                gl.GenFramebuffers(1, &readFBO);
+                gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+
+                const TexelBlockInfo& blockInfo = formatInfo.GetAspectInfo(src.aspect).block;
+
+                gl.BindBuffer(GL_PIXEL_PACK_BUFFER, buffer->GetHandle());
+                gl.PixelStorei(GL_PACK_ROW_LENGTH, dst.bytesPerRow / blockInfo.byteSize);
+
+                GLenum glAttachment;
+                GLenum glFormat;
+                GLenum glType;
+                switch (src.aspect) {
+                    case Aspect::Color:
+                        glAttachment = GL_COLOR_ATTACHMENT0;
+                        glFormat = format.format;
+                        glType = format.type;
+                        break;
+                    case Aspect::Depth:
+                        glAttachment = GL_DEPTH_ATTACHMENT;
+                        glFormat = GL_DEPTH_COMPONENT;
+                        glType = GL_FLOAT;
+                        break;
+                    case Aspect::Stencil:
+                        glAttachment = GL_STENCIL_ATTACHMENT;
+                        glFormat = GL_STENCIL_INDEX;
+                        glType = GL_UNSIGNED_BYTE;
+                        break;
+
+                    case Aspect::CombinedDepthStencil:
+                    case Aspect::None:
+                    case Aspect::Plane0:
+                    case Aspect::Plane1:
+                        UNREACHABLE();
+                }
+
+                uint8_t* offset = reinterpret_cast<uint8_t*>(static_cast<uintptr_t>(dst.offset));
+                switch (texture->GetDimension()) {
+                    case wgpu::TextureDimension::e2D: {
+                        if (texture->GetArrayLayers() == 1) {
+                            gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, target,
+                                                    texture->GetHandle(), src.mipLevel);
+                            gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+                                          copySize.height, glFormat, glType, offset);
+                            break;
+                        }
+                        // Implementation for 2D array is the same as 3D.
+                        [[fallthrough]];
+                    }
+
+                    case wgpu::TextureDimension::e3D: {
+                        const uint64_t bytesPerImage = dst.bytesPerRow * dst.rowsPerImage;
+                        for (uint32_t z = 0; z < copySize.depthOrArrayLayers; ++z) {
+                            gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment,
+                                                       texture->GetHandle(), src.mipLevel,
+                                                       src.origin.z + z);
+                            gl.ReadPixels(src.origin.x, src.origin.y, copySize.width,
+                                          copySize.height, glFormat, glType, offset);
+
+                            offset += bytesPerImage;
+                        }
                         break;
                     }
-                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
 
-                    bool clearedToZero =
-                        dstBuffer->EnsureDataInitializedAsDestination(cmd->offset, cmd->size);
-
-                    if (!clearedToZero) {
-                        const std::vector<uint8_t> clearValues(cmd->size, 0u);
-                        gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
-                        gl.BufferSubData(GL_ARRAY_BUFFER, cmd->offset, cmd->size,
-                                         clearValues.data());
-                    }
-
-                    break;
+                    case wgpu::TextureDimension::e1D:
+                        UNREACHABLE();
                 }
 
-                case Command::ResolveQuerySet: {
-                    // TODO(crbug.com/dawn/434): Resolve non-precise occlusion query.
-                    SkipCommand(&mCommands, type);
-                    break;
-                }
+                gl.PixelStorei(GL_PACK_ROW_LENGTH, 0);
 
-                case Command::WriteTimestamp: {
-                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
-                }
-
-                case Command::InsertDebugMarker:
-                case Command::PopDebugGroup:
-                case Command::PushDebugGroup: {
-                    // Due to lack of linux driver support for GL_EXT_debug_marker
-                    // extension these functions are skipped.
-                    SkipCommand(&mCommands, type);
-                    break;
-                }
-
-                case Command::WriteBuffer: {
-                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
-                    uint64_t offset = write->offset;
-                    uint64_t size = write->size;
-                    if (size == 0) {
-                        continue;
-                    }
-
-                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
-                    uint8_t* data = mCommands.NextData<uint8_t>(size);
-                    dstBuffer->EnsureDataInitializedAsDestination(offset, size);
-
-                    gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
-                    gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
+                gl.BindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+                gl.DeleteFramebuffers(1, &readFBO);
+                break;
             }
-        }
 
-        return {};
-    }
-
-    MaybeError CommandBuffer::ExecuteComputePass() {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-        ComputePipeline* lastPipeline = nullptr;
-        BindGroupTracker bindGroupTracker = {};
-
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::EndComputePass: {
-                    mCommands.NextCommand<EndComputePassCmd>();
-                    return {};
+            case Command::CopyTextureToTexture: {
+                CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
                 }
+                auto& src = copy->source;
+                auto& dst = copy->destination;
 
-                case Command::Dispatch: {
-                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-                    bindGroupTracker.Apply(gl);
+                // TODO(crbug.com/dawn/817): add workaround for the case that imageExtentSrc
+                // is not equal to imageExtentDst. For example when copySize fits in the virtual
+                // size of the source image but does not fit in the one of the destination
+                // image.
+                Extent3D copySize = ComputeTextureCopyExtent(dst, copy->copySize);
+                Texture* srcTexture = ToBackend(src.texture.Get());
+                Texture* dstTexture = ToBackend(dst.texture.Get());
 
-                    gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
-                    gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
-                    break;
-                }
+                SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+                SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
 
-                case Command::DispatchIndirect: {
-                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
-                    bindGroupTracker.Apply(gl);
-
-                    uint64_t indirectBufferOffset = dispatch->indirectOffset;
-                    Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
-
-                    gl.BindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirectBuffer->GetHandle());
-                    gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
-                    gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
-                    break;
-                }
-
-                case Command::SetComputePipeline: {
-                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
-                    lastPipeline = ToBackend(cmd->pipeline).Get();
-                    lastPipeline->ApplyNow();
-
-                    bindGroupTracker.OnSetPipeline(lastPipeline);
-                    break;
-                }
-
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
-                    uint32_t* dynamicOffsets = nullptr;
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-                    bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
-                                                    cmd->dynamicOffsetCount, dynamicOffsets);
-                    break;
-                }
-
-                case Command::InsertDebugMarker:
-                case Command::PopDebugGroup:
-                case Command::PushDebugGroup: {
-                    // Due to lack of linux driver support for GL_EXT_debug_marker
-                    // extension these functions are skipped.
-                    SkipCommand(&mCommands, type);
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
-                }
-
-                default:
-                    UNREACHABLE();
-            }
-        }
-
-        // EndComputePass should have been called
-        UNREACHABLE();
-    }
-
-    MaybeError CommandBuffer::ExecuteRenderPass(BeginRenderPassCmd* renderPass) {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-        GLuint fbo = 0;
-
-        // Create the framebuffer used for this render pass and calls the correct glDrawBuffers
-        {
-            // TODO(kainino@chromium.org): This is added to possibly work around an issue seen on
-            // Windows/Intel. It should break any feedback loop before the clears, even if there
-            // shouldn't be any negative effects from this. Investigate whether it's actually
-            // needed.
-            gl.BindFramebuffer(GL_READ_FRAMEBUFFER, 0);
-            // TODO(kainino@chromium.org): possible future optimization: create these framebuffers
-            // at Framebuffer build time (or maybe CommandBuffer build time) so they don't have to
-            // be created and destroyed at draw time.
-            gl.GenFramebuffers(1, &fbo);
-            gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
-
-            // Mapping from attachmentSlot to GL framebuffer attachment points. Defaults to zero
-            // (GL_NONE).
-            ityp::array<ColorAttachmentIndex, GLenum, kMaxColorAttachments> drawBuffers = {};
-
-            // Construct GL framebuffer
-
-            ColorAttachmentIndex attachmentCount(uint8_t(0));
-            for (ColorAttachmentIndex i :
-                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                TextureView* textureView = ToBackend(renderPass->colorAttachments[i].view.Get());
-                GLenum glAttachment = GL_COLOR_ATTACHMENT0 + static_cast<uint8_t>(i);
-
-                // Attach color buffers.
-                textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
-                drawBuffers[i] = glAttachment;
-                attachmentCount = i;
-                attachmentCount++;
-            }
-            gl.DrawBuffers(static_cast<uint8_t>(attachmentCount), drawBuffers.data());
-
-            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-                TextureView* textureView = ToBackend(renderPass->depthStencilAttachment.view.Get());
-                const Format& format = textureView->GetTexture()->GetFormat();
-
-                // Attach depth/stencil buffer.
-                GLenum glAttachment = 0;
-                if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
-                    glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
-                } else if (format.aspects == Aspect::Depth) {
-                    glAttachment = GL_DEPTH_ATTACHMENT;
-                } else if (format.aspects == Aspect::Stencil) {
-                    glAttachment = GL_STENCIL_ATTACHMENT;
+                srcTexture->EnsureSubresourceContentInitialized(srcRange);
+                if (IsCompleteSubresourceCopiedTo(dstTexture, copySize, dst.mipLevel)) {
+                    dstTexture->SetIsSubresourceContentInitialized(true, dstRange);
                 } else {
-                    UNREACHABLE();
+                    dstTexture->EnsureSubresourceContentInitialized(dstRange);
                 }
-
-                textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
-            }
-        }
-
-        ASSERT(gl.CheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
-
-        // Set defaults for dynamic state before executing clears and commands.
-        PersistentPipelineState persistentPipelineState;
-        persistentPipelineState.SetDefaultState(gl);
-        gl.BlendColor(0, 0, 0, 0);
-        gl.Viewport(0, 0, renderPass->width, renderPass->height);
-        gl.DepthRangef(0.0, 1.0);
-        gl.Scissor(0, 0, renderPass->width, renderPass->height);
-
-        // Clear framebuffer attachments as needed
-        {
-            for (ColorAttachmentIndex index :
-                 IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                uint8_t i = static_cast<uint8_t>(index);
-                auto* attachmentInfo = &renderPass->colorAttachments[index];
-
-                // Load op - color
-                if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
-                    gl.ColorMask(true, true, true, true);
-
-                    wgpu::TextureComponentType baseType =
-                        attachmentInfo->view->GetFormat().GetAspectInfo(Aspect::Color).baseType;
-                    switch (baseType) {
-                        case wgpu::TextureComponentType::Float: {
-                            const std::array<float, 4> appliedClearColor =
-                                ConvertToFloatColor(attachmentInfo->clearColor);
-                            gl.ClearBufferfv(GL_COLOR, i, appliedClearColor.data());
-                            break;
-                        }
-                        case wgpu::TextureComponentType::Uint: {
-                            const std::array<uint32_t, 4> appliedClearColor =
-                                ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
-                            gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
-                            break;
-                        }
-                        case wgpu::TextureComponentType::Sint: {
-                            const std::array<int32_t, 4> appliedClearColor =
-                                ConvertToSignedIntegerColor(attachmentInfo->clearColor);
-                            gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
-                            break;
-                        }
-
-                        case wgpu::TextureComponentType::DepthComparison:
-                            UNREACHABLE();
-                    }
-                }
-
-                if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
-                    // TODO(natlee@microsoft.com): call glDiscard to do optimization
-                }
+                CopyImageSubData(gl, src.aspect, srcTexture->GetHandle(), srcTexture->GetGLTarget(),
+                                 src.mipLevel, src.origin, dstTexture->GetHandle(),
+                                 dstTexture->GetGLTarget(), dst.mipLevel, dst.origin, copySize);
+                ToBackend(dst.texture)->Touch();
+                break;
             }
 
-            if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-                auto* attachmentInfo = &renderPass->depthStencilAttachment;
-                const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
-
-                // Load op - depth/stencil
-                bool doDepthClear = attachmentFormat.HasDepth() &&
-                                    (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
-                bool doStencilClear = attachmentFormat.HasStencil() &&
-                                      (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
-
-                if (doDepthClear) {
-                    gl.DepthMask(GL_TRUE);
-                }
-                if (doStencilClear) {
-                    gl.StencilMask(GetStencilMaskFromStencilFormat(attachmentFormat.format));
-                }
-
-                if (doDepthClear && doStencilClear) {
-                    gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
-                                     attachmentInfo->clearStencil);
-                } else if (doDepthClear) {
-                    gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
-                } else if (doStencilClear) {
-                    const GLint clearStencil = attachmentInfo->clearStencil;
-                    gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
-                }
-            }
-        }
-
-        RenderPipeline* lastPipeline = nullptr;
-        uint64_t indexBufferBaseOffset = 0;
-        GLenum indexBufferFormat;
-        uint32_t indexFormatSize;
-
-        VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
-        BindGroupTracker bindGroupTracker = {};
-
-        auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
-            switch (type) {
-                case Command::Draw: {
-                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
-                    vertexStateBufferBindingTracker.Apply(gl);
-                    bindGroupTracker.Apply(gl);
-
-                    if (draw->firstInstance > 0) {
-                        gl.DrawArraysInstancedBaseInstance(
-                            lastPipeline->GetGLPrimitiveTopology(), draw->firstVertex,
-                            draw->vertexCount, draw->instanceCount, draw->firstInstance);
-                    } else {
-                        // This branch is only needed on OpenGL < 4.2
-                        gl.DrawArraysInstanced(lastPipeline->GetGLPrimitiveTopology(),
-                                               draw->firstVertex, draw->vertexCount,
-                                               draw->instanceCount);
-                    }
+            case Command::ClearBuffer: {
+                ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                if (cmd->size == 0) {
+                    // Skip no-op fills.
                     break;
                 }
+                Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
 
-                case Command::DrawIndexed: {
-                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
-                    vertexStateBufferBindingTracker.Apply(gl);
-                    bindGroupTracker.Apply(gl);
+                bool clearedToZero =
+                    dstBuffer->EnsureDataInitializedAsDestination(cmd->offset, cmd->size);
 
-                    if (draw->firstInstance > 0) {
-                        gl.DrawElementsInstancedBaseVertexBaseInstance(
+                if (!clearedToZero) {
+                    const std::vector<uint8_t> clearValues(cmd->size, 0u);
+                    gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+                    gl.BufferSubData(GL_ARRAY_BUFFER, cmd->offset, cmd->size, clearValues.data());
+                }
+
+                break;
+            }
+
+            case Command::ResolveQuerySet: {
+                // TODO(crbug.com/dawn/434): Resolve non-precise occlusion query.
+                SkipCommand(&mCommands, type);
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+            }
+
+            case Command::InsertDebugMarker:
+            case Command::PopDebugGroup:
+            case Command::PushDebugGroup: {
+                // Due to lack of linux driver support for GL_EXT_debug_marker
+                // extension these functions are skipped.
+                SkipCommand(&mCommands, type);
+                break;
+            }
+
+            case Command::WriteBuffer: {
+                WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                uint64_t offset = write->offset;
+                uint64_t size = write->size;
+                if (size == 0) {
+                    continue;
+                }
+
+                Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                uint8_t* data = mCommands.NextData<uint8_t>(size);
+                dstBuffer->EnsureDataInitializedAsDestination(offset, size);
+
+                gl.BindBuffer(GL_ARRAY_BUFFER, dstBuffer->GetHandle());
+                gl.BufferSubData(GL_ARRAY_BUFFER, offset, size, data);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    return {};
+}
+
+MaybeError CommandBuffer::ExecuteComputePass() {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    ComputePipeline* lastPipeline = nullptr;
+    BindGroupTracker bindGroupTracker = {};
+
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::EndComputePass: {
+                mCommands.NextCommand<EndComputePassCmd>();
+                return {};
+            }
+
+            case Command::Dispatch: {
+                DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+                bindGroupTracker.Apply(gl);
+
+                gl.DispatchCompute(dispatch->x, dispatch->y, dispatch->z);
+                gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+                break;
+            }
+
+            case Command::DispatchIndirect: {
+                DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+                bindGroupTracker.Apply(gl);
+
+                uint64_t indirectBufferOffset = dispatch->indirectOffset;
+                Buffer* indirectBuffer = ToBackend(dispatch->indirectBuffer.Get());
+
+                gl.BindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                gl.DispatchComputeIndirect(static_cast<GLintptr>(indirectBufferOffset));
+                gl.MemoryBarrier(GL_ALL_BARRIER_BITS);
+                break;
+            }
+
+            case Command::SetComputePipeline: {
+                SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                lastPipeline = ToBackend(cmd->pipeline).Get();
+                lastPipeline->ApplyNow();
+
+                bindGroupTracker.OnSetPipeline(lastPipeline);
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+                uint32_t* dynamicOffsets = nullptr;
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                }
+                bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+                                                cmd->dynamicOffsetCount, dynamicOffsets);
+                break;
+            }
+
+            case Command::InsertDebugMarker:
+            case Command::PopDebugGroup:
+            case Command::PushDebugGroup: {
+                // Due to lack of linux driver support for GL_EXT_debug_marker
+                // extension these functions are skipped.
+                SkipCommand(&mCommands, type);
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+            }
+
+            default:
+                UNREACHABLE();
+        }
+    }
+
+    // EndComputePass should have been called
+    UNREACHABLE();
+}
+
+MaybeError CommandBuffer::ExecuteRenderPass(BeginRenderPassCmd* renderPass) {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    GLuint fbo = 0;
+
+    // Create the framebuffer used for this render pass and calls the correct glDrawBuffers
+    {
+        // TODO(kainino@chromium.org): This is added to possibly work around an issue seen on
+        // Windows/Intel. It should break any feedback loop before the clears, even if there
+        // shouldn't be any negative effects from this. Investigate whether it's actually
+        // needed.
+        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, 0);
+        // TODO(kainino@chromium.org): possible future optimization: create these framebuffers
+        // at Framebuffer build time (or maybe CommandBuffer build time) so they don't have to
+        // be created and destroyed at draw time.
+        gl.GenFramebuffers(1, &fbo);
+        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
+
+        // Mapping from attachmentSlot to GL framebuffer attachment points. Defaults to zero
+        // (GL_NONE).
+        ityp::array<ColorAttachmentIndex, GLenum, kMaxColorAttachments> drawBuffers = {};
+
+        // Construct GL framebuffer
+
+        ColorAttachmentIndex attachmentCount(uint8_t(0));
+        for (ColorAttachmentIndex i :
+             IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+            TextureView* textureView = ToBackend(renderPass->colorAttachments[i].view.Get());
+            GLenum glAttachment = GL_COLOR_ATTACHMENT0 + static_cast<uint8_t>(i);
+
+            // Attach color buffers.
+            textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
+            drawBuffers[i] = glAttachment;
+            attachmentCount = i;
+            attachmentCount++;
+        }
+        gl.DrawBuffers(static_cast<uint8_t>(attachmentCount), drawBuffers.data());
+
+        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+            TextureView* textureView = ToBackend(renderPass->depthStencilAttachment.view.Get());
+            const Format& format = textureView->GetTexture()->GetFormat();
+
+            // Attach depth/stencil buffer.
+            GLenum glAttachment = 0;
+            if (format.aspects == (Aspect::Depth | Aspect::Stencil)) {
+                glAttachment = GL_DEPTH_STENCIL_ATTACHMENT;
+            } else if (format.aspects == Aspect::Depth) {
+                glAttachment = GL_DEPTH_ATTACHMENT;
+            } else if (format.aspects == Aspect::Stencil) {
+                glAttachment = GL_STENCIL_ATTACHMENT;
+            } else {
+                UNREACHABLE();
+            }
+
+            textureView->BindToFramebuffer(GL_DRAW_FRAMEBUFFER, glAttachment);
+        }
+    }
+
+    ASSERT(gl.CheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
+
+    // Set defaults for dynamic state before executing clears and commands.
+    PersistentPipelineState persistentPipelineState;
+    persistentPipelineState.SetDefaultState(gl);
+    gl.BlendColor(0, 0, 0, 0);
+    gl.Viewport(0, 0, renderPass->width, renderPass->height);
+    gl.DepthRangef(0.0, 1.0);
+    gl.Scissor(0, 0, renderPass->width, renderPass->height);
+
+    // Clear framebuffer attachments as needed
+    {
+        for (ColorAttachmentIndex index :
+             IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+            uint8_t i = static_cast<uint8_t>(index);
+            auto* attachmentInfo = &renderPass->colorAttachments[index];
+
+            // Load op - color
+            if (attachmentInfo->loadOp == wgpu::LoadOp::Clear) {
+                gl.ColorMask(true, true, true, true);
+
+                wgpu::TextureComponentType baseType =
+                    attachmentInfo->view->GetFormat().GetAspectInfo(Aspect::Color).baseType;
+                switch (baseType) {
+                    case wgpu::TextureComponentType::Float: {
+                        const std::array<float, 4> appliedClearColor =
+                            ConvertToFloatColor(attachmentInfo->clearColor);
+                        gl.ClearBufferfv(GL_COLOR, i, appliedClearColor.data());
+                        break;
+                    }
+                    case wgpu::TextureComponentType::Uint: {
+                        const std::array<uint32_t, 4> appliedClearColor =
+                            ConvertToUnsignedIntegerColor(attachmentInfo->clearColor);
+                        gl.ClearBufferuiv(GL_COLOR, i, appliedClearColor.data());
+                        break;
+                    }
+                    case wgpu::TextureComponentType::Sint: {
+                        const std::array<int32_t, 4> appliedClearColor =
+                            ConvertToSignedIntegerColor(attachmentInfo->clearColor);
+                        gl.ClearBufferiv(GL_COLOR, i, appliedClearColor.data());
+                        break;
+                    }
+
+                    case wgpu::TextureComponentType::DepthComparison:
+                        UNREACHABLE();
+                }
+            }
+
+            if (attachmentInfo->storeOp == wgpu::StoreOp::Discard) {
+                // TODO(natlee@microsoft.com): call glDiscard to do optimization
+            }
+        }
+
+        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+            auto* attachmentInfo = &renderPass->depthStencilAttachment;
+            const Format& attachmentFormat = attachmentInfo->view->GetTexture()->GetFormat();
+
+            // Load op - depth/stencil
+            bool doDepthClear =
+                attachmentFormat.HasDepth() && (attachmentInfo->depthLoadOp == wgpu::LoadOp::Clear);
+            bool doStencilClear = attachmentFormat.HasStencil() &&
+                                  (attachmentInfo->stencilLoadOp == wgpu::LoadOp::Clear);
+
+            if (doDepthClear) {
+                gl.DepthMask(GL_TRUE);
+            }
+            if (doStencilClear) {
+                gl.StencilMask(GetStencilMaskFromStencilFormat(attachmentFormat.format));
+            }
+
+            if (doDepthClear && doStencilClear) {
+                gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, attachmentInfo->clearDepth,
+                                 attachmentInfo->clearStencil);
+            } else if (doDepthClear) {
+                gl.ClearBufferfv(GL_DEPTH, 0, &attachmentInfo->clearDepth);
+            } else if (doStencilClear) {
+                const GLint clearStencil = attachmentInfo->clearStencil;
+                gl.ClearBufferiv(GL_STENCIL, 0, &clearStencil);
+            }
+        }
+    }
+
+    RenderPipeline* lastPipeline = nullptr;
+    uint64_t indexBufferBaseOffset = 0;
+    GLenum indexBufferFormat;
+    uint32_t indexFormatSize;
+
+    VertexStateBufferBindingTracker vertexStateBufferBindingTracker;
+    BindGroupTracker bindGroupTracker = {};
+
+    auto DoRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+        switch (type) {
+            case Command::Draw: {
+                DrawCmd* draw = iter->NextCommand<DrawCmd>();
+                vertexStateBufferBindingTracker.Apply(gl);
+                bindGroupTracker.Apply(gl);
+
+                if (draw->firstInstance > 0) {
+                    gl.DrawArraysInstancedBaseInstance(lastPipeline->GetGLPrimitiveTopology(),
+                                                       draw->firstVertex, draw->vertexCount,
+                                                       draw->instanceCount, draw->firstInstance);
+                } else {
+                    // This branch is only needed on OpenGL < 4.2
+                    gl.DrawArraysInstanced(lastPipeline->GetGLPrimitiveTopology(),
+                                           draw->firstVertex, draw->vertexCount,
+                                           draw->instanceCount);
+                }
+                break;
+            }
+
+            case Command::DrawIndexed: {
+                DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+                vertexStateBufferBindingTracker.Apply(gl);
+                bindGroupTracker.Apply(gl);
+
+                if (draw->firstInstance > 0) {
+                    gl.DrawElementsInstancedBaseVertexBaseInstance(
+                        lastPipeline->GetGLPrimitiveTopology(), draw->indexCount, indexBufferFormat,
+                        reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                indexBufferBaseOffset),
+                        draw->instanceCount, draw->baseVertex, draw->firstInstance);
+                } else {
+                    // This branch is only needed on OpenGL < 4.2; ES < 3.2
+                    if (draw->baseVertex != 0) {
+                        gl.DrawElementsInstancedBaseVertex(
                             lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
                             indexBufferFormat,
                             reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
                                                     indexBufferBaseOffset),
-                            draw->instanceCount, draw->baseVertex, draw->firstInstance);
+                            draw->instanceCount, draw->baseVertex);
                     } else {
-                        // This branch is only needed on OpenGL < 4.2; ES < 3.2
-                        if (draw->baseVertex != 0) {
-                            gl.DrawElementsInstancedBaseVertex(
-                                lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
-                                indexBufferFormat,
-                                reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
-                                                        indexBufferBaseOffset),
-                                draw->instanceCount, draw->baseVertex);
-                        } else {
-                            // This branch is only needed on OpenGL < 3.2; ES < 3.2
-                            gl.DrawElementsInstanced(
-                                lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
-                                indexBufferFormat,
-                                reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
-                                                        indexBufferBaseOffset),
-                                draw->instanceCount);
-                        }
+                        // This branch is only needed on OpenGL < 3.2; ES < 3.2
+                        gl.DrawElementsInstanced(
+                            lastPipeline->GetGLPrimitiveTopology(), draw->indexCount,
+                            indexBufferFormat,
+                            reinterpret_cast<void*>(draw->firstIndex * indexFormatSize +
+                                                    indexBufferBaseOffset),
+                            draw->instanceCount);
                     }
-                    break;
                 }
-
-                case Command::DrawIndirect: {
-                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
-                    vertexStateBufferBindingTracker.Apply(gl);
-                    bindGroupTracker.Apply(gl);
-
-                    uint64_t indirectBufferOffset = draw->indirectOffset;
-                    Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
-
-                    gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
-                    gl.DrawArraysIndirect(
-                        lastPipeline->GetGLPrimitiveTopology(),
-                        reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
-                    break;
-                }
-
-                case Command::DrawIndexedIndirect: {
-                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
-
-                    vertexStateBufferBindingTracker.Apply(gl);
-                    bindGroupTracker.Apply(gl);
-
-                    Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
-                    ASSERT(indirectBuffer != nullptr);
-
-                    gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
-                    gl.DrawElementsIndirect(
-                        lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
-                        reinterpret_cast<void*>(static_cast<intptr_t>(draw->indirectOffset)));
-                    break;
-                }
-
-                case Command::InsertDebugMarker:
-                case Command::PopDebugGroup:
-                case Command::PushDebugGroup: {
-                    // Due to lack of linux driver support for GL_EXT_debug_marker
-                    // extension these functions are skipped.
-                    SkipCommand(iter, type);
-                    break;
-                }
-
-                case Command::SetRenderPipeline: {
-                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
-                    lastPipeline = ToBackend(cmd->pipeline).Get();
-                    lastPipeline->ApplyNow(persistentPipelineState);
-
-                    vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
-                    bindGroupTracker.OnSetPipeline(lastPipeline);
-                    break;
-                }
-
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
-                    uint32_t* dynamicOffsets = nullptr;
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-                    bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
-                                                    cmd->dynamicOffsetCount, dynamicOffsets);
-                    break;
-                }
-
-                case Command::SetIndexBuffer: {
-                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
-
-                    indexBufferBaseOffset = cmd->offset;
-                    indexBufferFormat = IndexFormatType(cmd->format);
-                    indexFormatSize = IndexFormatSize(cmd->format);
-                    vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
-                    break;
-                }
-
-                case Command::SetVertexBuffer: {
-                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
-                    vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
-                                                                      cmd->offset);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-                    break;
+                break;
             }
-        };
 
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::EndRenderPass: {
-                    mCommands.NextCommand<EndRenderPassCmd>();
+            case Command::DrawIndirect: {
+                DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+                vertexStateBufferBindingTracker.Apply(gl);
+                bindGroupTracker.Apply(gl);
 
-                    for (ColorAttachmentIndex i :
-                         IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                        TextureView* textureView =
-                            ToBackend(renderPass->colorAttachments[i].view.Get());
-                        ToBackend(textureView->GetTexture())->Touch();
+                uint64_t indirectBufferOffset = draw->indirectOffset;
+                Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+
+                gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                gl.DrawArraysIndirect(
+                    lastPipeline->GetGLPrimitiveTopology(),
+                    reinterpret_cast<void*>(static_cast<intptr_t>(indirectBufferOffset)));
+                break;
+            }
+
+            case Command::DrawIndexedIndirect: {
+                DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+
+                vertexStateBufferBindingTracker.Apply(gl);
+                bindGroupTracker.Apply(gl);
+
+                Buffer* indirectBuffer = ToBackend(draw->indirectBuffer.Get());
+                ASSERT(indirectBuffer != nullptr);
+
+                gl.BindBuffer(GL_DRAW_INDIRECT_BUFFER, indirectBuffer->GetHandle());
+                gl.DrawElementsIndirect(
+                    lastPipeline->GetGLPrimitiveTopology(), indexBufferFormat,
+                    reinterpret_cast<void*>(static_cast<intptr_t>(draw->indirectOffset)));
+                break;
+            }
+
+            case Command::InsertDebugMarker:
+            case Command::PopDebugGroup:
+            case Command::PushDebugGroup: {
+                // Due to lack of linux driver support for GL_EXT_debug_marker
+                // extension these functions are skipped.
+                SkipCommand(iter, type);
+                break;
+            }
+
+            case Command::SetRenderPipeline: {
+                SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                lastPipeline = ToBackend(cmd->pipeline).Get();
+                lastPipeline->ApplyNow(persistentPipelineState);
+
+                vertexStateBufferBindingTracker.OnSetPipeline(lastPipeline);
+                bindGroupTracker.OnSetPipeline(lastPipeline);
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                uint32_t* dynamicOffsets = nullptr;
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
+                }
+                bindGroupTracker.OnSetBindGroup(cmd->index, cmd->group.Get(),
+                                                cmd->dynamicOffsetCount, dynamicOffsets);
+                break;
+            }
+
+            case Command::SetIndexBuffer: {
+                SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+
+                indexBufferBaseOffset = cmd->offset;
+                indexBufferFormat = IndexFormatType(cmd->format);
+                indexFormatSize = IndexFormatSize(cmd->format);
+                vertexStateBufferBindingTracker.OnSetIndexBuffer(cmd->buffer.Get());
+                break;
+            }
+
+            case Command::SetVertexBuffer: {
+                SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+                vertexStateBufferBindingTracker.OnSetVertexBuffer(cmd->slot, cmd->buffer.Get(),
+                                                                  cmd->offset);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+    };
+
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::EndRenderPass: {
+                mCommands.NextCommand<EndRenderPassCmd>();
+
+                for (ColorAttachmentIndex i :
+                     IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+                    TextureView* textureView =
+                        ToBackend(renderPass->colorAttachments[i].view.Get());
+                    ToBackend(textureView->GetTexture())->Touch();
+                }
+                if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+                    TextureView* textureView =
+                        ToBackend(renderPass->depthStencilAttachment.view.Get());
+                    ToBackend(textureView->GetTexture())->Touch();
+                }
+                if (renderPass->attachmentState->GetSampleCount() > 1) {
+                    ResolveMultisampledRenderTargets(gl, renderPass);
+                }
+                gl.DeleteFramebuffers(1, &fbo);
+                return {};
+            }
+
+            case Command::SetStencilReference: {
+                SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                persistentPipelineState.SetStencilReference(gl, cmd->reference);
+                break;
+            }
+
+            case Command::SetViewport: {
+                SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                if (gl.IsAtLeastGL(4, 1)) {
+                    gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
+                } else {
+                    // Floating-point viewport coords are unsupported on OpenGL ES, but
+                    // truncation is ok because other APIs do not guarantee subpixel precision
+                    // either.
+                    gl.Viewport(static_cast<int>(cmd->x), static_cast<int>(cmd->y),
+                                static_cast<int>(cmd->width), static_cast<int>(cmd->height));
+                }
+                gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
+                break;
+            }
+
+            case Command::SetScissorRect: {
+                SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
+                break;
+            }
+
+            case Command::SetBlendConstant: {
+                SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
+                gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
+                break;
+            }
+
+            case Command::ExecuteBundles: {
+                ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                for (uint32_t i = 0; i < cmd->count; ++i) {
+                    CommandIterator* iter = bundles[i]->GetCommands();
+                    iter->Reset();
+                    while (iter->NextCommandId(&type)) {
+                        DoRenderBundleCommand(iter, type);
                     }
-                    if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-                        TextureView* textureView =
-                            ToBackend(renderPass->depthStencilAttachment.view.Get());
-                        ToBackend(textureView->GetTexture())->Touch();
+                }
+                break;
+            }
+
+            case Command::BeginOcclusionQuery: {
+                return DAWN_UNIMPLEMENTED_ERROR("BeginOcclusionQuery unimplemented.");
+            }
+
+            case Command::EndOcclusionQuery: {
+                return DAWN_UNIMPLEMENTED_ERROR("EndOcclusionQuery unimplemented.");
+            }
+
+            case Command::WriteTimestamp:
+                return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
+
+            default: {
+                DoRenderBundleCommand(&mCommands, type);
+                break;
+            }
+        }
+    }
+
+    // EndRenderPass should have been called
+    UNREACHABLE();
+}
+
+void DoTexSubImage(const OpenGLFunctions& gl,
+                   const TextureCopy& destination,
+                   const void* data,
+                   const TextureDataLayout& dataLayout,
+                   const Extent3D& copySize) {
+    Texture* texture = ToBackend(destination.texture.Get());
+    ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
+
+    const GLFormat& format = texture->GetGLFormat();
+    GLenum target = texture->GetGLTarget();
+    data = static_cast<const uint8_t*>(data) + dataLayout.offset;
+    gl.ActiveTexture(GL_TEXTURE0);
+    gl.BindTexture(target, texture->GetHandle());
+    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(destination.aspect).block;
+
+    uint32_t x = destination.origin.x;
+    uint32_t y = destination.origin.y;
+    uint32_t z = destination.origin.z;
+    if (texture->GetFormat().isCompressed) {
+        size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
+        Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
+        uint32_t width = std::min(copySize.width, virtSize.width - x);
+
+        // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
+        // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to workaround
+        // this limitation by copying the compressed texture data once per row.
+        // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
+        // Buffer Objects" for more details. For Desktop GL, we use row-by-row
+        // copies only for uploads where bytesPerRow is not a multiple of byteSize.
+        if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
+            size_t imageSize =
+                rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
+
+            uint32_t height = std::min(copySize.height, virtSize.height - y);
+
+            gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+                           dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
+
+            if (texture->GetArrayLayers() == 1 &&
+                texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
+                                           format.internalFormat, imageSize, data);
+            } else {
+                gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, dataLayout.rowsPerImage * blockInfo.height);
+                gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+                                           copySize.depthOrArrayLayers, format.internalFormat,
+                                           imageSize, data);
+                gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+            }
+
+            gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
+            gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
+        } else {
+            if (texture->GetArrayLayers() == 1 &&
+                texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                const uint8_t* d = static_cast<const uint8_t*>(data);
+
+                for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
+                    uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+                    gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
+                                               format.internalFormat, rowSize, d);
+                    d += dataLayout.bytesPerRow;
+                }
+            } else {
+                const uint8_t* slice = static_cast<const uint8_t*>(data);
+
+                for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+                    const uint8_t* d = slice;
+
+                    for (y = destination.origin.y; y < destination.origin.y + copySize.height;
+                         y += blockInfo.height) {
+                        uint32_t height = std::min(blockInfo.height, virtSize.height - y);
+                        gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
+                                                   height, 1, format.internalFormat, rowSize, d);
+                        d += dataLayout.bytesPerRow;
                     }
-                    if (renderPass->attachmentState->GetSampleCount() > 1) {
-                        ResolveMultisampledRenderTargets(gl, renderPass);
-                    }
-                    gl.DeleteFramebuffers(1, &fbo);
-                    return {};
-                }
 
-                case Command::SetStencilReference: {
-                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
-                    persistentPipelineState.SetStencilReference(gl, cmd->reference);
-                    break;
-                }
-
-                case Command::SetViewport: {
-                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
-                    if (gl.IsAtLeastGL(4, 1)) {
-                        gl.ViewportIndexedf(0, cmd->x, cmd->y, cmd->width, cmd->height);
-                    } else {
-                        // Floating-point viewport coords are unsupported on OpenGL ES, but
-                        // truncation is ok because other APIs do not guarantee subpixel precision
-                        // either.
-                        gl.Viewport(static_cast<int>(cmd->x), static_cast<int>(cmd->y),
-                                    static_cast<int>(cmd->width), static_cast<int>(cmd->height));
-                    }
-                    gl.DepthRangef(cmd->minDepth, cmd->maxDepth);
-                    break;
-                }
-
-                case Command::SetScissorRect: {
-                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
-                    gl.Scissor(cmd->x, cmd->y, cmd->width, cmd->height);
-                    break;
-                }
-
-                case Command::SetBlendConstant: {
-                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
-                    const std::array<float, 4> blendColor = ConvertToFloatColor(cmd->color);
-                    gl.BlendColor(blendColor[0], blendColor[1], blendColor[2], blendColor[3]);
-                    break;
-                }
-
-                case Command::ExecuteBundles: {
-                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
-                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
-
-                    for (uint32_t i = 0; i < cmd->count; ++i) {
-                        CommandIterator* iter = bundles[i]->GetCommands();
-                        iter->Reset();
-                        while (iter->NextCommandId(&type)) {
-                            DoRenderBundleCommand(iter, type);
-                        }
-                    }
-                    break;
-                }
-
-                case Command::BeginOcclusionQuery: {
-                    return DAWN_UNIMPLEMENTED_ERROR("BeginOcclusionQuery unimplemented.");
-                }
-
-                case Command::EndOcclusionQuery: {
-                    return DAWN_UNIMPLEMENTED_ERROR("EndOcclusionQuery unimplemented.");
-                }
-
-                case Command::WriteTimestamp:
-                    return DAWN_UNIMPLEMENTED_ERROR("WriteTimestamp unimplemented");
-
-                default: {
-                    DoRenderBundleCommand(&mCommands, type);
-                    break;
+                    slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
                 }
             }
         }
-
-        // EndRenderPass should have been called
-        UNREACHABLE();
-    }
-
-    void DoTexSubImage(const OpenGLFunctions& gl,
-                       const TextureCopy& destination,
-                       const void* data,
-                       const TextureDataLayout& dataLayout,
-                       const Extent3D& copySize) {
-        Texture* texture = ToBackend(destination.texture.Get());
-        ASSERT(texture->GetDimension() != wgpu::TextureDimension::e1D);
-
-        const GLFormat& format = texture->GetGLFormat();
-        GLenum target = texture->GetGLTarget();
-        data = static_cast<const uint8_t*>(data) + dataLayout.offset;
-        gl.ActiveTexture(GL_TEXTURE0);
-        gl.BindTexture(target, texture->GetHandle());
-        const TexelBlockInfo& blockInfo =
-            texture->GetFormat().GetAspectInfo(destination.aspect).block;
-
-        uint32_t x = destination.origin.x;
-        uint32_t y = destination.origin.y;
-        uint32_t z = destination.origin.z;
-        if (texture->GetFormat().isCompressed) {
-            size_t rowSize = copySize.width / blockInfo.width * blockInfo.byteSize;
-            Extent3D virtSize = texture->GetMipLevelVirtualSize(destination.mipLevel);
-            uint32_t width = std::min(copySize.width, virtSize.width - x);
-
-            // In GLES glPixelStorei() doesn't affect CompressedTexSubImage*D() and
-            // GL_UNPACK_COMPRESSED_BLOCK_* isn't defined, so we have to workaround
-            // this limitation by copying the compressed texture data once per row.
-            // See OpenGL ES 3.2 SPEC Chapter 8.4.1, "Pixel Storage Modes and Pixel
-            // Buffer Objects" for more details. For Desktop GL, we use row-by-row
-            // copies only for uploads where bytesPerRow is not a multiple of byteSize.
-            if (dataLayout.bytesPerRow % blockInfo.byteSize == 0 && gl.GetVersion().IsDesktop()) {
-                size_t imageSize =
-                    rowSize * (copySize.height / blockInfo.height) * copySize.depthOrArrayLayers;
-
-                uint32_t height = std::min(copySize.height, virtSize.height - y);
-
-                gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
-                               dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, blockInfo.byteSize);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, blockInfo.width);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, blockInfo.height);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1);
-
-                if (texture->GetArrayLayers() == 1 &&
-                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
-                    gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width, height,
-                                               format.internalFormat, imageSize, data);
-                } else {
-                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
-                                   dataLayout.rowsPerImage * blockInfo.height);
-                    gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
-                                               copySize.depthOrArrayLayers, format.internalFormat,
-                                               imageSize, data);
-                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
-                }
-
-                gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_SIZE, 0);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 0);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 0);
-                gl.PixelStorei(GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 0);
+    } else {
+        uint32_t width = copySize.width;
+        uint32_t height = copySize.height;
+        if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
+            gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
+                           dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
+            if (texture->GetArrayLayers() == 1 &&
+                texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height, format.format,
+                                 format.type, data);
             } else {
-                if (texture->GetArrayLayers() == 1 &&
-                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
-                    const uint8_t* d = static_cast<const uint8_t*>(data);
-
-                    for (; y < destination.origin.y + copySize.height; y += blockInfo.height) {
-                        uint32_t height = std::min(blockInfo.height, virtSize.height - y);
-                        gl.CompressedTexSubImage2D(target, destination.mipLevel, x, y, width,
-                                                   height, format.internalFormat, rowSize, d);
-                        d += dataLayout.bytesPerRow;
-                    }
-                } else {
-                    const uint8_t* slice = static_cast<const uint8_t*>(data);
-
-                    for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
-                        const uint8_t* d = slice;
-
-                        for (y = destination.origin.y; y < destination.origin.y + copySize.height;
-                             y += blockInfo.height) {
-                            uint32_t height = std::min(blockInfo.height, virtSize.height - y);
-                            gl.CompressedTexSubImage3D(target, destination.mipLevel, x, y, z, width,
-                                                       height, 1, format.internalFormat, rowSize,
-                                                       d);
-                            d += dataLayout.bytesPerRow;
-                        }
-
-                        slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
-                    }
-                }
+                gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, dataLayout.rowsPerImage * blockInfo.height);
+                gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
+                                 copySize.depthOrArrayLayers, format.format, format.type, data);
+                gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
             }
+            gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
         } else {
-            uint32_t width = copySize.width;
-            uint32_t height = copySize.height;
-            if (dataLayout.bytesPerRow % blockInfo.byteSize == 0) {
-                gl.PixelStorei(GL_UNPACK_ROW_LENGTH,
-                               dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width);
-                if (texture->GetArrayLayers() == 1 &&
-                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
-                    gl.TexSubImage2D(target, destination.mipLevel, x, y, width, height,
-                                     format.format, format.type, data);
-                } else {
-                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT,
-                                   dataLayout.rowsPerImage * blockInfo.height);
-                    gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, height,
-                                     copySize.depthOrArrayLayers, format.format, format.type, data);
-                    gl.PixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0);
+            if (texture->GetArrayLayers() == 1 &&
+                texture->GetDimension() == wgpu::TextureDimension::e2D) {
+                const uint8_t* d = static_cast<const uint8_t*>(data);
+                for (; y < destination.origin.y + height; ++y) {
+                    gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1, format.format,
+                                     format.type, d);
+                    d += dataLayout.bytesPerRow;
                 }
-                gl.PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
             } else {
-                if (texture->GetArrayLayers() == 1 &&
-                    texture->GetDimension() == wgpu::TextureDimension::e2D) {
-                    const uint8_t* d = static_cast<const uint8_t*>(data);
-                    for (; y < destination.origin.y + height; ++y) {
-                        gl.TexSubImage2D(target, destination.mipLevel, x, y, width, 1,
+                const uint8_t* slice = static_cast<const uint8_t*>(data);
+                for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
+                    const uint8_t* d = slice;
+                    for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
+                        gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
                                          format.format, format.type, d);
                         d += dataLayout.bytesPerRow;
                     }
-                } else {
-                    const uint8_t* slice = static_cast<const uint8_t*>(data);
-                    for (; z < destination.origin.z + copySize.depthOrArrayLayers; ++z) {
-                        const uint8_t* d = slice;
-                        for (y = destination.origin.y; y < destination.origin.y + height; ++y) {
-                            gl.TexSubImage3D(target, destination.mipLevel, x, y, z, width, 1, 1,
-                                             format.format, format.type, d);
-                            d += dataLayout.bytesPerRow;
-                        }
-                        slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
-                    }
+                    slice += dataLayout.rowsPerImage * dataLayout.bytesPerRow;
                 }
             }
         }
     }
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/CommandBufferGL.h b/src/dawn/native/opengl/CommandBufferGL.h
index 4b3da13..d270377 100644
--- a/src/dawn/native/opengl/CommandBufferGL.h
+++ b/src/dawn/native/opengl/CommandBufferGL.h
@@ -18,32 +18,32 @@
 #include "dawn/native/CommandBuffer.h"
 
 namespace dawn::native {
-    struct BeginRenderPassCmd;
+struct BeginRenderPassCmd;
 }  // namespace dawn::native
 
 namespace dawn::native::opengl {
 
-    class Device;
-    struct OpenGLFunctions;
+class Device;
+struct OpenGLFunctions;
 
-    class CommandBuffer final : public CommandBufferBase {
-      public:
-        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+class CommandBuffer final : public CommandBufferBase {
+  public:
+    CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
 
-        MaybeError Execute();
+    MaybeError Execute();
 
-      private:
-        MaybeError ExecuteComputePass();
-        MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
-    };
+  private:
+    MaybeError ExecuteComputePass();
+    MaybeError ExecuteRenderPass(BeginRenderPassCmd* renderPass);
+};
 
-    // Like glTexSubImage*, the "data" argument is either a pointer to image data or
-    // an offset if a PBO is bound.
-    void DoTexSubImage(const OpenGLFunctions& gl,
-                       const TextureCopy& destination,
-                       const void* data,
-                       const TextureDataLayout& dataLayout,
-                       const Extent3D& copySize);
+// Like glTexSubImage*, the "data" argument is either a pointer to image data or
+// an offset if a PBO is bound.
+void DoTexSubImage(const OpenGLFunctions& gl,
+                   const TextureCopy& destination,
+                   const void* data,
+                   const TextureDataLayout& dataLayout,
+                   const Extent3D& copySize);
 }  // namespace dawn::native::opengl
 
 #endif  // SRC_DAWN_NATIVE_OPENGL_COMMANDBUFFERGL_H_
diff --git a/src/dawn/native/opengl/ComputePipelineGL.cpp b/src/dawn/native/opengl/ComputePipelineGL.cpp
index b535411..35d2abd 100644
--- a/src/dawn/native/opengl/ComputePipelineGL.cpp
+++ b/src/dawn/native/opengl/ComputePipelineGL.cpp
@@ -18,28 +18,27 @@
 
 namespace dawn::native::opengl {
 
-    // static
-    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
-        Device* device,
-        const ComputePipelineDescriptor* descriptor) {
-        return AcquireRef(new ComputePipeline(device, descriptor));
-    }
+// static
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+    Device* device,
+    const ComputePipelineDescriptor* descriptor) {
+    return AcquireRef(new ComputePipeline(device, descriptor));
+}
 
-    ComputePipeline::~ComputePipeline() = default;
+ComputePipeline::~ComputePipeline() = default;
 
-    void ComputePipeline::DestroyImpl() {
-        ComputePipelineBase::DestroyImpl();
-        DeleteProgram(ToBackend(GetDevice())->gl);
-    }
+void ComputePipeline::DestroyImpl() {
+    ComputePipelineBase::DestroyImpl();
+    DeleteProgram(ToBackend(GetDevice())->gl);
+}
 
-    MaybeError ComputePipeline::Initialize() {
-        DAWN_TRY(
-            InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
-        return {};
-    }
+MaybeError ComputePipeline::Initialize() {
+    DAWN_TRY(InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
+    return {};
+}
 
-    void ComputePipeline::ApplyNow() {
-        PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
-    }
+void ComputePipeline::ApplyNow() {
+    PipelineGL::ApplyNow(ToBackend(GetDevice())->gl);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/ComputePipelineGL.h b/src/dawn/native/opengl/ComputePipelineGL.h
index 00a3ded..b90bb08 100644
--- a/src/dawn/native/opengl/ComputePipelineGL.h
+++ b/src/dawn/native/opengl/ComputePipelineGL.h
@@ -23,23 +23,22 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
-      public:
-        static Ref<ComputePipeline> CreateUninitialized(
-            Device* device,
-            const ComputePipelineDescriptor* descriptor);
+class ComputePipeline final : public ComputePipelineBase, public PipelineGL {
+  public:
+    static Ref<ComputePipeline> CreateUninitialized(Device* device,
+                                                    const ComputePipelineDescriptor* descriptor);
 
-        void ApplyNow();
+    void ApplyNow();
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-      private:
-        using ComputePipelineBase::ComputePipelineBase;
-        ~ComputePipeline() override;
-        void DestroyImpl() override;
-    };
+  private:
+    using ComputePipelineBase::ComputePipelineBase;
+    ~ComputePipeline() override;
+    void DestroyImpl() override;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/DeviceGL.cpp b/src/dawn/native/opengl/DeviceGL.cpp
index c520ab1..0db564b 100644
--- a/src/dawn/native/opengl/DeviceGL.cpp
+++ b/src/dawn/native/opengl/DeviceGL.cpp
@@ -34,305 +34,302 @@
 
 namespace dawn::native::opengl {
 
-    // static
-    ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
-                                              const DeviceDescriptor* descriptor,
-                                              const OpenGLFunctions& functions) {
-        Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
-        DAWN_TRY(device->Initialize(descriptor));
-        return device;
+// static
+ResultOrError<Ref<Device>> Device::Create(AdapterBase* adapter,
+                                          const DeviceDescriptor* descriptor,
+                                          const OpenGLFunctions& functions) {
+    Ref<Device> device = AcquireRef(new Device(adapter, descriptor, functions));
+    DAWN_TRY(device->Initialize(descriptor));
+    return device;
+}
+
+Device::Device(AdapterBase* adapter,
+               const DeviceDescriptor* descriptor,
+               const OpenGLFunctions& functions)
+    : DeviceBase(adapter, descriptor), gl(functions) {}
+
+Device::~Device() {
+    Destroy();
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+    InitTogglesFromDriver();
+    mFormatTable = BuildGLFormatTable(GetBGRAInternalFormat());
+
+    return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
+}
+
+void Device::InitTogglesFromDriver() {
+    bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
+
+    bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
+
+    // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
+    bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
+
+    bool supportsSnormRead =
+        gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
+
+    bool supportsDepthRead = gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth");
+
+    bool supportsStencilRead =
+        gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_stencil");
+
+    bool supportsDepthStencilRead =
+        gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
+
+    // Desktop GL supports BGRA textures via swizzling in the driver; ES requires an extension.
+    bool supportsBGRARead =
+        gl.GetVersion().IsDesktop() || gl.IsGLExtensionSupported("GL_EXT_read_format_bgra");
+
+    bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
+                                   gl.IsGLExtensionSupported("GL_OES_sample_variables");
+
+    // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
+    // procs without the extension suffix.
+    // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
+
+    // supportsBaseVertex |=
+    //     (gl.IsAtLeastGLES(2, 0) &&
+    //      (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
+    //       gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
+    //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
+
+    // supportsBaseInstance |=
+    //     (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
+    //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
+
+    // TODO(crbug.com/dawn/343): Investigate emulation.
+    SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
+    SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
+    SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
+    SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
+    SetToggle(Toggle::DisableDepthRead, !supportsDepthRead);
+    SetToggle(Toggle::DisableStencilRead, !supportsStencilRead);
+    SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
+    SetToggle(Toggle::DisableBGRARead, !supportsBGRARead);
+    SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
+    SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
+    // For OpenGL ES, we must use a placeholder fragment shader for vertex-only render pipeline.
+    SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
+}
+
+const GLFormat& Device::GetGLFormat(const Format& format) {
+    ASSERT(format.isSupported);
+    ASSERT(format.GetIndex() < mFormatTable.size());
+
+    const GLFormat& result = mFormatTable[format.GetIndex()];
+    ASSERT(result.isSupportedOnBackend);
+    return result;
+}
+
+GLenum Device::GetBGRAInternalFormat() const {
+    if (gl.IsGLExtensionSupported("GL_EXT_texture_format_BGRA8888") ||
+        gl.IsGLExtensionSupported("GL_APPLE_texture_format_BGRA8888")) {
+        return GL_BGRA8_EXT;
+    } else {
+        // Desktop GL will swizzle to/from RGBA8 for BGRA formats.
+        return GL_RGBA8;
+    }
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+    const BindGroupDescriptor* descriptor) {
+    DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
+    return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+    return AcquireRef(new Buffer(this, descriptor));
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+    CommandEncoder* encoder,
+    const CommandBufferDescriptor* descriptor) {
+    return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+    const ComputePipelineDescriptor* descriptor) {
+    return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+    const PipelineLayoutDescriptor* descriptor) {
+    return AcquireRef(new PipelineLayout(this, descriptor));
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+    return AcquireRef(new QuerySet(this, descriptor));
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+    const RenderPipelineDescriptor* descriptor) {
+    return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+    return AcquireRef(new Sampler(this, descriptor));
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+    const ShaderModuleDescriptor* descriptor,
+    ShaderModuleParseResult* parseResult) {
+    return ShaderModule::Create(this, descriptor, parseResult);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+    const SwapChainDescriptor* descriptor) {
+    return AcquireRef(new SwapChain(this, descriptor));
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+    Surface* surface,
+    NewSwapChainBase* previousSwapChain,
+    const SwapChainDescriptor* descriptor) {
+    return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+    return AcquireRef(new Texture(this, descriptor));
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+    TextureBase* texture,
+    const TextureViewDescriptor* descriptor) {
+    return AcquireRef(new TextureView(texture, descriptor));
+}
+
+void Device::SubmitFenceSync() {
+    GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+    IncrementLastSubmittedCommandSerial();
+    mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
+}
+
+MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
+                                                ::EGLImage image) {
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                    "Texture dimension (%s) is not %s.", descriptor->dimension,
+                    wgpu::TextureDimension::e2D);
+
+    DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                    descriptor->mipLevelCount);
+
+    DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+                    descriptor->size.depthOrArrayLayers);
+
+    DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                    descriptor->sampleCount);
+
+    DAWN_INVALID_IF(descriptor->usage &
+                        (wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding),
+                    "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
+                    wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
+
+    return {};
+}
+TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+                                                   ::EGLImage image) {
+    const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+    if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+        return nullptr;
+    }
+    if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
+        return nullptr;
     }
 
-    Device::Device(AdapterBase* adapter,
-                   const DeviceDescriptor* descriptor,
-                   const OpenGLFunctions& functions)
-        : DeviceBase(adapter, descriptor), gl(functions) {
+    GLuint tex;
+    gl.GenTextures(1, &tex);
+    gl.BindTexture(GL_TEXTURE_2D, tex);
+    gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+
+    GLint width, height, internalFormat;
+    gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
+    gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
+    gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
+
+    if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
+        textureDescriptor->size.height != static_cast<uint32_t>(height) ||
+        textureDescriptor->size.depthOrArrayLayers != 1) {
+        ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
+            "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
+            width, height, &textureDescriptor->size));
+        gl.DeleteTextures(1, &tex);
+        return nullptr;
     }
 
-    Device::~Device() {
-        Destroy();
-    }
+    // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
+    // in the passed-in TextureDescriptor.
+    return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
+}
 
-    MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
-        InitTogglesFromDriver();
-        mFormatTable = BuildGLFormatTable(GetBGRAInternalFormat());
+MaybeError Device::TickImpl() {
+    return {};
+}
 
-        return DeviceBase::Initialize(AcquireRef(new Queue(this, &descriptor->defaultQueue)));
-    }
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+    ExecutionSerial fenceSerial{0};
+    while (!mFencesInFlight.empty()) {
+        auto [sync, tentativeSerial] = mFencesInFlight.front();
 
-    void Device::InitTogglesFromDriver() {
-        bool supportsBaseVertex = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 2);
+        // Fence are added in order, so we can stop searching as soon
+        // as we see one that's not ready.
 
-        bool supportsBaseInstance = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(4, 2);
-
-        // TODO(crbug.com/dawn/582): Use OES_draw_buffers_indexed where available.
-        bool supportsIndexedDrawBuffers = gl.IsAtLeastGLES(3, 2) || gl.IsAtLeastGL(3, 0);
-
-        bool supportsSnormRead =
-            gl.IsAtLeastGL(4, 4) || gl.IsGLExtensionSupported("GL_EXT_render_snorm");
-
-        bool supportsDepthRead =
-            gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth");
-
-        bool supportsStencilRead =
-            gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_stencil");
-
-        bool supportsDepthStencilRead =
-            gl.IsAtLeastGL(3, 0) || gl.IsGLExtensionSupported("GL_NV_read_depth_stencil");
-
-        // Desktop GL supports BGRA textures via swizzling in the driver; ES requires an extension.
-        bool supportsBGRARead =
-            gl.GetVersion().IsDesktop() || gl.IsGLExtensionSupported("GL_EXT_read_format_bgra");
-
-        bool supportsSampleVariables = gl.IsAtLeastGL(4, 0) || gl.IsAtLeastGLES(3, 2) ||
-                                       gl.IsGLExtensionSupported("GL_OES_sample_variables");
-
-        // TODO(crbug.com/dawn/343): We can support the extension variants, but need to load the EXT
-        // procs without the extension suffix.
-        // We'll also need emulation of shader builtins gl_BaseVertex and gl_BaseInstance.
-
-        // supportsBaseVertex |=
-        //     (gl.IsAtLeastGLES(2, 0) &&
-        //      (gl.IsGLExtensionSupported("OES_draw_elements_base_vertex") ||
-        //       gl.IsGLExtensionSupported("EXT_draw_elements_base_vertex"))) ||
-        //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_draw_elements_base_vertex"));
-
-        // supportsBaseInstance |=
-        //     (gl.IsAtLeastGLES(3, 1) && gl.IsGLExtensionSupported("EXT_base_instance")) ||
-        //     (gl.IsAtLeastGL(3, 1) && gl.IsGLExtensionSupported("ARB_base_instance"));
-
-        // TODO(crbug.com/dawn/343): Investigate emulation.
-        SetToggle(Toggle::DisableBaseVertex, !supportsBaseVertex);
-        SetToggle(Toggle::DisableBaseInstance, !supportsBaseInstance);
-        SetToggle(Toggle::DisableIndexedDrawBuffers, !supportsIndexedDrawBuffers);
-        SetToggle(Toggle::DisableSnormRead, !supportsSnormRead);
-        SetToggle(Toggle::DisableDepthRead, !supportsDepthRead);
-        SetToggle(Toggle::DisableStencilRead, !supportsStencilRead);
-        SetToggle(Toggle::DisableDepthStencilRead, !supportsDepthStencilRead);
-        SetToggle(Toggle::DisableBGRARead, !supportsBGRARead);
-        SetToggle(Toggle::DisableSampleVariables, !supportsSampleVariables);
-        SetToggle(Toggle::FlushBeforeClientWaitSync, gl.GetVersion().IsES());
-        // For OpenGL ES, we must use a placeholder fragment shader for vertex-only render pipeline.
-        SetToggle(Toggle::UsePlaceholderFragmentInVertexOnlyPipeline, gl.GetVersion().IsES());
-    }
-
-    const GLFormat& Device::GetGLFormat(const Format& format) {
-        ASSERT(format.isSupported);
-        ASSERT(format.GetIndex() < mFormatTable.size());
-
-        const GLFormat& result = mFormatTable[format.GetIndex()];
-        ASSERT(result.isSupportedOnBackend);
-        return result;
-    }
-
-    GLenum Device::GetBGRAInternalFormat() const {
-        if (gl.IsGLExtensionSupported("GL_EXT_texture_format_BGRA8888") ||
-            gl.IsGLExtensionSupported("GL_APPLE_texture_format_BGRA8888")) {
-            return GL_BGRA8_EXT;
-        } else {
-            // Desktop GL will swizzle to/from RGBA8 for BGRA formats.
-            return GL_RGBA8;
+        // TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
+        if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
+            gl.Flush();
         }
-    }
-
-    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
-        const BindGroupDescriptor* descriptor) {
-        DAWN_TRY(ValidateGLBindGroupDescriptor(descriptor));
-        return BindGroup::Create(this, descriptor);
-    }
-    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        return AcquireRef(new BindGroupLayout(this, descriptor, pipelineCompatibilityToken));
-    }
-    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
-        return AcquireRef(new Buffer(this, descriptor));
-    }
-    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
-        CommandEncoder* encoder,
-        const CommandBufferDescriptor* descriptor) {
-        return AcquireRef(new CommandBuffer(encoder, descriptor));
-    }
-    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
-        const ComputePipelineDescriptor* descriptor) {
-        return ComputePipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
-        const PipelineLayoutDescriptor* descriptor) {
-        return AcquireRef(new PipelineLayout(this, descriptor));
-    }
-    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
-        const QuerySetDescriptor* descriptor) {
-        return AcquireRef(new QuerySet(this, descriptor));
-    }
-    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
-        const RenderPipelineDescriptor* descriptor) {
-        return RenderPipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
-        return AcquireRef(new Sampler(this, descriptor));
-    }
-    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
-        const ShaderModuleDescriptor* descriptor,
-        ShaderModuleParseResult* parseResult) {
-        return ShaderModule::Create(this, descriptor, parseResult);
-    }
-    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
-        const SwapChainDescriptor* descriptor) {
-        return AcquireRef(new SwapChain(this, descriptor));
-    }
-    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
-        Surface* surface,
-        NewSwapChainBase* previousSwapChain,
-        const SwapChainDescriptor* descriptor) {
-        return DAWN_FORMAT_VALIDATION_ERROR("New swapchains not implemented.");
-    }
-    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
-        return AcquireRef(new Texture(this, descriptor));
-    }
-    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
-        TextureBase* texture,
-        const TextureViewDescriptor* descriptor) {
-        return AcquireRef(new TextureView(texture, descriptor));
-    }
-
-    void Device::SubmitFenceSync() {
-        GLsync sync = gl.FenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
-        IncrementLastSubmittedCommandSerial();
-        mFencesInFlight.emplace(sync, GetLastSubmittedCommandSerial());
-    }
-
-    MaybeError Device::ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
-                                                    ::EGLImage image) {
-        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
-                        "Texture dimension (%s) is not %s.", descriptor->dimension,
-                        wgpu::TextureDimension::e2D);
-
-        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
-                        descriptor->mipLevelCount);
-
-        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
-                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
-        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
-                        descriptor->sampleCount);
-
-        DAWN_INVALID_IF(descriptor->usage & (wgpu::TextureUsage::TextureBinding |
-                                             wgpu::TextureUsage::StorageBinding),
-                        "Texture usage (%s) cannot have %s or %s.", descriptor->usage,
-                        wgpu::TextureUsage::TextureBinding, wgpu::TextureUsage::StorageBinding);
-
-        return {};
-    }
-    TextureBase* Device::CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
-                                                       ::EGLImage image) {
-        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
-        if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
-            return nullptr;
+        GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
+        if (result == GL_TIMEOUT_EXPIRED) {
+            return fenceSerial;
         }
-        if (ConsumedError(ValidateEGLImageCanBeWrapped(textureDescriptor, image))) {
-            return nullptr;
-        }
+        // Update fenceSerial since fence is ready.
+        fenceSerial = tentativeSerial;
 
-        GLuint tex;
-        gl.GenTextures(1, &tex);
-        gl.BindTexture(GL_TEXTURE_2D, tex);
-        gl.EGLImageTargetTexture2DOES(GL_TEXTURE_2D, image);
+        gl.DeleteSync(sync);
 
-        GLint width, height, internalFormat;
-        gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH, &width);
-        gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &height);
-        gl.GetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &internalFormat);
+        mFencesInFlight.pop();
 
-        if (textureDescriptor->size.width != static_cast<uint32_t>(width) ||
-            textureDescriptor->size.height != static_cast<uint32_t>(height) ||
-            textureDescriptor->size.depthOrArrayLayers != 1) {
-            ConsumedError(DAWN_FORMAT_VALIDATION_ERROR(
-                "EGLImage size (width: %u, height: %u, depth: 1) doesn't match descriptor size %s.",
-                width, height, &textureDescriptor->size));
-            gl.DeleteTextures(1, &tex);
-            return nullptr;
-        }
-
-        // TODO(dawn:803): Validate the OpenGL texture format from the EGLImage against the format
-        // in the passed-in TextureDescriptor.
-        return new Texture(this, textureDescriptor, tex, TextureBase::TextureState::OwnedInternal);
+        ASSERT(fenceSerial > GetCompletedCommandSerial());
     }
+    return fenceSerial;
+}
 
-    MaybeError Device::TickImpl() {
-        return {};
-    }
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+    return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
+}
 
-    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
-        ExecutionSerial fenceSerial{0};
-        while (!mFencesInFlight.empty()) {
-            auto [sync, tentativeSerial] = mFencesInFlight.front();
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) {
+    return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
+}
 
-            // Fence are added in order, so we can stop searching as soon
-            // as we see one that's not ready.
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) {
+    return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
+}
 
-            // TODO(crbug.com/dawn/633): Remove this workaround after the deadlock issue is fixed.
-            if (IsToggleEnabled(Toggle::FlushBeforeClientWaitSync)) {
-                gl.Flush();
-            }
-            GLenum result = gl.ClientWaitSync(sync, GL_SYNC_FLUSH_COMMANDS_BIT, 0);
-            if (result == GL_TIMEOUT_EXPIRED) {
-                return fenceSerial;
-            }
-            // Update fenceSerial since fence is ready.
-            fenceSerial = tentativeSerial;
+void Device::DestroyImpl() {
+    ASSERT(GetState() == State::Disconnected);
+}
 
-            gl.DeleteSync(sync);
+MaybeError Device::WaitForIdleForDestruction() {
+    gl.Finish();
+    DAWN_TRY(CheckPassedSerials());
+    ASSERT(mFencesInFlight.empty());
 
-            mFencesInFlight.pop();
+    return {};
+}
 
-            ASSERT(fenceSerial > GetCompletedCommandSerial());
-        }
-        return fenceSerial;
-    }
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+    return 1;
+}
 
-    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
-        return DAWN_UNIMPLEMENTED_ERROR("Device unable to create staging buffer.");
-    }
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+    return 1;
+}
 
-    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
-                                               uint64_t sourceOffset,
-                                               BufferBase* destination,
-                                               uint64_t destinationOffset,
-                                               uint64_t size) {
-        return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer.");
-    }
-
-    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
-                                                const TextureDataLayout& src,
-                                                TextureCopy* dst,
-                                                const Extent3D& copySizePixels) {
-        return DAWN_UNIMPLEMENTED_ERROR("Device unable to copy from staging buffer to texture.");
-    }
-
-    void Device::DestroyImpl() {
-        ASSERT(GetState() == State::Disconnected);
-    }
-
-    MaybeError Device::WaitForIdleForDestruction() {
-        gl.Finish();
-        DAWN_TRY(CheckPassedSerials());
-        ASSERT(mFencesInFlight.empty());
-
-        return {};
-    }
-
-    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
-        return 1;
-    }
-
-    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
-        return 1;
-    }
-
-    float Device::GetTimestampPeriodInNS() const {
-        return 1.0f;
-    }
+float Device::GetTimestampPeriodInNS() const {
+    return 1.0f;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/DeviceGL.h b/src/dawn/native/opengl/DeviceGL.h
index 52ea056..c0314fd 100644
--- a/src/dawn/native/opengl/DeviceGL.h
+++ b/src/dawn/native/opengl/DeviceGL.h
@@ -30,104 +30,100 @@
 
 // Remove windows.h macros after glad's include of windows.h
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include "dawn/common/windows_with_undefs.h"
+#include "dawn/common/windows_with_undefs.h"
 #endif
 
 typedef void* EGLImage;
 
 namespace dawn::native::opengl {
 
-    class Device final : public DeviceBase {
-      public:
-        static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
-                                                 const DeviceDescriptor* descriptor,
-                                                 const OpenGLFunctions& functions);
-        ~Device() override;
+class Device final : public DeviceBase {
+  public:
+    static ResultOrError<Ref<Device>> Create(AdapterBase* adapter,
+                                             const DeviceDescriptor* descriptor,
+                                             const OpenGLFunctions& functions);
+    ~Device() override;
 
-        MaybeError Initialize(const DeviceDescriptor* descriptor);
+    MaybeError Initialize(const DeviceDescriptor* descriptor);
 
-        // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
-        const OpenGLFunctions gl;
+    // Contains all the OpenGL entry points, glDoFoo is called via device->gl.DoFoo.
+    const OpenGLFunctions gl;
 
-        const GLFormat& GetGLFormat(const Format& format);
+    const GLFormat& GetGLFormat(const Format& format);
 
-        void SubmitFenceSync();
+    void SubmitFenceSync();
 
-        MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor,
-                                                ::EGLImage image);
-        TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
-                                                   ::EGLImage image);
+    MaybeError ValidateEGLImageCanBeWrapped(const TextureDescriptor* descriptor, ::EGLImage image);
+    TextureBase* CreateTextureWrappingEGLImage(const ExternalImageDescriptor* descriptor,
+                                               ::EGLImage image);
 
-        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
-            CommandEncoder* encoder,
-            const CommandBufferDescriptor* descriptor) override;
+    ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) override;
 
-        MaybeError TickImpl() override;
+    MaybeError TickImpl() override;
 
-        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
-        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
-                                           uint64_t sourceOffset,
-                                           BufferBase* destination,
-                                           uint64_t destinationOffset,
-                                           uint64_t size) override;
+    ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+    MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                       uint64_t sourceOffset,
+                                       BufferBase* destination,
+                                       uint64_t destinationOffset,
+                                       uint64_t size) override;
 
-        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
-                                            const TextureDataLayout& src,
-                                            TextureCopy* dst,
-                                            const Extent3D& copySizePixels) override;
+    MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                        const TextureDataLayout& src,
+                                        TextureCopy* dst,
+                                        const Extent3D& copySizePixels) override;
 
-        uint32_t GetOptimalBytesPerRowAlignment() const override;
-        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+    uint32_t GetOptimalBytesPerRowAlignment() const override;
+    uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
 
-        float GetTimestampPeriodInNS() const override;
+    float GetTimestampPeriodInNS() const override;
 
-      private:
-        Device(AdapterBase* adapter,
-               const DeviceDescriptor* descriptor,
-               const OpenGLFunctions& functions);
+  private:
+    Device(AdapterBase* adapter,
+           const DeviceDescriptor* descriptor,
+           const OpenGLFunctions& functions);
 
-        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
-            const BindGroupDescriptor* descriptor) override;
-        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken) override;
-        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-            const BufferDescriptor* descriptor) override;
-        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
-            const PipelineLayoutDescriptor* descriptor) override;
-        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
-            const QuerySetDescriptor* descriptor) override;
-        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
-            const SamplerDescriptor* descriptor) override;
-        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
-            const ShaderModuleDescriptor* descriptor,
-            ShaderModuleParseResult* parseResult) override;
-        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
-            Surface* surface,
-            NewSwapChainBase* previousSwapChain,
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
-            const TextureDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
-            TextureBase* texture,
-            const TextureViewDescriptor* descriptor) override;
-        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
-            const ComputePipelineDescriptor* descriptor) override;
-        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
-            const RenderPipelineDescriptor* descriptor) override;
+    ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) override;
+    ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) override;
+    ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+    ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) override;
+    ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) override;
+    ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) override;
+    ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) override;
+    Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) override;
+    Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) override;
 
-        void InitTogglesFromDriver();
-        GLenum GetBGRAInternalFormat() const;
-        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
-        void DestroyImpl() override;
-        MaybeError WaitForIdleForDestruction() override;
+    void InitTogglesFromDriver();
+    GLenum GetBGRAInternalFormat() const;
+    ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+    void DestroyImpl() override;
+    MaybeError WaitForIdleForDestruction() override;
 
-        std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;
+    std::queue<std::pair<GLsync, ExecutionSerial>> mFencesInFlight;
 
-        GLFormatTable mFormatTable;
-    };
+    GLFormatTable mFormatTable;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/Forward.h b/src/dawn/native/opengl/Forward.h
index 2ebd2fb..1ba0409 100644
--- a/src/dawn/native/opengl/Forward.h
+++ b/src/dawn/native/opengl/Forward.h
@@ -19,47 +19,47 @@
 
 namespace dawn::native::opengl {
 
-    class Adapter;
-    class BindGroup;
-    class BindGroupLayout;
-    class Buffer;
-    class CommandBuffer;
-    class ComputePipeline;
-    class Device;
-    class PersistentPipelineState;
-    class PipelineLayout;
-    class QuerySet;
-    class Queue;
-    class RenderPipeline;
-    class Sampler;
-    class ShaderModule;
-    class SwapChain;
-    class Texture;
-    class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class PersistentPipelineState;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class SwapChain;
+class Texture;
+class TextureView;
 
-    struct OpenGLBackendTraits {
-        using AdapterType = Adapter;
-        using BindGroupType = BindGroup;
-        using BindGroupLayoutType = BindGroupLayout;
-        using BufferType = Buffer;
-        using CommandBufferType = CommandBuffer;
-        using ComputePipelineType = ComputePipeline;
-        using DeviceType = Device;
-        using PipelineLayoutType = PipelineLayout;
-        using QuerySetType = QuerySet;
-        using QueueType = Queue;
-        using RenderPipelineType = RenderPipeline;
-        using SamplerType = Sampler;
-        using ShaderModuleType = ShaderModule;
-        using SwapChainType = SwapChain;
-        using TextureType = Texture;
-        using TextureViewType = TextureView;
-    };
+struct OpenGLBackendTraits {
+    using AdapterType = Adapter;
+    using BindGroupType = BindGroup;
+    using BindGroupLayoutType = BindGroupLayout;
+    using BufferType = Buffer;
+    using CommandBufferType = CommandBuffer;
+    using ComputePipelineType = ComputePipeline;
+    using DeviceType = Device;
+    using PipelineLayoutType = PipelineLayout;
+    using QuerySetType = QuerySet;
+    using QueueType = Queue;
+    using RenderPipelineType = RenderPipeline;
+    using SamplerType = Sampler;
+    using ShaderModuleType = ShaderModule;
+    using SwapChainType = SwapChain;
+    using TextureType = Texture;
+    using TextureViewType = TextureView;
+};
 
-    template <typename T>
-    auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
-        return ToBackendBase<OpenGLBackendTraits>(common);
-    }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<OpenGLBackendTraits>(common)) {
+    return ToBackendBase<OpenGLBackendTraits>(common);
+}
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/GLFormat.cpp b/src/dawn/native/opengl/GLFormat.cpp
index 2d54426..6fbca79 100644
--- a/src/dawn/native/opengl/GLFormat.cpp
+++ b/src/dawn/native/opengl/GLFormat.cpp
@@ -16,32 +16,32 @@
 
 namespace dawn::native::opengl {
 
-    GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA) {
-        GLFormatTable table;
+GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA) {
+    GLFormatTable table;
 
-        using Type = GLFormat::ComponentType;
+    using Type = GLFormat::ComponentType;
 
-        auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat,
-                                  GLenum format, GLenum type, Type componentType) {
-            FormatIndex index = ComputeFormatIndex(dawnFormat);
-            ASSERT(index < table.size());
+    auto AddFormat = [&table](wgpu::TextureFormat dawnFormat, GLenum internalFormat, GLenum format,
+                              GLenum type, Type componentType) {
+        FormatIndex index = ComputeFormatIndex(dawnFormat);
+        ASSERT(index < table.size());
 
-            table[index].internalFormat = internalFormat;
-            table[index].format = format;
-            table[index].type = type;
-            table[index].componentType = componentType;
-            table[index].isSupportedOnBackend = true;
-        };
+        table[index].internalFormat = internalFormat;
+        table[index].format = format;
+        table[index].type = type;
+        table[index].componentType = componentType;
+        table[index].isSupportedOnBackend = true;
+    };
 
-        // It's dangerous to go alone, take this:
-        //
-        //     [ANGLE's formatutils.cpp]
-        //     [ANGLE's formatutilsgl.cpp]
-        //
-        // The format tables in these files are extremely complete and the best reference on GL
-        // format support, enums, etc.
+    // It's dangerous to go alone, take this:
+    //
+    //     [ANGLE's formatutils.cpp]
+    //     [ANGLE's formatutilsgl.cpp]
+    //
+    // The format tables in these files are extremely complete and the best reference on GL
+    // format support, enums, etc.
 
-        // clang-format off
+    // clang-format off
 
         // 1 byte color formats
         AddFormat(wgpu::TextureFormat::R8Unorm, GL_R8, GL_RED, GL_UNSIGNED_BYTE, Type::Float);
@@ -113,9 +113,9 @@
         AddFormat(wgpu::TextureFormat::BC7RGBAUnorm, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
         AddFormat(wgpu::TextureFormat::BC7RGBAUnormSrgb, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, Type::Float);
 
-        // clang-format on
+    // clang-format on
 
-        return table;
-    }
+    return table;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/GLFormat.h b/src/dawn/native/opengl/GLFormat.h
index a76f989..ea3db9a 100644
--- a/src/dawn/native/opengl/GLFormat.h
+++ b/src/dawn/native/opengl/GLFormat.h
@@ -20,22 +20,22 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    struct GLFormat {
-        GLenum internalFormat = 0;
-        GLenum format = 0;
-        GLenum type = 0;
-        bool isSupportedOnBackend = false;
+struct GLFormat {
+    GLenum internalFormat = 0;
+    GLenum format = 0;
+    GLenum type = 0;
+    bool isSupportedOnBackend = false;
 
-        // OpenGL has different functions depending on the format component type, for example
-        // glClearBufferfv is only valid on formats with the Float ComponentType
-        enum ComponentType { Float, Int, Uint, DepthStencil };
-        ComponentType componentType;
-    };
+    // OpenGL has different functions depending on the format component type, for example
+    // glClearBufferfv is only valid on formats with the Float ComponentType
+    enum ComponentType { Float, Int, Uint, DepthStencil };
+    ComponentType componentType;
+};
 
-    using GLFormatTable = ityp::array<FormatIndex, GLFormat, kKnownFormatCount>;
-    GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA);
+using GLFormatTable = ityp::array<FormatIndex, GLFormat, kKnownFormatCount>;
+GLFormatTable BuildGLFormatTable(GLenum internalFormatForBGRA);
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/NativeSwapChainImplGL.cpp b/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
index b01e7e3..409acf1 100644
--- a/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
+++ b/src/dawn/native/opengl/NativeSwapChainImplGL.cpp
@@ -18,71 +18,69 @@
 
 namespace dawn::native::opengl {
 
-    NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
-                                             PresentCallback present,
-                                             void* presentUserdata)
-        : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {
+NativeSwapChainImpl::NativeSwapChainImpl(Device* device,
+                                         PresentCallback present,
+                                         void* presentUserdata)
+    : mPresentCallback(present), mPresentUserdata(presentUserdata), mDevice(device) {}
+
+NativeSwapChainImpl::~NativeSwapChainImpl() {
+    const OpenGLFunctions& gl = mDevice->gl;
+    gl.DeleteTextures(1, &mBackTexture);
+    gl.DeleteFramebuffers(1, &mBackFBO);
+}
+
+void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
+    const OpenGLFunctions& gl = mDevice->gl;
+    gl.GenTextures(1, &mBackTexture);
+    gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+    gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+
+    gl.GenFramebuffers(1, &mBackFBO);
+    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+    gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mBackTexture,
+                            0);
+}
+
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                  WGPUTextureUsage usage,
+                                                  uint32_t width,
+                                                  uint32_t height) {
+    if (format != WGPUTextureFormat_RGBA8Unorm) {
+        return "unsupported format";
     }
+    ASSERT(width > 0);
+    ASSERT(height > 0);
+    mWidth = width;
+    mHeight = height;
 
-    NativeSwapChainImpl::~NativeSwapChainImpl() {
-        const OpenGLFunctions& gl = mDevice->gl;
-        gl.DeleteTextures(1, &mBackTexture);
-        gl.DeleteFramebuffers(1, &mBackFBO);
-    }
+    const OpenGLFunctions& gl = mDevice->gl;
+    gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
+    // Reallocate the texture
+    gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
 
-    void NativeSwapChainImpl::Init(DawnWSIContextGL* /*context*/) {
-        const OpenGLFunctions& gl = mDevice->gl;
-        gl.GenTextures(1, &mBackTexture);
-        gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
-        gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 0, 0, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-        gl.GenFramebuffers(1, &mBackFBO);
-        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
-        gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
-                                mBackTexture, 0);
-    }
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+    nextTexture->texture.u32 = mBackTexture;
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
-                                                      WGPUTextureUsage usage,
-                                                      uint32_t width,
-                                                      uint32_t height) {
-        if (format != WGPUTextureFormat_RGBA8Unorm) {
-            return "unsupported format";
-        }
-        ASSERT(width > 0);
-        ASSERT(height > 0);
-        mWidth = width;
-        mHeight = height;
+DawnSwapChainError NativeSwapChainImpl::Present() {
+    const OpenGLFunctions& gl = mDevice->gl;
+    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
+    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
+    gl.Scissor(0, 0, mWidth, mHeight);
+    gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
+                       GL_NEAREST);
 
-        const OpenGLFunctions& gl = mDevice->gl;
-        gl.BindTexture(GL_TEXTURE_2D, mBackTexture);
-        // Reallocate the texture
-        gl.TexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE,
-                      nullptr);
+    mPresentCallback(mPresentUserdata);
 
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
-        nextTexture->texture.u32 = mBackTexture;
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    DawnSwapChainError NativeSwapChainImpl::Present() {
-        const OpenGLFunctions& gl = mDevice->gl;
-        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, mBackFBO);
-        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
-        gl.Scissor(0, 0, mWidth, mHeight);
-        gl.BlitFramebuffer(0, 0, mWidth, mHeight, 0, mHeight, mWidth, 0, GL_COLOR_BUFFER_BIT,
-                           GL_NEAREST);
-
-        mPresentCallback(mPresentUserdata);
-
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
-        return wgpu::TextureFormat::RGBA8Unorm;
-    }
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+    return wgpu::TextureFormat::RGBA8Unorm;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/NativeSwapChainImplGL.h b/src/dawn/native/opengl/NativeSwapChainImplGL.h
index 6d52074..1a2013e 100644
--- a/src/dawn/native/opengl/NativeSwapChainImplGL.h
+++ b/src/dawn/native/opengl/NativeSwapChainImplGL.h
@@ -22,36 +22,36 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class NativeSwapChainImpl {
-      public:
-        using WSIContext = DawnWSIContextGL;
+class NativeSwapChainImpl {
+  public:
+    using WSIContext = DawnWSIContextGL;
 
-        NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
-        ~NativeSwapChainImpl();
+    NativeSwapChainImpl(Device* device, PresentCallback present, void* presentUserdata);
+    ~NativeSwapChainImpl();
 
-        void Init(DawnWSIContextGL* context);
-        DawnSwapChainError Configure(WGPUTextureFormat format,
-                                     WGPUTextureUsage,
-                                     uint32_t width,
-                                     uint32_t height);
-        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
-        DawnSwapChainError Present();
+    void Init(DawnWSIContextGL* context);
+    DawnSwapChainError Configure(WGPUTextureFormat format,
+                                 WGPUTextureUsage,
+                                 uint32_t width,
+                                 uint32_t height);
+    DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+    DawnSwapChainError Present();
 
-        wgpu::TextureFormat GetPreferredFormat() const;
+    wgpu::TextureFormat GetPreferredFormat() const;
 
-      private:
-        PresentCallback mPresentCallback;
-        void* mPresentUserdata;
+  private:
+    PresentCallback mPresentCallback;
+    void* mPresentUserdata;
 
-        uint32_t mWidth = 0;
-        uint32_t mHeight = 0;
-        GLuint mBackFBO = 0;
-        GLuint mBackTexture = 0;
+    uint32_t mWidth = 0;
+    uint32_t mHeight = 0;
+    GLuint mBackFBO = 0;
+    GLuint mBackTexture = 0;
 
-        Device* mDevice = nullptr;
-    };
+    Device* mDevice = nullptr;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/OpenGLBackend.cpp b/src/dawn/native/opengl/OpenGLBackend.cpp
index 739de62..c77c1d0 100644
--- a/src/dawn/native/opengl/OpenGLBackend.cpp
+++ b/src/dawn/native/opengl/OpenGLBackend.cpp
@@ -23,43 +23,39 @@
 
 namespace dawn::native::opengl {
 
-    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
-        : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {
-    }
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+    : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGL) {}
 
-    AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
-        : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {
-    }
+AdapterDiscoveryOptionsES::AdapterDiscoveryOptionsES()
+    : AdapterDiscoveryOptionsBase(WGPUBackendType_OpenGLES) {}
 
-    DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
-                                                          PresentCallback present,
-                                                          void* presentUserdata) {
-        Device* backendDevice = ToBackend(FromAPI(device));
+DawnSwapChainImplementation CreateNativeSwapChainImpl(WGPUDevice device,
+                                                      PresentCallback present,
+                                                      void* presentUserdata) {
+    Device* backendDevice = ToBackend(FromAPI(device));
 
-        DawnSwapChainImplementation impl;
-        impl = CreateSwapChainImplementation(
-            new NativeSwapChainImpl(backendDevice, present, presentUserdata));
-        impl.textureUsage = WGPUTextureUsage_Present;
+    DawnSwapChainImplementation impl;
+    impl = CreateSwapChainImplementation(
+        new NativeSwapChainImpl(backendDevice, present, presentUserdata));
+    impl.textureUsage = WGPUTextureUsage_Present;
 
-        return impl;
-    }
+    return impl;
+}
 
-    WGPUTextureFormat GetNativeSwapChainPreferredFormat(
-        const DawnSwapChainImplementation* swapChain) {
-        NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
-        return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
-    }
+WGPUTextureFormat GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain) {
+    NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+    return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+}
 
-    ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
-        : ExternalImageDescriptor(ExternalImageType::EGLImage) {
-    }
+ExternalImageDescriptorEGLImage::ExternalImageDescriptorEGLImage()
+    : ExternalImageDescriptor(ExternalImageType::EGLImage) {}
 
-    WGPUTexture WrapExternalEGLImage(WGPUDevice device,
-                                     const ExternalImageDescriptorEGLImage* descriptor) {
-        Device* backendDevice = ToBackend(FromAPI(device));
-        TextureBase* texture =
-            backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
-        return ToAPI(texture);
-    }
+WGPUTexture WrapExternalEGLImage(WGPUDevice device,
+                                 const ExternalImageDescriptorEGLImage* descriptor) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+    TextureBase* texture =
+        backendDevice->CreateTextureWrappingEGLImage(descriptor, descriptor->image);
+    return ToAPI(texture);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/OpenGLFunctions.cpp b/src/dawn/native/opengl/OpenGLFunctions.cpp
index 45f8354..ccd0720 100644
--- a/src/dawn/native/opengl/OpenGLFunctions.cpp
+++ b/src/dawn/native/opengl/OpenGLFunctions.cpp
@@ -18,44 +18,44 @@
 
 namespace dawn::native::opengl {
 
-    MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
-        DAWN_TRY(mVersion.Initialize(getProc));
-        if (mVersion.IsES()) {
-            DAWN_TRY(LoadOpenGLESProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
-        } else {
-            DAWN_TRY(LoadDesktopGLProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
-        }
-
-        InitializeSupportedGLExtensions();
-
-        return {};
+MaybeError OpenGLFunctions::Initialize(GetProcAddress getProc) {
+    DAWN_TRY(mVersion.Initialize(getProc));
+    if (mVersion.IsES()) {
+        DAWN_TRY(LoadOpenGLESProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
+    } else {
+        DAWN_TRY(LoadDesktopGLProcs(getProc, mVersion.GetMajor(), mVersion.GetMinor()));
     }
 
-    void OpenGLFunctions::InitializeSupportedGLExtensions() {
-        int32_t numExtensions;
-        GetIntegerv(GL_NUM_EXTENSIONS, &numExtensions);
+    InitializeSupportedGLExtensions();
 
-        for (int32_t i = 0; i < numExtensions; ++i) {
-            const char* extensionName = reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, i));
-            mSupportedGLExtensionsSet.insert(extensionName);
-        }
-    }
+    return {};
+}
 
-    bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
-        ASSERT(extension != nullptr);
-        return mSupportedGLExtensionsSet.count(extension) != 0;
-    }
+void OpenGLFunctions::InitializeSupportedGLExtensions() {
+    int32_t numExtensions;
+    GetIntegerv(GL_NUM_EXTENSIONS, &numExtensions);
 
-    const OpenGLVersion& OpenGLFunctions::GetVersion() const {
-        return mVersion;
+    for (int32_t i = 0; i < numExtensions; ++i) {
+        const char* extensionName = reinterpret_cast<const char*>(GetStringi(GL_EXTENSIONS, i));
+        mSupportedGLExtensionsSet.insert(extensionName);
     }
+}
 
-    bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
-        return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
-    }
+bool OpenGLFunctions::IsGLExtensionSupported(const char* extension) const {
+    ASSERT(extension != nullptr);
+    return mSupportedGLExtensionsSet.count(extension) != 0;
+}
 
-    bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
-        return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
-    }
+const OpenGLVersion& OpenGLFunctions::GetVersion() const {
+    return mVersion;
+}
+
+bool OpenGLFunctions::IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const {
+    return mVersion.IsDesktop() && mVersion.IsAtLeast(majorVersion, minorVersion);
+}
+
+bool OpenGLFunctions::IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const {
+    return mVersion.IsES() && mVersion.IsAtLeast(majorVersion, minorVersion);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/OpenGLFunctions.h b/src/dawn/native/opengl/OpenGLFunctions.h
index 47eb631..4ab6e09 100644
--- a/src/dawn/native/opengl/OpenGLFunctions.h
+++ b/src/dawn/native/opengl/OpenGLFunctions.h
@@ -23,23 +23,23 @@
 
 namespace dawn::native::opengl {
 
-    struct OpenGLFunctions : OpenGLFunctionsBase {
-      public:
-        MaybeError Initialize(GetProcAddress getProc);
+struct OpenGLFunctions : OpenGLFunctionsBase {
+  public:
+    MaybeError Initialize(GetProcAddress getProc);
 
-        const OpenGLVersion& GetVersion() const;
-        bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
-        bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
+    const OpenGLVersion& GetVersion() const;
+    bool IsAtLeastGL(uint32_t majorVersion, uint32_t minorVersion) const;
+    bool IsAtLeastGLES(uint32_t majorVersion, uint32_t minorVersion) const;
 
-        bool IsGLExtensionSupported(const char* extension) const;
+    bool IsGLExtensionSupported(const char* extension) const;
 
-      private:
-        void InitializeSupportedGLExtensions();
+  private:
+    void InitializeSupportedGLExtensions();
 
-        OpenGLVersion mVersion;
+    OpenGLVersion mVersion;
 
-        std::unordered_set<std::string> mSupportedGLExtensionsSet;
-    };
+    std::unordered_set<std::string> mSupportedGLExtensionsSet;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/OpenGLVersion.cpp b/src/dawn/native/opengl/OpenGLVersion.cpp
index 74a7272..297b5fb 100644
--- a/src/dawn/native/opengl/OpenGLVersion.cpp
+++ b/src/dawn/native/opengl/OpenGLVersion.cpp
@@ -20,58 +20,58 @@
 
 namespace dawn::native::opengl {
 
-    MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
-        PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
-        if (getString == nullptr) {
-            return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
-        }
-
-        std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));
-
-        if (version.find("OpenGL ES") != std::string::npos) {
-            // ES spec states that the GL_VERSION string will be in the following format:
-            // "OpenGL ES N.M vendor-specific information"
-            mStandard = Standard::ES;
-            mMajorVersion = version[10] - '0';
-            mMinorVersion = version[12] - '0';
-
-            // The minor version shouldn't get to two digits.
-            ASSERT(version.size() <= 13 || !isdigit(version[13]));
-        } else {
-            // OpenGL spec states the GL_VERSION string will be in the following format:
-            // <version number><space><vendor-specific information>
-            // The version number is either of the form major number.minor number or major
-            // number.minor number.release number, where the numbers all have one or more
-            // digits
-            mStandard = Standard::Desktop;
-            mMajorVersion = version[0] - '0';
-            mMinorVersion = version[2] - '0';
-
-            // The minor version shouldn't get to two digits.
-            ASSERT(version.size() <= 3 || !isdigit(version[3]));
-        }
-
-        return {};
+MaybeError OpenGLVersion::Initialize(GetProcAddress getProc) {
+    PFNGLGETSTRINGPROC getString = reinterpret_cast<PFNGLGETSTRINGPROC>(getProc("glGetString"));
+    if (getString == nullptr) {
+        return DAWN_INTERNAL_ERROR("Couldn't load glGetString");
     }
 
-    bool OpenGLVersion::IsDesktop() const {
-        return mStandard == Standard::Desktop;
+    std::string version = reinterpret_cast<const char*>(getString(GL_VERSION));
+
+    if (version.find("OpenGL ES") != std::string::npos) {
+        // ES spec states that the GL_VERSION string will be in the following format:
+        // "OpenGL ES N.M vendor-specific information"
+        mStandard = Standard::ES;
+        mMajorVersion = version[10] - '0';
+        mMinorVersion = version[12] - '0';
+
+        // The minor version shouldn't get to two digits.
+        ASSERT(version.size() <= 13 || !isdigit(version[13]));
+    } else {
+        // OpenGL spec states the GL_VERSION string will be in the following format:
+        // <version number><space><vendor-specific information>
+        // The version number is either of the form major number.minor number or major
+        // number.minor number.release number, where the numbers all have one or more
+        // digits
+        mStandard = Standard::Desktop;
+        mMajorVersion = version[0] - '0';
+        mMinorVersion = version[2] - '0';
+
+        // The minor version shouldn't get to two digits.
+        ASSERT(version.size() <= 3 || !isdigit(version[3]));
     }
 
-    bool OpenGLVersion::IsES() const {
-        return mStandard == Standard::ES;
-    }
+    return {};
+}
 
-    uint32_t OpenGLVersion::GetMajor() const {
-        return mMajorVersion;
-    }
+bool OpenGLVersion::IsDesktop() const {
+    return mStandard == Standard::Desktop;
+}
 
-    uint32_t OpenGLVersion::GetMinor() const {
-        return mMinorVersion;
-    }
+bool OpenGLVersion::IsES() const {
+    return mStandard == Standard::ES;
+}
 
-    bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
-        return std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
-    }
+uint32_t OpenGLVersion::GetMajor() const {
+    return mMajorVersion;
+}
+
+uint32_t OpenGLVersion::GetMinor() const {
+    return mMinorVersion;
+}
+
+bool OpenGLVersion::IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const {
+    return std::tie(mMajorVersion, mMinorVersion) >= std::tie(majorVersion, minorVersion);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/OpenGLVersion.h b/src/dawn/native/opengl/OpenGLVersion.h
index d575ba8..a9a296f 100644
--- a/src/dawn/native/opengl/OpenGLVersion.h
+++ b/src/dawn/native/opengl/OpenGLVersion.h
@@ -19,25 +19,25 @@
 
 namespace dawn::native::opengl {
 
-    struct OpenGLVersion {
-      public:
-        MaybeError Initialize(GetProcAddress getProc);
+struct OpenGLVersion {
+  public:
+    MaybeError Initialize(GetProcAddress getProc);
 
-        bool IsDesktop() const;
-        bool IsES() const;
-        uint32_t GetMajor() const;
-        uint32_t GetMinor() const;
-        bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;
+    bool IsDesktop() const;
+    bool IsES() const;
+    uint32_t GetMajor() const;
+    uint32_t GetMinor() const;
+    bool IsAtLeast(uint32_t majorVersion, uint32_t minorVersion) const;
 
-      private:
-        enum class Standard {
-            Desktop,
-            ES,
-        };
-        uint32_t mMajorVersion;
-        uint32_t mMinorVersion;
-        Standard mStandard;
+  private:
+    enum class Standard {
+        Desktop,
+        ES,
     };
+    uint32_t mMajorVersion;
+    uint32_t mMinorVersion;
+    Standard mStandard;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/PersistentPipelineStateGL.cpp b/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
index 446ab1a..8c16897 100644
--- a/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
+++ b/src/dawn/native/opengl/PersistentPipelineStateGL.cpp
@@ -18,41 +18,41 @@
 
 namespace dawn::native::opengl {
 
-    void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
-        CallGLStencilFunc(gl);
+void PersistentPipelineState::SetDefaultState(const OpenGLFunctions& gl) {
+    CallGLStencilFunc(gl);
+}
+
+void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+                                                     GLenum stencilBackCompareFunction,
+                                                     GLenum stencilFrontCompareFunction,
+                                                     uint32_t stencilReadMask) {
+    if (mStencilBackCompareFunction == stencilBackCompareFunction &&
+        mStencilFrontCompareFunction == stencilFrontCompareFunction &&
+        mStencilReadMask == stencilReadMask) {
+        return;
     }
 
-    void PersistentPipelineState::SetStencilFuncsAndMask(const OpenGLFunctions& gl,
-                                                         GLenum stencilBackCompareFunction,
-                                                         GLenum stencilFrontCompareFunction,
-                                                         uint32_t stencilReadMask) {
-        if (mStencilBackCompareFunction == stencilBackCompareFunction &&
-            mStencilFrontCompareFunction == stencilFrontCompareFunction &&
-            mStencilReadMask == stencilReadMask) {
-            return;
-        }
+    mStencilBackCompareFunction = stencilBackCompareFunction;
+    mStencilFrontCompareFunction = stencilFrontCompareFunction;
+    mStencilReadMask = stencilReadMask;
+    CallGLStencilFunc(gl);
+}
 
-        mStencilBackCompareFunction = stencilBackCompareFunction;
-        mStencilFrontCompareFunction = stencilFrontCompareFunction;
-        mStencilReadMask = stencilReadMask;
-        CallGLStencilFunc(gl);
+void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
+                                                  uint32_t stencilReference) {
+    if (mStencilReference == stencilReference) {
+        return;
     }
 
-    void PersistentPipelineState::SetStencilReference(const OpenGLFunctions& gl,
-                                                      uint32_t stencilReference) {
-        if (mStencilReference == stencilReference) {
-            return;
-        }
+    mStencilReference = stencilReference;
+    CallGLStencilFunc(gl);
+}
 
-        mStencilReference = stencilReference;
-        CallGLStencilFunc(gl);
-    }
-
-    void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
-        gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
-                               mStencilReadMask);
-        gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
-                               mStencilReadMask);
-    }
+void PersistentPipelineState::CallGLStencilFunc(const OpenGLFunctions& gl) {
+    gl.StencilFuncSeparate(GL_BACK, mStencilBackCompareFunction, mStencilReference,
+                           mStencilReadMask);
+    gl.StencilFuncSeparate(GL_FRONT, mStencilFrontCompareFunction, mStencilReference,
+                           mStencilReadMask);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/PersistentPipelineStateGL.h b/src/dawn/native/opengl/PersistentPipelineStateGL.h
index fdfe293..8dec4b5 100644
--- a/src/dawn/native/opengl/PersistentPipelineStateGL.h
+++ b/src/dawn/native/opengl/PersistentPipelineStateGL.h
@@ -20,25 +20,25 @@
 
 namespace dawn::native::opengl {
 
-    struct OpenGLFunctions;
+struct OpenGLFunctions;
 
-    class PersistentPipelineState {
-      public:
-        void SetDefaultState(const OpenGLFunctions& gl);
-        void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
-                                    GLenum stencilBackCompareFunction,
-                                    GLenum stencilFrontCompareFunction,
-                                    uint32_t stencilReadMask);
-        void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);
+class PersistentPipelineState {
+  public:
+    void SetDefaultState(const OpenGLFunctions& gl);
+    void SetStencilFuncsAndMask(const OpenGLFunctions& gl,
+                                GLenum stencilBackCompareFunction,
+                                GLenum stencilFrontCompareFunction,
+                                uint32_t stencilReadMask);
+    void SetStencilReference(const OpenGLFunctions& gl, uint32_t stencilReference);
 
-      private:
-        void CallGLStencilFunc(const OpenGLFunctions& gl);
+  private:
+    void CallGLStencilFunc(const OpenGLFunctions& gl);
 
-        GLenum mStencilBackCompareFunction = GL_ALWAYS;
-        GLenum mStencilFrontCompareFunction = GL_ALWAYS;
-        GLuint mStencilReadMask = 0xffffffff;
-        GLuint mStencilReference = 0;
-    };
+    GLenum mStencilBackCompareFunction = GL_ALWAYS;
+    GLenum mStencilFrontCompareFunction = GL_ALWAYS;
+    GLuint mStencilReadMask = 0xffffffff;
+    GLuint mStencilReference = 0;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/PipelineGL.cpp b/src/dawn/native/opengl/PipelineGL.cpp
index 06e5d83..2ddabce 100644
--- a/src/dawn/native/opengl/PipelineGL.cpp
+++ b/src/dawn/native/opengl/PipelineGL.cpp
@@ -30,190 +30,188 @@
 
 namespace dawn::native::opengl {
 
-    namespace {
+namespace {
 
-        GLenum GLShaderType(SingleShaderStage stage) {
-            switch (stage) {
-                case SingleShaderStage::Vertex:
-                    return GL_VERTEX_SHADER;
-                case SingleShaderStage::Fragment:
-                    return GL_FRAGMENT_SHADER;
-                case SingleShaderStage::Compute:
-                    return GL_COMPUTE_SHADER;
-            }
-            UNREACHABLE();
-        }
-
-    }  // namespace
-
-    PipelineGL::PipelineGL() : mProgram(0) {
+GLenum GLShaderType(SingleShaderStage stage) {
+    switch (stage) {
+        case SingleShaderStage::Vertex:
+            return GL_VERTEX_SHADER;
+        case SingleShaderStage::Fragment:
+            return GL_FRAGMENT_SHADER;
+        case SingleShaderStage::Compute:
+            return GL_COMPUTE_SHADER;
     }
+    UNREACHABLE();
+}
 
-    PipelineGL::~PipelineGL() = default;
+}  // namespace
 
-    MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
-                                          const PipelineLayout* layout,
-                                          const PerStage<ProgrammableStage>& stages) {
-        auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
-                               const char* source) -> ResultOrError<GLuint> {
-            GLuint shader = gl.CreateShader(type);
-            gl.ShaderSource(shader, 1, &source, nullptr);
-            gl.CompileShader(shader);
+PipelineGL::PipelineGL() : mProgram(0) {}
 
-            GLint compileStatus = GL_FALSE;
-            gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
-            if (compileStatus == GL_FALSE) {
-                GLint infoLogLength = 0;
-                gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
+PipelineGL::~PipelineGL() = default;
 
-                if (infoLogLength > 1) {
-                    std::vector<char> buffer(infoLogLength);
-                    gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
-                    return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s",
-                                                        source, buffer.data());
-                }
-            }
-            return shader;
-        };
+MaybeError PipelineGL::InitializeBase(const OpenGLFunctions& gl,
+                                      const PipelineLayout* layout,
+                                      const PerStage<ProgrammableStage>& stages) {
+    auto CreateShader = [](const OpenGLFunctions& gl, GLenum type,
+                           const char* source) -> ResultOrError<GLuint> {
+        GLuint shader = gl.CreateShader(type);
+        gl.ShaderSource(shader, 1, &source, nullptr);
+        gl.CompileShader(shader);
 
-        mProgram = gl.CreateProgram();
-
-        // Compute the set of active stages.
-        wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
-        for (SingleShaderStage stage : IterateStages(kAllStages)) {
-            if (stages[stage].module != nullptr) {
-                activeStages |= StageBit(stage);
-            }
-        }
-
-        // Create an OpenGL shader for each stage and gather the list of combined samplers.
-        PerStage<CombinedSamplerInfo> combinedSamplers;
-        bool needsPlaceholderSampler = false;
-        std::vector<GLuint> glShaders;
-        for (SingleShaderStage stage : IterateStages(activeStages)) {
-            const ShaderModule* module = ToBackend(stages[stage].module.Get());
-            std::string glsl;
-            DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
-                                                          &combinedSamplers[stage], layout,
-                                                          &needsPlaceholderSampler));
-            GLuint shader;
-            DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
-            gl.AttachShader(mProgram, shader);
-            glShaders.push_back(shader);
-        }
-
-        if (needsPlaceholderSampler) {
-            SamplerDescriptor desc = {};
-            ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
-            ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
-            ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
-            mPlaceholderSampler =
-                ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
-        }
-
-        // Link all the shaders together.
-        gl.LinkProgram(mProgram);
-
-        GLint linkStatus = GL_FALSE;
-        gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
-        if (linkStatus == GL_FALSE) {
+        GLint compileStatus = GL_FALSE;
+        gl.GetShaderiv(shader, GL_COMPILE_STATUS, &compileStatus);
+        if (compileStatus == GL_FALSE) {
             GLint infoLogLength = 0;
-            gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
+            gl.GetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLogLength);
 
             if (infoLogLength > 1) {
                 std::vector<char> buffer(infoLogLength);
-                gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
-                return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", buffer.data());
+                gl.GetShaderInfoLog(shader, infoLogLength, nullptr, &buffer[0]);
+                return DAWN_FORMAT_VALIDATION_ERROR("%s\nProgram compilation failed:\n%s", source,
+                                                    buffer.data());
             }
         }
+        return shader;
+    };
 
-        // Compute links between stages for combined samplers, then bind them to texture units
-        gl.UseProgram(mProgram);
-        const auto& indices = layout->GetBindingIndexInfo();
+    mProgram = gl.CreateProgram();
 
-        std::set<CombinedSampler> combinedSamplersSet;
-        for (SingleShaderStage stage : IterateStages(activeStages)) {
-            for (const CombinedSampler& combined : combinedSamplers[stage]) {
-                combinedSamplersSet.insert(combined);
-            }
+    // Compute the set of active stages.
+    wgpu::ShaderStage activeStages = wgpu::ShaderStage::None;
+    for (SingleShaderStage stage : IterateStages(kAllStages)) {
+        if (stages[stage].module != nullptr) {
+            activeStages |= StageBit(stage);
+        }
+    }
+
+    // Create an OpenGL shader for each stage and gather the list of combined samplers.
+    PerStage<CombinedSamplerInfo> combinedSamplers;
+    bool needsPlaceholderSampler = false;
+    std::vector<GLuint> glShaders;
+    for (SingleShaderStage stage : IterateStages(activeStages)) {
+        const ShaderModule* module = ToBackend(stages[stage].module.Get());
+        std::string glsl;
+        DAWN_TRY_ASSIGN(glsl, module->TranslateToGLSL(stages[stage].entryPoint.c_str(), stage,
+                                                      &combinedSamplers[stage], layout,
+                                                      &needsPlaceholderSampler));
+        GLuint shader;
+        DAWN_TRY_ASSIGN(shader, CreateShader(gl, GLShaderType(stage), glsl.c_str()));
+        gl.AttachShader(mProgram, shader);
+        glShaders.push_back(shader);
+    }
+
+    if (needsPlaceholderSampler) {
+        SamplerDescriptor desc = {};
+        ASSERT(desc.minFilter == wgpu::FilterMode::Nearest);
+        ASSERT(desc.magFilter == wgpu::FilterMode::Nearest);
+        ASSERT(desc.mipmapFilter == wgpu::FilterMode::Nearest);
+        mPlaceholderSampler =
+            ToBackend(layout->GetDevice()->GetOrCreateSampler(&desc).AcquireSuccess());
+    }
+
+    // Link all the shaders together.
+    gl.LinkProgram(mProgram);
+
+    GLint linkStatus = GL_FALSE;
+    gl.GetProgramiv(mProgram, GL_LINK_STATUS, &linkStatus);
+    if (linkStatus == GL_FALSE) {
+        GLint infoLogLength = 0;
+        gl.GetProgramiv(mProgram, GL_INFO_LOG_LENGTH, &infoLogLength);
+
+        if (infoLogLength > 1) {
+            std::vector<char> buffer(infoLogLength);
+            gl.GetProgramInfoLog(mProgram, infoLogLength, nullptr, &buffer[0]);
+            return DAWN_FORMAT_VALIDATION_ERROR("Program link failed:\n%s", buffer.data());
+        }
+    }
+
+    // Compute links between stages for combined samplers, then bind them to texture units
+    gl.UseProgram(mProgram);
+    const auto& indices = layout->GetBindingIndexInfo();
+
+    std::set<CombinedSampler> combinedSamplersSet;
+    for (SingleShaderStage stage : IterateStages(activeStages)) {
+        for (const CombinedSampler& combined : combinedSamplers[stage]) {
+            combinedSamplersSet.insert(combined);
+        }
+    }
+
+    mUnitsForSamplers.resize(layout->GetNumSamplers());
+    mUnitsForTextures.resize(layout->GetNumSampledTextures());
+
+    GLuint textureUnit = layout->GetTextureUnitsUsed();
+    for (const auto& combined : combinedSamplersSet) {
+        const std::string& name = combined.GetName();
+        GLint location = gl.GetUniformLocation(mProgram, name.c_str());
+
+        if (location == -1) {
+            continue;
         }
 
-        mUnitsForSamplers.resize(layout->GetNumSamplers());
-        mUnitsForTextures.resize(layout->GetNumSampledTextures());
+        gl.Uniform1i(location, textureUnit);
 
-        GLuint textureUnit = layout->GetTextureUnitsUsed();
-        for (const auto& combined : combinedSamplersSet) {
-            const std::string& name = combined.GetName();
-            GLint location = gl.GetUniformLocation(mProgram, name.c_str());
+        bool shouldUseFiltering;
+        {
+            const BindGroupLayoutBase* bgl =
+                layout->GetBindGroupLayout(combined.textureLocation.group);
+            BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
 
-            if (location == -1) {
-                continue;
-            }
+            GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
+            mUnitsForTextures[textureIndex].push_back(textureUnit);
 
-            gl.Uniform1i(location, textureUnit);
-
-            bool shouldUseFiltering;
-            {
+            shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
+                                 wgpu::TextureSampleType::Float;
+        }
+        {
+            if (combined.usePlaceholderSampler) {
+                mPlaceholderSamplerUnits.push_back(textureUnit);
+            } else {
                 const BindGroupLayoutBase* bgl =
-                    layout->GetBindGroupLayout(combined.textureLocation.group);
-                BindingIndex bindingIndex = bgl->GetBindingIndex(combined.textureLocation.binding);
+                    layout->GetBindGroupLayout(combined.samplerLocation.group);
+                BindingIndex bindingIndex = bgl->GetBindingIndex(combined.samplerLocation.binding);
 
-                GLuint textureIndex = indices[combined.textureLocation.group][bindingIndex];
-                mUnitsForTextures[textureIndex].push_back(textureUnit);
-
-                shouldUseFiltering = bgl->GetBindingInfo(bindingIndex).texture.sampleType ==
-                                     wgpu::TextureSampleType::Float;
+                GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
+                mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
             }
-            {
-                if (combined.usePlaceholderSampler) {
-                    mPlaceholderSamplerUnits.push_back(textureUnit);
-                } else {
-                    const BindGroupLayoutBase* bgl =
-                        layout->GetBindGroupLayout(combined.samplerLocation.group);
-                    BindingIndex bindingIndex =
-                        bgl->GetBindingIndex(combined.samplerLocation.binding);
-
-                    GLuint samplerIndex = indices[combined.samplerLocation.group][bindingIndex];
-                    mUnitsForSamplers[samplerIndex].push_back({textureUnit, shouldUseFiltering});
-                }
-            }
-
-            textureUnit++;
         }
 
-        for (GLuint glShader : glShaders) {
-            gl.DetachShader(mProgram, glShader);
-            gl.DeleteShader(glShader);
-        }
-
-        return {};
+        textureUnit++;
     }
 
-    void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
-        gl.DeleteProgram(mProgram);
+    for (GLuint glShader : glShaders) {
+        gl.DetachShader(mProgram, glShader);
+        gl.DeleteShader(glShader);
     }
 
-    const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
-        GLuint index) const {
-        ASSERT(index < mUnitsForSamplers.size());
-        return mUnitsForSamplers[index];
-    }
+    return {};
+}
 
-    const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
-        ASSERT(index < mUnitsForTextures.size());
-        return mUnitsForTextures[index];
-    }
+void PipelineGL::DeleteProgram(const OpenGLFunctions& gl) {
+    gl.DeleteProgram(mProgram);
+}
 
-    GLuint PipelineGL::GetProgramHandle() const {
-        return mProgram;
-    }
+const std::vector<PipelineGL::SamplerUnit>& PipelineGL::GetTextureUnitsForSampler(
+    GLuint index) const {
+    ASSERT(index < mUnitsForSamplers.size());
+    return mUnitsForSamplers[index];
+}
 
-    void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
-        gl.UseProgram(mProgram);
-        for (GLuint unit : mPlaceholderSamplerUnits) {
-            ASSERT(mPlaceholderSampler.Get() != nullptr);
-            gl.BindSampler(unit, mPlaceholderSampler->GetNonFilteringHandle());
-        }
+const std::vector<GLuint>& PipelineGL::GetTextureUnitsForTextureView(GLuint index) const {
+    ASSERT(index < mUnitsForTextures.size());
+    return mUnitsForTextures[index];
+}
+
+GLuint PipelineGL::GetProgramHandle() const {
+    return mProgram;
+}
+
+void PipelineGL::ApplyNow(const OpenGLFunctions& gl) {
+    gl.UseProgram(mProgram);
+    for (GLuint unit : mPlaceholderSamplerUnits) {
+        ASSERT(mPlaceholderSampler.Get() != nullptr);
+        gl.BindSampler(unit, mPlaceholderSampler->GetNonFilteringHandle());
     }
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/PipelineGL.h b/src/dawn/native/opengl/PipelineGL.h
index 645bae9..c838bc7 100644
--- a/src/dawn/native/opengl/PipelineGL.h
+++ b/src/dawn/native/opengl/PipelineGL.h
@@ -23,46 +23,46 @@
 #include "dawn/native/opengl/opengl_platform.h"
 
 namespace dawn::native {
-    struct ProgrammableStage;
+struct ProgrammableStage;
 }  // namespace dawn::native
 
 namespace dawn::native::opengl {
 
-    struct OpenGLFunctions;
-    class PipelineLayout;
-    class Sampler;
+struct OpenGLFunctions;
+class PipelineLayout;
+class Sampler;
 
-    class PipelineGL {
-      public:
-        PipelineGL();
-        ~PipelineGL();
+class PipelineGL {
+  public:
+    PipelineGL();
+    ~PipelineGL();
 
-        // For each unit a sampler is bound to we need to know if we should use filtering or not
-        // because int and uint texture are only complete without filtering.
-        struct SamplerUnit {
-            GLuint unit;
-            bool shouldUseFiltering;
-        };
-        const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
-        const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
-        GLuint GetProgramHandle() const;
-
-      protected:
-        void ApplyNow(const OpenGLFunctions& gl);
-        MaybeError InitializeBase(const OpenGLFunctions& gl,
-                                  const PipelineLayout* layout,
-                                  const PerStage<ProgrammableStage>& stages);
-        void DeleteProgram(const OpenGLFunctions& gl);
-
-      private:
-        GLuint mProgram;
-        std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
-        std::vector<std::vector<GLuint>> mUnitsForTextures;
-        std::vector<GLuint> mPlaceholderSamplerUnits;
-        // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
-        // destruction complex as it requires the sampler to be destroyed before the sampler cache.
-        Ref<Sampler> mPlaceholderSampler;
+    // For each unit a sampler is bound to we need to know if we should use filtering or not
+    // because int and uint texture are only complete without filtering.
+    struct SamplerUnit {
+        GLuint unit;
+        bool shouldUseFiltering;
     };
+    const std::vector<SamplerUnit>& GetTextureUnitsForSampler(GLuint index) const;
+    const std::vector<GLuint>& GetTextureUnitsForTextureView(GLuint index) const;
+    GLuint GetProgramHandle() const;
+
+  protected:
+    void ApplyNow(const OpenGLFunctions& gl);
+    MaybeError InitializeBase(const OpenGLFunctions& gl,
+                              const PipelineLayout* layout,
+                              const PerStage<ProgrammableStage>& stages);
+    void DeleteProgram(const OpenGLFunctions& gl);
+
+  private:
+    GLuint mProgram;
+    std::vector<std::vector<SamplerUnit>> mUnitsForSamplers;
+    std::vector<std::vector<GLuint>> mUnitsForTextures;
+    std::vector<GLuint> mPlaceholderSamplerUnits;
+    // TODO(enga): This could live on the Device, or elsewhere, but currently it makes Device
+    // destruction complex as it requires the sampler to be destroyed before the sampler cache.
+    Ref<Sampler> mPlaceholderSampler;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/PipelineLayoutGL.cpp b/src/dawn/native/opengl/PipelineLayoutGL.cpp
index 7dd54ab..c2d793d 100644
--- a/src/dawn/native/opengl/PipelineLayoutGL.cpp
+++ b/src/dawn/native/opengl/PipelineLayoutGL.cpp
@@ -20,76 +20,75 @@
 
 namespace dawn::native::opengl {
 
-    PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
-        : PipelineLayoutBase(device, descriptor) {
-        GLuint uboIndex = 0;
-        GLuint samplerIndex = 0;
-        GLuint sampledTextureIndex = 0;
-        GLuint ssboIndex = 0;
-        GLuint storageTextureIndex = 0;
+PipelineLayout::PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor)
+    : PipelineLayoutBase(device, descriptor) {
+    GLuint uboIndex = 0;
+    GLuint samplerIndex = 0;
+    GLuint sampledTextureIndex = 0;
+    GLuint ssboIndex = 0;
+    GLuint storageTextureIndex = 0;
 
-        for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
-            const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
-            mIndexInfo[group].resize(bgl->GetBindingCount());
+    for (BindGroupIndex group : IterateBitSet(GetBindGroupLayoutsMask())) {
+        const BindGroupLayoutBase* bgl = GetBindGroupLayout(group);
+        mIndexInfo[group].resize(bgl->GetBindingCount());
 
-            for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount();
-                 ++bindingIndex) {
-                const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
-                switch (bindingInfo.bindingType) {
-                    case BindingInfoType::Buffer:
-                        switch (bindingInfo.buffer.type) {
-                            case wgpu::BufferBindingType::Uniform:
-                                mIndexInfo[group][bindingIndex] = uboIndex;
-                                uboIndex++;
-                                break;
-                            case wgpu::BufferBindingType::Storage:
-                            case kInternalStorageBufferBinding:
-                            case wgpu::BufferBindingType::ReadOnlyStorage:
-                                mIndexInfo[group][bindingIndex] = ssboIndex;
-                                ssboIndex++;
-                                break;
-                            case wgpu::BufferBindingType::Undefined:
-                                UNREACHABLE();
-                        }
-                        break;
+        for (BindingIndex bindingIndex{0}; bindingIndex < bgl->GetBindingCount(); ++bindingIndex) {
+            const BindingInfo& bindingInfo = bgl->GetBindingInfo(bindingIndex);
+            switch (bindingInfo.bindingType) {
+                case BindingInfoType::Buffer:
+                    switch (bindingInfo.buffer.type) {
+                        case wgpu::BufferBindingType::Uniform:
+                            mIndexInfo[group][bindingIndex] = uboIndex;
+                            uboIndex++;
+                            break;
+                        case wgpu::BufferBindingType::Storage:
+                        case kInternalStorageBufferBinding:
+                        case wgpu::BufferBindingType::ReadOnlyStorage:
+                            mIndexInfo[group][bindingIndex] = ssboIndex;
+                            ssboIndex++;
+                            break;
+                        case wgpu::BufferBindingType::Undefined:
+                            UNREACHABLE();
+                    }
+                    break;
 
-                    case BindingInfoType::Sampler:
-                        mIndexInfo[group][bindingIndex] = samplerIndex;
-                        samplerIndex++;
-                        break;
+                case BindingInfoType::Sampler:
+                    mIndexInfo[group][bindingIndex] = samplerIndex;
+                    samplerIndex++;
+                    break;
 
-                    case BindingInfoType::Texture:
-                    case BindingInfoType::ExternalTexture:
-                        mIndexInfo[group][bindingIndex] = sampledTextureIndex;
-                        sampledTextureIndex++;
-                        break;
+                case BindingInfoType::Texture:
+                case BindingInfoType::ExternalTexture:
+                    mIndexInfo[group][bindingIndex] = sampledTextureIndex;
+                    sampledTextureIndex++;
+                    break;
 
-                    case BindingInfoType::StorageTexture:
-                        mIndexInfo[group][bindingIndex] = storageTextureIndex;
-                        storageTextureIndex++;
-                        break;
-                }
+                case BindingInfoType::StorageTexture:
+                    mIndexInfo[group][bindingIndex] = storageTextureIndex;
+                    storageTextureIndex++;
+                    break;
             }
         }
-
-        mNumSamplers = samplerIndex;
-        mNumSampledTextures = sampledTextureIndex;
     }
 
-    const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
-        return mIndexInfo;
-    }
+    mNumSamplers = samplerIndex;
+    mNumSampledTextures = sampledTextureIndex;
+}
 
-    GLuint PipelineLayout::GetTextureUnitsUsed() const {
-        return 0;
-    }
+const PipelineLayout::BindingIndexInfo& PipelineLayout::GetBindingIndexInfo() const {
+    return mIndexInfo;
+}
 
-    size_t PipelineLayout::GetNumSamplers() const {
-        return mNumSamplers;
-    }
+GLuint PipelineLayout::GetTextureUnitsUsed() const {
+    return 0;
+}
 
-    size_t PipelineLayout::GetNumSampledTextures() const {
-        return mNumSampledTextures;
-    }
+size_t PipelineLayout::GetNumSamplers() const {
+    return mNumSamplers;
+}
+
+size_t PipelineLayout::GetNumSampledTextures() const {
+    return mNumSampledTextures;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/PipelineLayoutGL.h b/src/dawn/native/opengl/PipelineLayoutGL.h
index a315221..a278e2c 100644
--- a/src/dawn/native/opengl/PipelineLayoutGL.h
+++ b/src/dawn/native/opengl/PipelineLayoutGL.h
@@ -24,26 +24,26 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class PipelineLayout final : public PipelineLayoutBase {
-      public:
-        PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
+class PipelineLayout final : public PipelineLayoutBase {
+  public:
+    PipelineLayout(Device* device, const PipelineLayoutDescriptor* descriptor);
 
-        using BindingIndexInfo =
-            ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
-        const BindingIndexInfo& GetBindingIndexInfo() const;
+    using BindingIndexInfo =
+        ityp::array<BindGroupIndex, ityp::vector<BindingIndex, GLuint>, kMaxBindGroups>;
+    const BindingIndexInfo& GetBindingIndexInfo() const;
 
-        GLuint GetTextureUnitsUsed() const;
-        size_t GetNumSamplers() const;
-        size_t GetNumSampledTextures() const;
+    GLuint GetTextureUnitsUsed() const;
+    size_t GetNumSamplers() const;
+    size_t GetNumSampledTextures() const;
 
-      private:
-        ~PipelineLayout() override = default;
-        BindingIndexInfo mIndexInfo;
-        size_t mNumSamplers;
-        size_t mNumSampledTextures;
-    };
+  private:
+    ~PipelineLayout() override = default;
+    BindingIndexInfo mIndexInfo;
+    size_t mNumSamplers;
+    size_t mNumSampledTextures;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/QuerySetGL.cpp b/src/dawn/native/opengl/QuerySetGL.cpp
index cdf9858..dc8424c 100644
--- a/src/dawn/native/opengl/QuerySetGL.cpp
+++ b/src/dawn/native/opengl/QuerySetGL.cpp
@@ -18,10 +18,9 @@
 
 namespace dawn::native::opengl {
 
-    QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
-        : QuerySetBase(device, descriptor) {
-    }
+QuerySet::QuerySet(Device* device, const QuerySetDescriptor* descriptor)
+    : QuerySetBase(device, descriptor) {}
 
-    QuerySet::~QuerySet() = default;
+QuerySet::~QuerySet() = default;
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/QuerySetGL.h b/src/dawn/native/opengl/QuerySetGL.h
index 7e21654..1121113 100644
--- a/src/dawn/native/opengl/QuerySetGL.h
+++ b/src/dawn/native/opengl/QuerySetGL.h
@@ -19,15 +19,15 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class QuerySet final : public QuerySetBase {
-      public:
-        QuerySet(Device* device, const QuerySetDescriptor* descriptor);
+class QuerySet final : public QuerySetBase {
+  public:
+    QuerySet(Device* device, const QuerySetDescriptor* descriptor);
 
-      private:
-        ~QuerySet() override;
-    };
+  private:
+    ~QuerySet() override;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/QueueGL.cpp b/src/dawn/native/opengl/QueueGL.cpp
index 44a77a2..68eb918 100644
--- a/src/dawn/native/opengl/QueueGL.cpp
+++ b/src/dawn/native/opengl/QueueGL.cpp
@@ -23,60 +23,56 @@
 
 namespace dawn::native::opengl {
 
-    Queue::Queue(Device* device, const QueueDescriptor* descriptor)
-        : QueueBase(device, descriptor) {
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
+
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+    Device* device = ToBackend(GetDevice());
+
+    TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+    for (uint32_t i = 0; i < commandCount; ++i) {
+        DAWN_TRY(ToBackend(commands[i])->Execute());
     }
+    TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
 
-    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
-        Device* device = ToBackend(GetDevice());
+    device->SubmitFenceSync();
+    return {};
+}
 
-        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
-        for (uint32_t i = 0; i < commandCount; ++i) {
-            DAWN_TRY(ToBackend(commands[i])->Execute());
-        }
-        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferGL::Execute");
+MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
+                                  uint64_t bufferOffset,
+                                  const void* data,
+                                  size_t size) {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
 
-        device->SubmitFenceSync();
-        return {};
+    ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
+
+    gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
+    gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
+    return {};
+}
+
+MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
+                                   const void* data,
+                                   const TextureDataLayout& dataLayout,
+                                   const Extent3D& writeSizePixel) {
+    DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
+                    "Writes to stencil textures unsupported on the OpenGL backend.");
+
+    TextureCopy textureCopy;
+    textureCopy.texture = destination.texture;
+    textureCopy.mipLevel = destination.mipLevel;
+    textureCopy.origin = destination.origin;
+    textureCopy.aspect = SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
+
+    SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
+    if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel, destination.mipLevel)) {
+        destination.texture->SetIsSubresourceContentInitialized(true, range);
+    } else {
+        ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
     }
-
-    MaybeError Queue::WriteBufferImpl(BufferBase* buffer,
-                                      uint64_t bufferOffset,
-                                      const void* data,
-                                      size_t size) {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
-        ToBackend(buffer)->EnsureDataInitializedAsDestination(bufferOffset, size);
-
-        gl.BindBuffer(GL_ARRAY_BUFFER, ToBackend(buffer)->GetHandle());
-        gl.BufferSubData(GL_ARRAY_BUFFER, bufferOffset, size, data);
-        return {};
-    }
-
-    MaybeError Queue::WriteTextureImpl(const ImageCopyTexture& destination,
-                                       const void* data,
-                                       const TextureDataLayout& dataLayout,
-                                       const Extent3D& writeSizePixel) {
-        DAWN_INVALID_IF(destination.aspect == wgpu::TextureAspect::StencilOnly,
-                        "Writes to stencil textures unsupported on the OpenGL backend.");
-
-        TextureCopy textureCopy;
-        textureCopy.texture = destination.texture;
-        textureCopy.mipLevel = destination.mipLevel;
-        textureCopy.origin = destination.origin;
-        textureCopy.aspect =
-            SelectFormatAspects(destination.texture->GetFormat(), destination.aspect);
-
-        SubresourceRange range = GetSubresourcesAffectedByCopy(textureCopy, writeSizePixel);
-        if (IsCompleteSubresourceCopiedTo(destination.texture, writeSizePixel,
-                                          destination.mipLevel)) {
-            destination.texture->SetIsSubresourceContentInitialized(true, range);
-        } else {
-            ToBackend(destination.texture)->EnsureSubresourceContentInitialized(range);
-        }
-        DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
-        ToBackend(destination.texture)->Touch();
-        return {};
-    }
+    DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, data, dataLayout, writeSizePixel);
+    ToBackend(destination.texture)->Touch();
+    return {};
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/QueueGL.h b/src/dawn/native/opengl/QueueGL.h
index c347105..962dd21 100644
--- a/src/dawn/native/opengl/QueueGL.h
+++ b/src/dawn/native/opengl/QueueGL.h
@@ -19,23 +19,23 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class Queue final : public QueueBase {
-      public:
-        Queue(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+  public:
+    Queue(Device* device, const QueueDescriptor* descriptor);
 
-      private:
-        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
-        MaybeError WriteBufferImpl(BufferBase* buffer,
-                                   uint64_t bufferOffset,
-                                   const void* data,
-                                   size_t size) override;
-        MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
-                                    const void* data,
-                                    const TextureDataLayout& dataLayout,
-                                    const Extent3D& writeSizePixel) override;
-    };
+  private:
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    MaybeError WriteBufferImpl(BufferBase* buffer,
+                               uint64_t bufferOffset,
+                               const void* data,
+                               size_t size) override;
+    MaybeError WriteTextureImpl(const ImageCopyTexture& destination,
+                                const void* data,
+                                const TextureDataLayout& dataLayout,
+                                const Extent3D& writeSizePixel) override;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/RenderPipelineGL.cpp b/src/dawn/native/opengl/RenderPipelineGL.cpp
index 5e4ddce..6f93260 100644
--- a/src/dawn/native/opengl/RenderPipelineGL.cpp
+++ b/src/dawn/native/opengl/RenderPipelineGL.cpp
@@ -21,325 +21,322 @@
 
 namespace dawn::native::opengl {
 
-    namespace {
+namespace {
 
-        GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
-            switch (primitiveTopology) {
-                case wgpu::PrimitiveTopology::PointList:
-                    return GL_POINTS;
-                case wgpu::PrimitiveTopology::LineList:
-                    return GL_LINES;
-                case wgpu::PrimitiveTopology::LineStrip:
-                    return GL_LINE_STRIP;
-                case wgpu::PrimitiveTopology::TriangleList:
-                    return GL_TRIANGLES;
-                case wgpu::PrimitiveTopology::TriangleStrip:
-                    return GL_TRIANGLE_STRIP;
-            }
-            UNREACHABLE();
-        }
+GLenum GLPrimitiveTopology(wgpu::PrimitiveTopology primitiveTopology) {
+    switch (primitiveTopology) {
+        case wgpu::PrimitiveTopology::PointList:
+            return GL_POINTS;
+        case wgpu::PrimitiveTopology::LineList:
+            return GL_LINES;
+        case wgpu::PrimitiveTopology::LineStrip:
+            return GL_LINE_STRIP;
+        case wgpu::PrimitiveTopology::TriangleList:
+            return GL_TRIANGLES;
+        case wgpu::PrimitiveTopology::TriangleStrip:
+            return GL_TRIANGLE_STRIP;
+    }
+    UNREACHABLE();
+}
 
-        void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
-                                      wgpu::FrontFace face,
-                                      wgpu::CullMode mode) {
-            // Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
-            // which is different from WebGPU and other backends (Y axis is down).
-            GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
-            gl.FrontFace(direction);
+void ApplyFrontFaceAndCulling(const OpenGLFunctions& gl,
+                              wgpu::FrontFace face,
+                              wgpu::CullMode mode) {
+    // Note that we invert winding direction in OpenGL. Because Y axis is up in OpenGL,
+    // which is different from WebGPU and other backends (Y axis is down).
+    GLenum direction = (face == wgpu::FrontFace::CCW) ? GL_CW : GL_CCW;
+    gl.FrontFace(direction);
 
-            if (mode == wgpu::CullMode::None) {
-                gl.Disable(GL_CULL_FACE);
-            } else {
-                gl.Enable(GL_CULL_FACE);
+    if (mode == wgpu::CullMode::None) {
+        gl.Disable(GL_CULL_FACE);
+    } else {
+        gl.Enable(GL_CULL_FACE);
 
-                GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
-                gl.CullFace(cullMode);
-            }
-        }
+        GLenum cullMode = (mode == wgpu::CullMode::Front) ? GL_FRONT : GL_BACK;
+        gl.CullFace(cullMode);
+    }
+}
 
-        GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
-            switch (factor) {
-                case wgpu::BlendFactor::Zero:
-                    return GL_ZERO;
-                case wgpu::BlendFactor::One:
-                    return GL_ONE;
-                case wgpu::BlendFactor::Src:
-                    return GL_SRC_COLOR;
-                case wgpu::BlendFactor::OneMinusSrc:
-                    return GL_ONE_MINUS_SRC_COLOR;
-                case wgpu::BlendFactor::SrcAlpha:
-                    return GL_SRC_ALPHA;
-                case wgpu::BlendFactor::OneMinusSrcAlpha:
-                    return GL_ONE_MINUS_SRC_ALPHA;
-                case wgpu::BlendFactor::Dst:
-                    return GL_DST_COLOR;
-                case wgpu::BlendFactor::OneMinusDst:
-                    return GL_ONE_MINUS_DST_COLOR;
-                case wgpu::BlendFactor::DstAlpha:
-                    return GL_DST_ALPHA;
-                case wgpu::BlendFactor::OneMinusDstAlpha:
-                    return GL_ONE_MINUS_DST_ALPHA;
-                case wgpu::BlendFactor::SrcAlphaSaturated:
-                    return GL_SRC_ALPHA_SATURATE;
-                case wgpu::BlendFactor::Constant:
-                    return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
-                case wgpu::BlendFactor::OneMinusConstant:
-                    return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
-            }
-            UNREACHABLE();
-        }
+GLenum GLBlendFactor(wgpu::BlendFactor factor, bool alpha) {
+    switch (factor) {
+        case wgpu::BlendFactor::Zero:
+            return GL_ZERO;
+        case wgpu::BlendFactor::One:
+            return GL_ONE;
+        case wgpu::BlendFactor::Src:
+            return GL_SRC_COLOR;
+        case wgpu::BlendFactor::OneMinusSrc:
+            return GL_ONE_MINUS_SRC_COLOR;
+        case wgpu::BlendFactor::SrcAlpha:
+            return GL_SRC_ALPHA;
+        case wgpu::BlendFactor::OneMinusSrcAlpha:
+            return GL_ONE_MINUS_SRC_ALPHA;
+        case wgpu::BlendFactor::Dst:
+            return GL_DST_COLOR;
+        case wgpu::BlendFactor::OneMinusDst:
+            return GL_ONE_MINUS_DST_COLOR;
+        case wgpu::BlendFactor::DstAlpha:
+            return GL_DST_ALPHA;
+        case wgpu::BlendFactor::OneMinusDstAlpha:
+            return GL_ONE_MINUS_DST_ALPHA;
+        case wgpu::BlendFactor::SrcAlphaSaturated:
+            return GL_SRC_ALPHA_SATURATE;
+        case wgpu::BlendFactor::Constant:
+            return alpha ? GL_CONSTANT_ALPHA : GL_CONSTANT_COLOR;
+        case wgpu::BlendFactor::OneMinusConstant:
+            return alpha ? GL_ONE_MINUS_CONSTANT_ALPHA : GL_ONE_MINUS_CONSTANT_COLOR;
+    }
+    UNREACHABLE();
+}
 
-        GLenum GLBlendMode(wgpu::BlendOperation operation) {
-            switch (operation) {
-                case wgpu::BlendOperation::Add:
-                    return GL_FUNC_ADD;
-                case wgpu::BlendOperation::Subtract:
-                    return GL_FUNC_SUBTRACT;
-                case wgpu::BlendOperation::ReverseSubtract:
-                    return GL_FUNC_REVERSE_SUBTRACT;
-                case wgpu::BlendOperation::Min:
-                    return GL_MIN;
-                case wgpu::BlendOperation::Max:
-                    return GL_MAX;
-            }
-            UNREACHABLE();
-        }
+GLenum GLBlendMode(wgpu::BlendOperation operation) {
+    switch (operation) {
+        case wgpu::BlendOperation::Add:
+            return GL_FUNC_ADD;
+        case wgpu::BlendOperation::Subtract:
+            return GL_FUNC_SUBTRACT;
+        case wgpu::BlendOperation::ReverseSubtract:
+            return GL_FUNC_REVERSE_SUBTRACT;
+        case wgpu::BlendOperation::Min:
+            return GL_MIN;
+        case wgpu::BlendOperation::Max:
+            return GL_MAX;
+    }
+    UNREACHABLE();
+}
 
-        void ApplyColorState(const OpenGLFunctions& gl,
-                             ColorAttachmentIndex attachment,
-                             const ColorTargetState* state) {
-            GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
-            if (state->blend != nullptr) {
-                gl.Enablei(GL_BLEND, colorBuffer);
-                gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
-                                          GLBlendMode(state->blend->alpha.operation));
-                gl.BlendFuncSeparatei(colorBuffer,
-                                      GLBlendFactor(state->blend->color.srcFactor, false),
-                                      GLBlendFactor(state->blend->color.dstFactor, false),
-                                      GLBlendFactor(state->blend->alpha.srcFactor, true),
-                                      GLBlendFactor(state->blend->alpha.dstFactor, true));
-            } else {
-                gl.Disablei(GL_BLEND, colorBuffer);
-            }
-            gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
-                          state->writeMask & wgpu::ColorWriteMask::Green,
-                          state->writeMask & wgpu::ColorWriteMask::Blue,
-                          state->writeMask & wgpu::ColorWriteMask::Alpha);
-        }
+void ApplyColorState(const OpenGLFunctions& gl,
+                     ColorAttachmentIndex attachment,
+                     const ColorTargetState* state) {
+    GLuint colorBuffer = static_cast<GLuint>(static_cast<uint8_t>(attachment));
+    if (state->blend != nullptr) {
+        gl.Enablei(GL_BLEND, colorBuffer);
+        gl.BlendEquationSeparatei(colorBuffer, GLBlendMode(state->blend->color.operation),
+                                  GLBlendMode(state->blend->alpha.operation));
+        gl.BlendFuncSeparatei(colorBuffer, GLBlendFactor(state->blend->color.srcFactor, false),
+                              GLBlendFactor(state->blend->color.dstFactor, false),
+                              GLBlendFactor(state->blend->alpha.srcFactor, true),
+                              GLBlendFactor(state->blend->alpha.dstFactor, true));
+    } else {
+        gl.Disablei(GL_BLEND, colorBuffer);
+    }
+    gl.ColorMaski(colorBuffer, state->writeMask & wgpu::ColorWriteMask::Red,
+                  state->writeMask & wgpu::ColorWriteMask::Green,
+                  state->writeMask & wgpu::ColorWriteMask::Blue,
+                  state->writeMask & wgpu::ColorWriteMask::Alpha);
+}
 
-        void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
-            if (state->blend != nullptr) {
-                gl.Enable(GL_BLEND);
-                gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
-                                         GLBlendMode(state->blend->alpha.operation));
-                gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
-                                     GLBlendFactor(state->blend->color.dstFactor, false),
-                                     GLBlendFactor(state->blend->alpha.srcFactor, true),
-                                     GLBlendFactor(state->blend->alpha.dstFactor, true));
-            } else {
-                gl.Disable(GL_BLEND);
-            }
-            gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
-                         state->writeMask & wgpu::ColorWriteMask::Green,
-                         state->writeMask & wgpu::ColorWriteMask::Blue,
-                         state->writeMask & wgpu::ColorWriteMask::Alpha);
-        }
+void ApplyColorState(const OpenGLFunctions& gl, const ColorTargetState* state) {
+    if (state->blend != nullptr) {
+        gl.Enable(GL_BLEND);
+        gl.BlendEquationSeparate(GLBlendMode(state->blend->color.operation),
+                                 GLBlendMode(state->blend->alpha.operation));
+        gl.BlendFuncSeparate(GLBlendFactor(state->blend->color.srcFactor, false),
+                             GLBlendFactor(state->blend->color.dstFactor, false),
+                             GLBlendFactor(state->blend->alpha.srcFactor, true),
+                             GLBlendFactor(state->blend->alpha.dstFactor, true));
+    } else {
+        gl.Disable(GL_BLEND);
+    }
+    gl.ColorMask(state->writeMask & wgpu::ColorWriteMask::Red,
+                 state->writeMask & wgpu::ColorWriteMask::Green,
+                 state->writeMask & wgpu::ColorWriteMask::Blue,
+                 state->writeMask & wgpu::ColorWriteMask::Alpha);
+}
 
-        bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
-            return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
-                   lhs.dstFactor == rhs.dstFactor;
-        }
+bool Equal(const BlendComponent& lhs, const BlendComponent& rhs) {
+    return lhs.operation == rhs.operation && lhs.srcFactor == rhs.srcFactor &&
+           lhs.dstFactor == rhs.dstFactor;
+}
 
-        GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
-            switch (stencilOperation) {
-                case wgpu::StencilOperation::Keep:
-                    return GL_KEEP;
-                case wgpu::StencilOperation::Zero:
-                    return GL_ZERO;
-                case wgpu::StencilOperation::Replace:
-                    return GL_REPLACE;
-                case wgpu::StencilOperation::Invert:
-                    return GL_INVERT;
-                case wgpu::StencilOperation::IncrementClamp:
-                    return GL_INCR;
-                case wgpu::StencilOperation::DecrementClamp:
-                    return GL_DECR;
-                case wgpu::StencilOperation::IncrementWrap:
-                    return GL_INCR_WRAP;
-                case wgpu::StencilOperation::DecrementWrap:
-                    return GL_DECR_WRAP;
-            }
-            UNREACHABLE();
-        }
+GLuint OpenGLStencilOperation(wgpu::StencilOperation stencilOperation) {
+    switch (stencilOperation) {
+        case wgpu::StencilOperation::Keep:
+            return GL_KEEP;
+        case wgpu::StencilOperation::Zero:
+            return GL_ZERO;
+        case wgpu::StencilOperation::Replace:
+            return GL_REPLACE;
+        case wgpu::StencilOperation::Invert:
+            return GL_INVERT;
+        case wgpu::StencilOperation::IncrementClamp:
+            return GL_INCR;
+        case wgpu::StencilOperation::DecrementClamp:
+            return GL_DECR;
+        case wgpu::StencilOperation::IncrementWrap:
+            return GL_INCR_WRAP;
+        case wgpu::StencilOperation::DecrementWrap:
+            return GL_DECR_WRAP;
+    }
+    UNREACHABLE();
+}
 
-        void ApplyDepthStencilState(const OpenGLFunctions& gl,
-                                    const DepthStencilState* descriptor,
-                                    PersistentPipelineState* persistentPipelineState) {
-            // Depth writes only occur if depth is enabled
-            if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
-                !descriptor->depthWriteEnabled) {
-                gl.Disable(GL_DEPTH_TEST);
-            } else {
-                gl.Enable(GL_DEPTH_TEST);
-            }
-
-            if (descriptor->depthWriteEnabled) {
-                gl.DepthMask(GL_TRUE);
-            } else {
-                gl.DepthMask(GL_FALSE);
-            }
-
-            gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));
-
-            if (StencilTestEnabled(descriptor)) {
-                gl.Enable(GL_STENCIL_TEST);
-            } else {
-                gl.Disable(GL_STENCIL_TEST);
-            }
-
-            GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
-            GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
-            persistentPipelineState->SetStencilFuncsAndMask(
-                gl, backCompareFunction, frontCompareFunction, descriptor->stencilReadMask);
-
-            gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
-                                 OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
-                                 OpenGLStencilOperation(descriptor->stencilBack.passOp));
-            gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
-                                 OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
-                                 OpenGLStencilOperation(descriptor->stencilFront.passOp));
-
-            gl.StencilMask(descriptor->stencilWriteMask);
-        }
-
-    }  // anonymous namespace
-
-    // static
-    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
-        Device* device,
-        const RenderPipelineDescriptor* descriptor) {
-        return AcquireRef(new RenderPipeline(device, descriptor));
+void ApplyDepthStencilState(const OpenGLFunctions& gl,
+                            const DepthStencilState* descriptor,
+                            PersistentPipelineState* persistentPipelineState) {
+    // Depth writes only occur if depth is enabled
+    if (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+        !descriptor->depthWriteEnabled) {
+        gl.Disable(GL_DEPTH_TEST);
+    } else {
+        gl.Enable(GL_DEPTH_TEST);
     }
 
-    RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
-        : RenderPipelineBase(device, descriptor),
-          mVertexArrayObject(0),
-          mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {
+    if (descriptor->depthWriteEnabled) {
+        gl.DepthMask(GL_TRUE);
+    } else {
+        gl.DepthMask(GL_FALSE);
     }
 
-    MaybeError RenderPipeline::Initialize() {
-        DAWN_TRY(
-            InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
-        CreateVAOForVertexState();
-        return {};
+    gl.DepthFunc(ToOpenGLCompareFunction(descriptor->depthCompare));
+
+    if (StencilTestEnabled(descriptor)) {
+        gl.Enable(GL_STENCIL_TEST);
+    } else {
+        gl.Disable(GL_STENCIL_TEST);
     }
 
-    RenderPipeline::~RenderPipeline() = default;
+    GLenum backCompareFunction = ToOpenGLCompareFunction(descriptor->stencilBack.compare);
+    GLenum frontCompareFunction = ToOpenGLCompareFunction(descriptor->stencilFront.compare);
+    persistentPipelineState->SetStencilFuncsAndMask(gl, backCompareFunction, frontCompareFunction,
+                                                    descriptor->stencilReadMask);
 
-    void RenderPipeline::DestroyImpl() {
-        RenderPipelineBase::DestroyImpl();
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-        gl.DeleteVertexArrays(1, &mVertexArrayObject);
-        gl.BindVertexArray(0);
-        DeleteProgram(gl);
-    }
+    gl.StencilOpSeparate(GL_BACK, OpenGLStencilOperation(descriptor->stencilBack.failOp),
+                         OpenGLStencilOperation(descriptor->stencilBack.depthFailOp),
+                         OpenGLStencilOperation(descriptor->stencilBack.passOp));
+    gl.StencilOpSeparate(GL_FRONT, OpenGLStencilOperation(descriptor->stencilFront.failOp),
+                         OpenGLStencilOperation(descriptor->stencilFront.depthFailOp),
+                         OpenGLStencilOperation(descriptor->stencilFront.passOp));
 
-    GLenum RenderPipeline::GetGLPrimitiveTopology() const {
-        return mGlPrimitiveTopology;
-    }
+    gl.StencilMask(descriptor->stencilWriteMask);
+}
 
-    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
-    RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
-        ASSERT(!IsError());
-        return mAttributesUsingVertexBuffer[slot];
-    }
+}  // anonymous namespace
 
-    void RenderPipeline::CreateVAOForVertexState() {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+// static
+Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+    Device* device,
+    const RenderPipelineDescriptor* descriptor) {
+    return AcquireRef(new RenderPipeline(device, descriptor));
+}
 
-        gl.GenVertexArrays(1, &mVertexArrayObject);
-        gl.BindVertexArray(mVertexArrayObject);
+RenderPipeline::RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor)
+    : RenderPipelineBase(device, descriptor),
+      mVertexArrayObject(0),
+      mGlPrimitiveTopology(GLPrimitiveTopology(GetPrimitiveTopology())) {}
 
-        for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
-            const auto& attribute = GetAttribute(location);
-            GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
-            gl.EnableVertexAttribArray(glAttrib);
+MaybeError RenderPipeline::Initialize() {
+    DAWN_TRY(InitializeBase(ToBackend(GetDevice())->gl, ToBackend(GetLayout()), GetAllStages()));
+    CreateVAOForVertexState();
+    return {};
+}
 
-            mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
-            const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
+RenderPipeline::~RenderPipeline() = default;
 
-            if (vertexBuffer.arrayStride == 0) {
-                // Emulate a stride of zero (constant vertex attribute) by
-                // setting the attribute instance divisor to a huge number.
-                gl.VertexAttribDivisor(glAttrib, 0xffffffff);
-            } else {
-                switch (vertexBuffer.stepMode) {
-                    case wgpu::VertexStepMode::Vertex:
-                        break;
-                    case wgpu::VertexStepMode::Instance:
-                        gl.VertexAttribDivisor(glAttrib, 1);
-                        break;
-                }
-            }
-        }
-    }
+void RenderPipeline::DestroyImpl() {
+    RenderPipelineBase::DestroyImpl();
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    gl.DeleteVertexArrays(1, &mVertexArrayObject);
+    gl.BindVertexArray(0);
+    DeleteProgram(gl);
+}
 
-    void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-        PipelineGL::ApplyNow(gl);
+GLenum RenderPipeline::GetGLPrimitiveTopology() const {
+    return mGlPrimitiveTopology;
+}
 
-        ASSERT(mVertexArrayObject);
-        gl.BindVertexArray(mVertexArrayObject);
+ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>
+RenderPipeline::GetAttributesUsingVertexBuffer(VertexBufferSlot slot) const {
+    ASSERT(!IsError());
+    return mAttributesUsingVertexBuffer[slot];
+}
 
-        ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
+void RenderPipeline::CreateVAOForVertexState() {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
 
-        ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
+    gl.GenVertexArrays(1, &mVertexArrayObject);
+    gl.BindVertexArray(mVertexArrayObject);
 
-        gl.SampleMaski(0, GetSampleMask());
-        if (IsAlphaToCoverageEnabled()) {
-            gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+    for (VertexAttributeLocation location : IterateBitSet(GetAttributeLocationsUsed())) {
+        const auto& attribute = GetAttribute(location);
+        GLuint glAttrib = static_cast<GLuint>(static_cast<uint8_t>(location));
+        gl.EnableVertexAttribArray(glAttrib);
+
+        mAttributesUsingVertexBuffer[attribute.vertexBufferSlot][location] = true;
+        const VertexBufferInfo& vertexBuffer = GetVertexBuffer(attribute.vertexBufferSlot);
+
+        if (vertexBuffer.arrayStride == 0) {
+            // Emulate a stride of zero (constant vertex attribute) by
+            // setting the attribute instance divisor to a huge number.
+            gl.VertexAttribDivisor(glAttrib, 0xffffffff);
         } else {
-            gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
-        }
-
-        if (IsDepthBiasEnabled()) {
-            gl.Enable(GL_POLYGON_OFFSET_FILL);
-            float depthBias = GetDepthBias();
-            float slopeScale = GetDepthBiasSlopeScale();
-            if (gl.PolygonOffsetClamp != nullptr) {
-                gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
-            } else {
-                gl.PolygonOffset(slopeScale, depthBias);
+            switch (vertexBuffer.stepMode) {
+                case wgpu::VertexStepMode::Vertex:
+                    break;
+                case wgpu::VertexStepMode::Instance:
+                    gl.VertexAttribDivisor(glAttrib, 1);
+                    break;
             }
-        } else {
-            gl.Disable(GL_POLYGON_OFFSET_FILL);
         }
+    }
+}
 
-        if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
-            for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
-                ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
-            }
+void RenderPipeline::ApplyNow(PersistentPipelineState& persistentPipelineState) {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    PipelineGL::ApplyNow(gl);
+
+    ASSERT(mVertexArrayObject);
+    gl.BindVertexArray(mVertexArrayObject);
+
+    ApplyFrontFaceAndCulling(gl, GetFrontFace(), GetCullMode());
+
+    ApplyDepthStencilState(gl, GetDepthStencilState(), &persistentPipelineState);
+
+    gl.SampleMaski(0, GetSampleMask());
+    if (IsAlphaToCoverageEnabled()) {
+        gl.Enable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+    } else {
+        gl.Disable(GL_SAMPLE_ALPHA_TO_COVERAGE);
+    }
+
+    if (IsDepthBiasEnabled()) {
+        gl.Enable(GL_POLYGON_OFFSET_FILL);
+        float depthBias = GetDepthBias();
+        float slopeScale = GetDepthBiasSlopeScale();
+        if (gl.PolygonOffsetClamp != nullptr) {
+            gl.PolygonOffsetClamp(slopeScale, depthBias, GetDepthBiasClamp());
         } else {
-            const ColorTargetState* prevDescriptor = nullptr;
-            for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
-                const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
-                if (!prevDescriptor) {
-                    ApplyColorState(gl, descriptor);
-                    prevDescriptor = descriptor;
-                } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
-                    // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
-                    // per color target. Add validation to prevent this as it is not.
+            gl.PolygonOffset(slopeScale, depthBias);
+        }
+    } else {
+        gl.Disable(GL_POLYGON_OFFSET_FILL);
+    }
+
+    if (!GetDevice()->IsToggleEnabled(Toggle::DisableIndexedDrawBuffers)) {
+        for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+            ApplyColorState(gl, attachmentSlot, GetColorTargetState(attachmentSlot));
+        }
+    } else {
+        const ColorTargetState* prevDescriptor = nullptr;
+        for (ColorAttachmentIndex attachmentSlot : IterateBitSet(GetColorAttachmentsMask())) {
+            const ColorTargetState* descriptor = GetColorTargetState(attachmentSlot);
+            if (!prevDescriptor) {
+                ApplyColorState(gl, descriptor);
+                prevDescriptor = descriptor;
+            } else if ((descriptor->blend == nullptr) != (prevDescriptor->blend == nullptr)) {
+                // TODO(crbug.com/dawn/582): GLES < 3.2 does not support different blend states
+                // per color target. Add validation to prevent this as it is not.
+                ASSERT(false);
+            } else if (descriptor->blend != nullptr) {
+                if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
+                    !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
+                    descriptor->writeMask != prevDescriptor->writeMask) {
+                    // TODO(crbug.com/dawn/582)
                     ASSERT(false);
-                } else if (descriptor->blend != nullptr) {
-                    if (!Equal(descriptor->blend->alpha, prevDescriptor->blend->alpha) ||
-                        !Equal(descriptor->blend->color, prevDescriptor->blend->color) ||
-                        descriptor->writeMask != prevDescriptor->writeMask) {
-                        // TODO(crbug.com/dawn/582)
-                        ASSERT(false);
-                    }
                 }
             }
         }
     }
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/RenderPipelineGL.h b/src/dawn/native/opengl/RenderPipelineGL.h
index 37f3631..f059724 100644
--- a/src/dawn/native/opengl/RenderPipelineGL.h
+++ b/src/dawn/native/opengl/RenderPipelineGL.h
@@ -24,38 +24,38 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
-    class PersistentPipelineState;
+class Device;
+class PersistentPipelineState;
 
-    class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
-      public:
-        static Ref<RenderPipeline> CreateUninitialized(Device* device,
-                                                       const RenderPipelineDescriptor* descriptor);
+class RenderPipeline final : public RenderPipelineBase, public PipelineGL {
+  public:
+    static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                   const RenderPipelineDescriptor* descriptor);
 
-        GLenum GetGLPrimitiveTopology() const;
-        ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
-            VertexBufferSlot slot) const;
+    GLenum GetGLPrimitiveTopology() const;
+    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes> GetAttributesUsingVertexBuffer(
+        VertexBufferSlot slot) const;
 
-        void ApplyNow(PersistentPipelineState& persistentPipelineState);
+    void ApplyNow(PersistentPipelineState& persistentPipelineState);
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-      private:
-        RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
-        ~RenderPipeline() override;
-        void DestroyImpl() override;
+  private:
+    RenderPipeline(Device* device, const RenderPipelineDescriptor* descriptor);
+    ~RenderPipeline() override;
+    void DestroyImpl() override;
 
-        void CreateVAOForVertexState();
+    void CreateVAOForVertexState();
 
-        // TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
-        GLuint mVertexArrayObject;
-        GLenum mGlPrimitiveTopology;
+    // TODO(yunchao.he@intel.com): vao need to be deduplicated between pipelines.
+    GLuint mVertexArrayObject;
+    GLenum mGlPrimitiveTopology;
 
-        ityp::array<VertexBufferSlot,
-                    ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
-                    kMaxVertexBuffers>
-            mAttributesUsingVertexBuffer;
-    };
+    ityp::array<VertexBufferSlot,
+                ityp::bitset<VertexAttributeLocation, kMaxVertexAttributes>,
+                kMaxVertexBuffers>
+        mAttributesUsingVertexBuffer;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/SamplerGL.cpp b/src/dawn/native/opengl/SamplerGL.cpp
index 7790530..b40e1d6 100644
--- a/src/dawn/native/opengl/SamplerGL.cpp
+++ b/src/dawn/native/opengl/SamplerGL.cpp
@@ -20,111 +20,109 @@
 
 namespace dawn::native::opengl {
 
-    namespace {
-        GLenum MagFilterMode(wgpu::FilterMode filter) {
-            switch (filter) {
+namespace {
+GLenum MagFilterMode(wgpu::FilterMode filter) {
+    switch (filter) {
+        case wgpu::FilterMode::Nearest:
+            return GL_NEAREST;
+        case wgpu::FilterMode::Linear:
+            return GL_LINEAR;
+    }
+    UNREACHABLE();
+}
+
+GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
+    switch (minFilter) {
+        case wgpu::FilterMode::Nearest:
+            switch (mipMapFilter) {
                 case wgpu::FilterMode::Nearest:
-                    return GL_NEAREST;
+                    return GL_NEAREST_MIPMAP_NEAREST;
                 case wgpu::FilterMode::Linear:
-                    return GL_LINEAR;
+                    return GL_NEAREST_MIPMAP_LINEAR;
             }
-            UNREACHABLE();
-        }
-
-        GLenum MinFilterMode(wgpu::FilterMode minFilter, wgpu::FilterMode mipMapFilter) {
-            switch (minFilter) {
+        case wgpu::FilterMode::Linear:
+            switch (mipMapFilter) {
                 case wgpu::FilterMode::Nearest:
-                    switch (mipMapFilter) {
-                        case wgpu::FilterMode::Nearest:
-                            return GL_NEAREST_MIPMAP_NEAREST;
-                        case wgpu::FilterMode::Linear:
-                            return GL_NEAREST_MIPMAP_LINEAR;
-                    }
+                    return GL_LINEAR_MIPMAP_NEAREST;
                 case wgpu::FilterMode::Linear:
-                    switch (mipMapFilter) {
-                        case wgpu::FilterMode::Nearest:
-                            return GL_LINEAR_MIPMAP_NEAREST;
-                        case wgpu::FilterMode::Linear:
-                            return GL_LINEAR_MIPMAP_LINEAR;
-                    }
+                    return GL_LINEAR_MIPMAP_LINEAR;
             }
-            UNREACHABLE();
-        }
+    }
+    UNREACHABLE();
+}
 
-        GLenum WrapMode(wgpu::AddressMode mode) {
-            switch (mode) {
-                case wgpu::AddressMode::Repeat:
-                    return GL_REPEAT;
-                case wgpu::AddressMode::MirrorRepeat:
-                    return GL_MIRRORED_REPEAT;
-                case wgpu::AddressMode::ClampToEdge:
-                    return GL_CLAMP_TO_EDGE;
-            }
-            UNREACHABLE();
-        }
+GLenum WrapMode(wgpu::AddressMode mode) {
+    switch (mode) {
+        case wgpu::AddressMode::Repeat:
+            return GL_REPEAT;
+        case wgpu::AddressMode::MirrorRepeat:
+            return GL_MIRRORED_REPEAT;
+        case wgpu::AddressMode::ClampToEdge:
+            return GL_CLAMP_TO_EDGE;
+    }
+    UNREACHABLE();
+}
 
-    }  // namespace
+}  // namespace
 
-    Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
-        : SamplerBase(device, descriptor) {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+Sampler::Sampler(Device* device, const SamplerDescriptor* descriptor)
+    : SamplerBase(device, descriptor) {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
 
-        gl.GenSamplers(1, &mFilteringHandle);
-        SetupGLSampler(mFilteringHandle, descriptor, false);
+    gl.GenSamplers(1, &mFilteringHandle);
+    SetupGLSampler(mFilteringHandle, descriptor, false);
 
-        gl.GenSamplers(1, &mNonFilteringHandle);
-        SetupGLSampler(mNonFilteringHandle, descriptor, true);
+    gl.GenSamplers(1, &mNonFilteringHandle);
+    SetupGLSampler(mNonFilteringHandle, descriptor, true);
+}
+
+Sampler::~Sampler() = default;
+
+void Sampler::DestroyImpl() {
+    SamplerBase::DestroyImpl();
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+    gl.DeleteSamplers(1, &mFilteringHandle);
+    gl.DeleteSamplers(1, &mNonFilteringHandle);
+}
+
+void Sampler::SetupGLSampler(GLuint sampler,
+                             const SamplerDescriptor* descriptor,
+                             bool forceNearest) {
+    Device* device = ToBackend(GetDevice());
+    const OpenGLFunctions& gl = device->gl;
+
+    if (forceNearest) {
+        gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+        gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
+    } else {
+        gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, MagFilterMode(descriptor->magFilter));
+        gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
+                             MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
+    }
+    gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
+    gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
+    gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));
+
+    gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
+    gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);
+
+    if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+        gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
+        gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
+                             ToOpenGLCompareFunction(descriptor->compare));
     }
 
-    Sampler::~Sampler() = default;
-
-    void Sampler::DestroyImpl() {
-        SamplerBase::DestroyImpl();
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-        gl.DeleteSamplers(1, &mFilteringHandle);
-        gl.DeleteSamplers(1, &mNonFilteringHandle);
+    if (gl.IsAtLeastGL(4, 6) || gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
+        gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
     }
+}
 
-    void Sampler::SetupGLSampler(GLuint sampler,
-                                 const SamplerDescriptor* descriptor,
-                                 bool forceNearest) {
-        Device* device = ToBackend(GetDevice());
-        const OpenGLFunctions& gl = device->gl;
+GLuint Sampler::GetFilteringHandle() const {
+    return mFilteringHandle;
+}
 
-        if (forceNearest) {
-            gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
-            gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
-        } else {
-            gl.SamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER,
-                                 MagFilterMode(descriptor->magFilter));
-            gl.SamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER,
-                                 MinFilterMode(descriptor->minFilter, descriptor->mipmapFilter));
-        }
-        gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_R, WrapMode(descriptor->addressModeW));
-        gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_S, WrapMode(descriptor->addressModeU));
-        gl.SamplerParameteri(sampler, GL_TEXTURE_WRAP_T, WrapMode(descriptor->addressModeV));
-
-        gl.SamplerParameterf(sampler, GL_TEXTURE_MIN_LOD, descriptor->lodMinClamp);
-        gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_LOD, descriptor->lodMaxClamp);
-
-        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
-            gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
-            gl.SamplerParameteri(sampler, GL_TEXTURE_COMPARE_FUNC,
-                                 ToOpenGLCompareFunction(descriptor->compare));
-        }
-
-        if (gl.IsAtLeastGL(4, 6) ||
-            gl.IsGLExtensionSupported("GL_EXT_texture_filter_anisotropic")) {
-            gl.SamplerParameterf(sampler, GL_TEXTURE_MAX_ANISOTROPY, GetMaxAnisotropy());
-        }
-    }
-
-    GLuint Sampler::GetFilteringHandle() const {
-        return mFilteringHandle;
-    }
-
-    GLuint Sampler::GetNonFilteringHandle() const {
-        return mNonFilteringHandle;
-    }
+GLuint Sampler::GetNonFilteringHandle() const {
+    return mNonFilteringHandle;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/SamplerGL.h b/src/dawn/native/opengl/SamplerGL.h
index 1afb612..82ea9bb 100644
--- a/src/dawn/native/opengl/SamplerGL.h
+++ b/src/dawn/native/opengl/SamplerGL.h
@@ -21,27 +21,27 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class Sampler final : public SamplerBase {
-      public:
-        Sampler(Device* device, const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+  public:
+    Sampler(Device* device, const SamplerDescriptor* descriptor);
 
-        GLuint GetFilteringHandle() const;
-        GLuint GetNonFilteringHandle() const;
+    GLuint GetFilteringHandle() const;
+    GLuint GetNonFilteringHandle() const;
 
-      private:
-        ~Sampler() override;
-        void DestroyImpl() override;
+  private:
+    ~Sampler() override;
+    void DestroyImpl() override;
 
-        void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);
+    void SetupGLSampler(GLuint sampler, const SamplerDescriptor* descriptor, bool forceNearest);
 
-        GLuint mFilteringHandle;
+    GLuint mFilteringHandle;
 
-        // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
-        // for everything, which is important to preserve texture completeness for u/int textures.
-        GLuint mNonFilteringHandle;
-    };
+    // This is a sampler equivalent to mFilteringHandle except that it uses NEAREST filtering
+    // for everything, which is important to preserve texture completeness for u/int textures.
+    GLuint mNonFilteringHandle;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/ShaderModuleGL.cpp b/src/dawn/native/opengl/ShaderModuleGL.cpp
index f1672fa..b1aaee2 100644
--- a/src/dawn/native/opengl/ShaderModuleGL.cpp
+++ b/src/dawn/native/opengl/ShaderModuleGL.cpp
@@ -28,151 +28,149 @@
 
 namespace dawn::native::opengl {
 
-    std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
-        std::ostringstream o;
-        o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
-          << static_cast<uint32_t>(bindingNumber);
-        return o.str();
-    }
+std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber) {
+    std::ostringstream o;
+    o << "dawn_binding_" << static_cast<uint32_t>(group) << "_"
+      << static_cast<uint32_t>(bindingNumber);
+    return o.str();
+}
 
-    bool operator<(const BindingLocation& a, const BindingLocation& b) {
-        return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
-    }
+bool operator<(const BindingLocation& a, const BindingLocation& b) {
+    return std::tie(a.group, a.binding) < std::tie(b.group, b.binding);
+}
 
-    bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
-        return std::tie(a.usePlaceholderSampler, a.samplerLocation, a.textureLocation) <
-               std::tie(b.usePlaceholderSampler, a.samplerLocation, b.textureLocation);
-    }
+bool operator<(const CombinedSampler& a, const CombinedSampler& b) {
+    return std::tie(a.usePlaceholderSampler, a.samplerLocation, a.textureLocation) <
+           std::tie(b.usePlaceholderSampler, a.samplerLocation, b.textureLocation);
+}
 
-    std::string CombinedSampler::GetName() const {
-        std::ostringstream o;
-        o << "dawn_combined";
-        if (usePlaceholderSampler) {
-            o << "_placeholder_sampler";
+std::string CombinedSampler::GetName() const {
+    std::ostringstream o;
+    o << "dawn_combined";
+    if (usePlaceholderSampler) {
+        o << "_placeholder_sampler";
+    } else {
+        o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
+          << static_cast<uint32_t>(samplerLocation.binding);
+    }
+    o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
+      << static_cast<uint32_t>(textureLocation.binding);
+    return o.str();
+}
+
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                      const ShaderModuleDescriptor* descriptor,
+                                                      ShaderModuleParseResult* parseResult) {
+    Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+    DAWN_TRY(module->Initialize(parseResult));
+    return module;
+}
+
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+    : ShaderModuleBase(device, descriptor) {}
+
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+    ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+    DAWN_TRY(InitializeBase(parseResult));
+
+    return {};
+}
+
+ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
+                                                         SingleShaderStage stage,
+                                                         CombinedSamplerInfo* combinedSamplers,
+                                                         const PipelineLayout* layout,
+                                                         bool* needsPlaceholderSampler) const {
+    TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
+    tint::transform::Manager transformManager;
+    tint::transform::DataMap transformInputs;
+
+    AddExternalTextureTransform(layout, &transformManager, &transformInputs);
+
+    tint::Program program;
+    DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+                                           nullptr, nullptr));
+    const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();
+
+    tint::writer::glsl::Options tintOptions;
+    using Version = tint::writer::glsl::Version;
+    tintOptions.version =
+        Version(version.IsDesktop() ? Version::Standard::kDesktop : Version::Standard::kES,
+                version.GetMajor(), version.GetMinor());
+
+    using tint::transform::BindingPoint;
+    // When textures are accessed without a sampler (e.g., textureLoad()),
+    // GetSamplerTextureUses() will return this sentinel value.
+    BindingPoint placeholderBindingPoint{static_cast<uint32_t>(kMaxBindGroupsTyped), 0};
+
+    tint::inspector::Inspector inspector(&program);
+    // Find all the sampler/texture pairs for this entry point, and create
+    // CombinedSamplers for them. CombinedSampler records the binding points
+    // of the original texture and sampler, and generates a unique name. The
+    // corresponding uniforms will be retrieved by these generated names
+    // in PipelineGL. Any texture-only references will have
+    // "usePlaceholderSampler" set to true, and only the texture binding point
+    // will be used in naming them. In addition, Dawn will bind a
+    // non-filtering sampler for them (see PipelineGL).
+    auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
+    for (const auto& use : uses) {
+        combinedSamplers->emplace_back();
+
+        CombinedSampler* info = &combinedSamplers->back();
+        if (use.sampler_binding_point == placeholderBindingPoint) {
+            info->usePlaceholderSampler = true;
+            *needsPlaceholderSampler = true;
         } else {
-            o << "_" << static_cast<uint32_t>(samplerLocation.group) << "_"
-              << static_cast<uint32_t>(samplerLocation.binding);
+            info->usePlaceholderSampler = false;
         }
-        o << "_with_" << static_cast<uint32_t>(textureLocation.group) << "_"
-          << static_cast<uint32_t>(textureLocation.binding);
-        return o.str();
+        info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
+        info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
+        info->textureLocation.group = BindGroupIndex(use.texture_binding_point.group);
+        info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
+        tintOptions.binding_map[use] = info->GetName();
+    }
+    if (*needsPlaceholderSampler) {
+        tintOptions.placeholder_binding_point = placeholderBindingPoint;
     }
 
-    // static
-    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
-                                                          const ShaderModuleDescriptor* descriptor,
-                                                          ShaderModuleParseResult* parseResult) {
-        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
-        DAWN_TRY(module->Initialize(parseResult));
-        return module;
-    }
-
-    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
-        : ShaderModuleBase(device, descriptor) {
-    }
-
-    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
-        ScopedTintICEHandler scopedICEHandler(GetDevice());
-
-        DAWN_TRY(InitializeBase(parseResult));
-
-        return {};
-    }
-
-    ResultOrError<std::string> ShaderModule::TranslateToGLSL(const char* entryPointName,
-                                                             SingleShaderStage stage,
-                                                             CombinedSamplerInfo* combinedSamplers,
-                                                             const PipelineLayout* layout,
-                                                             bool* needsPlaceholderSampler) const {
-        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "TranslateToGLSL");
-        tint::transform::Manager transformManager;
-        tint::transform::DataMap transformInputs;
-
-        AddExternalTextureTransform(layout, &transformManager, &transformInputs);
-
-        tint::Program program;
-        DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
-                                               nullptr, nullptr));
-        const OpenGLVersion& version = ToBackend(GetDevice())->gl.GetVersion();
-
-        tint::writer::glsl::Options tintOptions;
-        using Version = tint::writer::glsl::Version;
-        tintOptions.version =
-            Version(version.IsDesktop() ? Version::Standard::kDesktop : Version::Standard::kES,
-                    version.GetMajor(), version.GetMinor());
-
-        using tint::transform::BindingPoint;
-        // When textures are accessed without a sampler (e.g., textureLoad()),
-        // GetSamplerTextureUses() will return this sentinel value.
-        BindingPoint placeholderBindingPoint{static_cast<uint32_t>(kMaxBindGroupsTyped), 0};
-
-        tint::inspector::Inspector inspector(&program);
-        // Find all the sampler/texture pairs for this entry point, and create
-        // CombinedSamplers for them. CombinedSampler records the binding points
-        // of the original texture and sampler, and generates a unique name. The
-        // corresponding uniforms will be retrieved by these generated names
-        // in PipelineGL. Any texture-only references will have
-        // "usePlaceholderSampler" set to true, and only the texture binding point
-        // will be used in naming them. In addition, Dawn will bind a
-        // non-filtering sampler for them (see PipelineGL).
-        auto uses = inspector.GetSamplerTextureUses(entryPointName, placeholderBindingPoint);
-        for (const auto& use : uses) {
-            combinedSamplers->emplace_back();
-
-            CombinedSampler* info = &combinedSamplers->back();
-            if (use.sampler_binding_point == placeholderBindingPoint) {
-                info->usePlaceholderSampler = true;
-                *needsPlaceholderSampler = true;
-            } else {
-                info->usePlaceholderSampler = false;
+    // Since (non-Vulkan) GLSL does not support descriptor sets, generate a
+    // mapping from the original group/binding pair to a binding-only
+    // value. This mapping will be used by Tint to remap all global
+    // variables to the 1D space.
+    for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        const BindGroupLayoutBase::BindingMap& bindingMap =
+            layout->GetBindGroupLayout(group)->GetBindingMap();
+        for (const auto& it : bindingMap) {
+            BindingNumber bindingNumber = it.first;
+            BindingIndex bindingIndex = it.second;
+            const BindingInfo& bindingInfo =
+                layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
+            if (!(bindingInfo.visibility & StageBit(stage))) {
+                continue;
             }
-            info->samplerLocation.group = BindGroupIndex(use.sampler_binding_point.group);
-            info->samplerLocation.binding = BindingNumber(use.sampler_binding_point.binding);
-            info->textureLocation.group = BindGroupIndex(use.texture_binding_point.group);
-            info->textureLocation.binding = BindingNumber(use.texture_binding_point.binding);
-            tintOptions.binding_map[use] = info->GetName();
+
+            uint32_t shaderIndex = layout->GetBindingIndexInfo()[group][bindingIndex];
+            BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                         static_cast<uint32_t>(bindingNumber)};
+            BindingPoint dstBindingPoint{0, shaderIndex};
+            tintOptions.binding_points.emplace(srcBindingPoint, dstBindingPoint);
         }
-        if (*needsPlaceholderSampler) {
-            tintOptions.placeholder_binding_point = placeholderBindingPoint;
-        }
-
-        // Since (non-Vulkan) GLSL does not support descriptor sets, generate a
-        // mapping from the original group/binding pair to a binding-only
-        // value. This mapping will be used by Tint to remap all global
-        // variables to the 1D space.
-        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            const BindGroupLayoutBase::BindingMap& bindingMap =
-                layout->GetBindGroupLayout(group)->GetBindingMap();
-            for (const auto& it : bindingMap) {
-                BindingNumber bindingNumber = it.first;
-                BindingIndex bindingIndex = it.second;
-                const BindingInfo& bindingInfo =
-                    layout->GetBindGroupLayout(group)->GetBindingInfo(bindingIndex);
-                if (!(bindingInfo.visibility & StageBit(stage))) {
-                    continue;
-                }
-
-                uint32_t shaderIndex = layout->GetBindingIndexInfo()[group][bindingIndex];
-                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
-                                             static_cast<uint32_t>(bindingNumber)};
-                BindingPoint dstBindingPoint{0, shaderIndex};
-                tintOptions.binding_points.emplace(srcBindingPoint, dstBindingPoint);
-            }
-            tintOptions.allow_collisions = true;
-        }
-        auto result = tint::writer::glsl::Generate(&program, tintOptions, entryPointName);
-        DAWN_INVALID_IF(!result.success, "An error occured while generating GLSL: %s.",
-                        result.error);
-        std::string glsl = std::move(result.glsl);
-
-        if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
-            std::ostringstream dumpedMsg;
-            dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;
-
-            GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
-        }
-
-        return glsl;
+        tintOptions.allow_collisions = true;
     }
+    auto result = tint::writer::glsl::Generate(&program, tintOptions, entryPointName);
+    DAWN_INVALID_IF(!result.success, "An error occured while generating GLSL: %s.", result.error);
+    std::string glsl = std::move(result.glsl);
+
+    if (GetDevice()->IsToggleEnabled(Toggle::DumpShaders)) {
+        std::ostringstream dumpedMsg;
+        dumpedMsg << "/* Dumped generated GLSL */" << std::endl << glsl;
+
+        GetDevice()->EmitLog(WGPULoggingType_Info, dumpedMsg.str().c_str());
+    }
+
+    return glsl;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/ShaderModuleGL.h b/src/dawn/native/opengl/ShaderModuleGL.h
index bb75150..90740c7 100644
--- a/src/dawn/native/opengl/ShaderModuleGL.h
+++ b/src/dawn/native/opengl/ShaderModuleGL.h
@@ -26,50 +26,49 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
-    class PipelineLayout;
+class Device;
+class PipelineLayout;
 
-    std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
+std::string GetBindingName(BindGroupIndex group, BindingNumber bindingNumber);
 
-    struct BindingLocation {
-        BindGroupIndex group;
-        BindingNumber binding;
-    };
-    bool operator<(const BindingLocation& a, const BindingLocation& b);
+struct BindingLocation {
+    BindGroupIndex group;
+    BindingNumber binding;
+};
+bool operator<(const BindingLocation& a, const BindingLocation& b);
 
-    struct CombinedSampler {
-        BindingLocation samplerLocation;
-        BindingLocation textureLocation;
-        // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
-        // one and Dawn should bind a placeholder non-filtering sampler. |samplerLocation| is
-        // unused.
-        bool usePlaceholderSampler;
-        std::string GetName() const;
-    };
-    bool operator<(const CombinedSampler& a, const CombinedSampler& b);
+struct CombinedSampler {
+    BindingLocation samplerLocation;
+    BindingLocation textureLocation;
+    // OpenGL requires a sampler with texelFetch. If this is true, the developer did not provide
+    // one and Dawn should bind a placeholder non-filtering sampler. |samplerLocation| is
+    // unused.
+    bool usePlaceholderSampler;
+    std::string GetName() const;
+};
+bool operator<(const CombinedSampler& a, const CombinedSampler& b);
 
-    using CombinedSamplerInfo = std::vector<CombinedSampler>;
+using CombinedSamplerInfo = std::vector<CombinedSampler>;
 
-    using BindingInfoArrayTable =
-        std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
+using BindingInfoArrayTable = std::unordered_map<std::string, std::unique_ptr<BindingInfoArray>>;
 
-    class ShaderModule final : public ShaderModuleBase {
-      public:
-        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
-                                                       const ShaderModuleDescriptor* descriptor,
-                                                       ShaderModuleParseResult* parseResult);
+class ShaderModule final : public ShaderModuleBase {
+  public:
+    static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                   const ShaderModuleDescriptor* descriptor,
+                                                   ShaderModuleParseResult* parseResult);
 
-        ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
-                                                   SingleShaderStage stage,
-                                                   CombinedSamplerInfo* combinedSamplers,
-                                                   const PipelineLayout* layout,
-                                                   bool* needsPlaceholderSampler) const;
+    ResultOrError<std::string> TranslateToGLSL(const char* entryPointName,
+                                               SingleShaderStage stage,
+                                               CombinedSamplerInfo* combinedSamplers,
+                                               const PipelineLayout* layout,
+                                               bool* needsPlaceholderSampler) const;
 
-      private:
-        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
-        ~ShaderModule() override = default;
-        MaybeError Initialize(ShaderModuleParseResult* parseResult);
-    };
+  private:
+    ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+    ~ShaderModule() override = default;
+    MaybeError Initialize(ShaderModuleParseResult* parseResult);
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/SwapChainGL.cpp b/src/dawn/native/opengl/SwapChainGL.cpp
index 9f82f6c..8501ee7 100644
--- a/src/dawn/native/opengl/SwapChainGL.cpp
+++ b/src/dawn/native/opengl/SwapChainGL.cpp
@@ -22,30 +22,29 @@
 
 namespace dawn::native::opengl {
 
-    SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
-        : OldSwapChainBase(device, descriptor) {
-        const auto& im = GetImplementation();
-        im.Init(im.userData, nullptr);
-    }
+SwapChain::SwapChain(Device* device, const SwapChainDescriptor* descriptor)
+    : OldSwapChainBase(device, descriptor) {
+    const auto& im = GetImplementation();
+    im.Init(im.userData, nullptr);
+}
 
-    SwapChain::~SwapChain() {
-    }
+SwapChain::~SwapChain() {}
 
-    TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
-        const auto& im = GetImplementation();
-        DawnSwapChainNextTexture next = {};
-        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
-        if (error) {
-            GetDevice()->HandleError(InternalErrorType::Internal, error);
-            return nullptr;
-        }
-        GLuint nativeTexture = next.texture.u32;
-        return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
-                           TextureBase::TextureState::OwnedExternal);
+TextureBase* SwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+    const auto& im = GetImplementation();
+    DawnSwapChainNextTexture next = {};
+    DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+    if (error) {
+        GetDevice()->HandleError(InternalErrorType::Internal, error);
+        return nullptr;
     }
+    GLuint nativeTexture = next.texture.u32;
+    return new Texture(ToBackend(GetDevice()), descriptor, nativeTexture,
+                       TextureBase::TextureState::OwnedExternal);
+}
 
-    MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
-        return {};
-    }
+MaybeError SwapChain::OnBeforePresent(TextureViewBase*) {
+    return {};
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/SwapChainGL.h b/src/dawn/native/opengl/SwapChainGL.h
index d84c43c..0c14564 100644
--- a/src/dawn/native/opengl/SwapChainGL.h
+++ b/src/dawn/native/opengl/SwapChainGL.h
@@ -21,17 +21,17 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
+class Device;
 
-    class SwapChain final : public OldSwapChainBase {
-      public:
-        SwapChain(Device* device, const SwapChainDescriptor* descriptor);
+class SwapChain final : public OldSwapChainBase {
+  public:
+    SwapChain(Device* device, const SwapChainDescriptor* descriptor);
 
-      protected:
-        ~SwapChain() override;
-        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
-        MaybeError OnBeforePresent(TextureViewBase* view) override;
-    };
+  protected:
+    ~SwapChain() override;
+    TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+    MaybeError OnBeforePresent(TextureViewBase* view) override;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/TextureGL.cpp b/src/dawn/native/opengl/TextureGL.cpp
index 051c625..5b2b999 100644
--- a/src/dawn/native/opengl/TextureGL.cpp
+++ b/src/dawn/native/opengl/TextureGL.cpp
@@ -27,281 +27,300 @@
 
 namespace dawn::native::opengl {
 
-    namespace {
+namespace {
 
-        GLenum TargetForTexture(const TextureDescriptor* descriptor) {
-            switch (descriptor->dimension) {
-                case wgpu::TextureDimension::e2D:
-                    if (descriptor->size.depthOrArrayLayers > 1) {
-                        ASSERT(descriptor->sampleCount == 1);
-                        return GL_TEXTURE_2D_ARRAY;
-                    } else {
-                        if (descriptor->sampleCount > 1) {
-                            return GL_TEXTURE_2D_MULTISAMPLE;
-                        } else {
-                            return GL_TEXTURE_2D;
-                        }
-                    }
-                case wgpu::TextureDimension::e3D:
-                    ASSERT(descriptor->sampleCount == 1);
-                    return GL_TEXTURE_3D;
-
-                case wgpu::TextureDimension::e1D:
-                    break;
-            }
-            UNREACHABLE();
-        }
-
-        GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
-                                             uint32_t arrayLayerCount,
-                                             uint32_t sampleCount) {
-            switch (dimension) {
-                case wgpu::TextureViewDimension::e2D:
-                    return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
-                case wgpu::TextureViewDimension::e2DArray:
-                    if (sampleCount > 1) {
-                        ASSERT(arrayLayerCount == 1);
-                        return GL_TEXTURE_2D_MULTISAMPLE;
-                    }
-                    ASSERT(sampleCount == 1);
-                    return GL_TEXTURE_2D_ARRAY;
-                case wgpu::TextureViewDimension::Cube:
-                    ASSERT(sampleCount == 1);
-                    ASSERT(arrayLayerCount == 6);
-                    return GL_TEXTURE_CUBE_MAP;
-                case wgpu::TextureViewDimension::CubeArray:
-                    ASSERT(sampleCount == 1);
-                    ASSERT(arrayLayerCount % 6 == 0);
-                    return GL_TEXTURE_CUBE_MAP_ARRAY;
-                case wgpu::TextureViewDimension::e3D:
-                    return GL_TEXTURE_3D;
-
-                case wgpu::TextureViewDimension::e1D:
-                case wgpu::TextureViewDimension::Undefined:
-                    break;
-            }
-            UNREACHABLE();
-        }
-
-        GLuint GenTexture(const OpenGLFunctions& gl) {
-            GLuint handle = 0;
-            gl.GenTextures(1, &handle);
-            return handle;
-        }
-
-        bool RequiresCreatingNewTextureView(const TextureBase* texture,
-                                            const TextureViewDescriptor* textureViewDescriptor) {
-            constexpr wgpu::TextureUsage kShaderUsageNeedsView =
-                wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
-            constexpr wgpu::TextureUsage kUsageNeedsView =
-                kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
-            if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
-                return false;
-            }
-
-            if (texture->GetFormat().format != textureViewDescriptor->format &&
-                !texture->GetFormat().HasDepthOrStencil()) {
-                // Color format reinterpretation required. Note: Depth/stencil formats don't support
-                // reinterpretation.
-                return true;
-            }
-
-            // Reinterpretation not required. Now, we only need a new view if the view dimension or
-            // set of subresources for the shader is different from the base texture.
-            if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
-                return false;
-            }
-
-            if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
-                (texture->GetArrayLayers() == 1 &&
-                 texture->GetDimension() == wgpu::TextureDimension::e2D &&
-                 textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
-                // If the view has a different number of array layers, we need a new view.
-                // And, if the original texture is a 2D texture with one array layer, we need a new
-                // view to view it as a 2D array texture.
-                return true;
-            }
-
-            if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
-                return true;
-            }
-
-            if (ToBackend(texture)->GetGLFormat().format == GL_DEPTH_STENCIL &&
-                (texture->GetUsage() & wgpu::TextureUsage::TextureBinding) != 0 &&
-                textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
-                // We need a separate view for one of the depth or stencil planes
-                // because each glTextureView needs it's own handle to set
-                // GL_DEPTH_STENCIL_TEXTURE_MODE. Choose the stencil aspect for the
-                // extra handle since it is likely sampled less often.
-                return true;
-            }
-
-            switch (textureViewDescriptor->dimension) {
-                case wgpu::TextureViewDimension::Cube:
-                case wgpu::TextureViewDimension::CubeArray:
-                    return true;
-                default:
-                    break;
-            }
-
-            return false;
-        }
-
-        void AllocateTexture(const OpenGLFunctions& gl,
-                             GLenum target,
-                             GLsizei samples,
-                             GLuint levels,
-                             GLenum internalFormat,
-                             const Extent3D& size) {
-            // glTextureView() requires the value of GL_TEXTURE_IMMUTABLE_FORMAT for origtexture to
-            // be GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
-            // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
-            switch (target) {
-                case GL_TEXTURE_2D_ARRAY:
-                case GL_TEXTURE_3D:
-                    gl.TexStorage3D(target, levels, internalFormat, size.width, size.height,
-                                    size.depthOrArrayLayers);
-                    break;
-                case GL_TEXTURE_2D:
-                case GL_TEXTURE_CUBE_MAP:
-                    gl.TexStorage2D(target, levels, internalFormat, size.width, size.height);
-                    break;
-                case GL_TEXTURE_2D_MULTISAMPLE:
-                    gl.TexStorage2DMultisample(target, samples, internalFormat, size.width,
-                                               size.height, true);
-                    break;
-                default:
-                    UNREACHABLE();
-            }
-        }
-
-    }  // namespace
-
-    // Texture
-
-    Texture::Texture(Device* device, const TextureDescriptor* descriptor)
-        : Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
-        const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
-        uint32_t levels = GetNumMipLevels();
-
-        const GLFormat& glFormat = GetGLFormat();
-
-        gl.BindTexture(mTarget, mHandle);
-
-        AllocateTexture(gl, mTarget, GetSampleCount(), levels, glFormat.internalFormat, GetSize());
-
-        // The texture is not complete if it uses mipmapping and not all levels up to
-        // MAX_LEVEL have been defined.
-        gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
-
-        if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-            GetDevice()->ConsumedError(
-                ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
-        }
-    }
-
-    void Texture::Touch() {
-        mGenID++;
-    }
-
-    uint32_t Texture::GetGenID() const {
-        return mGenID;
-    }
-
-    Texture::Texture(Device* device,
-                     const TextureDescriptor* descriptor,
-                     GLuint handle,
-                     TextureState state)
-        : TextureBase(device, descriptor, state), mHandle(handle) {
-        mTarget = TargetForTexture(descriptor);
-    }
-
-    Texture::~Texture() {
-    }
-
-    void Texture::DestroyImpl() {
-        TextureBase::DestroyImpl();
-        if (GetTextureState() == TextureState::OwnedInternal) {
-            ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
-            mHandle = 0;
-        }
-    }
-
-    GLuint Texture::GetHandle() const {
-        return mHandle;
-    }
-
-    GLenum Texture::GetGLTarget() const {
-        return mTarget;
-    }
-
-    const GLFormat& Texture::GetGLFormat() const {
-        return ToBackend(GetDevice())->GetGLFormat(GetFormat());
-    }
-
-    MaybeError Texture::ClearTexture(const SubresourceRange& range,
-                                     TextureBase::ClearValue clearValue) {
-        // TODO(crbug.com/dawn/850): initialize the textures with compressed formats.
-        if (GetFormat().isCompressed) {
-            return {};
-        }
-
-        Device* device = ToBackend(GetDevice());
-        const OpenGLFunctions& gl = device->gl;
-
-        uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
-        float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
-
-        if (GetFormat().isRenderable) {
-            if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
-                GLfloat depth = fClearColor;
-                GLint stencil = clearColor;
-                if (range.aspects & Aspect::Depth) {
-                    gl.DepthMask(GL_TRUE);
+GLenum TargetForTexture(const TextureDescriptor* descriptor) {
+    switch (descriptor->dimension) {
+        case wgpu::TextureDimension::e2D:
+            if (descriptor->size.depthOrArrayLayers > 1) {
+                ASSERT(descriptor->sampleCount == 1);
+                return GL_TEXTURE_2D_ARRAY;
+            } else {
+                if (descriptor->sampleCount > 1) {
+                    return GL_TEXTURE_2D_MULTISAMPLE;
+                } else {
+                    return GL_TEXTURE_2D;
                 }
-                if (range.aspects & Aspect::Stencil) {
-                    gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
-                }
+            }
+        case wgpu::TextureDimension::e3D:
+            ASSERT(descriptor->sampleCount == 1);
+            return GL_TEXTURE_3D;
 
-                auto DoClear = [&](Aspect aspects) {
-                    if (aspects == (Aspect::Depth | Aspect::Stencil)) {
-                        gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
-                    } else if (aspects == Aspect::Depth) {
-                        gl.ClearBufferfv(GL_DEPTH, 0, &depth);
-                    } else if (aspects == Aspect::Stencil) {
-                        gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
-                    } else {
-                        UNREACHABLE();
-                    }
-                };
+        case wgpu::TextureDimension::e1D:
+            break;
+    }
+    UNREACHABLE();
+}
 
-                GLuint framebuffer = 0;
-                gl.GenFramebuffers(1, &framebuffer);
-                gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
-                gl.Disable(GL_SCISSOR_TEST);
+GLenum TargetForTextureViewDimension(wgpu::TextureViewDimension dimension,
+                                     uint32_t arrayLayerCount,
+                                     uint32_t sampleCount) {
+    switch (dimension) {
+        case wgpu::TextureViewDimension::e2D:
+            return (sampleCount > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D;
+        case wgpu::TextureViewDimension::e2DArray:
+            if (sampleCount > 1) {
+                ASSERT(arrayLayerCount == 1);
+                return GL_TEXTURE_2D_MULTISAMPLE;
+            }
+            ASSERT(sampleCount == 1);
+            return GL_TEXTURE_2D_ARRAY;
+        case wgpu::TextureViewDimension::Cube:
+            ASSERT(sampleCount == 1);
+            ASSERT(arrayLayerCount == 6);
+            return GL_TEXTURE_CUBE_MAP;
+        case wgpu::TextureViewDimension::CubeArray:
+            ASSERT(sampleCount == 1);
+            ASSERT(arrayLayerCount % 6 == 0);
+            return GL_TEXTURE_CUBE_MAP_ARRAY;
+        case wgpu::TextureViewDimension::e3D:
+            return GL_TEXTURE_3D;
 
-                GLenum attachment;
-                if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
-                    attachment = GL_DEPTH_STENCIL_ATTACHMENT;
-                } else if (range.aspects == Aspect::Depth) {
-                    attachment = GL_DEPTH_ATTACHMENT;
-                } else if (range.aspects == Aspect::Stencil) {
-                    attachment = GL_STENCIL_ATTACHMENT;
+        case wgpu::TextureViewDimension::e1D:
+        case wgpu::TextureViewDimension::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+GLuint GenTexture(const OpenGLFunctions& gl) {
+    GLuint handle = 0;
+    gl.GenTextures(1, &handle);
+    return handle;
+}
+
+bool RequiresCreatingNewTextureView(const TextureBase* texture,
+                                    const TextureViewDescriptor* textureViewDescriptor) {
+    constexpr wgpu::TextureUsage kShaderUsageNeedsView =
+        wgpu::TextureUsage::StorageBinding | wgpu::TextureUsage::TextureBinding;
+    constexpr wgpu::TextureUsage kUsageNeedsView =
+        kShaderUsageNeedsView | wgpu::TextureUsage::RenderAttachment;
+    if ((texture->GetInternalUsage() & kUsageNeedsView) == 0) {
+        return false;
+    }
+
+    if (texture->GetFormat().format != textureViewDescriptor->format &&
+        !texture->GetFormat().HasDepthOrStencil()) {
+        // Color format reinterpretation required. Note: Depth/stencil formats don't support
+        // reinterpretation.
+        return true;
+    }
+
+    // Reinterpretation not required. Now, we only need a new view if the view dimension or
+    // set of subresources for the shader is different from the base texture.
+    if ((texture->GetInternalUsage() & kShaderUsageNeedsView) == 0) {
+        return false;
+    }
+
+    if (texture->GetArrayLayers() != textureViewDescriptor->arrayLayerCount ||
+        (texture->GetArrayLayers() == 1 && texture->GetDimension() == wgpu::TextureDimension::e2D &&
+         textureViewDescriptor->dimension == wgpu::TextureViewDimension::e2DArray)) {
+        // If the view has a different number of array layers, we need a new view.
+        // And, if the original texture is a 2D texture with one array layer, we need a new
+        // view to view it as a 2D array texture.
+        return true;
+    }
+
+    if (texture->GetNumMipLevels() != textureViewDescriptor->mipLevelCount) {
+        return true;
+    }
+
+    if (ToBackend(texture)->GetGLFormat().format == GL_DEPTH_STENCIL &&
+        (texture->GetUsage() & wgpu::TextureUsage::TextureBinding) != 0 &&
+        textureViewDescriptor->aspect == wgpu::TextureAspect::StencilOnly) {
+        // We need a separate view for one of the depth or stencil planes
+        // because each glTextureView needs its own handle to set
+        // GL_DEPTH_STENCIL_TEXTURE_MODE. Choose the stencil aspect for the
+        // extra handle since it is likely sampled less often.
+        return true;
+    }
+
+    switch (textureViewDescriptor->dimension) {
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+            return true;
+        default:
+            break;
+    }
+
+    return false;
+}
+
+void AllocateTexture(const OpenGLFunctions& gl,
+                     GLenum target,
+                     GLsizei samples,
+                     GLuint levels,
+                     GLenum internalFormat,
+                     const Extent3D& size) {
+    // glTextureView() requires the value of GL_TEXTURE_IMMUTABLE_FORMAT for origtexture to
+    // be GL_TRUE, so the storage of the texture must be allocated with glTexStorage*D.
+    // https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTextureView.xhtml
+    switch (target) {
+        case GL_TEXTURE_2D_ARRAY:
+        case GL_TEXTURE_3D:
+            gl.TexStorage3D(target, levels, internalFormat, size.width, size.height,
+                            size.depthOrArrayLayers);
+            break;
+        case GL_TEXTURE_2D:
+        case GL_TEXTURE_CUBE_MAP:
+            gl.TexStorage2D(target, levels, internalFormat, size.width, size.height);
+            break;
+        case GL_TEXTURE_2D_MULTISAMPLE:
+            gl.TexStorage2DMultisample(target, samples, internalFormat, size.width, size.height,
+                                       true);
+            break;
+        default:
+            UNREACHABLE();
+    }
+}
+
+}  // namespace
+
+// Texture
+
+Texture::Texture(Device* device, const TextureDescriptor* descriptor)
+    : Texture(device, descriptor, GenTexture(device->gl), TextureState::OwnedInternal) {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+    uint32_t levels = GetNumMipLevels();
+
+    const GLFormat& glFormat = GetGLFormat();
+
+    gl.BindTexture(mTarget, mHandle);
+
+    AllocateTexture(gl, mTarget, GetSampleCount(), levels, glFormat.internalFormat, GetSize());
+
+    // The texture is not complete if it uses mipmapping and not all levels up to
+    // MAX_LEVEL have been defined.
+    gl.TexParameteri(mTarget, GL_TEXTURE_MAX_LEVEL, levels - 1);
+
+    if (GetDevice()->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        GetDevice()->ConsumedError(
+            ClearTexture(GetAllSubresources(), TextureBase::ClearValue::NonZero));
+    }
+}
+
+void Texture::Touch() {
+    mGenID++;
+}
+
+uint32_t Texture::GetGenID() const {
+    return mGenID;
+}
+
+Texture::Texture(Device* device,
+                 const TextureDescriptor* descriptor,
+                 GLuint handle,
+                 TextureState state)
+    : TextureBase(device, descriptor, state), mHandle(handle) {
+    mTarget = TargetForTexture(descriptor);
+}
+
+Texture::~Texture() {}
+
+void Texture::DestroyImpl() {
+    TextureBase::DestroyImpl();
+    if (GetTextureState() == TextureState::OwnedInternal) {
+        ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+        mHandle = 0;
+    }
+}
+
+GLuint Texture::GetHandle() const {
+    return mHandle;
+}
+
+GLenum Texture::GetGLTarget() const {
+    return mTarget;
+}
+
+const GLFormat& Texture::GetGLFormat() const {
+    return ToBackend(GetDevice())->GetGLFormat(GetFormat());
+}
+
+MaybeError Texture::ClearTexture(const SubresourceRange& range,
+                                 TextureBase::ClearValue clearValue) {
+    // TODO(crbug.com/dawn/850): initialize the textures with compressed formats.
+    if (GetFormat().isCompressed) {
+        return {};
+    }
+
+    Device* device = ToBackend(GetDevice());
+    const OpenGLFunctions& gl = device->gl;
+
+    uint8_t clearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0 : 1;
+    float fClearColor = (clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f;
+
+    if (GetFormat().isRenderable) {
+        if ((range.aspects & (Aspect::Depth | Aspect::Stencil)) != 0) {
+            GLfloat depth = fClearColor;
+            GLint stencil = clearColor;
+            if (range.aspects & Aspect::Depth) {
+                gl.DepthMask(GL_TRUE);
+            }
+            if (range.aspects & Aspect::Stencil) {
+                gl.StencilMask(GetStencilMaskFromStencilFormat(GetFormat().format));
+            }
+
+            auto DoClear = [&](Aspect aspects) {
+                if (aspects == (Aspect::Depth | Aspect::Stencil)) {
+                    gl.ClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
+                } else if (aspects == Aspect::Depth) {
+                    gl.ClearBufferfv(GL_DEPTH, 0, &depth);
+                } else if (aspects == Aspect::Stencil) {
+                    gl.ClearBufferiv(GL_STENCIL, 0, &stencil);
                 } else {
                     UNREACHABLE();
                 }
+            };
 
-                for (uint32_t level = range.baseMipLevel;
-                     level < range.baseMipLevel + range.levelCount; ++level) {
-                    switch (GetDimension()) {
-                        case wgpu::TextureDimension::e2D:
-                            if (GetArrayLayers() == 1) {
+            GLuint framebuffer = 0;
+            gl.GenFramebuffers(1, &framebuffer);
+            gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+            gl.Disable(GL_SCISSOR_TEST);
+
+            GLenum attachment;
+            if (range.aspects == (Aspect::Depth | Aspect::Stencil)) {
+                attachment = GL_DEPTH_STENCIL_ATTACHMENT;
+            } else if (range.aspects == Aspect::Depth) {
+                attachment = GL_DEPTH_ATTACHMENT;
+            } else if (range.aspects == Aspect::Stencil) {
+                attachment = GL_STENCIL_ATTACHMENT;
+            } else {
+                UNREACHABLE();
+            }
+
+            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+                 ++level) {
+                switch (GetDimension()) {
+                    case wgpu::TextureDimension::e2D:
+                        if (GetArrayLayers() == 1) {
+                            Aspect aspectsToClear = Aspect::None;
+                            for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                                if (clearValue == TextureBase::ClearValue::Zero &&
+                                    IsSubresourceContentInitialized(
+                                        SubresourceRange::SingleMipAndLayer(level, 0, aspect))) {
+                                    // Skip lazy clears if already initialized.
+                                    continue;
+                                }
+                                aspectsToClear |= aspect;
+                            }
+
+                            if (aspectsToClear == Aspect::None) {
+                                continue;
+                            }
+
+                            gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment, GetGLTarget(),
+                                                    GetHandle(), static_cast<GLint>(level));
+                            DoClear(aspectsToClear);
+                        } else {
+                            for (uint32_t layer = range.baseArrayLayer;
+                                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
                                 Aspect aspectsToClear = Aspect::None;
                                 for (Aspect aspect : IterateEnumMask(range.aspects)) {
                                     if (clearValue == TextureBase::ClearValue::Zero &&
                                         IsSubresourceContentInitialized(
-                                            SubresourceRange::SingleMipAndLayer(level, 0,
+                                            SubresourceRange::SingleMipAndLayer(level, layer,
                                                                                 aspect))) {
                                         // Skip lazy clears if already initialized.
                                         continue;
@@ -313,222 +332,53 @@
                                     continue;
                                 }
 
-                                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
-                                                        GetGLTarget(), GetHandle(),
-                                                        static_cast<GLint>(level));
+                                gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
+                                                           GetHandle(), static_cast<GLint>(level),
+                                                           static_cast<GLint>(layer));
                                 DoClear(aspectsToClear);
-                            } else {
-                                for (uint32_t layer = range.baseArrayLayer;
-                                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
-                                    Aspect aspectsToClear = Aspect::None;
-                                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                                        if (clearValue == TextureBase::ClearValue::Zero &&
-                                            IsSubresourceContentInitialized(
-                                                SubresourceRange::SingleMipAndLayer(level, layer,
-                                                                                    aspect))) {
-                                            // Skip lazy clears if already initialized.
-                                            continue;
-                                        }
-                                        aspectsToClear |= aspect;
-                                    }
-
-                                    if (aspectsToClear == Aspect::None) {
-                                        continue;
-                                    }
-
-                                    gl.FramebufferTextureLayer(
-                                        GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
-                                        static_cast<GLint>(level), static_cast<GLint>(layer));
-                                    DoClear(aspectsToClear);
-                                }
                             }
-                            break;
-
-                        case wgpu::TextureDimension::e1D:
-                        case wgpu::TextureDimension::e3D:
-                            UNREACHABLE();
-                    }
-                }
-
-                gl.Enable(GL_SCISSOR_TEST);
-                gl.DeleteFramebuffers(1, &framebuffer);
-            } else {
-                ASSERT(range.aspects == Aspect::Color);
-
-                // For gl.ClearBufferiv/uiv calls
-                constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
-                constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
-                std::array<GLuint, 4> clearColorData;
-                clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
-
-                // For gl.ClearBufferfv calls
-                constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
-                constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
-                std::array<GLfloat, 4> fClearColorData;
-                fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
-
-                static constexpr uint32_t MAX_TEXEL_SIZE = 16;
-                const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
-                ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
-
-                // For gl.ClearTexSubImage calls
-                constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-                constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
-                    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
-
-                wgpu::TextureComponentType baseType =
-                    GetFormat().GetAspectInfo(Aspect::Color).baseType;
-
-                const GLFormat& glFormat = GetGLFormat();
-                for (uint32_t level = range.baseMipLevel;
-                     level < range.baseMipLevel + range.levelCount; ++level) {
-                    Extent3D mipSize = GetMipLevelPhysicalSize(level);
-                    for (uint32_t layer = range.baseArrayLayer;
-                         layer < range.baseArrayLayer + range.layerCount; ++layer) {
-                        if (clearValue == TextureBase::ClearValue::Zero &&
-                            IsSubresourceContentInitialized(
-                                SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
-                            // Skip lazy clears if already initialized.
-                            continue;
                         }
-                        if (gl.IsAtLeastGL(4, 4)) {
-                            gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
-                                                static_cast<GLint>(layer), mipSize.width,
-                                                mipSize.height, mipSize.depthOrArrayLayers,
-                                                glFormat.format, glFormat.type,
-                                                clearValue == TextureBase::ClearValue::Zero
-                                                    ? kClearColorDataBytes0.data()
-                                                    : kClearColorDataBytes255.data());
-                            continue;
-                        }
+                        break;
 
-                        GLuint framebuffer = 0;
-                        gl.GenFramebuffers(1, &framebuffer);
-                        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
-
-                        GLenum attachment = GL_COLOR_ATTACHMENT0;
-                        gl.DrawBuffers(1, &attachment);
-
-                        gl.Disable(GL_SCISSOR_TEST);
-                        gl.ColorMask(true, true, true, true);
-
-                        auto DoClear = [&]() {
-                            switch (baseType) {
-                                case wgpu::TextureComponentType::Float: {
-                                    gl.ClearBufferfv(GL_COLOR, 0,
-                                                     clearValue == TextureBase::ClearValue::Zero
-                                                         ? kClearColorDataFloat0.data()
-                                                         : kClearColorDataFloat1.data());
-                                    break;
-                                }
-                                case wgpu::TextureComponentType::Uint: {
-                                    gl.ClearBufferuiv(GL_COLOR, 0,
-                                                      clearValue == TextureBase::ClearValue::Zero
-                                                          ? kClearColorDataUint0.data()
-                                                          : kClearColorDataUint1.data());
-                                    break;
-                                }
-                                case wgpu::TextureComponentType::Sint: {
-                                    gl.ClearBufferiv(GL_COLOR, 0,
-                                                     reinterpret_cast<const GLint*>(
-                                                         clearValue == TextureBase::ClearValue::Zero
-                                                             ? kClearColorDataUint0.data()
-                                                             : kClearColorDataUint1.data()));
-                                    break;
-                                }
-
-                                case wgpu::TextureComponentType::DepthComparison:
-                                    UNREACHABLE();
-                            }
-                        };
-
-                        if (GetArrayLayers() == 1) {
-                            switch (GetDimension()) {
-                                case wgpu::TextureDimension::e1D:
-                                    UNREACHABLE();
-                                case wgpu::TextureDimension::e2D:
-                                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
-                                                            GetGLTarget(), GetHandle(), level);
-                                    DoClear();
-                                    break;
-                                case wgpu::TextureDimension::e3D:
-                                    uint32_t depth =
-                                        GetMipLevelVirtualSize(level).depthOrArrayLayers;
-                                    for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
-                                        gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
-                                                                   GetHandle(), level, z);
-                                        DoClear();
-                                    }
-                                    break;
-                            }
-
-                        } else {
-                            ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
-                            gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
-                                                       level, layer);
-                            DoClear();
-                        }
-
-                        gl.Enable(GL_SCISSOR_TEST);
-                        gl.DeleteFramebuffers(1, &framebuffer);
-                        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
-                    }
+                    case wgpu::TextureDimension::e1D:
+                    case wgpu::TextureDimension::e3D:
+                        UNREACHABLE();
                 }
             }
+
+            gl.Enable(GL_SCISSOR_TEST);
+            gl.DeleteFramebuffers(1, &framebuffer);
         } else {
             ASSERT(range.aspects == Aspect::Color);
 
-            // create temp buffer with clear color to copy to the texture image
+            // For gl.ClearBufferiv/uiv calls
+            constexpr std::array<GLuint, 4> kClearColorDataUint0 = {0u, 0u, 0u, 0u};
+            constexpr std::array<GLuint, 4> kClearColorDataUint1 = {1u, 1u, 1u, 1u};
+            std::array<GLuint, 4> clearColorData;
+            clearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0u : 1u);
+
+            // For gl.ClearBufferfv calls
+            constexpr std::array<GLfloat, 4> kClearColorDataFloat0 = {0.f, 0.f, 0.f, 0.f};
+            constexpr std::array<GLfloat, 4> kClearColorDataFloat1 = {1.f, 1.f, 1.f, 1.f};
+            std::array<GLfloat, 4> fClearColorData;
+            fClearColorData.fill((clearValue == TextureBase::ClearValue::Zero) ? 0.f : 1.f);
+
+            static constexpr uint32_t MAX_TEXEL_SIZE = 16;
             const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
-            ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
+            ASSERT(blockInfo.byteSize <= MAX_TEXEL_SIZE);
 
-            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
-            uint32_t bytesPerRow =
-                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
+            // For gl.ClearTexSubImage calls
+            constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes0 = {
+                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+            constexpr std::array<GLbyte, MAX_TEXEL_SIZE> kClearColorDataBytes255 = {
+                -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
 
-            // Make sure that we are not rounding
-            ASSERT(bytesPerRow % blockInfo.byteSize == 0);
-            ASSERT(largestMipSize.height % blockInfo.height == 0);
+            wgpu::TextureComponentType baseType = GetFormat().GetAspectInfo(Aspect::Color).baseType;
 
-            uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
-                                    (largestMipSize.height / blockInfo.height) *
-                                    largestMipSize.depthOrArrayLayers;
-            if (bufferSize64 > std::numeric_limits<size_t>::max()) {
-                return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
-            }
-            size_t bufferSize = static_cast<size_t>(bufferSize64);
-
-            dawn::native::BufferDescriptor descriptor = {};
-            descriptor.mappedAtCreation = true;
-            descriptor.usage = wgpu::BufferUsage::CopySrc;
-            descriptor.size = bufferSize;
-
-            // We don't count the lazy clear of srcBuffer because it is an internal buffer.
-            // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
-            Ref<Buffer> srcBuffer;
-            DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
-
-            // Fill the buffer with clear color
-            memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
-            srcBuffer->Unmap();
-
-            gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
+            const GLFormat& glFormat = GetGLFormat();
             for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
                  ++level) {
-                TextureCopy textureCopy;
-                textureCopy.texture = this;
-                textureCopy.mipLevel = level;
-                textureCopy.origin = {};
-                textureCopy.aspect = Aspect::Color;
-
-                TextureDataLayout dataLayout;
-                dataLayout.offset = 0;
-                dataLayout.bytesPerRow = bytesPerRow;
-                dataLayout.rowsPerImage = largestMipSize.height;
-
                 Extent3D mipSize = GetMipLevelPhysicalSize(level);
-
                 for (uint32_t layer = range.baseArrayLayer;
                      layer < range.baseArrayLayer + range.layerCount; ++layer) {
                     if (clearValue == TextureBase::ClearValue::Zero &&
@@ -537,156 +387,299 @@
                         // Skip lazy clears if already initialized.
                         continue;
                     }
+                    if (gl.IsAtLeastGL(4, 4)) {
+                        gl.ClearTexSubImage(mHandle, static_cast<GLint>(level), 0, 0,
+                                            static_cast<GLint>(layer), mipSize.width,
+                                            mipSize.height, mipSize.depthOrArrayLayers,
+                                            glFormat.format, glFormat.type,
+                                            clearValue == TextureBase::ClearValue::Zero
+                                                ? kClearColorDataBytes0.data()
+                                                : kClearColorDataBytes255.data());
+                        continue;
+                    }
 
-                    textureCopy.origin.z = layer;
-                    DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
+                    GLuint framebuffer = 0;
+                    gl.GenFramebuffers(1, &framebuffer);
+                    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer);
+
+                    GLenum attachment = GL_COLOR_ATTACHMENT0;
+                    gl.DrawBuffers(1, &attachment);
+
+                    gl.Disable(GL_SCISSOR_TEST);
+                    gl.ColorMask(true, true, true, true);
+
+                    auto DoClear = [&]() {
+                        switch (baseType) {
+                            case wgpu::TextureComponentType::Float: {
+                                gl.ClearBufferfv(GL_COLOR, 0,
+                                                 clearValue == TextureBase::ClearValue::Zero
+                                                     ? kClearColorDataFloat0.data()
+                                                     : kClearColorDataFloat1.data());
+                                break;
+                            }
+                            case wgpu::TextureComponentType::Uint: {
+                                gl.ClearBufferuiv(GL_COLOR, 0,
+                                                  clearValue == TextureBase::ClearValue::Zero
+                                                      ? kClearColorDataUint0.data()
+                                                      : kClearColorDataUint1.data());
+                                break;
+                            }
+                            case wgpu::TextureComponentType::Sint: {
+                                gl.ClearBufferiv(GL_COLOR, 0,
+                                                 reinterpret_cast<const GLint*>(
+                                                     clearValue == TextureBase::ClearValue::Zero
+                                                         ? kClearColorDataUint0.data()
+                                                         : kClearColorDataUint1.data()));
+                                break;
+                            }
+
+                            case wgpu::TextureComponentType::DepthComparison:
+                                UNREACHABLE();
+                        }
+                    };
+
+                    if (GetArrayLayers() == 1) {
+                        switch (GetDimension()) {
+                            case wgpu::TextureDimension::e1D:
+                                UNREACHABLE();
+                            case wgpu::TextureDimension::e2D:
+                                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, attachment,
+                                                        GetGLTarget(), GetHandle(), level);
+                                DoClear();
+                                break;
+                            case wgpu::TextureDimension::e3D:
+                                uint32_t depth = GetMipLevelVirtualSize(level).depthOrArrayLayers;
+                                for (GLint z = 0; z < static_cast<GLint>(depth); ++z) {
+                                    gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment,
+                                                               GetHandle(), level, z);
+                                    DoClear();
+                                }
+                                break;
+                        }
+
+                    } else {
+                        ASSERT(GetDimension() == wgpu::TextureDimension::e2D);
+                        gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, attachment, GetHandle(),
+                                                   level, layer);
+                        DoClear();
+                    }
+
+                    gl.Enable(GL_SCISSOR_TEST);
+                    gl.DeleteFramebuffers(1, &framebuffer);
+                    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
                 }
             }
-            gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
         }
-        if (clearValue == TextureBase::ClearValue::Zero) {
-            SetIsSubresourceContentInitialized(true, range);
-            device->IncrementLazyClearCountForTesting();
+    } else {
+        ASSERT(range.aspects == Aspect::Color);
+
+        // create temp buffer with clear color to copy to the texture image
+        const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(Aspect::Color).block;
+        ASSERT(kTextureBytesPerRowAlignment % blockInfo.byteSize == 0);
+
+        Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+        uint32_t bytesPerRow =
+            Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize, 4);
+
+        // Make sure that we are not rounding
+        ASSERT(bytesPerRow % blockInfo.byteSize == 0);
+        ASSERT(largestMipSize.height % blockInfo.height == 0);
+
+        uint64_t bufferSize64 = static_cast<uint64_t>(bytesPerRow) *
+                                (largestMipSize.height / blockInfo.height) *
+                                largestMipSize.depthOrArrayLayers;
+        if (bufferSize64 > std::numeric_limits<size_t>::max()) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Unable to allocate buffer.");
         }
-        Touch();
-        return {};
-    }
+        size_t bufferSize = static_cast<size_t>(bufferSize64);
 
-    void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
-        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-            return;
-        }
-        if (!IsSubresourceContentInitialized(range)) {
-            GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
-        }
-    }
+        dawn::native::BufferDescriptor descriptor = {};
+        descriptor.mappedAtCreation = true;
+        descriptor.usage = wgpu::BufferUsage::CopySrc;
+        descriptor.size = bufferSize;
 
-    // TextureView
+        // We don't count the lazy clear of srcBuffer because it is an internal buffer.
+        // TODO(natlee@microsoft.com): use Dynamic Uploader here for temp buffer
+        Ref<Buffer> srcBuffer;
+        DAWN_TRY_ASSIGN(srcBuffer, Buffer::CreateInternalBuffer(device, &descriptor, false));
 
-    TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
-        : TextureViewBase(texture, descriptor), mOwnsHandle(false) {
-        mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
-                                                texture->GetSampleCount());
+        // Fill the buffer with clear color
+        memset(srcBuffer->GetMappedRange(0, bufferSize), clearColor, bufferSize);
+        srcBuffer->Unmap();
 
-        // Texture could be destroyed by the time we make a view.
-        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
-            return;
-        }
+        gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, srcBuffer->GetHandle());
+        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+             ++level) {
+            TextureCopy textureCopy;
+            textureCopy.texture = this;
+            textureCopy.mipLevel = level;
+            textureCopy.origin = {};
+            textureCopy.aspect = Aspect::Color;
 
-        if (!RequiresCreatingNewTextureView(texture, descriptor)) {
-            mHandle = ToBackend(texture)->GetHandle();
-        } else {
-            const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-            if (gl.IsAtLeastGL(4, 3)) {
-                mHandle = GenTexture(gl);
-                const Texture* textureGL = ToBackend(texture);
-                gl.TextureView(mHandle, mTarget, textureGL->GetHandle(), GetInternalFormat(),
-                               descriptor->baseMipLevel, descriptor->mipLevelCount,
-                               descriptor->baseArrayLayer, descriptor->arrayLayerCount);
-                mOwnsHandle = true;
-            } else {
-                // Simulate glTextureView() with texture-to-texture copies.
-                mUseCopy = true;
-                mHandle = 0;
+            TextureDataLayout dataLayout;
+            dataLayout.offset = 0;
+            dataLayout.bytesPerRow = bytesPerRow;
+            dataLayout.rowsPerImage = largestMipSize.height;
+
+            Extent3D mipSize = GetMipLevelPhysicalSize(level);
+
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                if (clearValue == TextureBase::ClearValue::Zero &&
+                    IsSubresourceContentInitialized(
+                        SubresourceRange::SingleMipAndLayer(level, layer, Aspect::Color))) {
+                    // Skip lazy clears if already initialized.
+                    continue;
+                }
+
+                textureCopy.origin.z = layer;
+                DoTexSubImage(ToBackend(GetDevice())->gl, textureCopy, 0, dataLayout, mipSize);
             }
         }
+        gl.BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
+    }
+    if (clearValue == TextureBase::ClearValue::Zero) {
+        SetIsSubresourceContentInitialized(true, range);
+        device->IncrementLazyClearCountForTesting();
+    }
+    Touch();
+    return {};
+}
+
+void Texture::EnsureSubresourceContentInitialized(const SubresourceRange& range) {
+    if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+        return;
+    }
+    if (!IsSubresourceContentInitialized(range)) {
+        GetDevice()->ConsumedError(ClearTexture(range, TextureBase::ClearValue::Zero));
+    }
+}
+
+// TextureView
+
+TextureView::TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor)
+    : TextureViewBase(texture, descriptor), mOwnsHandle(false) {
+    mTarget = TargetForTextureViewDimension(descriptor->dimension, descriptor->arrayLayerCount,
+                                            texture->GetSampleCount());
+
+    // Texture could be destroyed by the time we make a view.
+    if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+        return;
     }
 
-    TextureView::~TextureView() {
-    }
-
-    void TextureView::DestroyImpl() {
-        TextureViewBase::DestroyImpl();
-        if (mOwnsHandle) {
-            ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
-        }
-    }
-
-    GLuint TextureView::GetHandle() const {
-        ASSERT(mHandle != 0);
-        return mHandle;
-    }
-
-    GLenum TextureView::GetGLTarget() const {
-        return mTarget;
-    }
-
-    void TextureView::BindToFramebuffer(GLenum target, GLenum attachment) {
+    if (!RequiresCreatingNewTextureView(texture, descriptor)) {
+        mHandle = ToBackend(texture)->GetHandle();
+    } else {
         const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
-
-        // Use the base texture where possible to minimize the amount of copying required on GLES.
-        bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
-                          !GetTexture()->GetFormat().HasDepthOrStencil();
-
-        GLuint handle, textarget, mipLevel, arrayLayer;
-        if (useOwnView) {
-            // Use our own texture handle and target which points to a subset of the texture's
-            // subresources.
-            handle = GetHandle();
-            textarget = GetGLTarget();
-            mipLevel = 0;
-            arrayLayer = 0;
-        } else {
-            // Use the texture's handle and target, with the view's base mip level and base array
-
-            handle = ToBackend(GetTexture())->GetHandle();
-            textarget = ToBackend(GetTexture())->GetGLTarget();
-            mipLevel = GetBaseMipLevel();
-            arrayLayer = GetBaseArrayLayer();
-        }
-
-        ASSERT(handle != 0);
-        if (textarget == GL_TEXTURE_2D_ARRAY || textarget == GL_TEXTURE_3D) {
-            gl.FramebufferTextureLayer(target, attachment, handle, mipLevel, arrayLayer);
-        } else {
-            gl.FramebufferTexture2D(target, attachment, textarget, handle, mipLevel);
-        }
-    }
-
-    void TextureView::CopyIfNeeded() {
-        if (!mUseCopy) {
-            return;
-        }
-
-        const Texture* texture = ToBackend(GetTexture());
-        if (mGenID == texture->GetGenID()) {
-            return;
-        }
-
-        Device* device = ToBackend(GetDevice());
-        const OpenGLFunctions& gl = device->gl;
-        uint32_t srcLevel = GetBaseMipLevel();
-        uint32_t numLevels = GetLevelCount();
-
-        uint32_t width = texture->GetWidth() >> srcLevel;
-        uint32_t height = texture->GetHeight() >> srcLevel;
-        Extent3D size{width, height, GetLayerCount()};
-
-        if (mHandle == 0) {
+        if (gl.IsAtLeastGL(4, 3)) {
             mHandle = GenTexture(gl);
-            gl.BindTexture(mTarget, mHandle);
-            AllocateTexture(gl, mTarget, texture->GetSampleCount(), numLevels, GetInternalFormat(),
-                            size);
+            const Texture* textureGL = ToBackend(texture);
+            gl.TextureView(mHandle, mTarget, textureGL->GetHandle(), GetInternalFormat(),
+                           descriptor->baseMipLevel, descriptor->mipLevelCount,
+                           descriptor->baseArrayLayer, descriptor->arrayLayerCount);
             mOwnsHandle = true;
+        } else {
+            // Simulate glTextureView() with texture-to-texture copies.
+            mUseCopy = true;
+            mHandle = 0;
         }
+    }
+}
 
-        Origin3D src{0, 0, GetBaseArrayLayer()};
-        Origin3D dst{0, 0, 0};
-        for (GLuint level = 0; level < numLevels; ++level) {
-            CopyImageSubData(gl, GetAspects(), texture->GetHandle(), texture->GetGLTarget(),
-                             srcLevel + level, src, mHandle, mTarget, level, dst, size);
-        }
+TextureView::~TextureView() {}
 
-        mGenID = texture->GetGenID();
+void TextureView::DestroyImpl() {
+    TextureViewBase::DestroyImpl();
+    if (mOwnsHandle) {
+        ToBackend(GetDevice())->gl.DeleteTextures(1, &mHandle);
+    }
+}
+
+GLuint TextureView::GetHandle() const {
+    ASSERT(mHandle != 0);
+    return mHandle;
+}
+
+GLenum TextureView::GetGLTarget() const {
+    return mTarget;
+}
+
+void TextureView::BindToFramebuffer(GLenum target, GLenum attachment) {
+    const OpenGLFunctions& gl = ToBackend(GetDevice())->gl;
+
+    // Use the base texture where possible to minimize the amount of copying required on GLES.
+    bool useOwnView = GetFormat().format != GetTexture()->GetFormat().format &&
+                      !GetTexture()->GetFormat().HasDepthOrStencil();
+
+    GLuint handle, textarget, mipLevel, arrayLayer;
+    if (useOwnView) {
+        // Use our own texture handle and target which points to a subset of the texture's
+        // subresources.
+        handle = GetHandle();
+        textarget = GetGLTarget();
+        mipLevel = 0;
+        arrayLayer = 0;
+    } else {
+        // Use the texture's handle and target, with the view's base mip level and base array
+
+        handle = ToBackend(GetTexture())->GetHandle();
+        textarget = ToBackend(GetTexture())->GetGLTarget();
+        mipLevel = GetBaseMipLevel();
+        arrayLayer = GetBaseArrayLayer();
     }
 
-    GLenum TextureView::GetInternalFormat() const {
-        // Depth/stencil don't support reinterpretation, and the aspect is specified at
-        // bind time. In that case, we use the base texture format.
-        const Format& format =
-            GetFormat().HasDepthOrStencil() ? GetTexture()->GetFormat() : GetFormat();
-        const GLFormat& glFormat = ToBackend(GetDevice())->GetGLFormat(format);
-        return glFormat.internalFormat;
+    ASSERT(handle != 0);
+    if (textarget == GL_TEXTURE_2D_ARRAY || textarget == GL_TEXTURE_3D) {
+        gl.FramebufferTextureLayer(target, attachment, handle, mipLevel, arrayLayer);
+    } else {
+        gl.FramebufferTexture2D(target, attachment, textarget, handle, mipLevel);
     }
+}
+
+void TextureView::CopyIfNeeded() {
+    if (!mUseCopy) {
+        return;
+    }
+
+    const Texture* texture = ToBackend(GetTexture());
+    if (mGenID == texture->GetGenID()) {
+        return;
+    }
+
+    Device* device = ToBackend(GetDevice());
+    const OpenGLFunctions& gl = device->gl;
+    uint32_t srcLevel = GetBaseMipLevel();
+    uint32_t numLevels = GetLevelCount();
+
+    uint32_t width = texture->GetWidth() >> srcLevel;
+    uint32_t height = texture->GetHeight() >> srcLevel;
+    Extent3D size{width, height, GetLayerCount()};
+
+    if (mHandle == 0) {
+        mHandle = GenTexture(gl);
+        gl.BindTexture(mTarget, mHandle);
+        AllocateTexture(gl, mTarget, texture->GetSampleCount(), numLevels, GetInternalFormat(),
+                        size);
+        mOwnsHandle = true;
+    }
+
+    Origin3D src{0, 0, GetBaseArrayLayer()};
+    Origin3D dst{0, 0, 0};
+    for (GLuint level = 0; level < numLevels; ++level) {
+        CopyImageSubData(gl, GetAspects(), texture->GetHandle(), texture->GetGLTarget(),
+                         srcLevel + level, src, mHandle, mTarget, level, dst, size);
+    }
+
+    mGenID = texture->GetGenID();
+}
+
+GLenum TextureView::GetInternalFormat() const {
+    // Depth/stencil don't support reinterpretation, and the aspect is specified at
+    // bind time. In that case, we use the base texture format.
+    const Format& format =
+        GetFormat().HasDepthOrStencil() ? GetTexture()->GetFormat() : GetFormat();
+    const GLFormat& glFormat = ToBackend(GetDevice())->GetGLFormat(format);
+    return glFormat.internalFormat;
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/TextureGL.h b/src/dawn/native/opengl/TextureGL.h
index 9d03201..c9bf63f 100644
--- a/src/dawn/native/opengl/TextureGL.h
+++ b/src/dawn/native/opengl/TextureGL.h
@@ -21,57 +21,54 @@
 
 namespace dawn::native::opengl {
 
-    class Device;
-    struct GLFormat;
+class Device;
+struct GLFormat;
 
-    class Texture final : public TextureBase {
-      public:
-        Texture(Device* device, const TextureDescriptor* descriptor);
-        Texture(Device* device,
-                const TextureDescriptor* descriptor,
-                GLuint handle,
-                TextureState state);
+class Texture final : public TextureBase {
+  public:
+    Texture(Device* device, const TextureDescriptor* descriptor);
+    Texture(Device* device, const TextureDescriptor* descriptor, GLuint handle, TextureState state);
 
-        GLuint GetHandle() const;
-        GLenum GetGLTarget() const;
-        const GLFormat& GetGLFormat() const;
-        uint32_t GetGenID() const;
-        void Touch();
+    GLuint GetHandle() const;
+    GLenum GetGLTarget() const;
+    const GLFormat& GetGLFormat() const;
+    uint32_t GetGenID() const;
+    void Touch();
 
-        void EnsureSubresourceContentInitialized(const SubresourceRange& range);
+    void EnsureSubresourceContentInitialized(const SubresourceRange& range);
 
-      private:
-        ~Texture() override;
+  private:
+    ~Texture() override;
 
-        void DestroyImpl() override;
-        MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
+    void DestroyImpl() override;
+    MaybeError ClearTexture(const SubresourceRange& range, TextureBase::ClearValue clearValue);
 
-        GLuint mHandle;
-        GLenum mTarget;
-        uint32_t mGenID = 0;
-    };
+    GLuint mHandle;
+    GLenum mTarget;
+    uint32_t mGenID = 0;
+};
 
-    class TextureView final : public TextureViewBase {
-      public:
-        TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
+class TextureView final : public TextureViewBase {
+  public:
+    TextureView(TextureBase* texture, const TextureViewDescriptor* descriptor);
 
-        GLuint GetHandle() const;
-        GLenum GetGLTarget() const;
-        void BindToFramebuffer(GLenum target, GLenum attachment);
-        void CopyIfNeeded();
+    GLuint GetHandle() const;
+    GLenum GetGLTarget() const;
+    void BindToFramebuffer(GLenum target, GLenum attachment);
+    void CopyIfNeeded();
 
-      private:
-        ~TextureView() override;
-        void DestroyImpl() override;
-        GLenum GetInternalFormat() const;
+  private:
+    ~TextureView() override;
+    void DestroyImpl() override;
+    GLenum GetInternalFormat() const;
 
-        // TODO(crbug.com/dawn/1355): Delete this handle on texture destroy.
-        GLuint mHandle;
-        GLenum mTarget;
-        bool mOwnsHandle;
-        bool mUseCopy = false;
-        uint32_t mGenID = 0;
-    };
+    // TODO(crbug.com/dawn/1355): Delete this handle on texture destroy.
+    GLuint mHandle;
+    GLenum mTarget;
+    bool mOwnsHandle;
+    bool mUseCopy = false;
+    uint32_t mGenID = 0;
+};
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/opengl/UtilsGL.cpp b/src/dawn/native/opengl/UtilsGL.cpp
index 746f93b..e35b9a1 100644
--- a/src/dawn/native/opengl/UtilsGL.cpp
+++ b/src/dawn/native/opengl/UtilsGL.cpp
@@ -20,134 +20,134 @@
 
 namespace dawn::native::opengl {
 
-    GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
-        switch (compareFunction) {
-            case wgpu::CompareFunction::Never:
-                return GL_NEVER;
-            case wgpu::CompareFunction::Less:
-                return GL_LESS;
-            case wgpu::CompareFunction::LessEqual:
-                return GL_LEQUAL;
-            case wgpu::CompareFunction::Greater:
-                return GL_GREATER;
-            case wgpu::CompareFunction::GreaterEqual:
-                return GL_GEQUAL;
-            case wgpu::CompareFunction::NotEqual:
-                return GL_NOTEQUAL;
-            case wgpu::CompareFunction::Equal:
-                return GL_EQUAL;
-            case wgpu::CompareFunction::Always:
-                return GL_ALWAYS;
+GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction) {
+    switch (compareFunction) {
+        case wgpu::CompareFunction::Never:
+            return GL_NEVER;
+        case wgpu::CompareFunction::Less:
+            return GL_LESS;
+        case wgpu::CompareFunction::LessEqual:
+            return GL_LEQUAL;
+        case wgpu::CompareFunction::Greater:
+            return GL_GREATER;
+        case wgpu::CompareFunction::GreaterEqual:
+            return GL_GEQUAL;
+        case wgpu::CompareFunction::NotEqual:
+            return GL_NOTEQUAL;
+        case wgpu::CompareFunction::Equal:
+            return GL_EQUAL;
+        case wgpu::CompareFunction::Always:
+            return GL_ALWAYS;
 
-            case wgpu::CompareFunction::Undefined:
-                break;
-        }
-        UNREACHABLE();
+        case wgpu::CompareFunction::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
+    switch (depthStencilFormat) {
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+        case wgpu::TextureFormat::Stencil8:
+            return 0xFF;
+
+        default:
+            UNREACHABLE();
+    }
+}
+
+void CopyImageSubData(const OpenGLFunctions& gl,
+                      Aspect srcAspects,
+                      GLuint srcHandle,
+                      GLenum srcTarget,
+                      GLint srcLevel,
+                      const Origin3D& src,
+                      GLuint dstHandle,
+                      GLenum dstTarget,
+                      GLint dstLevel,
+                      const Origin3D& dst,
+                      const Extent3D& size) {
+    if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
+        gl.CopyImageSubData(srcHandle, srcTarget, srcLevel, src.x, src.y, src.z, dstHandle,
+                            dstTarget, dstLevel, dst.x, dst.y, dst.z, size.width, size.height,
+                            size.depthOrArrayLayers);
+        return;
     }
 
-    GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat) {
-        switch (depthStencilFormat) {
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-            case wgpu::TextureFormat::Depth32FloatStencil8:
-            case wgpu::TextureFormat::Stencil8:
-                return 0xFF;
+    GLint prevReadFBO = 0, prevDrawFBO = 0;
+    gl.GetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &prevReadFBO);
+    gl.GetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &prevDrawFBO);
 
-            default:
-                UNREACHABLE();
-        }
+    // Generate temporary framebuffers for the blits.
+    GLuint readFBO = 0, drawFBO = 0;
+    gl.GenFramebuffers(1, &readFBO);
+    gl.GenFramebuffers(1, &drawFBO);
+    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
+    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);
+
+    // Reset state that may affect glBlitFramebuffer().
+    gl.Disable(GL_SCISSOR_TEST);
+    GLenum blitMask = 0;
+    if (srcAspects & Aspect::Color) {
+        blitMask |= GL_COLOR_BUFFER_BIT;
+    }
+    if (srcAspects & Aspect::Depth) {
+        blitMask |= GL_DEPTH_BUFFER_BIT;
+    }
+    if (srcAspects & Aspect::Stencil) {
+        blitMask |= GL_STENCIL_BUFFER_BIT;
     }
 
-    void CopyImageSubData(const OpenGLFunctions& gl,
-                          Aspect srcAspects,
-                          GLuint srcHandle,
-                          GLenum srcTarget,
-                          GLint srcLevel,
-                          const Origin3D& src,
-                          GLuint dstHandle,
-                          GLenum dstTarget,
-                          GLint dstLevel,
-                          const Origin3D& dst,
-                          const Extent3D& size) {
-        if (gl.IsAtLeastGL(4, 3) || gl.IsAtLeastGLES(3, 2)) {
-            gl.CopyImageSubData(srcHandle, srcTarget, srcLevel, src.x, src.y, src.z, dstHandle,
-                                dstTarget, dstLevel, dst.x, dst.y, dst.z, size.width, size.height,
-                                size.depthOrArrayLayers);
-            return;
-        }
-
-        GLint prevReadFBO = 0, prevDrawFBO = 0;
-        gl.GetIntegerv(GL_READ_FRAMEBUFFER_BINDING, &prevReadFBO);
-        gl.GetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &prevDrawFBO);
-
-        // Generate temporary framebuffers for the blits.
-        GLuint readFBO = 0, drawFBO = 0;
-        gl.GenFramebuffers(1, &readFBO);
-        gl.GenFramebuffers(1, &drawFBO);
-        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, readFBO);
-        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFBO);
-
-        // Reset state that may affect glBlitFramebuffer().
-        gl.Disable(GL_SCISSOR_TEST);
-        GLenum blitMask = 0;
-        if (srcAspects & Aspect::Color) {
-            blitMask |= GL_COLOR_BUFFER_BIT;
-        }
-        if (srcAspects & Aspect::Depth) {
-            blitMask |= GL_DEPTH_BUFFER_BIT;
-        }
-        if (srcAspects & Aspect::Stencil) {
-            blitMask |= GL_STENCIL_BUFFER_BIT;
-        }
-
-        // Iterate over all layers, doing a single blit for each.
-        for (uint32_t layer = 0; layer < size.depthOrArrayLayers; ++layer) {
-            // Set attachments for all aspects.
-            for (Aspect aspect : IterateEnumMask(srcAspects)) {
-                GLenum glAttachment;
-                switch (aspect) {
-                    case Aspect::Color:
-                        glAttachment = GL_COLOR_ATTACHMENT0;
-                        break;
-                    case Aspect::Depth:
-                        glAttachment = GL_DEPTH_ATTACHMENT;
-                        break;
-                    case Aspect::Stencil:
-                        glAttachment = GL_STENCIL_ATTACHMENT;
-                        break;
-                    case Aspect::CombinedDepthStencil:
-                    case Aspect::None:
-                    case Aspect::Plane0:
-                    case Aspect::Plane1:
-                        UNREACHABLE();
-                }
-                if (srcTarget == GL_TEXTURE_2D) {
-                    gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, srcTarget, srcHandle,
-                                            srcLevel);
-                } else {
-                    gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment, srcHandle,
-                                               srcLevel, src.z + layer);
-                }
-                if (dstTarget == GL_TEXTURE_2D) {
-                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, dstTarget, dstHandle,
-                                            dstLevel);
-                } else if (dstTarget == GL_TEXTURE_CUBE_MAP) {
-                    GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer;
-                    gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, dstHandle,
-                                            dstLevel);
-                } else {
-                    gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, dstHandle,
-                                               dstLevel, dst.z + layer);
-                }
+    // Iterate over all layers, doing a single blit for each.
+    for (uint32_t layer = 0; layer < size.depthOrArrayLayers; ++layer) {
+        // Set attachments for all aspects.
+        for (Aspect aspect : IterateEnumMask(srcAspects)) {
+            GLenum glAttachment;
+            switch (aspect) {
+                case Aspect::Color:
+                    glAttachment = GL_COLOR_ATTACHMENT0;
+                    break;
+                case Aspect::Depth:
+                    glAttachment = GL_DEPTH_ATTACHMENT;
+                    break;
+                case Aspect::Stencil:
+                    glAttachment = GL_STENCIL_ATTACHMENT;
+                    break;
+                case Aspect::CombinedDepthStencil:
+                case Aspect::None:
+                case Aspect::Plane0:
+                case Aspect::Plane1:
+                    UNREACHABLE();
             }
-            gl.BlitFramebuffer(src.x, src.y, src.x + size.width, src.y + size.height, dst.x, dst.y,
-                               dst.x + size.width, dst.y + size.height, blitMask, GL_NEAREST);
+            if (srcTarget == GL_TEXTURE_2D) {
+                gl.FramebufferTexture2D(GL_READ_FRAMEBUFFER, glAttachment, srcTarget, srcHandle,
+                                        srcLevel);
+            } else {
+                gl.FramebufferTextureLayer(GL_READ_FRAMEBUFFER, glAttachment, srcHandle, srcLevel,
+                                           src.z + layer);
+            }
+            if (dstTarget == GL_TEXTURE_2D) {
+                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, dstTarget, dstHandle,
+                                        dstLevel);
+            } else if (dstTarget == GL_TEXTURE_CUBE_MAP) {
+                GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer;
+                gl.FramebufferTexture2D(GL_DRAW_FRAMEBUFFER, glAttachment, target, dstHandle,
+                                        dstLevel);
+            } else {
+                gl.FramebufferTextureLayer(GL_DRAW_FRAMEBUFFER, glAttachment, dstHandle, dstLevel,
+                                           dst.z + layer);
+            }
         }
-        gl.Enable(GL_SCISSOR_TEST);
-        gl.DeleteFramebuffers(1, &readFBO);
-        gl.DeleteFramebuffers(1, &drawFBO);
-        gl.BindFramebuffer(GL_READ_FRAMEBUFFER, prevReadFBO);
-        gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, prevDrawFBO);
+        gl.BlitFramebuffer(src.x, src.y, src.x + size.width, src.y + size.height, dst.x, dst.y,
+                           dst.x + size.width, dst.y + size.height, blitMask, GL_NEAREST);
     }
+    gl.Enable(GL_SCISSOR_TEST);
+    gl.DeleteFramebuffers(1, &readFBO);
+    gl.DeleteFramebuffers(1, &drawFBO);
+    gl.BindFramebuffer(GL_READ_FRAMEBUFFER, prevReadFBO);
+    gl.BindFramebuffer(GL_DRAW_FRAMEBUFFER, prevDrawFBO);
+}
 
 }  // namespace dawn::native::opengl
diff --git a/src/dawn/native/opengl/UtilsGL.h b/src/dawn/native/opengl/UtilsGL.h
index 78e12db..97d4f2a 100644
--- a/src/dawn/native/opengl/UtilsGL.h
+++ b/src/dawn/native/opengl/UtilsGL.h
@@ -20,21 +20,21 @@
 #include "dawn/native/opengl/opengl_platform.h"
 
 namespace dawn::native::opengl {
-    struct OpenGLFunctions;
+struct OpenGLFunctions;
 
-    GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
-    GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
-    void CopyImageSubData(const OpenGLFunctions& gl,
-                          Aspect srcAspects,
-                          GLuint srcHandle,
-                          GLenum srcTarget,
-                          GLint srcLevel,
-                          const Origin3D& src,
-                          GLuint dstHandle,
-                          GLenum dstTarget,
-                          GLint dstLevel,
-                          const Origin3D& dst,
-                          const Extent3D& size);
+GLuint ToOpenGLCompareFunction(wgpu::CompareFunction compareFunction);
+GLint GetStencilMaskFromStencilFormat(wgpu::TextureFormat depthStencilFormat);
+void CopyImageSubData(const OpenGLFunctions& gl,
+                      Aspect srcAspects,
+                      GLuint srcHandle,
+                      GLenum srcTarget,
+                      GLint srcLevel,
+                      const Origin3D& src,
+                      GLuint dstHandle,
+                      GLenum dstTarget,
+                      GLint dstLevel,
+                      const Origin3D& dst,
+                      const Extent3D& size);
 
 }  // namespace dawn::native::opengl
 
diff --git a/src/dawn/native/utils/WGPUHelpers.cpp b/src/dawn/native/utils/WGPUHelpers.cpp
index 1172ca9..722476e 100644
--- a/src/dawn/native/utils/WGPUHelpers.cpp
+++ b/src/dawn/native/utils/WGPUHelpers.cpp
@@ -33,160 +33,154 @@
 
 namespace dawn::native::utils {
 
-    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device,
-                                                            const char* source) {
-        ShaderModuleWGSLDescriptor wgslDesc;
-        wgslDesc.source = source;
-        ShaderModuleDescriptor descriptor;
-        descriptor.nextInChain = &wgslDesc;
-        return device->CreateShaderModule(&descriptor);
+ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source) {
+    ShaderModuleWGSLDescriptor wgslDesc;
+    wgslDesc.source = source;
+    ShaderModuleDescriptor descriptor;
+    descriptor.nextInChain = &wgslDesc;
+    return device->CreateShaderModule(&descriptor);
+}
+
+ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+                                                    wgpu::BufferUsage usage,
+                                                    const void* data,
+                                                    uint64_t size) {
+    BufferDescriptor descriptor;
+    descriptor.size = size;
+    descriptor.usage = usage;
+    descriptor.mappedAtCreation = true;
+    Ref<BufferBase> buffer;
+    DAWN_TRY_ASSIGN(buffer, device->CreateBuffer(&descriptor));
+    memcpy(buffer->GetMappedRange(0, size), data, size);
+    buffer->Unmap();
+    return buffer;
+}
+
+ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+    DeviceBase* device,
+    const Ref<BindGroupLayoutBase>& bindGroupLayout) {
+    PipelineLayoutDescriptor descriptor;
+    descriptor.bindGroupLayoutCount = 1;
+    BindGroupLayoutBase* bgl = bindGroupLayout.Get();
+    descriptor.bindGroupLayouts = &bgl;
+    return device->CreatePipelineLayout(&descriptor);
+}
+
+ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+    DeviceBase* device,
+    std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+    bool allowInternalBinding) {
+    std::vector<BindGroupLayoutEntry> entries;
+    for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+        entries.push_back(entry);
     }
 
-    ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
-                                                        wgpu::BufferUsage usage,
-                                                        const void* data,
-                                                        uint64_t size) {
-        BufferDescriptor descriptor;
-        descriptor.size = size;
-        descriptor.usage = usage;
-        descriptor.mappedAtCreation = true;
-        Ref<BufferBase> buffer;
-        DAWN_TRY_ASSIGN(buffer, device->CreateBuffer(&descriptor));
-        memcpy(buffer->GetMappedRange(0, size), data, size);
-        buffer->Unmap();
-        return buffer;
+    BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = static_cast<uint32_t>(entries.size());
+    descriptor.entries = entries.data();
+    return device->CreateBindGroupLayout(&descriptor, allowInternalBinding);
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::BufferBindingType bufferType,
+    bool bufferHasDynamicOffset,
+    uint64_t bufferMinBindingSize) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    buffer.type = bufferType;
+    buffer.hasDynamicOffset = bufferHasDynamicOffset;
+    buffer.minBindingSize = bufferMinBindingSize;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::SamplerBindingType samplerType) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    sampler.type = samplerType;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::TextureSampleType textureSampleType,
+    wgpu::TextureViewDimension textureViewDimension,
+    bool textureMultisampled) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    texture.sampleType = textureSampleType;
+    texture.viewDimension = textureViewDimension;
+    texture.multisampled = textureMultisampled;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::StorageTextureAccess storageTextureAccess,
+    wgpu::TextureFormat format,
+    wgpu::TextureViewDimension textureViewDimension) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    storageTexture.access = storageTextureAccess;
+    storageTexture.format = format;
+    storageTexture.viewDimension = textureViewDimension;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    const BindGroupLayoutEntry& entry)
+    : BindGroupLayoutEntry(entry) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                         const Ref<SamplerBase>& sampler)
+    : binding(binding), sampler(sampler) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                         const Ref<TextureViewBase>& textureView)
+    : binding(binding), textureView(textureView) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                         const Ref<BufferBase>& buffer,
+                                                         uint64_t offset,
+                                                         uint64_t size)
+    : binding(binding), buffer(buffer), offset(offset), size(size) {}
+
+BindingInitializationHelper::~BindingInitializationHelper() = default;
+
+BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+    BindGroupEntry result;
+
+    result.binding = binding;
+    result.sampler = sampler.Get();
+    result.textureView = textureView.Get();
+    result.buffer = buffer.Get();
+    result.offset = offset;
+    result.size = size;
+
+    return result;
+}
+
+ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+    DeviceBase* device,
+    const Ref<BindGroupLayoutBase>& layout,
+    std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+    std::vector<BindGroupEntry> entries;
+    for (const BindingInitializationHelper& helper : entriesInitializer) {
+        entries.push_back(helper.GetAsBinding());
     }
 
-    ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
-        DeviceBase* device,
-        const Ref<BindGroupLayoutBase>& bindGroupLayout) {
-        PipelineLayoutDescriptor descriptor;
-        descriptor.bindGroupLayoutCount = 1;
-        BindGroupLayoutBase* bgl = bindGroupLayout.Get();
-        descriptor.bindGroupLayouts = &bgl;
-        return device->CreatePipelineLayout(&descriptor);
-    }
+    BindGroupDescriptor descriptor;
+    descriptor.layout = layout.Get();
+    descriptor.entryCount = entries.size();
+    descriptor.entries = entries.data();
 
-    ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
-        DeviceBase* device,
-        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
-        bool allowInternalBinding) {
-        std::vector<BindGroupLayoutEntry> entries;
-        for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
-            entries.push_back(entry);
-        }
+    return device->CreateBindGroup(&descriptor);
+}
 
-        BindGroupLayoutDescriptor descriptor;
-        descriptor.entryCount = static_cast<uint32_t>(entries.size());
-        descriptor.entries = entries.data();
-        return device->CreateBindGroupLayout(&descriptor, allowInternalBinding);
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::BufferBindingType bufferType,
-        bool bufferHasDynamicOffset,
-        uint64_t bufferMinBindingSize) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        buffer.type = bufferType;
-        buffer.hasDynamicOffset = bufferHasDynamicOffset;
-        buffer.minBindingSize = bufferMinBindingSize;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::SamplerBindingType samplerType) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        sampler.type = samplerType;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::TextureSampleType textureSampleType,
-        wgpu::TextureViewDimension textureViewDimension,
-        bool textureMultisampled) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        texture.sampleType = textureSampleType;
-        texture.viewDimension = textureViewDimension;
-        texture.multisampled = textureMultisampled;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::StorageTextureAccess storageTextureAccess,
-        wgpu::TextureFormat format,
-        wgpu::TextureViewDimension textureViewDimension) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        storageTexture.access = storageTextureAccess;
-        storageTexture.format = format;
-        storageTexture.viewDimension = textureViewDimension;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        const BindGroupLayoutEntry& entry)
-        : BindGroupLayoutEntry(entry) {
-    }
-
-    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
-                                                             const Ref<SamplerBase>& sampler)
-        : binding(binding), sampler(sampler) {
-    }
-
-    BindingInitializationHelper::BindingInitializationHelper(
-        uint32_t binding,
-        const Ref<TextureViewBase>& textureView)
-        : binding(binding), textureView(textureView) {
-    }
-
-    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
-                                                             const Ref<BufferBase>& buffer,
-                                                             uint64_t offset,
-                                                             uint64_t size)
-        : binding(binding), buffer(buffer), offset(offset), size(size) {
-    }
-
-    BindingInitializationHelper::~BindingInitializationHelper() = default;
-
-    BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
-        BindGroupEntry result;
-
-        result.binding = binding;
-        result.sampler = sampler.Get();
-        result.textureView = textureView.Get();
-        result.buffer = buffer.Get();
-        result.offset = offset;
-        result.size = size;
-
-        return result;
-    }
-
-    ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
-        DeviceBase* device,
-        const Ref<BindGroupLayoutBase>& layout,
-        std::initializer_list<BindingInitializationHelper> entriesInitializer) {
-        std::vector<BindGroupEntry> entries;
-        for (const BindingInitializationHelper& helper : entriesInitializer) {
-            entries.push_back(helper.GetAsBinding());
-        }
-
-        BindGroupDescriptor descriptor;
-        descriptor.layout = layout.Get();
-        descriptor.entryCount = entries.size();
-        descriptor.entries = entries.data();
-
-        return device->CreateBindGroup(&descriptor);
-    }
-
-    const char* GetLabelForTrace(const char* label) {
-        return (label == nullptr || strlen(label) == 0) ? "None" : label;
-    }
+const char* GetLabelForTrace(const char* label) {
+    return (label == nullptr || strlen(label) == 0) ? "None" : label;
+}
 
 }  // namespace dawn::native::utils
diff --git a/src/dawn/native/utils/WGPUHelpers.h b/src/dawn/native/utils/WGPUHelpers.h
index ad0eefc..9eab990 100644
--- a/src/dawn/native/utils/WGPUHelpers.h
+++ b/src/dawn/native/utils/WGPUHelpers.h
@@ -25,97 +25,97 @@
 
 namespace dawn::native::utils {
 
-    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source);
+ResultOrError<Ref<ShaderModuleBase>> CreateShaderModule(DeviceBase* device, const char* source);
 
-    ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
-                                                        wgpu::BufferUsage usage,
-                                                        const void* data,
-                                                        uint64_t size);
+ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+                                                    wgpu::BufferUsage usage,
+                                                    const void* data,
+                                                    uint64_t size);
 
-    template <typename T>
-    ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
-                                                        wgpu::BufferUsage usage,
-                                                        std::initializer_list<T> data) {
-        return CreateBufferFromData(device, usage, data.begin(), uint32_t(sizeof(T) * data.size()));
-    }
+template <typename T>
+ResultOrError<Ref<BufferBase>> CreateBufferFromData(DeviceBase* device,
+                                                    wgpu::BufferUsage usage,
+                                                    std::initializer_list<T> data) {
+    return CreateBufferFromData(device, usage, data.begin(), uint32_t(sizeof(T) * data.size()));
+}
 
-    ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
-        DeviceBase* device,
-        const Ref<BindGroupLayoutBase>& bindGroupLayout);
+ResultOrError<Ref<PipelineLayoutBase>> MakeBasicPipelineLayout(
+    DeviceBase* device,
+    const Ref<BindGroupLayoutBase>& bindGroupLayout);
 
-    // Helpers to make creating bind group layouts look nicer:
-    //
-    //   utils::MakeBindGroupLayout(device, {
-    //       {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
-    //       {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
-    //       {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
-    //   });
+// Helpers to make creating bind group layouts look nicer:
+//
+//   utils::MakeBindGroupLayout(device, {
+//       {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+//       {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+//       {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+//   });
 
-    struct BindingLayoutEntryInitializationHelper : BindGroupLayoutEntry {
-        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
-                                               wgpu::ShaderStage entryVisibility,
-                                               wgpu::BufferBindingType bufferType,
-                                               bool bufferHasDynamicOffset = false,
-                                               uint64_t bufferMinBindingSize = 0);
-        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
-                                               wgpu::ShaderStage entryVisibility,
-                                               wgpu::SamplerBindingType samplerType);
-        BindingLayoutEntryInitializationHelper(
-            uint32_t entryBinding,
-            wgpu::ShaderStage entryVisibility,
-            wgpu::TextureSampleType textureSampleType,
-            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
-            bool textureMultisampled = false);
-        BindingLayoutEntryInitializationHelper(
-            uint32_t entryBinding,
-            wgpu::ShaderStage entryVisibility,
-            wgpu::StorageTextureAccess storageTextureAccess,
-            wgpu::TextureFormat format,
-            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+struct BindingLayoutEntryInitializationHelper : BindGroupLayoutEntry {
+    BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                           wgpu::ShaderStage entryVisibility,
+                                           wgpu::BufferBindingType bufferType,
+                                           bool bufferHasDynamicOffset = false,
+                                           uint64_t bufferMinBindingSize = 0);
+    BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                           wgpu::ShaderStage entryVisibility,
+                                           wgpu::SamplerBindingType samplerType);
+    BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::TextureSampleType textureSampleType,
+        wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+        bool textureMultisampled = false);
+    BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::StorageTextureAccess storageTextureAccess,
+        wgpu::TextureFormat format,
+        wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
 
-        explicit BindingLayoutEntryInitializationHelper(const BindGroupLayoutEntry& entry);
-    };
+    explicit BindingLayoutEntryInitializationHelper(const BindGroupLayoutEntry& entry);
+};
 
-    ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
-        DeviceBase* device,
-        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
-        bool allowInternalBinding = false);
+ResultOrError<Ref<BindGroupLayoutBase>> MakeBindGroupLayout(
+    DeviceBase* device,
+    std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer,
+    bool allowInternalBinding = false);
 
-    // Helpers to make creating bind groups look nicer:
-    //
-    //   utils::MakeBindGroup(device, layout, {
-    //       {0, mySampler},
-    //       {1, myBuffer, offset, size},
-    //       {3, myTextureView}
-    //   });
+// Helpers to make creating bind groups look nicer:
+//
+//   utils::MakeBindGroup(device, layout, {
+//       {0, mySampler},
+//       {1, myBuffer, offset, size},
+//       {3, myTextureView}
+//   });
 
-    // Structure with one constructor per-type of bindings, so that the initializer_list accepts
-    // bindings with the right type and no extra information.
-    struct BindingInitializationHelper {
-        BindingInitializationHelper(uint32_t binding, const Ref<SamplerBase>& sampler);
-        BindingInitializationHelper(uint32_t binding, const Ref<TextureViewBase>& textureView);
-        BindingInitializationHelper(uint32_t binding,
-                                    const Ref<BufferBase>& buffer,
-                                    uint64_t offset = 0,
-                                    uint64_t size = wgpu::kWholeSize);
-        ~BindingInitializationHelper();
+// Structure with one constructor per-type of bindings, so that the initializer_list accepts
+// bindings with the right type and no extra information.
+struct BindingInitializationHelper {
+    BindingInitializationHelper(uint32_t binding, const Ref<SamplerBase>& sampler);
+    BindingInitializationHelper(uint32_t binding, const Ref<TextureViewBase>& textureView);
+    BindingInitializationHelper(uint32_t binding,
+                                const Ref<BufferBase>& buffer,
+                                uint64_t offset = 0,
+                                uint64_t size = wgpu::kWholeSize);
+    ~BindingInitializationHelper();
 
-        BindGroupEntry GetAsBinding() const;
+    BindGroupEntry GetAsBinding() const;
 
-        uint32_t binding;
-        Ref<SamplerBase> sampler;
-        Ref<TextureViewBase> textureView;
-        Ref<BufferBase> buffer;
-        uint64_t offset = 0;
-        uint64_t size = 0;
-    };
+    uint32_t binding;
+    Ref<SamplerBase> sampler;
+    Ref<TextureViewBase> textureView;
+    Ref<BufferBase> buffer;
+    uint64_t offset = 0;
+    uint64_t size = 0;
+};
 
-    ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
-        DeviceBase* device,
-        const Ref<BindGroupLayoutBase>& layout,
-        std::initializer_list<BindingInitializationHelper> entriesInitializer);
+ResultOrError<Ref<BindGroupBase>> MakeBindGroup(
+    DeviceBase* device,
+    const Ref<BindGroupLayoutBase>& layout,
+    std::initializer_list<BindingInitializationHelper> entriesInitializer);
 
-    const char* GetLabelForTrace(const char* label);
+const char* GetLabelForTrace(const char* label);
 
 }  // namespace dawn::native::utils
 
diff --git a/src/dawn/native/vulkan/AdapterVk.cpp b/src/dawn/native/vulkan/AdapterVk.cpp
index ba8a045..6b0d770 100644
--- a/src/dawn/native/vulkan/AdapterVk.cpp
+++ b/src/dawn/native/vulkan/AdapterVk.cpp
@@ -25,157 +25,156 @@
 
 namespace dawn::native::vulkan {
 
-    Adapter::Adapter(InstanceBase* instance,
-                     VulkanInstance* vulkanInstance,
-                     VkPhysicalDevice physicalDevice)
-        : AdapterBase(instance, wgpu::BackendType::Vulkan),
-          mPhysicalDevice(physicalDevice),
-          mVulkanInstance(vulkanInstance) {
+Adapter::Adapter(InstanceBase* instance,
+                 VulkanInstance* vulkanInstance,
+                 VkPhysicalDevice physicalDevice)
+    : AdapterBase(instance, wgpu::BackendType::Vulkan),
+      mPhysicalDevice(physicalDevice),
+      mVulkanInstance(vulkanInstance) {}
+
+const VulkanDeviceInfo& Adapter::GetDeviceInfo() const {
+    return mDeviceInfo;
+}
+
+VkPhysicalDevice Adapter::GetPhysicalDevice() const {
+    return mPhysicalDevice;
+}
+
+VulkanInstance* Adapter::GetVulkanInstance() const {
+    return mVulkanInstance.Get();
+}
+
+bool Adapter::IsDepthStencilFormatSupported(VkFormat format) {
+    ASSERT(format == VK_FORMAT_D16_UNORM_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT ||
+           format == VK_FORMAT_D32_SFLOAT_S8_UINT || format == VK_FORMAT_S8_UINT);
+
+    VkFormatProperties properties;
+    mVulkanInstance->GetFunctions().GetPhysicalDeviceFormatProperties(mPhysicalDevice, format,
+                                                                      &properties);
+    return properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+}
+
+MaybeError Adapter::InitializeImpl() {
+    DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
+
+    if (mDeviceInfo.HasExt(DeviceExt::DriverProperties)) {
+        mDriverDescription = mDeviceInfo.driverProperties.driverName;
+        if (mDeviceInfo.driverProperties.driverInfo[0] != '\0') {
+            mDriverDescription += std::string(": ") + mDeviceInfo.driverProperties.driverInfo;
+        }
+    } else {
+        mDriverDescription =
+            "Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
     }
 
-    const VulkanDeviceInfo& Adapter::GetDeviceInfo() const {
-        return mDeviceInfo;
+    mDeviceId = mDeviceInfo.properties.deviceID;
+    mVendorId = mDeviceInfo.properties.vendorID;
+    mName = mDeviceInfo.properties.deviceName;
+
+    switch (mDeviceInfo.properties.deviceType) {
+        case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
+            mAdapterType = wgpu::AdapterType::IntegratedGPU;
+            break;
+        case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
+            mAdapterType = wgpu::AdapterType::DiscreteGPU;
+            break;
+        case VK_PHYSICAL_DEVICE_TYPE_CPU:
+            mAdapterType = wgpu::AdapterType::CPU;
+            break;
+        default:
+            mAdapterType = wgpu::AdapterType::Unknown;
+            break;
     }
 
-    VkPhysicalDevice Adapter::GetPhysicalDevice() const {
-        return mPhysicalDevice;
+    return {};
+}
+
+MaybeError Adapter::InitializeSupportedFeaturesImpl() {
+    // Needed for viewport Y-flip.
+    if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
+        return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
     }
 
-    VulkanInstance* Adapter::GetVulkanInstance() const {
-        return mVulkanInstance.Get();
+    // Needed for security
+    if (!mDeviceInfo.features.robustBufferAccess) {
+        return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
     }
 
-    bool Adapter::IsDepthStencilFormatSupported(VkFormat format) {
-        ASSERT(format == VK_FORMAT_D16_UNORM_S8_UINT || format == VK_FORMAT_D24_UNORM_S8_UINT ||
-               format == VK_FORMAT_D32_SFLOAT_S8_UINT || format == VK_FORMAT_S8_UINT);
-
-        VkFormatProperties properties;
-        mVulkanInstance->GetFunctions().GetPhysicalDeviceFormatProperties(mPhysicalDevice, format,
-                                                                          &properties);
-        return properties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
+    if (!mDeviceInfo.features.textureCompressionBC &&
+        !(mDeviceInfo.features.textureCompressionETC2 &&
+          mDeviceInfo.features.textureCompressionASTC_LDR)) {
+        return DAWN_INTERNAL_ERROR(
+            "Vulkan textureCompressionBC feature required or both textureCompressionETC2 and "
+            "textureCompressionASTC required.");
     }
 
-    MaybeError Adapter::InitializeImpl() {
-        DAWN_TRY_ASSIGN(mDeviceInfo, GatherDeviceInfo(*this));
-
-        if (mDeviceInfo.HasExt(DeviceExt::DriverProperties)) {
-            mDriverDescription = mDeviceInfo.driverProperties.driverName;
-            if (mDeviceInfo.driverProperties.driverInfo[0] != '\0') {
-                mDriverDescription += std::string(": ") + mDeviceInfo.driverProperties.driverInfo;
-            }
-        } else {
-            mDriverDescription =
-                "Vulkan driver version: " + std::to_string(mDeviceInfo.properties.driverVersion);
-        }
-
-        mDeviceId = mDeviceInfo.properties.deviceID;
-        mVendorId = mDeviceInfo.properties.vendorID;
-        mName = mDeviceInfo.properties.deviceName;
-
-        switch (mDeviceInfo.properties.deviceType) {
-            case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
-                mAdapterType = wgpu::AdapterType::IntegratedGPU;
-                break;
-            case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
-                mAdapterType = wgpu::AdapterType::DiscreteGPU;
-                break;
-            case VK_PHYSICAL_DEVICE_TYPE_CPU:
-                mAdapterType = wgpu::AdapterType::CPU;
-                break;
-            default:
-                mAdapterType = wgpu::AdapterType::Unknown;
-                break;
-        }
-
-        return {};
+    // Needed for the respective WebGPU features.
+    if (!mDeviceInfo.features.depthBiasClamp) {
+        return DAWN_INTERNAL_ERROR("Vulkan depthBiasClamp feature required.");
+    }
+    if (!mDeviceInfo.features.fragmentStoresAndAtomics) {
+        return DAWN_INTERNAL_ERROR("Vulkan fragmentStoresAndAtomics feature required.");
+    }
+    if (!mDeviceInfo.features.fullDrawIndexUint32) {
+        return DAWN_INTERNAL_ERROR("Vulkan fullDrawIndexUint32 feature required.");
+    }
+    if (!mDeviceInfo.features.imageCubeArray) {
+        return DAWN_INTERNAL_ERROR("Vulkan imageCubeArray feature required.");
+    }
+    if (!mDeviceInfo.features.independentBlend) {
+        return DAWN_INTERNAL_ERROR("Vulkan independentBlend feature required.");
+    }
+    if (!mDeviceInfo.features.sampleRateShading) {
+        return DAWN_INTERNAL_ERROR("Vulkan sampleRateShading feature required.");
     }
 
-    MaybeError Adapter::InitializeSupportedFeaturesImpl() {
-        // Needed for viewport Y-flip.
-        if (!mDeviceInfo.HasExt(DeviceExt::Maintenance1)) {
-            return DAWN_INTERNAL_ERROR("Vulkan 1.1 or Vulkan 1.0 with KHR_Maintenance1 required.");
-        }
+    // Initialize supported extensions
+    if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
+        mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
+    }
 
-        // Needed for security
-        if (!mDeviceInfo.features.robustBufferAccess) {
-            return DAWN_INTERNAL_ERROR("Vulkan robustBufferAccess feature required.");
-        }
+    if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
+        mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
+    }
 
-        if (!mDeviceInfo.features.textureCompressionBC &&
-            !(mDeviceInfo.features.textureCompressionETC2 &&
-              mDeviceInfo.features.textureCompressionASTC_LDR)) {
-            return DAWN_INTERNAL_ERROR(
-                "Vulkan textureCompressionBC feature required or both textureCompressionETC2 and "
-                "textureCompressionASTC required.");
-        }
+    if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
+        mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
+    }
 
-        // Needed for the respective WebGPU features.
-        if (!mDeviceInfo.features.depthBiasClamp) {
-            return DAWN_INTERNAL_ERROR("Vulkan depthBiasClamp feature required.");
-        }
-        if (!mDeviceInfo.features.fragmentStoresAndAtomics) {
-            return DAWN_INTERNAL_ERROR("Vulkan fragmentStoresAndAtomics feature required.");
-        }
-        if (!mDeviceInfo.features.fullDrawIndexUint32) {
-            return DAWN_INTERNAL_ERROR("Vulkan fullDrawIndexUint32 feature required.");
-        }
-        if (!mDeviceInfo.features.imageCubeArray) {
-            return DAWN_INTERNAL_ERROR("Vulkan imageCubeArray feature required.");
-        }
-        if (!mDeviceInfo.features.independentBlend) {
-            return DAWN_INTERNAL_ERROR("Vulkan independentBlend feature required.");
-        }
-        if (!mDeviceInfo.features.sampleRateShading) {
-            return DAWN_INTERNAL_ERROR("Vulkan sampleRateShading feature required.");
-        }
+    if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
+        mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
+    }
 
-        // Initialize supported extensions
-        if (mDeviceInfo.features.textureCompressionBC == VK_TRUE) {
-            mSupportedFeatures.EnableFeature(Feature::TextureCompressionBC);
-        }
+    if (mDeviceInfo.features.depthClamp == VK_TRUE) {
+        mSupportedFeatures.EnableFeature(Feature::DepthClamping);
+    }
 
-        if (mDeviceInfo.features.textureCompressionETC2 == VK_TRUE) {
-            mSupportedFeatures.EnableFeature(Feature::TextureCompressionETC2);
-        }
+    if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
+        mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
+    }
 
-        if (mDeviceInfo.features.textureCompressionASTC_LDR == VK_TRUE) {
-            mSupportedFeatures.EnableFeature(Feature::TextureCompressionASTC);
-        }
+    if (IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT)) {
+        mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
+    }
 
-        if (mDeviceInfo.features.pipelineStatisticsQuery == VK_TRUE) {
-            mSupportedFeatures.EnableFeature(Feature::PipelineStatisticsQuery);
-        }
-
-        if (mDeviceInfo.features.depthClamp == VK_TRUE) {
-            mSupportedFeatures.EnableFeature(Feature::DepthClamping);
-        }
-
-        if (mDeviceInfo.properties.limits.timestampComputeAndGraphics == VK_TRUE) {
-            mSupportedFeatures.EnableFeature(Feature::TimestampQuery);
-        }
-
-        if (IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT)) {
-            mSupportedFeatures.EnableFeature(Feature::Depth24UnormStencil8);
-        }
-
-        if (IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT)) {
-            mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
-        }
+    if (IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT)) {
+        mSupportedFeatures.EnableFeature(Feature::Depth32FloatStencil8);
+    }
 
 #if defined(DAWN_USE_SYNC_FDS)
-        // TODO(chromium:1258986): Precisely enable the feature by querying the device's format
-        // features.
-        mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
+    // TODO(chromium:1258986): Precisely enable the feature by querying the device's format
+    // features.
+    mSupportedFeatures.EnableFeature(Feature::MultiPlanarFormats);
 #endif
 
-        return {};
-    }
+    return {};
+}
 
-    MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
-        GetDefaultLimits(&limits->v1);
-        CombinedLimits baseLimits = *limits;
+MaybeError Adapter::InitializeSupportedLimitsImpl(CombinedLimits* limits) {
+    GetDefaultLimits(&limits->v1);
+    CombinedLimits baseLimits = *limits;
 
-        const VkPhysicalDeviceLimits& vkLimits = mDeviceInfo.properties.limits;
+    const VkPhysicalDeviceLimits& vkLimits = mDeviceInfo.properties.limits;
 
 #define CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, compareOp, msgSegment)   \
     do {                                                                             \
@@ -194,163 +193,156 @@
 #define CHECK_AND_SET_V1_MIN_LIMIT(vulkanName, webgpuName) \
     CHECK_AND_SET_V1_LIMIT_IMPL(vulkanName, webgpuName, >, "most")
 
-        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension1D, maxTextureDimension1D);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension1D, maxTextureDimension1D);
 
-        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension2D, maxTextureDimension2D);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimensionCube, maxTextureDimension2D);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferWidth, maxTextureDimension2D);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferHeight, maxTextureDimension2D);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[0], maxTextureDimension2D);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[1], maxTextureDimension2D);
-        CHECK_AND_SET_V1_MAX_LIMIT(viewportBoundsRange[1], maxTextureDimension2D);
-        limits->v1.maxTextureDimension2D = std::min({
-            static_cast<uint32_t>(vkLimits.maxImageDimension2D),
-            static_cast<uint32_t>(vkLimits.maxImageDimensionCube),
-            static_cast<uint32_t>(vkLimits.maxFramebufferWidth),
-            static_cast<uint32_t>(vkLimits.maxFramebufferHeight),
-            static_cast<uint32_t>(vkLimits.maxViewportDimensions[0]),
-            static_cast<uint32_t>(vkLimits.maxViewportDimensions[1]),
-            static_cast<uint32_t>(vkLimits.viewportBoundsRange[1]),
-        });
+    CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension2D, maxTextureDimension2D);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimensionCube, maxTextureDimension2D);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferWidth, maxTextureDimension2D);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxFramebufferHeight, maxTextureDimension2D);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[0], maxTextureDimension2D);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxViewportDimensions[1], maxTextureDimension2D);
+    CHECK_AND_SET_V1_MAX_LIMIT(viewportBoundsRange[1], maxTextureDimension2D);
+    limits->v1.maxTextureDimension2D = std::min({
+        static_cast<uint32_t>(vkLimits.maxImageDimension2D),
+        static_cast<uint32_t>(vkLimits.maxImageDimensionCube),
+        static_cast<uint32_t>(vkLimits.maxFramebufferWidth),
+        static_cast<uint32_t>(vkLimits.maxFramebufferHeight),
+        static_cast<uint32_t>(vkLimits.maxViewportDimensions[0]),
+        static_cast<uint32_t>(vkLimits.maxViewportDimensions[1]),
+        static_cast<uint32_t>(vkLimits.viewportBoundsRange[1]),
+    });
 
-        CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension3D, maxTextureDimension3D);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxImageArrayLayers, maxTextureArrayLayers);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxBoundDescriptorSets, maxBindGroups);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetUniformBuffersDynamic,
-                                   maxDynamicUniformBuffersPerPipelineLayout);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetStorageBuffersDynamic,
-                                   maxDynamicStorageBuffersPerPipelineLayout);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxImageDimension3D, maxTextureDimension3D);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxImageArrayLayers, maxTextureArrayLayers);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxBoundDescriptorSets, maxBindGroups);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetUniformBuffersDynamic,
+                               maxDynamicUniformBuffersPerPipelineLayout);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxDescriptorSetStorageBuffersDynamic,
+                               maxDynamicStorageBuffersPerPipelineLayout);
 
-        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSampledImages,
-                                   maxSampledTexturesPerShaderStage);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSamplers, maxSamplersPerShaderStage);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageBuffers,
-                                   maxStorageBuffersPerShaderStage);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageImages,
-                                   maxStorageTexturesPerShaderStage);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorUniformBuffers,
-                                   maxUniformBuffersPerShaderStage);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxUniformBufferRange, maxUniformBufferBindingSize);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxStorageBufferRange, maxStorageBufferBindingSize);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSampledImages,
+                               maxSampledTexturesPerShaderStage);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorSamplers, maxSamplersPerShaderStage);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageBuffers,
+                               maxStorageBuffersPerShaderStage);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorStorageImages,
+                               maxStorageTexturesPerShaderStage);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxPerStageDescriptorUniformBuffers,
+                               maxUniformBuffersPerShaderStage);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxUniformBufferRange, maxUniformBufferBindingSize);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxStorageBufferRange, maxStorageBufferBindingSize);
 
-        CHECK_AND_SET_V1_MIN_LIMIT(minUniformBufferOffsetAlignment,
-                                   minUniformBufferOffsetAlignment);
-        CHECK_AND_SET_V1_MIN_LIMIT(minStorageBufferOffsetAlignment,
-                                   minStorageBufferOffsetAlignment);
+    CHECK_AND_SET_V1_MIN_LIMIT(minUniformBufferOffsetAlignment, minUniformBufferOffsetAlignment);
+    CHECK_AND_SET_V1_MIN_LIMIT(minStorageBufferOffsetAlignment, minStorageBufferOffsetAlignment);
 
-        CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputBindings, maxVertexBuffers);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputAttributes, maxVertexAttributes);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputBindings, maxVertexBuffers);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxVertexInputAttributes, maxVertexAttributes);
 
-        if (vkLimits.maxVertexInputBindingStride < baseLimits.v1.maxVertexBufferArrayStride ||
-            vkLimits.maxVertexInputAttributeOffset < baseLimits.v1.maxVertexBufferArrayStride - 1) {
-            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
-        }
-        limits->v1.maxVertexBufferArrayStride = std::min(
-            vkLimits.maxVertexInputBindingStride, vkLimits.maxVertexInputAttributeOffset + 1);
+    if (vkLimits.maxVertexInputBindingStride < baseLimits.v1.maxVertexBufferArrayStride ||
+        vkLimits.maxVertexInputAttributeOffset < baseLimits.v1.maxVertexBufferArrayStride - 1) {
+        return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxVertexBufferArrayStride");
+    }
+    limits->v1.maxVertexBufferArrayStride =
+        std::min(vkLimits.maxVertexInputBindingStride, vkLimits.maxVertexInputAttributeOffset + 1);
 
-        if (vkLimits.maxVertexOutputComponents < baseLimits.v1.maxInterStageShaderComponents ||
-            vkLimits.maxFragmentInputComponents < baseLimits.v1.maxInterStageShaderComponents) {
+    if (vkLimits.maxVertexOutputComponents < baseLimits.v1.maxInterStageShaderComponents ||
+        vkLimits.maxFragmentInputComponents < baseLimits.v1.maxInterStageShaderComponents) {
+        return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxInterStageShaderComponents");
+    }
+    limits->v1.maxInterStageShaderComponents =
+        std::min(vkLimits.maxVertexOutputComponents, vkLimits.maxFragmentInputComponents);
+
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeSharedMemorySize, maxComputeWorkgroupStorageSize);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupInvocations, maxComputeInvocationsPerWorkgroup);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[0], maxComputeWorkgroupSizeX);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[1], maxComputeWorkgroupSizeY);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[2], maxComputeWorkgroupSizeZ);
+
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[0], maxComputeWorkgroupsPerDimension);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[1], maxComputeWorkgroupsPerDimension);
+    CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[2], maxComputeWorkgroupsPerDimension);
+    limits->v1.maxComputeWorkgroupsPerDimension = std::min({
+        vkLimits.maxComputeWorkGroupCount[0],
+        vkLimits.maxComputeWorkGroupCount[1],
+        vkLimits.maxComputeWorkGroupCount[2],
+    });
+
+    if (vkLimits.maxColorAttachments < kMaxColorAttachments) {
+        return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
+    }
+    if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+                  vkLimits.framebufferColorSampleCounts)) {
+        return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for framebufferColorSampleCounts");
+    }
+    if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
+                  vkLimits.framebufferDepthSampleCounts)) {
+        return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for framebufferDepthSampleCounts");
+    }
+
+    // Only check maxFragmentCombinedOutputResources on mobile GPUs. Desktop GPU drivers seem
+    // to put incorrect values for this limit with things like 8 or 16 when they can do bindless
+    // storage buffers. Mesa llvmpipe driver also puts 8 here.
+    uint32_t vendorId = mDeviceInfo.properties.vendorID;
+    if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) && !gpu_info::IsMesa(vendorId) &&
+        !gpu_info::IsNvidia(vendorId)) {
+        if (vkLimits.maxFragmentCombinedOutputResources <
+            kMaxColorAttachments + baseLimits.v1.maxStorageTexturesPerShaderStage +
+                baseLimits.v1.maxStorageBuffersPerShaderStage) {
             return DAWN_INTERNAL_ERROR(
-                "Insufficient Vulkan limits for maxInterStageShaderComponents");
-        }
-        limits->v1.maxInterStageShaderComponents =
-            std::min(vkLimits.maxVertexOutputComponents, vkLimits.maxFragmentInputComponents);
-
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeSharedMemorySize, maxComputeWorkgroupStorageSize);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupInvocations,
-                                   maxComputeInvocationsPerWorkgroup);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[0], maxComputeWorkgroupSizeX);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[1], maxComputeWorkgroupSizeY);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupSize[2], maxComputeWorkgroupSizeZ);
-
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[0], maxComputeWorkgroupsPerDimension);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[1], maxComputeWorkgroupsPerDimension);
-        CHECK_AND_SET_V1_MAX_LIMIT(maxComputeWorkGroupCount[2], maxComputeWorkgroupsPerDimension);
-        limits->v1.maxComputeWorkgroupsPerDimension = std::min({
-            vkLimits.maxComputeWorkGroupCount[0],
-            vkLimits.maxComputeWorkGroupCount[1],
-            vkLimits.maxComputeWorkGroupCount[2],
-        });
-
-        if (vkLimits.maxColorAttachments < kMaxColorAttachments) {
-            return DAWN_INTERNAL_ERROR("Insufficient Vulkan limits for maxColorAttachments");
-        }
-        if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
-                      vkLimits.framebufferColorSampleCounts)) {
-            return DAWN_INTERNAL_ERROR(
-                "Insufficient Vulkan limits for framebufferColorSampleCounts");
-        }
-        if (!IsSubset(VkSampleCountFlags(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT),
-                      vkLimits.framebufferDepthSampleCounts)) {
-            return DAWN_INTERNAL_ERROR(
-                "Insufficient Vulkan limits for framebufferDepthSampleCounts");
+                "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
         }
 
-        // Only check maxFragmentCombinedOutputResources on mobile GPUs. Desktop GPUs drivers seem
-        // to put incorrect values for this limit with things like 8 or 16 when they can do bindless
-        // storage buffers. Mesa llvmpipe driver also puts 8 here.
-        uint32_t vendorId = mDeviceInfo.properties.vendorID;
-        if (!gpu_info::IsAMD(vendorId) && !gpu_info::IsIntel(vendorId) &&
-            !gpu_info::IsMesa(vendorId) && !gpu_info::IsNvidia(vendorId)) {
-            if (vkLimits.maxFragmentCombinedOutputResources <
-                kMaxColorAttachments + baseLimits.v1.maxStorageTexturesPerShaderStage +
-                    baseLimits.v1.maxStorageBuffersPerShaderStage) {
-                return DAWN_INTERNAL_ERROR(
-                    "Insufficient Vulkan maxFragmentCombinedOutputResources limit");
+        uint32_t maxFragmentCombinedOutputResources = kMaxColorAttachments +
+                                                      limits->v1.maxStorageTexturesPerShaderStage +
+                                                      limits->v1.maxStorageBuffersPerShaderStage;
+
+        if (maxFragmentCombinedOutputResources > vkLimits.maxFragmentCombinedOutputResources) {
+            // WebGPU's maxFragmentCombinedOutputResources exceeds the Vulkan limit.
+            // Decrease |maxStorageTexturesPerShaderStage| and |maxStorageBuffersPerShaderStage|
+            // to fit within the Vulkan limit.
+            uint32_t countOverLimit =
+                maxFragmentCombinedOutputResources - vkLimits.maxFragmentCombinedOutputResources;
+
+            uint32_t maxStorageTexturesOverBase = limits->v1.maxStorageTexturesPerShaderStage -
+                                                  baseLimits.v1.maxStorageTexturesPerShaderStage;
+            uint32_t maxStorageBuffersOverBase = limits->v1.maxStorageBuffersPerShaderStage -
+                                                 baseLimits.v1.maxStorageBuffersPerShaderStage;
+
+            // Reduce the number of resources by half the overage count, but clamp
+            // to ensure we don't go below the base limits.
+            uint32_t numFewerStorageTextures =
+                std::min(countOverLimit / 2, maxStorageTexturesOverBase);
+            uint32_t numFewerStorageBuffers =
+                std::min((countOverLimit + 1) / 2, maxStorageBuffersOverBase);
+
+            if (numFewerStorageTextures == maxStorageTexturesOverBase) {
+                // If |numFewerStorageTextures| was clamped, subtract the remaining
+                // from the storage buffers.
+                numFewerStorageBuffers = countOverLimit - numFewerStorageTextures;
+                ASSERT(numFewerStorageBuffers <= maxStorageBuffersOverBase);
+            } else if (numFewerStorageBuffers == maxStorageBuffersOverBase) {
+                // If |numFewerStorageBuffers| was clamped, subtract the remaining
+                // from the storage textures.
+                numFewerStorageTextures = countOverLimit - numFewerStorageBuffers;
+                ASSERT(numFewerStorageTextures <= maxStorageTexturesOverBase);
             }
-
-            uint32_t maxFragmentCombinedOutputResources =
-                kMaxColorAttachments + limits->v1.maxStorageTexturesPerShaderStage +
-                limits->v1.maxStorageBuffersPerShaderStage;
-
-            if (maxFragmentCombinedOutputResources > vkLimits.maxFragmentCombinedOutputResources) {
-                // WebGPU's maxFragmentCombinedOutputResources exceeds the Vulkan limit.
-                // Decrease |maxStorageTexturesPerShaderStage| and |maxStorageBuffersPerShaderStage|
-                // to fit within the Vulkan limit.
-                uint32_t countOverLimit = maxFragmentCombinedOutputResources -
-                                          vkLimits.maxFragmentCombinedOutputResources;
-
-                uint32_t maxStorageTexturesOverBase =
-                    limits->v1.maxStorageTexturesPerShaderStage -
-                    baseLimits.v1.maxStorageTexturesPerShaderStage;
-                uint32_t maxStorageBuffersOverBase = limits->v1.maxStorageBuffersPerShaderStage -
-                                                     baseLimits.v1.maxStorageBuffersPerShaderStage;
-
-                // Reduce the number of resources by half the overage count, but clamp to
-                // to ensure we don't go below the base limits.
-                uint32_t numFewerStorageTextures =
-                    std::min(countOverLimit / 2, maxStorageTexturesOverBase);
-                uint32_t numFewerStorageBuffers =
-                    std::min((countOverLimit + 1) / 2, maxStorageBuffersOverBase);
-
-                if (numFewerStorageTextures == maxStorageTexturesOverBase) {
-                    // If |numFewerStorageTextures| was clamped, subtract the remaining
-                    // from the storage buffers.
-                    numFewerStorageBuffers = countOverLimit - numFewerStorageTextures;
-                    ASSERT(numFewerStorageBuffers <= maxStorageBuffersOverBase);
-                } else if (numFewerStorageBuffers == maxStorageBuffersOverBase) {
-                    // If |numFewerStorageBuffers| was clamped, subtract the remaining
-                    // from the storage textures.
-                    numFewerStorageTextures = countOverLimit - numFewerStorageBuffers;
-                    ASSERT(numFewerStorageTextures <= maxStorageTexturesOverBase);
-                }
-                limits->v1.maxStorageTexturesPerShaderStage -= numFewerStorageTextures;
-                limits->v1.maxStorageBuffersPerShaderStage -= numFewerStorageBuffers;
-            }
+            limits->v1.maxStorageTexturesPerShaderStage -= numFewerStorageTextures;
+            limits->v1.maxStorageBuffersPerShaderStage -= numFewerStorageBuffers;
         }
-
-        return {};
     }
 
-    bool Adapter::SupportsExternalImages() const {
-        // Via dawn::native::vulkan::WrapVulkanImage
-        return external_memory::Service::CheckSupport(mDeviceInfo) &&
-               external_semaphore::Service::CheckSupport(mDeviceInfo, mPhysicalDevice,
-                                                         mVulkanInstance->GetFunctions());
-    }
+    return {};
+}
 
-    ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
-        return Device::Create(this, descriptor);
-    }
+bool Adapter::SupportsExternalImages() const {
+    // Via dawn::native::vulkan::WrapVulkanImage
+    return external_memory::Service::CheckSupport(mDeviceInfo) &&
+           external_semaphore::Service::CheckSupport(mDeviceInfo, mPhysicalDevice,
+                                                     mVulkanInstance->GetFunctions());
+}
+
+ResultOrError<Ref<DeviceBase>> Adapter::CreateDeviceImpl(const DeviceDescriptor* descriptor) {
+    return Device::Create(this, descriptor);
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/AdapterVk.h b/src/dawn/native/vulkan/AdapterVk.h
index 7616cda..2d9ce45 100644
--- a/src/dawn/native/vulkan/AdapterVk.h
+++ b/src/dawn/native/vulkan/AdapterVk.h
@@ -23,36 +23,35 @@
 
 namespace dawn::native::vulkan {
 
-    class VulkanInstance;
+class VulkanInstance;
 
-    class Adapter : public AdapterBase {
-      public:
-        Adapter(InstanceBase* instance,
-                VulkanInstance* vulkanInstance,
-                VkPhysicalDevice physicalDevice);
-        ~Adapter() override = default;
+class Adapter : public AdapterBase {
+  public:
+    Adapter(InstanceBase* instance,
+            VulkanInstance* vulkanInstance,
+            VkPhysicalDevice physicalDevice);
+    ~Adapter() override = default;
 
-        // AdapterBase Implementation
-        bool SupportsExternalImages() const override;
+    // AdapterBase Implementation
+    bool SupportsExternalImages() const override;
 
-        const VulkanDeviceInfo& GetDeviceInfo() const;
-        VkPhysicalDevice GetPhysicalDevice() const;
-        VulkanInstance* GetVulkanInstance() const;
+    const VulkanDeviceInfo& GetDeviceInfo() const;
+    VkPhysicalDevice GetPhysicalDevice() const;
+    VulkanInstance* GetVulkanInstance() const;
 
-        bool IsDepthStencilFormatSupported(VkFormat format);
+    bool IsDepthStencilFormatSupported(VkFormat format);
 
-      private:
-        MaybeError InitializeImpl() override;
-        MaybeError InitializeSupportedFeaturesImpl() override;
-        MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
+  private:
+    MaybeError InitializeImpl() override;
+    MaybeError InitializeSupportedFeaturesImpl() override;
+    MaybeError InitializeSupportedLimitsImpl(CombinedLimits* limits) override;
 
-        ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(
-            const DeviceDescriptor* descriptor) override;
+    ResultOrError<Ref<DeviceBase>> CreateDeviceImpl(const DeviceDescriptor* descriptor) override;
 
-        VkPhysicalDevice mPhysicalDevice;
-        Ref<VulkanInstance> mVulkanInstance;
-        VulkanDeviceInfo mDeviceInfo = {};
-    };
+    VkPhysicalDevice mPhysicalDevice;
+    Ref<VulkanInstance> mVulkanInstance;
+    VulkanDeviceInfo mDeviceInfo = {};
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/BackendVk.cpp b/src/dawn/native/vulkan/BackendVk.cpp
index 32bcd7e..1f34bcb 100644
--- a/src/dawn/native/vulkan/BackendVk.cpp
+++ b/src/dawn/native/vulkan/BackendVk.cpp
@@ -28,23 +28,23 @@
 
 // TODO(crbug.com/dawn/283): Link against the Vulkan Loader and remove this.
 #if defined(DAWN_ENABLE_SWIFTSHADER)
-#    if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_FUSCHIA)
+#if defined(DAWN_PLATFORM_LINUX) || defined(DAWN_PLATFORM_FUSCHIA)
 constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.so";
-#    elif defined(DAWN_PLATFORM_WINDOWS)
+#elif defined(DAWN_PLATFORM_WINDOWS)
 constexpr char kSwiftshaderLibName[] = "vk_swiftshader.dll";
-#    elif defined(DAWN_PLATFORM_MACOS)
+#elif defined(DAWN_PLATFORM_MACOS)
 constexpr char kSwiftshaderLibName[] = "libvk_swiftshader.dylib";
-#    else
-#        error "Unimplemented Swiftshader Vulkan backend platform"
-#    endif
+#else
+#error "Unimplemented Swiftshader Vulkan backend platform"
+#endif
 #endif
 
 #if defined(DAWN_PLATFORM_LINUX)
-#    if defined(DAWN_PLATFORM_ANDROID)
+#if defined(DAWN_PLATFORM_ANDROID)
 constexpr char kVulkanLibName[] = "libvulkan.so";
-#    else
+#else
 constexpr char kVulkanLibName[] = "libvulkan.so.1";
-#    endif
+#endif
 #elif defined(DAWN_PLATFORM_WINDOWS)
 constexpr char kVulkanLibName[] = "vulkan-1.dll";
 #elif defined(DAWN_PLATFORM_MACOS)
@@ -52,7 +52,7 @@
 #elif defined(DAWN_PLATFORM_FUCHSIA)
 constexpr char kVulkanLibName[] = "libvulkan.so";
 #else
-#    error "Unimplemented Vulkan backend platform"
+#error "Unimplemented Vulkan backend platform"
 #endif
 
 struct SkippedMessage {
@@ -84,364 +84,359 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
+namespace {
 
-        static constexpr ICD kICDs[] = {
-            ICD::None,
+static constexpr ICD kICDs[] = {
+    ICD::None,
 #if defined(DAWN_ENABLE_SWIFTSHADER)
-            ICD::SwiftShader,
+    ICD::SwiftShader,
 #endif  // defined(DAWN_ENABLE_SWIFTSHADER)
-        };
+};
 
-        // Suppress validation errors that are known. Returns false in that case.
-        bool ShouldReportDebugMessage(const char* messageId, const char* message) {
-            for (const SkippedMessage& msg : kSkippedMessages) {
-                if (strstr(messageId, msg.messageId) != nullptr &&
-                    strstr(message, msg.messageContents) != nullptr) {
-                    return false;
-                }
-            }
-            return true;
-        }
-
-        VKAPI_ATTR VkBool32 VKAPI_CALL
-        OnDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
-                             VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
-                             const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
-                             void* /* pUserData */) {
-            if (ShouldReportDebugMessage(pCallbackData->pMessageIdName, pCallbackData->pMessage)) {
-                dawn::WarningLog() << pCallbackData->pMessage;
-                ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
-            }
-            return VK_FALSE;
-        }
-
-        // A debug callback specifically for instance creation so that we don't fire an ASSERT when
-        // the instance fails creation in an expected manner (for example the system not having
-        // Vulkan drivers).
-        VKAPI_ATTR VkBool32 VKAPI_CALL OnInstanceCreationDebugUtilsCallback(
-            VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
-            VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
-            const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
-            void* /* pUserData */) {
-            dawn::WarningLog() << pCallbackData->pMessage;
-            return VK_FALSE;
-        }
-
-    }  // anonymous namespace
-
-    VulkanInstance::VulkanInstance() = default;
-
-    VulkanInstance::~VulkanInstance() {
-        if (mDebugUtilsMessenger != VK_NULL_HANDLE) {
-            mFunctions.DestroyDebugUtilsMessengerEXT(mInstance, mDebugUtilsMessenger, nullptr);
-            mDebugUtilsMessenger = VK_NULL_HANDLE;
-        }
-
-        // VkPhysicalDevices are destroyed when the VkInstance is destroyed
-        if (mInstance != VK_NULL_HANDLE) {
-            mFunctions.DestroyInstance(mInstance, nullptr);
-            mInstance = VK_NULL_HANDLE;
+// Suppress validation errors that are known. Returns false in that case.
+bool ShouldReportDebugMessage(const char* messageId, const char* message) {
+    for (const SkippedMessage& msg : kSkippedMessages) {
+        if (strstr(messageId, msg.messageId) != nullptr &&
+            strstr(message, msg.messageContents) != nullptr) {
+            return false;
         }
     }
+    return true;
+}
 
-    const VulkanFunctions& VulkanInstance::GetFunctions() const {
-        return mFunctions;
+VKAPI_ATTR VkBool32 VKAPI_CALL
+OnDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+                     VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+                     const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+                     void* /* pUserData */) {
+    if (ShouldReportDebugMessage(pCallbackData->pMessageIdName, pCallbackData->pMessage)) {
+        dawn::WarningLog() << pCallbackData->pMessage;
+        ASSERT((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) == 0);
+    }
+    return VK_FALSE;
+}
+
+// A debug callback specifically for instance creation so that we don't fire an ASSERT when
+// the instance fails creation in an expected manner (for example the system not having
+// Vulkan drivers).
+VKAPI_ATTR VkBool32 VKAPI_CALL
+OnInstanceCreationDebugUtilsCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+                                     VkDebugUtilsMessageTypeFlagsEXT /* messageTypes */,
+                                     const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+                                     void* /* pUserData */) {
+    dawn::WarningLog() << pCallbackData->pMessage;
+    return VK_FALSE;
+}
+
+}  // anonymous namespace
+
+VulkanInstance::VulkanInstance() = default;
+
+VulkanInstance::~VulkanInstance() {
+    if (mDebugUtilsMessenger != VK_NULL_HANDLE) {
+        mFunctions.DestroyDebugUtilsMessengerEXT(mInstance, mDebugUtilsMessenger, nullptr);
+        mDebugUtilsMessenger = VK_NULL_HANDLE;
     }
 
-    VkInstance VulkanInstance::GetVkInstance() const {
-        return mInstance;
+    // VkPhysicalDevices are destroyed when the VkInstance is destroyed
+    if (mInstance != VK_NULL_HANDLE) {
+        mFunctions.DestroyInstance(mInstance, nullptr);
+        mInstance = VK_NULL_HANDLE;
     }
+}
 
-    const VulkanGlobalInfo& VulkanInstance::GetGlobalInfo() const {
-        return mGlobalInfo;
-    }
+const VulkanFunctions& VulkanInstance::GetFunctions() const {
+    return mFunctions;
+}
 
-    const std::vector<VkPhysicalDevice>& VulkanInstance::GetPhysicalDevices() const {
-        return mPhysicalDevices;
-    }
+VkInstance VulkanInstance::GetVkInstance() const {
+    return mInstance;
+}
 
-    // static
-    ResultOrError<Ref<VulkanInstance>> VulkanInstance::Create(const InstanceBase* instance,
-                                                              ICD icd) {
-        Ref<VulkanInstance> vulkanInstance = AcquireRef(new VulkanInstance());
-        DAWN_TRY(vulkanInstance->Initialize(instance, icd));
-        return std::move(vulkanInstance);
-    }
+const VulkanGlobalInfo& VulkanInstance::GetGlobalInfo() const {
+    return mGlobalInfo;
+}
 
-    MaybeError VulkanInstance::Initialize(const InstanceBase* instance, ICD icd) {
-        // These environment variables need only be set while loading procs and gathering device
-        // info.
-        ScopedEnvironmentVar vkICDFilenames;
-        ScopedEnvironmentVar vkLayerPath;
+const std::vector<VkPhysicalDevice>& VulkanInstance::GetPhysicalDevices() const {
+    return mPhysicalDevices;
+}
 
-        const std::vector<std::string>& searchPaths = instance->GetRuntimeSearchPaths();
+// static
+ResultOrError<Ref<VulkanInstance>> VulkanInstance::Create(const InstanceBase* instance, ICD icd) {
+    Ref<VulkanInstance> vulkanInstance = AcquireRef(new VulkanInstance());
+    DAWN_TRY(vulkanInstance->Initialize(instance, icd));
+    return std::move(vulkanInstance);
+}
 
-        auto CommaSeparatedResolvedSearchPaths = [&](const char* name) {
-            std::string list;
-            bool first = true;
-            for (const std::string& path : searchPaths) {
-                if (!first) {
-                    list += ", ";
-                }
-                first = false;
-                list += (path + name);
+MaybeError VulkanInstance::Initialize(const InstanceBase* instance, ICD icd) {
+    // These environment variables need only be set while loading procs and gathering device
+    // info.
+    ScopedEnvironmentVar vkICDFilenames;
+    ScopedEnvironmentVar vkLayerPath;
+
+    const std::vector<std::string>& searchPaths = instance->GetRuntimeSearchPaths();
+
+    auto CommaSeparatedResolvedSearchPaths = [&](const char* name) {
+        std::string list;
+        bool first = true;
+        for (const std::string& path : searchPaths) {
+            if (!first) {
+                list += ", ";
             }
-            return list;
-        };
+            first = false;
+            list += (path + name);
+        }
+        return list;
+    };
 
-        auto LoadVulkan = [&](const char* libName) -> MaybeError {
-            for (const std::string& path : searchPaths) {
-                std::string resolvedPath = path + libName;
-                if (mVulkanLib.Open(resolvedPath)) {
-                    return {};
-                }
+    auto LoadVulkan = [&](const char* libName) -> MaybeError {
+        for (const std::string& path : searchPaths) {
+            std::string resolvedPath = path + libName;
+            if (mVulkanLib.Open(resolvedPath)) {
+                return {};
             }
-            return DAWN_FORMAT_INTERNAL_ERROR("Couldn't load Vulkan. Searched %s.",
-                                              CommaSeparatedResolvedSearchPaths(libName));
-        };
+        }
+        return DAWN_FORMAT_INTERNAL_ERROR("Couldn't load Vulkan. Searched %s.",
+                                          CommaSeparatedResolvedSearchPaths(libName));
+    };
 
-        switch (icd) {
-            case ICD::None: {
-                DAWN_TRY(LoadVulkan(kVulkanLibName));
-                // Succesfully loaded driver; break.
-                break;
-            }
-            case ICD::SwiftShader: {
+    switch (icd) {
+        case ICD::None: {
+            DAWN_TRY(LoadVulkan(kVulkanLibName));
+            // Successfully loaded driver; break.
+            break;
+        }
+        case ICD::SwiftShader: {
 #if defined(DAWN_ENABLE_SWIFTSHADER)
-                DAWN_TRY(LoadVulkan(kSwiftshaderLibName));
-                break;
+            DAWN_TRY(LoadVulkan(kSwiftshaderLibName));
+            break;
 #endif  // defined(DAWN_ENABLE_SWIFTSHADER)
         // ICD::SwiftShader should not be passed if SwiftShader is not enabled.
-                UNREACHABLE();
-            }
+            UNREACHABLE();
         }
+    }
 
-        if (instance->IsBackendValidationEnabled()) {
+    if (instance->IsBackendValidationEnabled()) {
 #if defined(DAWN_ENABLE_VULKAN_VALIDATION_LAYERS)
-            auto execDir = GetExecutableDirectory();
-            std::string vkDataDir = execDir.value_or("") + DAWN_VK_DATA_DIR;
-            if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
-                return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
-            }
+        auto execDir = GetExecutableDirectory();
+        std::string vkDataDir = execDir.value_or("") + DAWN_VK_DATA_DIR;
+        if (!vkLayerPath.Set("VK_LAYER_PATH", vkDataDir.c_str())) {
+            return DAWN_INTERNAL_ERROR("Couldn't set VK_LAYER_PATH");
+        }
 #else
-            dawn::WarningLog() << "Backend validation enabled but Dawn was not built with "
-                                  "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS.";
+        dawn::WarningLog() << "Backend validation enabled but Dawn was not built with "
+                              "DAWN_ENABLE_VULKAN_VALIDATION_LAYERS.";
 #endif
+    }
+
+    DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
+
+    DAWN_TRY_ASSIGN(mGlobalInfo, GatherGlobalInfo(mFunctions));
+
+    VulkanGlobalKnobs usedGlobalKnobs = {};
+    DAWN_TRY_ASSIGN(usedGlobalKnobs, CreateVkInstance(instance));
+    *static_cast<VulkanGlobalKnobs*>(&mGlobalInfo) = usedGlobalKnobs;
+
+    DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
+
+    if (usedGlobalKnobs.HasExt(InstanceExt::DebugUtils)) {
+        DAWN_TRY(RegisterDebugUtils());
+    }
+
+    DAWN_TRY_ASSIGN(mPhysicalDevices, GatherPhysicalDevices(mInstance, mFunctions));
+
+    return {};
+}
+
+ResultOrError<VulkanGlobalKnobs> VulkanInstance::CreateVkInstance(const InstanceBase* instance) {
+    VulkanGlobalKnobs usedKnobs = {};
+    std::vector<const char*> layerNames;
+    InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
+
+    auto UseLayerIfAvailable = [&](VulkanLayer layer) {
+        if (mGlobalInfo.layers[layer]) {
+            layerNames.push_back(GetVulkanLayerInfo(layer).name);
+            usedKnobs.layers.set(layer, true);
+            extensionsToRequest |= mGlobalInfo.layerExtensions[layer];
         }
+    };
 
-        DAWN_TRY(mFunctions.LoadGlobalProcs(mVulkanLib));
+    // vktrace works by inserting a layer, but we hide it behind a macro because the vktrace
+    // layer crashes when used without vktrace server started. See this vktrace issue:
+    // https://github.com/LunarG/VulkanTools/issues/254
+    // Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
+    // by other layers.
+#if defined(DAWN_USE_VKTRACE)
+    UseLayerIfAvailable(VulkanLayer::LunargVkTrace);
+#endif
+    // RenderDoc installs a layer at the system level for its capture but we don't want to use
+    // it unless we are debugging in RenderDoc so we hide it behind a macro.
+#if defined(DAWN_USE_RENDERDOC)
+    UseLayerIfAvailable(VulkanLayer::RenderDocCapture);
+#endif
 
-        DAWN_TRY_ASSIGN(mGlobalInfo, GatherGlobalInfo(mFunctions));
+    if (instance->IsBackendValidationEnabled()) {
+        UseLayerIfAvailable(VulkanLayer::Validation);
+    }
 
-        VulkanGlobalKnobs usedGlobalKnobs = {};
-        DAWN_TRY_ASSIGN(usedGlobalKnobs, CreateVkInstance(instance));
-        *static_cast<VulkanGlobalKnobs*>(&mGlobalInfo) = usedGlobalKnobs;
+    // Always use the Fuchsia swapchain layer if available.
+    UseLayerIfAvailable(VulkanLayer::FuchsiaImagePipeSwapchain);
 
-        DAWN_TRY(mFunctions.LoadInstanceProcs(mInstance, mGlobalInfo));
+    // Available and known instance extensions default to being requested, but some special
+    // cases are removed.
+    usedKnobs.extensions = extensionsToRequest;
 
-        if (usedGlobalKnobs.HasExt(InstanceExt::DebugUtils)) {
-            DAWN_TRY(RegisterDebugUtils());
+    std::vector<const char*> extensionNames;
+    for (InstanceExt ext : IterateBitSet(extensionsToRequest)) {
+        const InstanceExtInfo& info = GetInstanceExtInfo(ext);
+
+        if (info.versionPromoted > mGlobalInfo.apiVersion) {
+            extensionNames.push_back(info.name);
         }
+    }
 
-        DAWN_TRY_ASSIGN(mPhysicalDevices, GatherPhysicalDevices(mInstance, mFunctions));
+    VkApplicationInfo appInfo;
+    appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+    appInfo.pNext = nullptr;
+    appInfo.pApplicationName = nullptr;
+    appInfo.applicationVersion = 0;
+    appInfo.pEngineName = nullptr;
+    appInfo.engineVersion = 0;
+    // Vulkan 1.0 implementations were required to return VK_ERROR_INCOMPATIBLE_DRIVER if
+    // apiVersion was larger than 1.0. Meanwhile, as long as the instance supports at least
+    // Vulkan 1.1, an application can use different versions of Vulkan with an instance than
+    // it does with a device or physical device. So we should set apiVersion to Vulkan 1.0
+    // if the instance only supports Vulkan 1.0. Otherwise we set apiVersion to Vulkan 1.2,
+    // treat 1.2 as the highest API version dawn targets.
+    if (mGlobalInfo.apiVersion == VK_MAKE_VERSION(1, 0, 0)) {
+        appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+    } else {
+        appInfo.apiVersion = VK_MAKE_VERSION(1, 2, 0);
+    }
 
+    VkInstanceCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.pApplicationInfo = &appInfo;
+    createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
+    createInfo.ppEnabledLayerNames = layerNames.data();
+    createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+    createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+    PNextChainBuilder createInfoChain(&createInfo);
+
+    // Register the debug callback for instance creation so we receive message for any errors
+    // (validation or other).
+    VkDebugUtilsMessengerCreateInfoEXT utilsMessengerCreateInfo;
+    if (usedKnobs.HasExt(InstanceExt::DebugUtils)) {
+        utilsMessengerCreateInfo.flags = 0;
+        utilsMessengerCreateInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+                                                   VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+        utilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+                                               VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+        utilsMessengerCreateInfo.pfnUserCallback = OnInstanceCreationDebugUtilsCallback;
+        utilsMessengerCreateInfo.pUserData = nullptr;
+
+        createInfoChain.Add(&utilsMessengerCreateInfo,
+                            VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
+    }
+
+    // Try to turn on synchronization validation if the instance was created with backend
+    // validation enabled.
+    VkValidationFeaturesEXT validationFeatures;
+    VkValidationFeatureEnableEXT kEnableSynchronizationValidation =
+        VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT;
+    if (instance->IsBackendValidationEnabled() &&
+        usedKnobs.HasExt(InstanceExt::ValidationFeatures)) {
+        validationFeatures.enabledValidationFeatureCount = 1;
+        validationFeatures.pEnabledValidationFeatures = &kEnableSynchronizationValidation;
+        validationFeatures.disabledValidationFeatureCount = 0;
+        validationFeatures.pDisabledValidationFeatures = nullptr;
+
+        createInfoChain.Add(&validationFeatures, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT);
+    }
+
+    DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
+                            "vkCreateInstance"));
+
+    return usedKnobs;
+}
+
+MaybeError VulkanInstance::RegisterDebugUtils() {
+    VkDebugUtilsMessengerCreateInfoEXT createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+                                 VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+    createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+                             VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+    createInfo.pfnUserCallback = OnDebugUtilsCallback;
+    createInfo.pUserData = nullptr;
+
+    return CheckVkSuccess(mFunctions.CreateDebugUtilsMessengerEXT(mInstance, &createInfo, nullptr,
+                                                                  &*mDebugUtilsMessenger),
+                          "vkCreateDebugUtilsMessengerEXT");
+}
+
+Backend::Backend(InstanceBase* instance) : BackendConnection(instance, wgpu::BackendType::Vulkan) {}
+
+Backend::~Backend() = default;
+
+std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
+    AdapterDiscoveryOptions options;
+    auto result = DiscoverAdapters(&options);
+    if (result.IsError()) {
+        GetInstance()->ConsumedError(result.AcquireError());
         return {};
     }
+    return result.AcquireSuccess();
+}
 
-    ResultOrError<VulkanGlobalKnobs> VulkanInstance::CreateVkInstance(
-        const InstanceBase* instance) {
-        VulkanGlobalKnobs usedKnobs = {};
-        std::vector<const char*> layerNames;
-        InstanceExtSet extensionsToRequest = mGlobalInfo.extensions;
+ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
+    const AdapterDiscoveryOptionsBase* optionsBase) {
+    ASSERT(optionsBase->backendType == WGPUBackendType_Vulkan);
 
-        auto UseLayerIfAvailable = [&](VulkanLayer layer) {
-            if (mGlobalInfo.layers[layer]) {
-                layerNames.push_back(GetVulkanLayerInfo(layer).name);
-                usedKnobs.layers.set(layer, true);
-                extensionsToRequest |= mGlobalInfo.layerExtensions[layer];
-            }
-        };
+    const AdapterDiscoveryOptions* options =
+        static_cast<const AdapterDiscoveryOptions*>(optionsBase);
 
-        // vktrace works by instering a layer, but we hide it behind a macro because the vktrace
-        // layer crashes when used without vktrace server started. See this vktrace issue:
-        // https://github.com/LunarG/VulkanTools/issues/254
-        // Also it is good to put it in first position so that it doesn't see Vulkan calls inserted
-        // by other layers.
-#if defined(DAWN_USE_VKTRACE)
-        UseLayerIfAvailable(VulkanLayer::LunargVkTrace);
-#endif
-        // RenderDoc installs a layer at the system level for its capture but we don't want to use
-        // it unless we are debugging in RenderDoc so we hide it behind a macro.
-#if defined(DAWN_USE_RENDERDOC)
-        UseLayerIfAvailable(VulkanLayer::RenderDocCapture);
-#endif
+    std::vector<Ref<AdapterBase>> adapters;
 
-        if (instance->IsBackendValidationEnabled()) {
-            UseLayerIfAvailable(VulkanLayer::Validation);
-        }
-
-        // Always use the Fuchsia swapchain layer if available.
-        UseLayerIfAvailable(VulkanLayer::FuchsiaImagePipeSwapchain);
-
-        // Available and known instance extensions default to being requested, but some special
-        // cases are removed.
-        usedKnobs.extensions = extensionsToRequest;
-
-        std::vector<const char*> extensionNames;
-        for (InstanceExt ext : IterateBitSet(extensionsToRequest)) {
-            const InstanceExtInfo& info = GetInstanceExtInfo(ext);
-
-            if (info.versionPromoted > mGlobalInfo.apiVersion) {
-                extensionNames.push_back(info.name);
-            }
-        }
-
-        VkApplicationInfo appInfo;
-        appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
-        appInfo.pNext = nullptr;
-        appInfo.pApplicationName = nullptr;
-        appInfo.applicationVersion = 0;
-        appInfo.pEngineName = nullptr;
-        appInfo.engineVersion = 0;
-        // Vulkan 1.0 implementations were required to return VK_ERROR_INCOMPATIBLE_DRIVER if
-        // apiVersion was larger than 1.0. Meanwhile, as long as the instance supports at least
-        // Vulkan 1.1, an application can use different versions of Vulkan with an instance than
-        // it does with a device or physical device. So we should set apiVersion to Vulkan 1.0
-        // if the instance only supports Vulkan 1.0. Otherwise we set apiVersion to Vulkan 1.2,
-        // treat 1.2 as the highest API version dawn targets.
-        if (mGlobalInfo.apiVersion == VK_MAKE_VERSION(1, 0, 0)) {
-            appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0);
-        } else {
-            appInfo.apiVersion = VK_MAKE_VERSION(1, 2, 0);
-        }
-
-        VkInstanceCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.pApplicationInfo = &appInfo;
-        createInfo.enabledLayerCount = static_cast<uint32_t>(layerNames.size());
-        createInfo.ppEnabledLayerNames = layerNames.data();
-        createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
-        createInfo.ppEnabledExtensionNames = extensionNames.data();
-
-        PNextChainBuilder createInfoChain(&createInfo);
-
-        // Register the debug callback for instance creation so we receive message for any errors
-        // (validation or other).
-        VkDebugUtilsMessengerCreateInfoEXT utilsMessengerCreateInfo;
-        if (usedKnobs.HasExt(InstanceExt::DebugUtils)) {
-            utilsMessengerCreateInfo.flags = 0;
-            utilsMessengerCreateInfo.messageSeverity =
-                VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
-                VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
-            utilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
-                                                   VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
-            utilsMessengerCreateInfo.pfnUserCallback = OnInstanceCreationDebugUtilsCallback;
-            utilsMessengerCreateInfo.pUserData = nullptr;
-
-            createInfoChain.Add(&utilsMessengerCreateInfo,
-                                VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT);
-        }
-
-        // Try to turn on synchronization validation if the instance was created with backend
-        // validation enabled.
-        VkValidationFeaturesEXT validationFeatures;
-        VkValidationFeatureEnableEXT kEnableSynchronizationValidation =
-            VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT;
-        if (instance->IsBackendValidationEnabled() &&
-            usedKnobs.HasExt(InstanceExt::ValidationFeatures)) {
-            validationFeatures.enabledValidationFeatureCount = 1;
-            validationFeatures.pEnabledValidationFeatures = &kEnableSynchronizationValidation;
-            validationFeatures.disabledValidationFeatureCount = 0;
-            validationFeatures.pDisabledValidationFeatures = nullptr;
-
-            createInfoChain.Add(&validationFeatures, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT);
-        }
-
-        DAWN_TRY(CheckVkSuccess(mFunctions.CreateInstance(&createInfo, nullptr, &mInstance),
-                                "vkCreateInstance"));
-
-        return usedKnobs;
-    }
-
-    MaybeError VulkanInstance::RegisterDebugUtils() {
-        VkDebugUtilsMessengerCreateInfoEXT createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
-                                     VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
-        createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
-                                 VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
-        createInfo.pfnUserCallback = OnDebugUtilsCallback;
-        createInfo.pUserData = nullptr;
-
-        return CheckVkSuccess(mFunctions.CreateDebugUtilsMessengerEXT(
-                                  mInstance, &createInfo, nullptr, &*mDebugUtilsMessenger),
-                              "vkCreateDebugUtilsMessengerEXT");
-    }
-
-    Backend::Backend(InstanceBase* instance)
-        : BackendConnection(instance, wgpu::BackendType::Vulkan) {
-    }
-
-    Backend::~Backend() = default;
-
-    std::vector<Ref<AdapterBase>> Backend::DiscoverDefaultAdapters() {
-        AdapterDiscoveryOptions options;
-        auto result = DiscoverAdapters(&options);
-        if (result.IsError()) {
-            GetInstance()->ConsumedError(result.AcquireError());
-            return {};
-        }
-        return result.AcquireSuccess();
-    }
-
-    ResultOrError<std::vector<Ref<AdapterBase>>> Backend::DiscoverAdapters(
-        const AdapterDiscoveryOptionsBase* optionsBase) {
-        ASSERT(optionsBase->backendType == WGPUBackendType_Vulkan);
-
-        const AdapterDiscoveryOptions* options =
-            static_cast<const AdapterDiscoveryOptions*>(optionsBase);
-
-        std::vector<Ref<AdapterBase>> adapters;
-
-        InstanceBase* instance = GetInstance();
-        for (ICD icd : kICDs) {
+    InstanceBase* instance = GetInstance();
+    for (ICD icd : kICDs) {
 #if defined(DAWN_PLATFORM_MACOS)
-            // On Mac, we don't expect non-Swiftshader Vulkan to be available.
-            if (icd == ICD::None) {
-                continue;
-            }
-#endif  // defined(DAWN_PLATFORM_MACOS)
-            if (options->forceSwiftShader && icd != ICD::SwiftShader) {
-                continue;
-            }
-            if (mVulkanInstances[icd] == nullptr && instance->ConsumedError([&]() -> MaybeError {
-                    DAWN_TRY_ASSIGN(mVulkanInstances[icd], VulkanInstance::Create(instance, icd));
-                    return {};
-                }())) {
-                // Instance failed to initialize.
-                continue;
-            }
-            const std::vector<VkPhysicalDevice>& physicalDevices =
-                mVulkanInstances[icd]->GetPhysicalDevices();
-            for (uint32_t i = 0; i < physicalDevices.size(); ++i) {
-                Ref<Adapter> adapter = AcquireRef(
-                    new Adapter(instance, mVulkanInstances[icd].Get(), physicalDevices[i]));
-                if (instance->ConsumedError(adapter->Initialize())) {
-                    continue;
-                }
-                adapters.push_back(std::move(adapter));
-            }
+        // On Mac, we don't expect non-Swiftshader Vulkan to be available.
+        if (icd == ICD::None) {
+            continue;
         }
-        return adapters;
+#endif  // defined(DAWN_PLATFORM_MACOS)
+        if (options->forceSwiftShader && icd != ICD::SwiftShader) {
+            continue;
+        }
+        if (mVulkanInstances[icd] == nullptr && instance->ConsumedError([&]() -> MaybeError {
+                DAWN_TRY_ASSIGN(mVulkanInstances[icd], VulkanInstance::Create(instance, icd));
+                return {};
+            }())) {
+            // Instance failed to initialize.
+            continue;
+        }
+        const std::vector<VkPhysicalDevice>& physicalDevices =
+            mVulkanInstances[icd]->GetPhysicalDevices();
+        for (uint32_t i = 0; i < physicalDevices.size(); ++i) {
+            Ref<Adapter> adapter =
+                AcquireRef(new Adapter(instance, mVulkanInstances[icd].Get(), physicalDevices[i]));
+            if (instance->ConsumedError(adapter->Initialize())) {
+                continue;
+            }
+            adapters.push_back(std::move(adapter));
+        }
     }
+    return adapters;
+}
 
-    BackendConnection* Connect(InstanceBase* instance) {
-        return new Backend(instance);
-    }
+BackendConnection* Connect(InstanceBase* instance) {
+    return new Backend(instance);
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BackendVk.h b/src/dawn/native/vulkan/BackendVk.h
index 85520d9..8567d3a 100644
--- a/src/dawn/native/vulkan/BackendVk.h
+++ b/src/dawn/native/vulkan/BackendVk.h
@@ -27,61 +27,61 @@
 
 namespace dawn::native::vulkan {
 
-    enum class ICD {
-        None,
-        SwiftShader,
-    };
+enum class ICD {
+    None,
+    SwiftShader,
+};
 
-    // VulkanInstance holds the reference to the Vulkan library, the VkInstance, VkPhysicalDevices
-    // on that instance, Vulkan functions loaded from the library, and global information
-    // gathered from the instance. VkPhysicalDevices bound to the VkInstance are bound to the GPU
-    // and GPU driver, keeping them active. It is RefCounted so that (eventually) when all adapters
-    // on an instance are no longer in use, the instance is deleted. This can be particuarly useful
-    // when we create multiple instances to selectively discover ICDs (like only
-    // SwiftShader/iGPU/dGPU/eGPU), and only one physical device on one instance remains in use. We
-    // can delete the VkInstances that are not in use to avoid holding the discrete GPU active.
-    class VulkanInstance : public RefCounted {
-      public:
-        static ResultOrError<Ref<VulkanInstance>> Create(const InstanceBase* instance, ICD icd);
-        ~VulkanInstance();
+// VulkanInstance holds the reference to the Vulkan library, the VkInstance, VkPhysicalDevices
+// on that instance, Vulkan functions loaded from the library, and global information
+// gathered from the instance. VkPhysicalDevices bound to the VkInstance are bound to the GPU
+// and GPU driver, keeping them active. It is RefCounted so that (eventually) when all adapters
+// on an instance are no longer in use, the instance is deleted. This can be particularly useful
+// when we create multiple instances to selectively discover ICDs (like only
+// SwiftShader/iGPU/dGPU/eGPU), and only one physical device on one instance remains in use. We
+// can delete the VkInstances that are not in use to avoid holding the discrete GPU active.
+class VulkanInstance : public RefCounted {
+  public:
+    static ResultOrError<Ref<VulkanInstance>> Create(const InstanceBase* instance, ICD icd);
+    ~VulkanInstance();
 
-        const VulkanFunctions& GetFunctions() const;
-        VkInstance GetVkInstance() const;
-        const VulkanGlobalInfo& GetGlobalInfo() const;
-        const std::vector<VkPhysicalDevice>& GetPhysicalDevices() const;
+    const VulkanFunctions& GetFunctions() const;
+    VkInstance GetVkInstance() const;
+    const VulkanGlobalInfo& GetGlobalInfo() const;
+    const std::vector<VkPhysicalDevice>& GetPhysicalDevices() const;
 
-      private:
-        VulkanInstance();
+  private:
+    VulkanInstance();
 
-        MaybeError Initialize(const InstanceBase* instance, ICD icd);
-        ResultOrError<VulkanGlobalKnobs> CreateVkInstance(const InstanceBase* instance);
+    MaybeError Initialize(const InstanceBase* instance, ICD icd);
+    ResultOrError<VulkanGlobalKnobs> CreateVkInstance(const InstanceBase* instance);
 
-        MaybeError RegisterDebugUtils();
+    MaybeError RegisterDebugUtils();
 
-        DynamicLib mVulkanLib;
-        VulkanGlobalInfo mGlobalInfo = {};
-        VkInstance mInstance = VK_NULL_HANDLE;
-        VulkanFunctions mFunctions;
+    DynamicLib mVulkanLib;
+    VulkanGlobalInfo mGlobalInfo = {};
+    VkInstance mInstance = VK_NULL_HANDLE;
+    VulkanFunctions mFunctions;
 
-        VkDebugUtilsMessengerEXT mDebugUtilsMessenger = VK_NULL_HANDLE;
+    VkDebugUtilsMessengerEXT mDebugUtilsMessenger = VK_NULL_HANDLE;
 
-        std::vector<VkPhysicalDevice> mPhysicalDevices;
-    };
+    std::vector<VkPhysicalDevice> mPhysicalDevices;
+};
 
-    class Backend : public BackendConnection {
-      public:
-        explicit Backend(InstanceBase* instance);
-        ~Backend() override;
+class Backend : public BackendConnection {
+  public:
+    explicit Backend(InstanceBase* instance);
+    ~Backend() override;
 
-        MaybeError Initialize();
+    MaybeError Initialize();
 
-        std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
-        ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
-            const AdapterDiscoveryOptionsBase* optionsBase) override;
+    std::vector<Ref<AdapterBase>> DiscoverDefaultAdapters() override;
+    ResultOrError<std::vector<Ref<AdapterBase>>> DiscoverAdapters(
+        const AdapterDiscoveryOptionsBase* optionsBase) override;
 
-      private:
-        ityp::array<ICD, Ref<VulkanInstance>, 2> mVulkanInstances = {};
-    };
+  private:
+    ityp::array<ICD, Ref<VulkanInstance>, 2> mVulkanInstances = {};
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/BindGroupLayoutVk.cpp b/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
index 04eeff6..d55062f 100644
--- a/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
+++ b/src/dawn/native/vulkan/BindGroupLayoutVk.cpp
@@ -28,171 +28,169 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
+namespace {
 
-        VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
-            VkShaderStageFlags flags = 0;
+VkShaderStageFlags VulkanShaderStageFlags(wgpu::ShaderStage stages) {
+    VkShaderStageFlags flags = 0;
 
-            if (stages & wgpu::ShaderStage::Vertex) {
-                flags |= VK_SHADER_STAGE_VERTEX_BIT;
+    if (stages & wgpu::ShaderStage::Vertex) {
+        flags |= VK_SHADER_STAGE_VERTEX_BIT;
+    }
+    if (stages & wgpu::ShaderStage::Fragment) {
+        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+    }
+    if (stages & wgpu::ShaderStage::Compute) {
+        flags |= VK_SHADER_STAGE_COMPUTE_BIT;
+    }
+
+    return flags;
+}
+
+}  // anonymous namespace
+
+VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo) {
+    switch (bindingInfo.bindingType) {
+        case BindingInfoType::Buffer:
+            switch (bindingInfo.buffer.type) {
+                case wgpu::BufferBindingType::Uniform:
+                    if (bindingInfo.buffer.hasDynamicOffset) {
+                        return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+                    }
+                    return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+                case wgpu::BufferBindingType::Storage:
+                case kInternalStorageBufferBinding:
+                case wgpu::BufferBindingType::ReadOnlyStorage:
+                    if (bindingInfo.buffer.hasDynamicOffset) {
+                        return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
+                    }
+                    return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+                case wgpu::BufferBindingType::Undefined:
+                    UNREACHABLE();
             }
-            if (stages & wgpu::ShaderStage::Fragment) {
-                flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
-            }
-            if (stages & wgpu::ShaderStage::Compute) {
-                flags |= VK_SHADER_STAGE_COMPUTE_BIT;
-            }
+        case BindingInfoType::Sampler:
+            return VK_DESCRIPTOR_TYPE_SAMPLER;
+        case BindingInfoType::Texture:
+        case BindingInfoType::ExternalTexture:
+            return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+        case BindingInfoType::StorageTexture:
+            return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
+    }
+    UNREACHABLE();
+}
 
-            return flags;
-        }
+// static
+ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
+    Device* device,
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    Ref<BindGroupLayout> bgl =
+        AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
+    DAWN_TRY(bgl->Initialize());
+    return bgl;
+}
 
-    }  // anonymous namespace
+MaybeError BindGroupLayout::Initialize() {
+    // Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
+    // one entry per binding set. This might be optimized by computing continuous ranges of
+    // bindings of the same type.
+    ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
+    bindings.reserve(GetBindingCount());
 
-    VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo) {
-        switch (bindingInfo.bindingType) {
-            case BindingInfoType::Buffer:
-                switch (bindingInfo.buffer.type) {
-                    case wgpu::BufferBindingType::Uniform:
-                        if (bindingInfo.buffer.hasDynamicOffset) {
-                            return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
-                        }
-                        return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-                    case wgpu::BufferBindingType::Storage:
-                    case kInternalStorageBufferBinding:
-                    case wgpu::BufferBindingType::ReadOnlyStorage:
-                        if (bindingInfo.buffer.hasDynamicOffset) {
-                            return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
-                        }
-                        return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-                    case wgpu::BufferBindingType::Undefined:
-                        UNREACHABLE();
-                }
-            case BindingInfoType::Sampler:
-                return VK_DESCRIPTOR_TYPE_SAMPLER;
-            case BindingInfoType::Texture:
-            case BindingInfoType::ExternalTexture:
-                return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
-            case BindingInfoType::StorageTexture:
-                return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
-        }
-        UNREACHABLE();
+    for (const auto& [_, bindingIndex] : GetBindingMap()) {
+        const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+
+        VkDescriptorSetLayoutBinding vkBinding;
+        vkBinding.binding = static_cast<uint32_t>(bindingIndex);
+        vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
+        vkBinding.descriptorCount = 1;
+        vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
+        vkBinding.pImmutableSamplers = nullptr;
+
+        bindings.emplace_back(vkBinding);
     }
 
-    // static
-    ResultOrError<Ref<BindGroupLayout>> BindGroupLayout::Create(
-        Device* device,
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        Ref<BindGroupLayout> bgl =
-            AcquireRef(new BindGroupLayout(device, descriptor, pipelineCompatibilityToken));
-        DAWN_TRY(bgl->Initialize());
-        return bgl;
+    VkDescriptorSetLayoutCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
+    createInfo.pBindings = bindings.data();
+
+    // Record cache key information now since the createInfo is not stored.
+    GetCacheKey()->Record(createInfo);
+
+    Device* device = ToBackend(GetDevice());
+    DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(device->GetVkDevice(), &createInfo,
+                                                                 nullptr, &*mHandle),
+                            "CreateDescriptorSetLayout"));
+
+    // Compute the size of descriptor pools used for this layout.
+    std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
+
+    for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
+        VkDescriptorType vulkanType = VulkanDescriptorType(GetBindingInfo(bindingIndex));
+
+        // map::operator[] will return 0 if the key doesn't exist.
+        descriptorCountPerType[vulkanType]++;
     }
 
-    MaybeError BindGroupLayout::Initialize() {
-        // Compute the bindings that will be chained in the DescriptorSetLayout create info. We add
-        // one entry per binding set. This might be optimized by computing continuous ranges of
-        // bindings of the same type.
-        ityp::vector<BindingIndex, VkDescriptorSetLayoutBinding> bindings;
-        bindings.reserve(GetBindingCount());
+    // TODO(enga): Consider deduping allocators for layouts with the same descriptor type
+    // counts.
+    mDescriptorSetAllocator =
+        DescriptorSetAllocator::Create(this, std::move(descriptorCountPerType));
 
-        for (const auto& [_, bindingIndex] : GetBindingMap()) {
-            const BindingInfo& bindingInfo = GetBindingInfo(bindingIndex);
+    SetLabelImpl();
 
-            VkDescriptorSetLayoutBinding vkBinding;
-            vkBinding.binding = static_cast<uint32_t>(bindingIndex);
-            vkBinding.descriptorType = VulkanDescriptorType(bindingInfo);
-            vkBinding.descriptorCount = 1;
-            vkBinding.stageFlags = VulkanShaderStageFlags(bindingInfo.visibility);
-            vkBinding.pImmutableSamplers = nullptr;
+    return {};
+}
 
-            bindings.emplace_back(vkBinding);
-        }
+BindGroupLayout::BindGroupLayout(DeviceBase* device,
+                                 const BindGroupLayoutDescriptor* descriptor,
+                                 PipelineCompatibilityToken pipelineCompatibilityToken)
+    : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
+      mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {}
 
-        VkDescriptorSetLayoutCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.bindingCount = static_cast<uint32_t>(bindings.size());
-        createInfo.pBindings = bindings.data();
+BindGroupLayout::~BindGroupLayout() = default;
 
-        // Record cache key information now since the createInfo is not stored.
-        GetCacheKey()->Record(createInfo);
+void BindGroupLayout::DestroyImpl() {
+    BindGroupLayoutBase::DestroyImpl();
 
-        Device* device = ToBackend(GetDevice());
-        DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorSetLayout(
-                                    device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
-                                "CreateDescriptorSetLayout"));
+    Device* device = ToBackend(GetDevice());
 
-        // Compute the size of descriptor pools used for this layout.
-        std::map<VkDescriptorType, uint32_t> descriptorCountPerType;
-
-        for (BindingIndex bindingIndex{0}; bindingIndex < GetBindingCount(); ++bindingIndex) {
-            VkDescriptorType vulkanType = VulkanDescriptorType(GetBindingInfo(bindingIndex));
-
-            // map::operator[] will return 0 if the key doesn't exist.
-            descriptorCountPerType[vulkanType]++;
-        }
-
-        // TODO(enga): Consider deduping allocators for layouts with the same descriptor type
-        // counts.
-        mDescriptorSetAllocator =
-            DescriptorSetAllocator::Create(this, std::move(descriptorCountPerType));
-
-        SetLabelImpl();
-
-        return {};
+    // DescriptorSetLayout aren't used by execution on the GPU and can be deleted at any time,
+    // so we can destroy mHandle immediately instead of using the FencedDeleter.
+    // (Swiftshader implements this wrong b/154522740).
+    // In practice, the GPU is done with all descriptor sets because bind group deallocation
+    // refs the bind group layout so that once the bind group is finished being used, we can
+    // recycle its descriptor set.
+    if (mHandle != VK_NULL_HANDLE) {
+        device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
+        mHandle = VK_NULL_HANDLE;
     }
+    mDescriptorSetAllocator = nullptr;
+}
 
-    BindGroupLayout::BindGroupLayout(DeviceBase* device,
-                                     const BindGroupLayoutDescriptor* descriptor,
-                                     PipelineCompatibilityToken pipelineCompatibilityToken)
-        : BindGroupLayoutBase(device, descriptor, pipelineCompatibilityToken),
-          mBindGroupAllocator(MakeFrontendBindGroupAllocator<BindGroup>(4096)) {
-    }
+VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
+    return mHandle;
+}
 
-    BindGroupLayout::~BindGroupLayout() = default;
+ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
+    Device* device,
+    const BindGroupDescriptor* descriptor) {
+    DescriptorSetAllocation descriptorSetAllocation;
+    DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
 
-    void BindGroupLayout::DestroyImpl() {
-        BindGroupLayoutBase::DestroyImpl();
+    return AcquireRef(mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
+}
 
-        Device* device = ToBackend(GetDevice());
+void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
+                                          DescriptorSetAllocation* descriptorSetAllocation) {
+    mDescriptorSetAllocator->Deallocate(descriptorSetAllocation);
+    mBindGroupAllocator.Deallocate(bindGroup);
+}
 
-        // DescriptorSetLayout aren't used by execution on the GPU and can be deleted at any time,
-        // so we can destroy mHandle immediately instead of using the FencedDeleter.
-        // (Swiftshader implements this wrong b/154522740).
-        // In practice, the GPU is done with all descriptor sets because bind group deallocation
-        // refs the bind group layout so that once the bind group is finished being used, we can
-        // recycle its descriptor set.
-        if (mHandle != VK_NULL_HANDLE) {
-            device->fn.DestroyDescriptorSetLayout(device->GetVkDevice(), mHandle, nullptr);
-            mHandle = VK_NULL_HANDLE;
-        }
-        mDescriptorSetAllocator = nullptr;
-    }
-
-    VkDescriptorSetLayout BindGroupLayout::GetHandle() const {
-        return mHandle;
-    }
-
-    ResultOrError<Ref<BindGroup>> BindGroupLayout::AllocateBindGroup(
-        Device* device,
-        const BindGroupDescriptor* descriptor) {
-        DescriptorSetAllocation descriptorSetAllocation;
-        DAWN_TRY_ASSIGN(descriptorSetAllocation, mDescriptorSetAllocator->Allocate());
-
-        return AcquireRef(
-            mBindGroupAllocator.Allocate(device, descriptor, descriptorSetAllocation));
-    }
-
-    void BindGroupLayout::DeallocateBindGroup(BindGroup* bindGroup,
-                                              DescriptorSetAllocation* descriptorSetAllocation) {
-        mDescriptorSetAllocator->Deallocate(descriptorSetAllocation);
-        mBindGroupAllocator.Deallocate(bindGroup);
-    }
-
-    void BindGroupLayout::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_BindGroupLayout", GetLabel());
-    }
+void BindGroupLayout::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_BindGroupLayout", GetLabel());
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BindGroupLayoutVk.h b/src/dawn/native/vulkan/BindGroupLayoutVk.h
index 096a227..924e121 100644
--- a/src/dawn/native/vulkan/BindGroupLayoutVk.h
+++ b/src/dawn/native/vulkan/BindGroupLayoutVk.h
@@ -24,60 +24,60 @@
 #include "dawn/native/vulkan/BindGroupVk.h"
 
 namespace dawn::native {
-    class CacheKey;
+class CacheKey;
 }  // namespace dawn::native
 
 namespace dawn::native::vulkan {
 
-    struct DescriptorSetAllocation;
-    class DescriptorSetAllocator;
-    class Device;
+struct DescriptorSetAllocation;
+class DescriptorSetAllocator;
+class Device;
 
-    VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo);
+VkDescriptorType VulkanDescriptorType(const BindingInfo& bindingInfo);
 
-    // In Vulkan descriptor pools have to be sized to an exact number of descriptors. This means
-    // it's hard to have something where we can mix different types of descriptor sets because
-    // we don't know if their vector of number of descriptors will be similar.
-    //
-    // That's why that in addition to containing the VkDescriptorSetLayout to create
-    // VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
-    // sets.
-    //
-    // The allocations is done with one pool per descriptor set, which is inefficient, but at least
-    // the pools are reused when no longer used. Minimizing the number of descriptor pool allocation
-    // is important because creating them can incur GPU memory allocation which is usually an
-    // expensive syscall.
-    class BindGroupLayout final : public BindGroupLayoutBase {
-      public:
-        static ResultOrError<Ref<BindGroupLayout>> Create(
-            Device* device,
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken);
+// In Vulkan descriptor pools have to be sized to an exact number of descriptors. This means
+// it's hard to have something where we can mix different types of descriptor sets because
+// we don't know if their vector of number of descriptors will be similar.
+//
+// That's why, in addition to containing the VkDescriptorSetLayout to create
+// VkDescriptorSets for its bindgroups, the layout also acts as an allocator for the descriptor
+// sets.
+//
+// The allocation is done with one pool per descriptor set, which is inefficient, but at least
+// the pools are reused when no longer used. Minimizing the number of descriptor pool allocations
+// is important because creating them can incur GPU memory allocation which is usually an
+// expensive syscall.
+class BindGroupLayout final : public BindGroupLayoutBase {
+  public:
+    static ResultOrError<Ref<BindGroupLayout>> Create(
+        Device* device,
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken);
 
-        BindGroupLayout(DeviceBase* device,
-                        const BindGroupLayoutDescriptor* descriptor,
-                        PipelineCompatibilityToken pipelineCompatibilityToken);
+    BindGroupLayout(DeviceBase* device,
+                    const BindGroupLayoutDescriptor* descriptor,
+                    PipelineCompatibilityToken pipelineCompatibilityToken);
 
-        VkDescriptorSetLayout GetHandle() const;
+    VkDescriptorSetLayout GetHandle() const;
 
-        ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
-                                                        const BindGroupDescriptor* descriptor);
-        void DeallocateBindGroup(BindGroup* bindGroup,
-                                 DescriptorSetAllocation* descriptorSetAllocation);
+    ResultOrError<Ref<BindGroup>> AllocateBindGroup(Device* device,
+                                                    const BindGroupDescriptor* descriptor);
+    void DeallocateBindGroup(BindGroup* bindGroup,
+                             DescriptorSetAllocation* descriptorSetAllocation);
 
-      private:
-        ~BindGroupLayout() override;
-        MaybeError Initialize();
-        void DestroyImpl() override;
+  private:
+    ~BindGroupLayout() override;
+    MaybeError Initialize();
+    void DestroyImpl() override;
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-        VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
+    VkDescriptorSetLayout mHandle = VK_NULL_HANDLE;
 
-        SlabAllocator<BindGroup> mBindGroupAllocator;
-        Ref<DescriptorSetAllocator> mDescriptorSetAllocator;
-    };
+    SlabAllocator<BindGroup> mBindGroupAllocator;
+    Ref<DescriptorSetAllocator> mDescriptorSetAllocator;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/BindGroupVk.cpp b/src/dawn/native/vulkan/BindGroupVk.cpp
index b55c10f..0eafbdb 100644
--- a/src/dawn/native/vulkan/BindGroupVk.cpp
+++ b/src/dawn/native/vulkan/BindGroupVk.cpp
@@ -28,137 +28,135 @@
 
 namespace dawn::native::vulkan {
 
-    // static
-    ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
-                                                    const BindGroupDescriptor* descriptor) {
-        return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
-    }
+// static
+ResultOrError<Ref<BindGroup>> BindGroup::Create(Device* device,
+                                                const BindGroupDescriptor* descriptor) {
+    return ToBackend(descriptor->layout)->AllocateBindGroup(device, descriptor);
+}
 
-    BindGroup::BindGroup(Device* device,
-                         const BindGroupDescriptor* descriptor,
-                         DescriptorSetAllocation descriptorSetAllocation)
-        : BindGroupBase(this, device, descriptor),
-          mDescriptorSetAllocation(descriptorSetAllocation) {
-        // Now do a write of a single descriptor set with all possible chained data allocated on the
-        // stack.
-        const uint32_t bindingCount = static_cast<uint32_t>((GetLayout()->GetBindingCount()));
-        ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
-            bindingCount);
-        ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup>
-            writeBufferInfo(bindingCount);
-        ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup>
-            writeImageInfo(bindingCount);
+BindGroup::BindGroup(Device* device,
+                     const BindGroupDescriptor* descriptor,
+                     DescriptorSetAllocation descriptorSetAllocation)
+    : BindGroupBase(this, device, descriptor), mDescriptorSetAllocation(descriptorSetAllocation) {
+    // Now do a write of a single descriptor set with all possible chained data allocated on the
+    // stack.
+    const uint32_t bindingCount = static_cast<uint32_t>((GetLayout()->GetBindingCount()));
+    ityp::stack_vec<uint32_t, VkWriteDescriptorSet, kMaxOptimalBindingsPerGroup> writes(
+        bindingCount);
+    ityp::stack_vec<uint32_t, VkDescriptorBufferInfo, kMaxOptimalBindingsPerGroup> writeBufferInfo(
+        bindingCount);
+    ityp::stack_vec<uint32_t, VkDescriptorImageInfo, kMaxOptimalBindingsPerGroup> writeImageInfo(
+        bindingCount);
 
-        uint32_t numWrites = 0;
-        for (const auto [_, bindingIndex] : GetLayout()->GetBindingMap()) {
-            const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
+    uint32_t numWrites = 0;
+    for (const auto [_, bindingIndex] : GetLayout()->GetBindingMap()) {
+        const BindingInfo& bindingInfo = GetLayout()->GetBindingInfo(bindingIndex);
 
-            auto& write = writes[numWrites];
-            write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
-            write.pNext = nullptr;
-            write.dstSet = GetHandle();
-            write.dstBinding = static_cast<uint32_t>(bindingIndex);
-            write.dstArrayElement = 0;
-            write.descriptorCount = 1;
-            write.descriptorType = VulkanDescriptorType(bindingInfo);
+        auto& write = writes[numWrites];
+        write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+        write.pNext = nullptr;
+        write.dstSet = GetHandle();
+        write.dstBinding = static_cast<uint32_t>(bindingIndex);
+        write.dstArrayElement = 0;
+        write.descriptorCount = 1;
+        write.descriptorType = VulkanDescriptorType(bindingInfo);
 
-            switch (bindingInfo.bindingType) {
-                case BindingInfoType::Buffer: {
-                    BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
+        switch (bindingInfo.bindingType) {
+            case BindingInfoType::Buffer: {
+                BufferBinding binding = GetBindingAsBufferBinding(bindingIndex);
 
-                    VkBuffer handle = ToBackend(binding.buffer)->GetHandle();
-                    if (handle == VK_NULL_HANDLE) {
-                        // The Buffer was destroyed. Skip this descriptor write since it would be
-                        // a Vulkan Validation Layers error. This bind group won't be used as it
-                        // is an error to submit a command buffer that references destroyed
-                        // resources.
-                        continue;
-                    }
-                    writeBufferInfo[numWrites].buffer = handle;
-                    writeBufferInfo[numWrites].offset = binding.offset;
-                    writeBufferInfo[numWrites].range = binding.size;
-                    write.pBufferInfo = &writeBufferInfo[numWrites];
-                    break;
+                VkBuffer handle = ToBackend(binding.buffer)->GetHandle();
+                if (handle == VK_NULL_HANDLE) {
+                    // The Buffer was destroyed. Skip this descriptor write since it would be
+                    // a Vulkan Validation Layers error. This bind group won't be used as it
+                    // is an error to submit a command buffer that references destroyed
+                    // resources.
+                    continue;
                 }
-
-                case BindingInfoType::Sampler: {
-                    Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
-                    writeImageInfo[numWrites].sampler = sampler->GetHandle();
-                    write.pImageInfo = &writeImageInfo[numWrites];
-                    break;
-                }
-
-                case BindingInfoType::Texture: {
-                    TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
-                    VkImageView handle = view->GetHandle();
-                    if (handle == VK_NULL_HANDLE) {
-                        // The Texture was destroyed before the TextureView was created.
-                        // Skip this descriptor write since it would be
-                        // a Vulkan Validation Layers error. This bind group won't be used as it
-                        // is an error to submit a command buffer that references destroyed
-                        // resources.
-                        continue;
-                    }
-                    writeImageInfo[numWrites].imageView = handle;
-
-                    // The layout may be GENERAL here because of interactions between the Sampled
-                    // and ReadOnlyStorage usages. See the logic in VulkanImageLayout.
-                    writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
-                        ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
-
-                    write.pImageInfo = &writeImageInfo[numWrites];
-                    break;
-                }
-
-                case BindingInfoType::StorageTexture: {
-                    TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
-
-                    VkImageView handle = view->GetHandle();
-                    if (handle == VK_NULL_HANDLE) {
-                        // The Texture was destroyed before the TextureView was created.
-                        // Skip this descriptor write since it would be
-                        // a Vulkan Validation Layers error. This bind group won't be used as it
-                        // is an error to submit a command buffer that references destroyed
-                        // resources.
-                        continue;
-                    }
-                    writeImageInfo[numWrites].imageView = handle;
-                    writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
-
-                    write.pImageInfo = &writeImageInfo[numWrites];
-                    break;
-                }
-
-                case BindingInfoType::ExternalTexture:
-                    UNREACHABLE();
-                    break;
+                writeBufferInfo[numWrites].buffer = handle;
+                writeBufferInfo[numWrites].offset = binding.offset;
+                writeBufferInfo[numWrites].range = binding.size;
+                write.pBufferInfo = &writeBufferInfo[numWrites];
+                break;
             }
 
-            numWrites++;
+            case BindingInfoType::Sampler: {
+                Sampler* sampler = ToBackend(GetBindingAsSampler(bindingIndex));
+                writeImageInfo[numWrites].sampler = sampler->GetHandle();
+                write.pImageInfo = &writeImageInfo[numWrites];
+                break;
+            }
+
+            case BindingInfoType::Texture: {
+                TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+                VkImageView handle = view->GetHandle();
+                if (handle == VK_NULL_HANDLE) {
+                    // The Texture was destroyed before the TextureView was created.
+                    // Skip this descriptor write since it would be
+                    // a Vulkan Validation Layers error. This bind group won't be used as it
+                    // is an error to submit a command buffer that references destroyed
+                    // resources.
+                    continue;
+                }
+                writeImageInfo[numWrites].imageView = handle;
+
+                // The layout may be GENERAL here because of interactions between the Sampled
+                // and ReadOnlyStorage usages. See the logic in VulkanImageLayout.
+                writeImageInfo[numWrites].imageLayout = VulkanImageLayout(
+                    ToBackend(view->GetTexture()), wgpu::TextureUsage::TextureBinding);
+
+                write.pImageInfo = &writeImageInfo[numWrites];
+                break;
+            }
+
+            case BindingInfoType::StorageTexture: {
+                TextureView* view = ToBackend(GetBindingAsTextureView(bindingIndex));
+
+                VkImageView handle = view->GetHandle();
+                if (handle == VK_NULL_HANDLE) {
+                    // The Texture was destroyed before the TextureView was created.
+                    // Skip this descriptor write since it would be
+                    // a Vulkan Validation Layers error. This bind group won't be used as it
+                    // is an error to submit a command buffer that references destroyed
+                    // resources.
+                    continue;
+                }
+                writeImageInfo[numWrites].imageView = handle;
+                writeImageInfo[numWrites].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+                write.pImageInfo = &writeImageInfo[numWrites];
+                break;
+            }
+
+            case BindingInfoType::ExternalTexture:
+                UNREACHABLE();
+                break;
         }
 
-        // TODO(crbug.com/dawn/855): Batch these updates
-        device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0,
-                                        nullptr);
-
-        SetLabelImpl();
+        numWrites++;
     }
 
-    BindGroup::~BindGroup() = default;
+    // TODO(crbug.com/dawn/855): Batch these updates
+    device->fn.UpdateDescriptorSets(device->GetVkDevice(), numWrites, writes.data(), 0, nullptr);
 
-    void BindGroup::DestroyImpl() {
-        BindGroupBase::DestroyImpl();
-        ToBackend(GetLayout())->DeallocateBindGroup(this, &mDescriptorSetAllocation);
-    }
+    SetLabelImpl();
+}
 
-    VkDescriptorSet BindGroup::GetHandle() const {
-        return mDescriptorSetAllocation.set;
-    }
+BindGroup::~BindGroup() = default;
 
-    void BindGroup::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mDescriptorSetAllocation.set, "Dawn_BindGroup",
-                     GetLabel());
-    }
+void BindGroup::DestroyImpl() {
+    BindGroupBase::DestroyImpl();
+    ToBackend(GetLayout())->DeallocateBindGroup(this, &mDescriptorSetAllocation);
+}
+
+VkDescriptorSet BindGroup::GetHandle() const {
+    return mDescriptorSetAllocation.set;
+}
+
+void BindGroup::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mDescriptorSetAllocation.set, "Dawn_BindGroup",
+                 GetLabel());
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BindGroupVk.h b/src/dawn/native/vulkan/BindGroupVk.h
index 0b08c6c..9d1d9a4 100644
--- a/src/dawn/native/vulkan/BindGroupVk.h
+++ b/src/dawn/native/vulkan/BindGroupVk.h
@@ -23,31 +23,31 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class BindGroup final : public BindGroupBase, public PlacementAllocated {
-      public:
-        static ResultOrError<Ref<BindGroup>> Create(Device* device,
-                                                    const BindGroupDescriptor* descriptor);
+class BindGroup final : public BindGroupBase, public PlacementAllocated {
+  public:
+    static ResultOrError<Ref<BindGroup>> Create(Device* device,
+                                                const BindGroupDescriptor* descriptor);
 
-        BindGroup(Device* device,
-                  const BindGroupDescriptor* descriptor,
-                  DescriptorSetAllocation descriptorSetAllocation);
+    BindGroup(Device* device,
+              const BindGroupDescriptor* descriptor,
+              DescriptorSetAllocation descriptorSetAllocation);
 
-        VkDescriptorSet GetHandle() const;
+    VkDescriptorSet GetHandle() const;
 
-      private:
-        ~BindGroup() override;
+  private:
+    ~BindGroup() override;
 
-        void DestroyImpl() override;
+    void DestroyImpl() override;
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-        // The descriptor set in this allocation outlives the BindGroup because it is owned by
-        // the BindGroupLayout which is referenced by the BindGroup.
-        DescriptorSetAllocation mDescriptorSetAllocation;
-    };
+    // The descriptor set in this allocation outlives the BindGroup because it is owned by
+    // the BindGroupLayout which is referenced by the BindGroup.
+    DescriptorSetAllocation mDescriptorSetAllocation;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/BufferVk.cpp b/src/dawn/native/vulkan/BufferVk.cpp
index 519402e..5bec8da 100644
--- a/src/dawn/native/vulkan/BufferVk.cpp
+++ b/src/dawn/native/vulkan/BufferVk.cpp
@@ -29,387 +29,384 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
+namespace {
 
-        VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
-            VkBufferUsageFlags flags = 0;
+VkBufferUsageFlags VulkanBufferUsage(wgpu::BufferUsage usage) {
+    VkBufferUsageFlags flags = 0;
 
-            if (usage & wgpu::BufferUsage::CopySrc) {
-                flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-            }
-            if (usage & wgpu::BufferUsage::CopyDst) {
-                flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Index) {
-                flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Vertex) {
-                flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Uniform) {
-                flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
-            }
-            if (usage &
-                (wgpu::BufferUsage::Storage | kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
-                flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Indirect) {
-                flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
-            }
-            if (usage & wgpu::BufferUsage::QueryResolve) {
-                flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-            }
-
-            return flags;
-        }
-
-        VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
-            VkPipelineStageFlags flags = 0;
-
-            if (usage & kMappableBufferUsages) {
-                flags |= VK_PIPELINE_STAGE_HOST_BIT;
-            }
-            if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
-                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
-            }
-            if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
-                flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
-            }
-            if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage |
-                         kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
-                flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
-                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
-                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Indirect) {
-                flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
-            }
-            if (usage & wgpu::BufferUsage::QueryResolve) {
-                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
-            }
-
-            return flags;
-        }
-
-        VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
-            VkAccessFlags flags = 0;
-
-            if (usage & wgpu::BufferUsage::MapRead) {
-                flags |= VK_ACCESS_HOST_READ_BIT;
-            }
-            if (usage & wgpu::BufferUsage::MapWrite) {
-                flags |= VK_ACCESS_HOST_WRITE_BIT;
-            }
-            if (usage & wgpu::BufferUsage::CopySrc) {
-                flags |= VK_ACCESS_TRANSFER_READ_BIT;
-            }
-            if (usage & wgpu::BufferUsage::CopyDst) {
-                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Index) {
-                flags |= VK_ACCESS_INDEX_READ_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Vertex) {
-                flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Uniform) {
-                flags |= VK_ACCESS_UNIFORM_READ_BIT;
-            }
-            if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
-                flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
-            }
-            if (usage & kReadOnlyStorageBuffer) {
-                flags |= VK_ACCESS_SHADER_READ_BIT;
-            }
-            if (usage & wgpu::BufferUsage::Indirect) {
-                flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
-            }
-            if (usage & wgpu::BufferUsage::QueryResolve) {
-                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
-            }
-
-            return flags;
-        }
-
-    }  // namespace
-
-    // static
-    ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
-        Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
-        DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
-        return std::move(buffer);
+    if (usage & wgpu::BufferUsage::CopySrc) {
+        flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+    }
+    if (usage & wgpu::BufferUsage::CopyDst) {
+        flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Index) {
+        flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Vertex) {
+        flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Uniform) {
+        flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+    }
+    if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer | kReadOnlyStorageBuffer)) {
+        flags |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Indirect) {
+        flags |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
+    }
+    if (usage & wgpu::BufferUsage::QueryResolve) {
+        flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
     }
 
-    MaybeError Buffer::Initialize(bool mappedAtCreation) {
-        // vkCmdFillBuffer requires the size to be a multiple of 4.
-        constexpr size_t kAlignment = 4u;
+    return flags;
+}
 
-        uint32_t extraBytes = 0u;
-        if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
-            // vkCmdSetIndexBuffer and vkCmdSetVertexBuffer are invalid if the offset
-            // is equal to the whole buffer size. Allocate at least one more byte so it
-            // is valid to setVertex/IndexBuffer with a zero-sized range at the end
-            // of the buffer with (offset=buffer.size, size=0).
-            extraBytes = 1u;
-        }
+VkPipelineStageFlags VulkanPipelineStage(wgpu::BufferUsage usage) {
+    VkPipelineStageFlags flags = 0;
 
-        uint64_t size = GetSize();
-        if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
-        }
-
-        size += extraBytes;
-
-        // Allocate at least 4 bytes so clamped accesses are always in bounds.
-        // Also, Vulkan requires the size to be non-zero.
-        size = std::max(size, uint64_t(4u));
-
-        if (size > std::numeric_limits<uint64_t>::max() - kAlignment) {
-            // Alignment would overlow.
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
-        }
-        mAllocatedSize = Align(size, kAlignment);
-
-        // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
-        // some constants to the size passed and align it, but for values close to the maximum
-        // VkDeviceSize this can cause overflows and makes drivers crash or return bad sizes in the
-        // VkmemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
-        // Any size with one of two top bits of VkDeviceSize set is a HUGE allocation and we can
-        // safely return an OOM error.
-        if (mAllocatedSize & (uint64_t(3) << uint64_t(62))) {
-            return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
-        }
-
-        VkBufferCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.size = mAllocatedSize;
-        // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
-        // and robust resource initialization.
-        createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
-        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-        createInfo.queueFamilyIndexCount = 0;
-        createInfo.pQueueFamilyIndices = 0;
-
-        Device* device = ToBackend(GetDevice());
-        DAWN_TRY(CheckVkOOMThenSuccess(
-            device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
-            "vkCreateBuffer"));
-
-        // Gather requirements for the buffer's memory and allocate it.
-        VkMemoryRequirements requirements;
-        device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
-
-        MemoryKind requestKind = MemoryKind::Linear;
-        if (GetUsage() & kMappableBufferUsages) {
-            requestKind = MemoryKind::LinearMappable;
-        }
-        DAWN_TRY_ASSIGN(mMemoryAllocation,
-                        device->GetResourceMemoryAllocator()->Allocate(requirements, requestKind));
-
-        // Finally associate it with the buffer.
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
-                                        ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
-                                        mMemoryAllocation.GetOffset()),
-            "vkBindBufferMemory"));
-
-        // The buffers with mappedAtCreation == true will be initialized in
-        // BufferBase::MapAtCreation().
-        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
-            !mappedAtCreation) {
-            ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
-        }
-
-        // Initialize the padding bytes to zero.
-        if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
-            uint32_t paddingBytes = GetAllocatedSize() - GetSize();
-            if (paddingBytes > 0) {
-                uint32_t clearSize = Align(paddingBytes, 4);
-                uint64_t clearOffset = GetAllocatedSize() - clearSize;
-
-                CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-                ClearBuffer(recordingContext, 0, clearOffset, clearSize);
-            }
-        }
-
-        SetLabelImpl();
-
-        return {};
+    if (usage & kMappableBufferUsages) {
+        flags |= VK_PIPELINE_STAGE_HOST_BIT;
+    }
+    if (usage & (wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst)) {
+        flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+    }
+    if (usage & (wgpu::BufferUsage::Index | wgpu::BufferUsage::Vertex)) {
+        flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+    }
+    if (usage & (wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage | kInternalStorageBuffer |
+                 kReadOnlyStorageBuffer)) {
+        flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+                 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Indirect) {
+        flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
+    }
+    if (usage & wgpu::BufferUsage::QueryResolve) {
+        flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
     }
 
-    Buffer::~Buffer() = default;
+    return flags;
+}
 
-    VkBuffer Buffer::GetHandle() const {
-        return mHandle;
+VkAccessFlags VulkanAccessFlags(wgpu::BufferUsage usage) {
+    VkAccessFlags flags = 0;
+
+    if (usage & wgpu::BufferUsage::MapRead) {
+        flags |= VK_ACCESS_HOST_READ_BIT;
+    }
+    if (usage & wgpu::BufferUsage::MapWrite) {
+        flags |= VK_ACCESS_HOST_WRITE_BIT;
+    }
+    if (usage & wgpu::BufferUsage::CopySrc) {
+        flags |= VK_ACCESS_TRANSFER_READ_BIT;
+    }
+    if (usage & wgpu::BufferUsage::CopyDst) {
+        flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Index) {
+        flags |= VK_ACCESS_INDEX_READ_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Vertex) {
+        flags |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Uniform) {
+        flags |= VK_ACCESS_UNIFORM_READ_BIT;
+    }
+    if (usage & (wgpu::BufferUsage::Storage | kInternalStorageBuffer)) {
+        flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+    }
+    if (usage & kReadOnlyStorageBuffer) {
+        flags |= VK_ACCESS_SHADER_READ_BIT;
+    }
+    if (usage & wgpu::BufferUsage::Indirect) {
+        flags |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
+    }
+    if (usage & wgpu::BufferUsage::QueryResolve) {
+        flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
     }
 
-    void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
-                                    wgpu::BufferUsage usage) {
-        VkBufferMemoryBarrier barrier;
-        VkPipelineStageFlags srcStages = 0;
-        VkPipelineStageFlags dstStages = 0;
+    return flags;
+}
 
-        if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
-            ASSERT(srcStages != 0 && dstStages != 0);
-            ToBackend(GetDevice())
-                ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
-                                        nullptr, 1u, &barrier, 0, nullptr);
+}  // namespace
+
+// static
+ResultOrError<Ref<Buffer>> Buffer::Create(Device* device, const BufferDescriptor* descriptor) {
+    Ref<Buffer> buffer = AcquireRef(new Buffer(device, descriptor));
+    DAWN_TRY(buffer->Initialize(descriptor->mappedAtCreation));
+    return std::move(buffer);
+}
+
+MaybeError Buffer::Initialize(bool mappedAtCreation) {
+    // vkCmdFillBuffer requires the size to be a multiple of 4.
+    constexpr size_t kAlignment = 4u;
+
+    uint32_t extraBytes = 0u;
+    if (GetUsage() & (wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index)) {
+        // vkCmdSetIndexBuffer and vkCmdSetVertexBuffer are invalid if the offset
+        // is equal to the whole buffer size. Allocate at least one more byte so it
+        // is valid to setVertex/IndexBuffer with a zero-sized range at the end
+        // of the buffer with (offset=buffer.size, size=0).
+        extraBytes = 1u;
+    }
+
+    uint64_t size = GetSize();
+    if (size > std::numeric_limits<uint64_t>::max() - extraBytes) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+    }
+
+    size += extraBytes;
+
+    // Allocate at least 4 bytes so clamped accesses are always in bounds.
+    // Also, Vulkan requires the size to be non-zero.
+    size = std::max(size, uint64_t(4u));
+
+    if (size > std::numeric_limits<uint64_t>::max() - kAlignment) {
+        // Alignment would overflow.
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer allocation is too large");
+    }
+    mAllocatedSize = Align(size, kAlignment);
+
+    // Avoid passing ludicrously large sizes to drivers because it causes issues: drivers add
+    // some constants to the size passed and align it, but for values close to the maximum
+    // VkDeviceSize this can cause overflows and makes drivers crash or return bad sizes in the
+    // VkMemoryRequirements. See https://gitlab.khronos.org/vulkan/vulkan/issues/1904
+    // Any size with one of two top bits of VkDeviceSize set is a HUGE allocation and we can
+    // safely return an OOM error.
+    if (mAllocatedSize & (uint64_t(3) << uint64_t(62))) {
+        return DAWN_OUT_OF_MEMORY_ERROR("Buffer size is HUGE and could cause overflows");
+    }
+
+    VkBufferCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.size = mAllocatedSize;
+    // Add CopyDst for non-mappable buffer initialization with mappedAtCreation
+    // and robust resource initialization.
+    createInfo.usage = VulkanBufferUsage(GetUsage() | wgpu::BufferUsage::CopyDst);
+    createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    createInfo.queueFamilyIndexCount = 0;
+    createInfo.pQueueFamilyIndices = 0;
+
+    Device* device = ToBackend(GetDevice());
+    DAWN_TRY(CheckVkOOMThenSuccess(
+        device->fn.CreateBuffer(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+        "vkCreateBuffer"));
+
+    // Gather requirements for the buffer's memory and allocate it.
+    VkMemoryRequirements requirements;
+    device->fn.GetBufferMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
+
+    MemoryKind requestKind = MemoryKind::Linear;
+    if (GetUsage() & kMappableBufferUsages) {
+        requestKind = MemoryKind::LinearMappable;
+    }
+    DAWN_TRY_ASSIGN(mMemoryAllocation,
+                    device->GetResourceMemoryAllocator()->Allocate(requirements, requestKind));
+
+    // Finally associate it with the buffer.
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.BindBufferMemory(device->GetVkDevice(), mHandle,
+                                    ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+                                    mMemoryAllocation.GetOffset()),
+        "vkBindBufferMemory"));
+
+    // The buffers with mappedAtCreation == true will be initialized in
+    // BufferBase::MapAtCreation().
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting) &&
+        !mappedAtCreation) {
+        ClearBuffer(device->GetPendingRecordingContext(), 0x01010101);
+    }
+
+    // Initialize the padding bytes to zero.
+    if (device->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse) && !mappedAtCreation) {
+        uint32_t paddingBytes = GetAllocatedSize() - GetSize();
+        if (paddingBytes > 0) {
+            uint32_t clearSize = Align(paddingBytes, 4);
+            uint64_t clearOffset = GetAllocatedSize() - clearSize;
+
+            CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+            ClearBuffer(recordingContext, 0, clearOffset, clearSize);
         }
     }
 
-    bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
-                                                      VkBufferMemoryBarrier* barrier,
-                                                      VkPipelineStageFlags* srcStages,
-                                                      VkPipelineStageFlags* dstStages) {
-        bool lastIncludesTarget = IsSubset(usage, mLastUsage);
-        bool lastReadOnly = IsSubset(mLastUsage, kReadOnlyBufferUsages);
+    SetLabelImpl();
 
-        // We can skip transitions to already current read-only usages.
-        if (lastIncludesTarget && lastReadOnly) {
-            return false;
-        }
+    return {};
+}
 
-        // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
-        if (mLastUsage == wgpu::BufferUsage::None) {
-            mLastUsage = usage;
-            return false;
-        }
+Buffer::~Buffer() = default;
 
-        *srcStages |= VulkanPipelineStage(mLastUsage);
-        *dstStages |= VulkanPipelineStage(usage);
+VkBuffer Buffer::GetHandle() const {
+    return mHandle;
+}
 
-        barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
-        barrier->pNext = nullptr;
-        barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
-        barrier->dstAccessMask = VulkanAccessFlags(usage);
-        barrier->srcQueueFamilyIndex = 0;
-        barrier->dstQueueFamilyIndex = 0;
-        barrier->buffer = mHandle;
-        barrier->offset = 0;
-        // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
-        barrier->size = GetAllocatedSize();
+void Buffer::TransitionUsageNow(CommandRecordingContext* recordingContext,
+                                wgpu::BufferUsage usage) {
+    VkBufferMemoryBarrier barrier;
+    VkPipelineStageFlags srcStages = 0;
+    VkPipelineStageFlags dstStages = 0;
 
+    if (TransitionUsageAndGetResourceBarrier(usage, &barrier, &srcStages, &dstStages)) {
+        ASSERT(srcStages != 0 && dstStages != 0);
+        ToBackend(GetDevice())
+            ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                    nullptr, 1u, &barrier, 0, nullptr);
+    }
+}
+
+bool Buffer::TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+                                                  VkBufferMemoryBarrier* barrier,
+                                                  VkPipelineStageFlags* srcStages,
+                                                  VkPipelineStageFlags* dstStages) {
+    bool lastIncludesTarget = IsSubset(usage, mLastUsage);
+    bool lastReadOnly = IsSubset(mLastUsage, kReadOnlyBufferUsages);
+
+    // We can skip transitions to already current read-only usages.
+    if (lastIncludesTarget && lastReadOnly) {
+        return false;
+    }
+
+    // Special-case for the initial transition: Vulkan doesn't allow access flags to be 0.
+    if (mLastUsage == wgpu::BufferUsage::None) {
         mLastUsage = usage;
-
-        return true;
+        return false;
     }
 
-    bool Buffer::IsCPUWritableAtCreation() const {
-        // TODO(enga): Handle CPU-visible memory on UMA
-        return mMemoryAllocation.GetMappedPointer() != nullptr;
+    *srcStages |= VulkanPipelineStage(mLastUsage);
+    *dstStages |= VulkanPipelineStage(usage);
+
+    barrier->sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+    barrier->pNext = nullptr;
+    barrier->srcAccessMask = VulkanAccessFlags(mLastUsage);
+    barrier->dstAccessMask = VulkanAccessFlags(usage);
+    barrier->srcQueueFamilyIndex = 0;
+    barrier->dstQueueFamilyIndex = 0;
+    barrier->buffer = mHandle;
+    barrier->offset = 0;
+    // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+    barrier->size = GetAllocatedSize();
+
+    mLastUsage = usage;
+
+    return true;
+}
+
+bool Buffer::IsCPUWritableAtCreation() const {
+    // TODO(enga): Handle CPU-visible memory on UMA
+    return mMemoryAllocation.GetMappedPointer() != nullptr;
+}
+
+MaybeError Buffer::MapAtCreationImpl() {
+    return {};
+}
+
+MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
+    Device* device = ToBackend(GetDevice());
+
+    CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+
+    // TODO(crbug.com/dawn/852): initialize mapped buffer in CPU side.
+    EnsureDataInitialized(recordingContext);
+
+    if (mode & wgpu::MapMode::Read) {
+        TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
+    } else {
+        ASSERT(mode & wgpu::MapMode::Write);
+        TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
+    }
+    return {};
+}
+
+void Buffer::UnmapImpl() {
+    // No need to do anything, we keep CPU-visible memory mapped at all time.
+}
+
+void* Buffer::GetMappedPointerImpl() {
+    uint8_t* memory = mMemoryAllocation.GetMappedPointer();
+    ASSERT(memory != nullptr);
+    return memory;
+}
+
+void Buffer::DestroyImpl() {
+    BufferBase::DestroyImpl();
+
+    ToBackend(GetDevice())->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
+
+    if (mHandle != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
+    }
+}
+
+bool Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
+    if (!NeedsInitialization()) {
+        return false;
     }
 
-    MaybeError Buffer::MapAtCreationImpl() {
-        return {};
+    InitializeToZero(recordingContext);
+    return true;
+}
+
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                                uint64_t offset,
+                                                uint64_t size) {
+    if (!NeedsInitialization()) {
+        return false;
     }
 
-    MaybeError Buffer::MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) {
-        Device* device = ToBackend(GetDevice());
-
-        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-
-        // TODO(crbug.com/dawn/852): initialize mapped buffer in CPU side.
-        EnsureDataInitialized(recordingContext);
-
-        if (mode & wgpu::MapMode::Read) {
-            TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapRead);
-        } else {
-            ASSERT(mode & wgpu::MapMode::Write);
-            TransitionUsageNow(recordingContext, wgpu::BufferUsage::MapWrite);
-        }
-        return {};
-    }
-
-    void Buffer::UnmapImpl() {
-        // No need to do anything, we keep CPU-visible memory mapped at all time.
-    }
-
-    void* Buffer::GetMappedPointerImpl() {
-        uint8_t* memory = mMemoryAllocation.GetMappedPointer();
-        ASSERT(memory != nullptr);
-        return memory;
-    }
-
-    void Buffer::DestroyImpl() {
-        BufferBase::DestroyImpl();
-
-        ToBackend(GetDevice())->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
-
-        if (mHandle != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            mHandle = VK_NULL_HANDLE;
-        }
-    }
-
-    bool Buffer::EnsureDataInitialized(CommandRecordingContext* recordingContext) {
-        if (!NeedsInitialization()) {
-            return false;
-        }
-
-        InitializeToZero(recordingContext);
-        return true;
-    }
-
-    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
-                                                    uint64_t offset,
-                                                    uint64_t size) {
-        if (!NeedsInitialization()) {
-            return false;
-        }
-
-        if (IsFullBufferRange(offset, size)) {
-            SetIsDataInitialized();
-            return false;
-        }
-
-        InitializeToZero(recordingContext);
-        return true;
-    }
-
-    bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
-                                                    const CopyTextureToBufferCmd* copy) {
-        if (!NeedsInitialization()) {
-            return false;
-        }
-
-        if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
-            SetIsDataInitialized();
-            return false;
-        }
-
-        InitializeToZero(recordingContext);
-        return true;
-    }
-
-    void Buffer::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Buffer", GetLabel());
-    }
-
-    void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
-        ASSERT(NeedsInitialization());
-
-        ClearBuffer(recordingContext, 0u);
-        GetDevice()->IncrementLazyClearCountForTesting();
+    if (IsFullBufferRange(offset, size)) {
         SetIsDataInitialized();
+        return false;
     }
 
-    void Buffer::ClearBuffer(CommandRecordingContext* recordingContext,
-                             uint32_t clearValue,
-                             uint64_t offset,
-                             uint64_t size) {
-        ASSERT(recordingContext != nullptr);
-        size = size > 0 ? size : GetAllocatedSize();
-        ASSERT(size > 0);
+    InitializeToZero(recordingContext);
+    return true;
+}
 
-        TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
-        Device* device = ToBackend(GetDevice());
-        // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
-        // Note: Allocated size must be a multiple of 4.
-        ASSERT(size % 4 == 0);
-        device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, offset, size,
-                                 clearValue);
+bool Buffer::EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                                const CopyTextureToBufferCmd* copy) {
+    if (!NeedsInitialization()) {
+        return false;
     }
+
+    if (IsFullBufferOverwrittenInTextureToBufferCopy(copy)) {
+        SetIsDataInitialized();
+        return false;
+    }
+
+    InitializeToZero(recordingContext);
+    return true;
+}
+
+void Buffer::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Buffer", GetLabel());
+}
+
+void Buffer::InitializeToZero(CommandRecordingContext* recordingContext) {
+    ASSERT(NeedsInitialization());
+
+    ClearBuffer(recordingContext, 0u);
+    GetDevice()->IncrementLazyClearCountForTesting();
+    SetIsDataInitialized();
+}
+
+void Buffer::ClearBuffer(CommandRecordingContext* recordingContext,
+                         uint32_t clearValue,
+                         uint64_t offset,
+                         uint64_t size) {
+    ASSERT(recordingContext != nullptr);
+    size = size > 0 ? size : GetAllocatedSize();
+    ASSERT(size > 0);
+
+    TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+    Device* device = ToBackend(GetDevice());
+    // VK_WHOLE_SIZE doesn't work on old Windows Intel Vulkan drivers, so we don't use it.
+    // Note: Allocated size must be a multiple of 4.
+    ASSERT(size % 4 == 0);
+    device->fn.CmdFillBuffer(recordingContext->commandBuffer, mHandle, offset, size, clearValue);
+}
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/BufferVk.h b/src/dawn/native/vulkan/BufferVk.h
index 185170e..8e95508 100644
--- a/src/dawn/native/vulkan/BufferVk.h
+++ b/src/dawn/native/vulkan/BufferVk.h
@@ -23,59 +23,58 @@
 
 namespace dawn::native::vulkan {
 
-    struct CommandRecordingContext;
-    class Device;
+struct CommandRecordingContext;
+class Device;
 
-    class Buffer final : public BufferBase {
-      public:
-        static ResultOrError<Ref<Buffer>> Create(Device* device,
-                                                 const BufferDescriptor* descriptor);
+class Buffer final : public BufferBase {
+  public:
+    static ResultOrError<Ref<Buffer>> Create(Device* device, const BufferDescriptor* descriptor);
 
-        VkBuffer GetHandle() const;
+    VkBuffer GetHandle() const;
 
-        // Transitions the buffer to be used as `usage`, recording any necessary barrier in
-        // `commands`.
-        // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
-        void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
-        bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
-                                                  VkBufferMemoryBarrier* barrier,
-                                                  VkPipelineStageFlags* srcStages,
-                                                  VkPipelineStageFlags* dstStages);
+    // Transitions the buffer to be used as `usage`, recording any necessary barrier in
+    // `recordingContext`.
+    // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+    void TransitionUsageNow(CommandRecordingContext* recordingContext, wgpu::BufferUsage usage);
+    bool TransitionUsageAndGetResourceBarrier(wgpu::BufferUsage usage,
+                                              VkBufferMemoryBarrier* barrier,
+                                              VkPipelineStageFlags* srcStages,
+                                              VkPipelineStageFlags* dstStages);
 
-        // All the Ensure methods return true if the buffer was initialized to zero.
-        bool EnsureDataInitialized(CommandRecordingContext* recordingContext);
-        bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
-                                                uint64_t offset,
-                                                uint64_t size);
-        bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
-                                                const CopyTextureToBufferCmd* copy);
+    // All the Ensure methods return true if the buffer was initialized to zero.
+    bool EnsureDataInitialized(CommandRecordingContext* recordingContext);
+    bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                            uint64_t offset,
+                                            uint64_t size);
+    bool EnsureDataInitializedAsDestination(CommandRecordingContext* recordingContext,
+                                            const CopyTextureToBufferCmd* copy);
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-      private:
-        ~Buffer() override;
-        using BufferBase::BufferBase;
+  private:
+    ~Buffer() override;
+    using BufferBase::BufferBase;
 
-        MaybeError Initialize(bool mappedAtCreation);
-        void InitializeToZero(CommandRecordingContext* recordingContext);
-        void ClearBuffer(CommandRecordingContext* recordingContext,
-                         uint32_t clearValue,
-                         uint64_t offset = 0,
-                         uint64_t size = 0);
+    MaybeError Initialize(bool mappedAtCreation);
+    void InitializeToZero(CommandRecordingContext* recordingContext);
+    void ClearBuffer(CommandRecordingContext* recordingContext,
+                     uint32_t clearValue,
+                     uint64_t offset = 0,
+                     uint64_t size = 0);
 
-        MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
-        void UnmapImpl() override;
-        void DestroyImpl() override;
-        bool IsCPUWritableAtCreation() const override;
-        MaybeError MapAtCreationImpl() override;
-        void* GetMappedPointerImpl() override;
+    MaybeError MapAsyncImpl(wgpu::MapMode mode, size_t offset, size_t size) override;
+    void UnmapImpl() override;
+    void DestroyImpl() override;
+    bool IsCPUWritableAtCreation() const override;
+    MaybeError MapAtCreationImpl() override;
+    void* GetMappedPointerImpl() override;
 
-        VkBuffer mHandle = VK_NULL_HANDLE;
-        ResourceMemoryAllocation mMemoryAllocation;
+    VkBuffer mHandle = VK_NULL_HANDLE;
+    ResourceMemoryAllocation mMemoryAllocation;
 
-        wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
-    };
+    wgpu::BufferUsage mLastUsage = wgpu::BufferUsage::None;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/CacheKeyVk.cpp b/src/dawn/native/vulkan/CacheKeyVk.cpp
index a9391fa..d89649e 100644
--- a/src/dawn/native/vulkan/CacheKeyVk.cpp
+++ b/src/dawn/native/vulkan/CacheKeyVk.cpp
@@ -19,248 +19,247 @@
 
 namespace dawn::native {
 
-    template <>
-    void CacheKeySerializer<VkDescriptorSetLayoutBinding>::Serialize(
-        CacheKey* key,
-        const VkDescriptorSetLayoutBinding& t) {
-        key->Record(t.binding, t.descriptorType, t.descriptorCount, t.stageFlags);
+template <>
+void CacheKeySerializer<VkDescriptorSetLayoutBinding>::Serialize(
+    CacheKey* key,
+    const VkDescriptorSetLayoutBinding& t) {
+    key->Record(t.binding, t.descriptorType, t.descriptorCount, t.stageFlags);
+}
+
+template <>
+void CacheKeySerializer<VkDescriptorSetLayoutCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkDescriptorSetLayoutCreateInfo& t) {
+    key->Record(t.flags).RecordIterable(t.pBindings, t.bindingCount);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPushConstantRange>::Serialize(CacheKey* key,
+                                                        const VkPushConstantRange& t) {
+    key->Record(t.stageFlags, t.offset, t.size);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineLayoutCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineLayoutCreateInfo& t) {
+    // The set layouts are not serialized here because they are pointers to backend objects.
+    // They need to be cross-referenced with the frontend objects and serialized from there.
+    key->Record(t.flags).RecordIterable(t.pPushConstantRanges, t.pushConstantRangeCount);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>::Serialize(
+    CacheKey* key,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT& t) {
+    key->Record(t.requiredSubgroupSize);
+}
+
+template <>
+void CacheKeySerializer<VkSpecializationMapEntry>::Serialize(CacheKey* key,
+                                                             const VkSpecializationMapEntry& t) {
+    key->Record(t.constantID, t.offset, t.size);
+}
+
+template <>
+void CacheKeySerializer<VkSpecializationInfo>::Serialize(CacheKey* key,
+                                                         const VkSpecializationInfo& t) {
+    key->RecordIterable(t.pMapEntries, t.mapEntryCount)
+        .RecordIterable(static_cast<const uint8_t*>(t.pData), t.dataSize);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineShaderStageCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineShaderStageCreateInfo& t) {
+    // The shader module is not serialized here because it is a pointer to a backend object.
+    key->Record(t.flags, t.stage)
+        .RecordIterable(t.pName, strlen(t.pName))
+        .Record(t.pSpecializationInfo);
+    vulkan::SerializePnext<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkComputePipelineCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkComputePipelineCreateInfo& t) {
+    // The pipeline layout is not serialized here because it is a pointer to a backend object.
+    // It needs to be cross-referenced with the frontend objects and serialized from there. The
+    // base pipeline information is also currently not recorded since we do not use them in our
+    // backend implementation. If we decide to use them later on, they also need to be
+    // cross-referenced from the frontend.
+    key->Record(t.flags, t.stage);
+}
+
+template <>
+void CacheKeySerializer<VkVertexInputBindingDescription>::Serialize(
+    CacheKey* key,
+    const VkVertexInputBindingDescription& t) {
+    key->Record(t.binding, t.stride, t.inputRate);
+}
+
+template <>
+void CacheKeySerializer<VkVertexInputAttributeDescription>::Serialize(
+    CacheKey* key,
+    const VkVertexInputAttributeDescription& t) {
+    key->Record(t.location, t.binding, t.format, t.offset);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineVertexInputStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineVertexInputStateCreateInfo& t) {
+    key->Record(t.flags)
+        .RecordIterable(t.pVertexBindingDescriptions, t.vertexBindingDescriptionCount)
+        .RecordIterable(t.pVertexAttributeDescriptions, t.vertexAttributeDescriptionCount);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineInputAssemblyStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineInputAssemblyStateCreateInfo& t) {
+    key->Record(t.flags, t.topology, t.primitiveRestartEnable);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineTessellationStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineTessellationStateCreateInfo& t) {
+    key->Record(t.flags, t.patchControlPoints);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkViewport>::Serialize(CacheKey* key, const VkViewport& t) {
+    key->Record(t.x, t.y, t.width, t.height, t.minDepth, t.maxDepth);
+}
+
+template <>
+void CacheKeySerializer<VkOffset2D>::Serialize(CacheKey* key, const VkOffset2D& t) {
+    key->Record(t.x, t.y);
+}
+
+template <>
+void CacheKeySerializer<VkExtent2D>::Serialize(CacheKey* key, const VkExtent2D& t) {
+    key->Record(t.width, t.height);
+}
+
+template <>
+void CacheKeySerializer<VkRect2D>::Serialize(CacheKey* key, const VkRect2D& t) {
+    key->Record(t.offset, t.extent);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineViewportStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineViewportStateCreateInfo& t) {
+    key->Record(t.flags)
+        .RecordIterable(t.pViewports, t.viewportCount)
+        .RecordIterable(t.pScissors, t.scissorCount);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineRasterizationStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineRasterizationStateCreateInfo& t) {
+    key->Record(t.flags, t.depthClampEnable, t.rasterizerDiscardEnable, t.polygonMode, t.cullMode,
+                t.frontFace, t.depthBiasEnable, t.depthBiasConstantFactor, t.depthBiasClamp,
+                t.depthBiasSlopeFactor, t.lineWidth);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineMultisampleStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineMultisampleStateCreateInfo& t) {
+    key->Record(t.flags, t.rasterizationSamples, t.sampleShadingEnable, t.minSampleShading,
+                t.pSampleMask, t.alphaToCoverageEnable, t.alphaToOneEnable);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkStencilOpState>::Serialize(CacheKey* key, const VkStencilOpState& t) {
+    key->Record(t.failOp, t.passOp, t.depthFailOp, t.compareOp, t.compareMask, t.writeMask,
+                t.reference);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineDepthStencilStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineDepthStencilStateCreateInfo& t) {
+    key->Record(t.flags, t.depthTestEnable, t.depthWriteEnable, t.depthCompareOp,
+                t.depthBoundsTestEnable, t.stencilTestEnable, t.front, t.back, t.minDepthBounds,
+                t.maxDepthBounds);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineColorBlendAttachmentState>::Serialize(
+    CacheKey* key,
+    const VkPipelineColorBlendAttachmentState& t) {
+    key->Record(t.blendEnable, t.srcColorBlendFactor, t.dstColorBlendFactor, t.colorBlendOp,
+                t.srcAlphaBlendFactor, t.dstAlphaBlendFactor, t.alphaBlendOp, t.colorWriteMask);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineColorBlendStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineColorBlendStateCreateInfo& t) {
+    key->Record(t.flags, t.logicOpEnable, t.logicOp)
+        .RecordIterable(t.pAttachments, t.attachmentCount)
+        .Record(t.blendConstants);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<VkPipelineDynamicStateCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkPipelineDynamicStateCreateInfo& t) {
+    key->Record(t.flags).RecordIterable(t.pDynamicStates, t.dynamicStateCount);
+    vulkan::SerializePnext<>(key, &t);
+}
+
+template <>
+void CacheKeySerializer<vulkan::RenderPassCacheQuery>::Serialize(
+    CacheKey* key,
+    const vulkan::RenderPassCacheQuery& t) {
+    key->Record(t.colorMask.to_ulong(), t.resolveTargetMask.to_ulong(), t.sampleCount);
+
+    // Manually iterate the color attachment indices and their corresponding format/load/store
+    // ops because the data is sparse and may be uninitialized. Since we record the colorMask
+    // member above, recording sparse data should be fine here.
+    for (ColorAttachmentIndex i : IterateBitSet(t.colorMask)) {
+        key->Record(t.colorFormats[i], t.colorLoadOp[i], t.colorStoreOp[i]);
     }
 
-    template <>
-    void CacheKeySerializer<VkDescriptorSetLayoutCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkDescriptorSetLayoutCreateInfo& t) {
-        key->Record(t.flags).RecordIterable(t.pBindings, t.bindingCount);
-        vulkan::SerializePnext<>(key, &t);
+    // Serialize the depth-stencil toggle bit, and the parameters if applicable.
+    key->Record(t.hasDepthStencil);
+    if (t.hasDepthStencil) {
+        key->Record(t.depthStencilFormat, t.depthLoadOp, t.depthStoreOp, t.stencilLoadOp,
+                    t.stencilStoreOp, t.readOnlyDepthStencil);
     }
+}
 
-    template <>
-    void CacheKeySerializer<VkPushConstantRange>::Serialize(CacheKey* key,
-                                                            const VkPushConstantRange& t) {
-        key->Record(t.stageFlags, t.offset, t.size);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineLayoutCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineLayoutCreateInfo& t) {
-        // The set layouts are not serialized here because they are pointers to backend objects.
-        // They need to be cross-referenced with the frontend objects and serialized from there.
-        key->Record(t.flags).RecordIterable(t.pPushConstantRanges, t.pushConstantRangeCount);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>::Serialize(
-        CacheKey* key,
-        const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT& t) {
-        key->Record(t.requiredSubgroupSize);
-    }
-
-    template <>
-    void CacheKeySerializer<VkSpecializationMapEntry>::Serialize(
-        CacheKey* key,
-        const VkSpecializationMapEntry& t) {
-        key->Record(t.constantID, t.offset, t.size);
-    }
-
-    template <>
-    void CacheKeySerializer<VkSpecializationInfo>::Serialize(CacheKey* key,
-                                                             const VkSpecializationInfo& t) {
-        key->RecordIterable(t.pMapEntries, t.mapEntryCount)
-            .RecordIterable(static_cast<const uint8_t*>(t.pData), t.dataSize);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineShaderStageCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineShaderStageCreateInfo& t) {
-        // The shader module is not serialized here because it is a pointer to a backend object.
-        key->Record(t.flags, t.stage)
-            .RecordIterable(t.pName, strlen(t.pName))
-            .Record(t.pSpecializationInfo);
-        vulkan::SerializePnext<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkComputePipelineCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkComputePipelineCreateInfo& t) {
-        // The pipeline layout is not serialized here because it is a pointer to a backend object.
-        // It needs to be cross-referenced with the frontend objects and serialized from there. The
-        // base pipeline information is also currently not recorded since we do not use them in our
-        // backend implementation. If we decide to use them later on, they also need to be
-        // cross-referenced from the frontend.
-        key->Record(t.flags, t.stage);
-    }
-
-    template <>
-    void CacheKeySerializer<VkVertexInputBindingDescription>::Serialize(
-        CacheKey* key,
-        const VkVertexInputBindingDescription& t) {
-        key->Record(t.binding, t.stride, t.inputRate);
-    }
-
-    template <>
-    void CacheKeySerializer<VkVertexInputAttributeDescription>::Serialize(
-        CacheKey* key,
-        const VkVertexInputAttributeDescription& t) {
-        key->Record(t.location, t.binding, t.format, t.offset);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineVertexInputStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineVertexInputStateCreateInfo& t) {
-        key->Record(t.flags)
-            .RecordIterable(t.pVertexBindingDescriptions, t.vertexBindingDescriptionCount)
-            .RecordIterable(t.pVertexAttributeDescriptions, t.vertexAttributeDescriptionCount);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineInputAssemblyStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineInputAssemblyStateCreateInfo& t) {
-        key->Record(t.flags, t.topology, t.primitiveRestartEnable);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineTessellationStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineTessellationStateCreateInfo& t) {
-        key->Record(t.flags, t.patchControlPoints);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkViewport>::Serialize(CacheKey* key, const VkViewport& t) {
-        key->Record(t.x, t.y, t.width, t.height, t.minDepth, t.maxDepth);
-    }
-
-    template <>
-    void CacheKeySerializer<VkOffset2D>::Serialize(CacheKey* key, const VkOffset2D& t) {
-        key->Record(t.x, t.y);
-    }
-
-    template <>
-    void CacheKeySerializer<VkExtent2D>::Serialize(CacheKey* key, const VkExtent2D& t) {
-        key->Record(t.width, t.height);
-    }
-
-    template <>
-    void CacheKeySerializer<VkRect2D>::Serialize(CacheKey* key, const VkRect2D& t) {
-        key->Record(t.offset, t.extent);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineViewportStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineViewportStateCreateInfo& t) {
-        key->Record(t.flags)
-            .RecordIterable(t.pViewports, t.viewportCount)
-            .RecordIterable(t.pScissors, t.scissorCount);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineRasterizationStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineRasterizationStateCreateInfo& t) {
-        key->Record(t.flags, t.depthClampEnable, t.rasterizerDiscardEnable, t.polygonMode,
-                    t.cullMode, t.frontFace, t.depthBiasEnable, t.depthBiasConstantFactor,
-                    t.depthBiasClamp, t.depthBiasSlopeFactor, t.lineWidth);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineMultisampleStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineMultisampleStateCreateInfo& t) {
-        key->Record(t.flags, t.rasterizationSamples, t.sampleShadingEnable, t.minSampleShading,
-                    t.pSampleMask, t.alphaToCoverageEnable, t.alphaToOneEnable);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkStencilOpState>::Serialize(CacheKey* key, const VkStencilOpState& t) {
-        key->Record(t.failOp, t.passOp, t.depthFailOp, t.compareOp, t.compareMask, t.writeMask,
-                    t.reference);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineDepthStencilStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineDepthStencilStateCreateInfo& t) {
-        key->Record(t.flags, t.depthTestEnable, t.depthWriteEnable, t.depthCompareOp,
-                    t.depthBoundsTestEnable, t.stencilTestEnable, t.front, t.back, t.minDepthBounds,
-                    t.maxDepthBounds);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineColorBlendAttachmentState>::Serialize(
-        CacheKey* key,
-        const VkPipelineColorBlendAttachmentState& t) {
-        key->Record(t.blendEnable, t.srcColorBlendFactor, t.dstColorBlendFactor, t.colorBlendOp,
-                    t.srcAlphaBlendFactor, t.dstAlphaBlendFactor, t.alphaBlendOp, t.colorWriteMask);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineColorBlendStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineColorBlendStateCreateInfo& t) {
-        key->Record(t.flags, t.logicOpEnable, t.logicOp)
-            .RecordIterable(t.pAttachments, t.attachmentCount)
-            .Record(t.blendConstants);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<VkPipelineDynamicStateCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkPipelineDynamicStateCreateInfo& t) {
-        key->Record(t.flags).RecordIterable(t.pDynamicStates, t.dynamicStateCount);
-        vulkan::SerializePnext<>(key, &t);
-    }
-
-    template <>
-    void CacheKeySerializer<vulkan::RenderPassCacheQuery>::Serialize(
-        CacheKey* key,
-        const vulkan::RenderPassCacheQuery& t) {
-        key->Record(t.colorMask.to_ulong(), t.resolveTargetMask.to_ulong(), t.sampleCount);
-
-        // Manually iterate the color attachment indices and their corresponding format/load/store
-        // ops because the data is sparse and may be uninitialized. Since we record the colorMask
-        // member above, recording sparse data should be fine here.
-        for (ColorAttachmentIndex i : IterateBitSet(t.colorMask)) {
-            key->Record(t.colorFormats[i], t.colorLoadOp[i], t.colorStoreOp[i]);
-        }
-
-        // Serialize the depth-stencil toggle bit, and the parameters if applicable.
-        key->Record(t.hasDepthStencil);
-        if (t.hasDepthStencil) {
-            key->Record(t.depthStencilFormat, t.depthLoadOp, t.depthStoreOp, t.stencilLoadOp,
-                        t.stencilStoreOp, t.readOnlyDepthStencil);
-        }
-    }
-
-    template <>
-    void CacheKeySerializer<VkGraphicsPipelineCreateInfo>::Serialize(
-        CacheKey* key,
-        const VkGraphicsPipelineCreateInfo& t) {
-        // The pipeline layout and render pass are not serialized here because they are pointers to
-        // backend objects. They need to be cross-referenced with the frontend objects and
-        // serialized from there. The base pipeline information is also currently not recorded since
-        // we do not use them in our backend implementation. If we decide to use them later on, they
-        // also need to be cross-referenced from the frontend.
-        key->Record(t.flags)
-            .RecordIterable(t.pStages, t.stageCount)
-            .Record(t.pVertexInputState, t.pInputAssemblyState, t.pTessellationState,
-                    t.pViewportState, t.pRasterizationState, t.pMultisampleState,
-                    t.pDepthStencilState, t.pColorBlendState, t.pDynamicState, t.subpass);
-        vulkan::SerializePnext<>(key, &t);
-    }
+template <>
+void CacheKeySerializer<VkGraphicsPipelineCreateInfo>::Serialize(
+    CacheKey* key,
+    const VkGraphicsPipelineCreateInfo& t) {
+    // The pipeline layout and render pass are not serialized here because they are pointers to
+    // backend objects. They need to be cross-referenced with the frontend objects and
+    // serialized from there. The base pipeline information is also currently not recorded since
+    // we do not use them in our backend implementation. If we decide to use them later on, they
+    // also need to be cross-referenced from the frontend.
+    key->Record(t.flags)
+        .RecordIterable(t.pStages, t.stageCount)
+        .Record(t.pVertexInputState, t.pInputAssemblyState, t.pTessellationState, t.pViewportState,
+                t.pRasterizationState, t.pMultisampleState, t.pDepthStencilState,
+                t.pColorBlendState, t.pDynamicState, t.subpass);
+    vulkan::SerializePnext<>(key, &t);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/vulkan/CacheKeyVk.h b/src/dawn/native/vulkan/CacheKeyVk.h
index 504c22f..80b04db 100644
--- a/src/dawn/native/vulkan/CacheKeyVk.h
+++ b/src/dawn/native/vulkan/CacheKeyVk.h
@@ -25,79 +25,75 @@
 
 namespace dawn::native::vulkan {
 
-    namespace detail {
+namespace detail {
 
-        template <typename... VK_STRUCT_TYPES>
-        void ValidatePnextImpl(const VkBaseOutStructure* root) {
-            const VkBaseOutStructure* next =
-                reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
-            while (next != nullptr) {
-                // Assert that the type of each pNext struct is exactly one of the specified
-                // templates.
-                ASSERT(((LvlTypeMap<VK_STRUCT_TYPES>::kSType == next->sType ? 1 : 0) + ... + 0) ==
-                       1);
-                next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
-            }
-        }
-
-        template <typename VK_STRUCT_TYPE>
-        void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
-            const VkBaseOutStructure* next =
-                reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
-            const VK_STRUCT_TYPE* found = nullptr;
-            while (next != nullptr) {
-                if (LvlTypeMap<VK_STRUCT_TYPE>::kSType == next->sType) {
-                    if (found == nullptr) {
-                        found = reinterpret_cast<const VK_STRUCT_TYPE*>(next);
-                    } else {
-                        // Fail an assert here since that means that the chain had more than one of
-                        // the same typed chained object.
-                        ASSERT(false);
-                    }
-                }
-                next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
-            }
-            if (found != nullptr) {
-                key->Record(found);
-            }
-        }
-
-        template <typename VK_STRUCT_TYPE,
-                  typename... VK_STRUCT_TYPES,
-                  typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
-        void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
-            SerializePnextImpl<VK_STRUCT_TYPE>(key, root);
-            SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
-        }
-
-        template <typename VK_STRUCT_TYPE>
-        const VkBaseOutStructure* ToVkBaseOutStructure(const VK_STRUCT_TYPE* t) {
-            // Checks to ensure proper type safety.
-            static_assert(
-                offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
-                    offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
-                "Argument type is not a proper Vulkan structure type");
-            return reinterpret_cast<const VkBaseOutStructure*>(t);
-        }
-
-    }  // namespace detail
-
-    template <typename... VK_STRUCT_TYPES,
-              typename VK_STRUCT_TYPE,
-              typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
-    void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
-        const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
-        detail::ValidatePnextImpl<VK_STRUCT_TYPES...>(root);
-        detail::SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
+template <typename... VK_STRUCT_TYPES>
+void ValidatePnextImpl(const VkBaseOutStructure* root) {
+    const VkBaseOutStructure* next = reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
+    while (next != nullptr) {
+        // Assert that the type of each pNext struct is exactly one of the specified
+        // templates.
+        ASSERT(((LvlTypeMap<VK_STRUCT_TYPES>::kSType == next->sType ? 1 : 0) + ... + 0) == 1);
+        next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
     }
+}
 
-    // Empty template specialization so that we can put this in to ensure failures occur if new
-    // extensions are added without updating serialization.
-    template <typename VK_STRUCT_TYPE>
-    void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
-        const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
-        detail::ValidatePnextImpl<>(root);
+template <typename VK_STRUCT_TYPE>
+void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
+    const VkBaseOutStructure* next = reinterpret_cast<const VkBaseOutStructure*>(root->pNext);
+    const VK_STRUCT_TYPE* found = nullptr;
+    while (next != nullptr) {
+        if (LvlTypeMap<VK_STRUCT_TYPE>::kSType == next->sType) {
+            if (found == nullptr) {
+                found = reinterpret_cast<const VK_STRUCT_TYPE*>(next);
+            } else {
+                // Fail an assert here since that means that the chain had more than one of
+                // the same typed chained object.
+                ASSERT(false);
+            }
+        }
+        next = reinterpret_cast<const VkBaseOutStructure*>(next->pNext);
     }
+    if (found != nullptr) {
+        key->Record(found);
+    }
+}
+
+template <typename VK_STRUCT_TYPE,
+          typename... VK_STRUCT_TYPES,
+          typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
+void SerializePnextImpl(CacheKey* key, const VkBaseOutStructure* root) {
+    SerializePnextImpl<VK_STRUCT_TYPE>(key, root);
+    SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
+}
+
+template <typename VK_STRUCT_TYPE>
+const VkBaseOutStructure* ToVkBaseOutStructure(const VK_STRUCT_TYPE* t) {
+    // Checks to ensure proper type safety.
+    static_assert(offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
+                      offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
+                  "Argument type is not a proper Vulkan structure type");
+    return reinterpret_cast<const VkBaseOutStructure*>(t);
+}
+
+}  // namespace detail
+
+template <typename... VK_STRUCT_TYPES,
+          typename VK_STRUCT_TYPE,
+          typename = std::enable_if_t<(sizeof...(VK_STRUCT_TYPES) > 0)>>
+void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
+    const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
+    detail::ValidatePnextImpl<VK_STRUCT_TYPES...>(root);
+    detail::SerializePnextImpl<VK_STRUCT_TYPES...>(key, root);
+}
+
+// Empty template specialization so that we can put this in to ensure failures occur if new
+// extensions are added without updating serialization.
+template <typename VK_STRUCT_TYPE>
+void SerializePnext(CacheKey* key, const VK_STRUCT_TYPE* t) {
+    const VkBaseOutStructure* root = detail::ToVkBaseOutStructure(t);
+    detail::ValidatePnextImpl<>(root);
+}
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/CommandBufferVk.cpp b/src/dawn/native/vulkan/CommandBufferVk.cpp
index 6408f29..fca86c2 100644
--- a/src/dawn/native/vulkan/CommandBufferVk.cpp
+++ b/src/dawn/native/vulkan/CommandBufferVk.cpp
@@ -41,1291 +41,1268 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
+namespace {
 
-        VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
-            switch (format) {
-                case wgpu::IndexFormat::Uint16:
-                    return VK_INDEX_TYPE_UINT16;
-                case wgpu::IndexFormat::Uint32:
-                    return VK_INDEX_TYPE_UINT32;
-                case wgpu::IndexFormat::Undefined:
-                    break;
-            }
-            UNREACHABLE();
+VkIndexType VulkanIndexType(wgpu::IndexFormat format) {
+    switch (format) {
+        case wgpu::IndexFormat::Uint16:
+            return VK_INDEX_TYPE_UINT16;
+        case wgpu::IndexFormat::Uint32:
+            return VK_INDEX_TYPE_UINT32;
+        case wgpu::IndexFormat::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
+                              const TextureCopy& dstCopy,
+                              const Extent3D& copySize) {
+    Extent3D imageExtentSrc = ComputeTextureCopyExtent(srcCopy, copySize);
+    Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
+    return imageExtentSrc.width == imageExtentDst.width &&
+           imageExtentSrc.height == imageExtentDst.height &&
+           imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
+}
+
+VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
+                                   const TextureCopy& dstCopy,
+                                   const Extent3D& copySize,
+                                   Aspect aspect) {
+    const Texture* srcTexture = ToBackend(srcCopy.texture.Get());
+    const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
+
+    VkImageCopy region;
+    region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
+    region.srcSubresource.mipLevel = srcCopy.mipLevel;
+    region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
+    region.dstSubresource.mipLevel = dstCopy.mipLevel;
+
+    bool has3DTextureInCopy = false;
+
+    region.srcOffset.x = srcCopy.origin.x;
+    region.srcOffset.y = srcCopy.origin.y;
+    switch (srcTexture->GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            region.srcSubresource.baseArrayLayer = 0;
+            region.srcSubresource.layerCount = 1;
+            region.srcOffset.z = 0;
+            break;
+        case wgpu::TextureDimension::e2D:
+            region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
+            region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
+            region.srcOffset.z = 0;
+            break;
+        case wgpu::TextureDimension::e3D:
+            has3DTextureInCopy = true;
+            region.srcSubresource.baseArrayLayer = 0;
+            region.srcSubresource.layerCount = 1;
+            region.srcOffset.z = srcCopy.origin.z;
+            break;
+    }
+
+    region.dstOffset.x = dstCopy.origin.x;
+    region.dstOffset.y = dstCopy.origin.y;
+    switch (dstTexture->GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            region.dstSubresource.baseArrayLayer = 0;
+            region.dstSubresource.layerCount = 1;
+            region.dstOffset.z = 0;
+            break;
+        case wgpu::TextureDimension::e2D:
+            region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
+            region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
+            region.dstOffset.z = 0;
+            break;
+        case wgpu::TextureDimension::e3D:
+            has3DTextureInCopy = true;
+            region.dstSubresource.baseArrayLayer = 0;
+            region.dstSubresource.layerCount = 1;
+            region.dstOffset.z = dstCopy.origin.z;
+            break;
+    }
+
+    ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
+    Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
+    region.extent.width = imageExtent.width;
+    region.extent.height = imageExtent.height;
+    region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
+
+    return region;
+}
+
+class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
+  public:
+    DescriptorSetTracker() = default;
+
+    void Apply(Device* device,
+               CommandRecordingContext* recordingContext,
+               VkPipelineBindPoint bindPoint) {
+        BeforeApply();
+        for (BindGroupIndex dirtyIndex : IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
+            VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
+            const uint32_t* dynamicOffset =
+                mDynamicOffsetCounts[dirtyIndex] > 0 ? mDynamicOffsets[dirtyIndex].data() : nullptr;
+            device->fn.CmdBindDescriptorSets(recordingContext->commandBuffer, bindPoint,
+                                             ToBackend(mPipelineLayout)->GetHandle(),
+                                             static_cast<uint32_t>(dirtyIndex), 1, &*set,
+                                             mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
         }
+        AfterApply();
+    }
+};
 
-        bool HasSameTextureCopyExtent(const TextureCopy& srcCopy,
-                                      const TextureCopy& dstCopy,
-                                      const Extent3D& copySize) {
-            Extent3D imageExtentSrc = ComputeTextureCopyExtent(srcCopy, copySize);
-            Extent3D imageExtentDst = ComputeTextureCopyExtent(dstCopy, copySize);
-            return imageExtentSrc.width == imageExtentDst.width &&
-                   imageExtentSrc.height == imageExtentDst.height &&
-                   imageExtentSrc.depthOrArrayLayers == imageExtentDst.depthOrArrayLayers;
+// Records the necessary barriers for a synchronization scope using the resource usage
+// data pre-computed in the frontend. Also performs lazy initialization if required.
+void TransitionAndClearForSyncScope(Device* device,
+                                    CommandRecordingContext* recordingContext,
+                                    const SyncScopeResourceUsage& scope) {
+    std::vector<VkBufferMemoryBarrier> bufferBarriers;
+    std::vector<VkImageMemoryBarrier> imageBarriers;
+    VkPipelineStageFlags srcStages = 0;
+    VkPipelineStageFlags dstStages = 0;
+
+    for (size_t i = 0; i < scope.buffers.size(); ++i) {
+        Buffer* buffer = ToBackend(scope.buffers[i]);
+        buffer->EnsureDataInitialized(recordingContext);
+
+        VkBufferMemoryBarrier bufferBarrier;
+        if (buffer->TransitionUsageAndGetResourceBarrier(scope.bufferUsages[i], &bufferBarrier,
+                                                         &srcStages, &dstStages)) {
+            bufferBarriers.push_back(bufferBarrier);
         }
+    }
 
-        VkImageCopy ComputeImageCopyRegion(const TextureCopy& srcCopy,
-                                           const TextureCopy& dstCopy,
-                                           const Extent3D& copySize,
-                                           Aspect aspect) {
-            const Texture* srcTexture = ToBackend(srcCopy.texture.Get());
-            const Texture* dstTexture = ToBackend(dstCopy.texture.Get());
+    for (size_t i = 0; i < scope.textures.size(); ++i) {
+        Texture* texture = ToBackend(scope.textures[i]);
 
-            VkImageCopy region;
-            region.srcSubresource.aspectMask = VulkanAspectMask(aspect);
-            region.srcSubresource.mipLevel = srcCopy.mipLevel;
-            region.dstSubresource.aspectMask = VulkanAspectMask(aspect);
-            region.dstSubresource.mipLevel = dstCopy.mipLevel;
-
-            bool has3DTextureInCopy = false;
-
-            region.srcOffset.x = srcCopy.origin.x;
-            region.srcOffset.y = srcCopy.origin.y;
-            switch (srcTexture->GetDimension()) {
-                case wgpu::TextureDimension::e1D:
-                    region.srcSubresource.baseArrayLayer = 0;
-                    region.srcSubresource.layerCount = 1;
-                    region.srcOffset.z = 0;
-                    break;
-                case wgpu::TextureDimension::e2D:
-                    region.srcSubresource.baseArrayLayer = srcCopy.origin.z;
-                    region.srcSubresource.layerCount = copySize.depthOrArrayLayers;
-                    region.srcOffset.z = 0;
-                    break;
-                case wgpu::TextureDimension::e3D:
-                    has3DTextureInCopy = true;
-                    region.srcSubresource.baseArrayLayer = 0;
-                    region.srcSubresource.layerCount = 1;
-                    region.srcOffset.z = srcCopy.origin.z;
-                    break;
-            }
-
-            region.dstOffset.x = dstCopy.origin.x;
-            region.dstOffset.y = dstCopy.origin.y;
-            switch (dstTexture->GetDimension()) {
-                case wgpu::TextureDimension::e1D:
-                    region.dstSubresource.baseArrayLayer = 0;
-                    region.dstSubresource.layerCount = 1;
-                    region.dstOffset.z = 0;
-                    break;
-                case wgpu::TextureDimension::e2D:
-                    region.dstSubresource.baseArrayLayer = dstCopy.origin.z;
-                    region.dstSubresource.layerCount = copySize.depthOrArrayLayers;
-                    region.dstOffset.z = 0;
-                    break;
-                case wgpu::TextureDimension::e3D:
-                    has3DTextureInCopy = true;
-                    region.dstSubresource.baseArrayLayer = 0;
-                    region.dstSubresource.layerCount = 1;
-                    region.dstOffset.z = dstCopy.origin.z;
-                    break;
-            }
-
-            ASSERT(HasSameTextureCopyExtent(srcCopy, dstCopy, copySize));
-            Extent3D imageExtent = ComputeTextureCopyExtent(dstCopy, copySize);
-            region.extent.width = imageExtent.width;
-            region.extent.height = imageExtent.height;
-            region.extent.depth = has3DTextureInCopy ? copySize.depthOrArrayLayers : 1;
-
-            return region;
-        }
-
-        class DescriptorSetTracker : public BindGroupTrackerBase<true, uint32_t> {
-          public:
-            DescriptorSetTracker() = default;
-
-            void Apply(Device* device,
-                       CommandRecordingContext* recordingContext,
-                       VkPipelineBindPoint bindPoint) {
-                BeforeApply();
-                for (BindGroupIndex dirtyIndex :
-                     IterateBitSet(mDirtyBindGroupsObjectChangedOrIsDynamic)) {
-                    VkDescriptorSet set = ToBackend(mBindGroups[dirtyIndex])->GetHandle();
-                    const uint32_t* dynamicOffset = mDynamicOffsetCounts[dirtyIndex] > 0
-                                                        ? mDynamicOffsets[dirtyIndex].data()
-                                                        : nullptr;
-                    device->fn.CmdBindDescriptorSets(
-                        recordingContext->commandBuffer, bindPoint,
-                        ToBackend(mPipelineLayout)->GetHandle(), static_cast<uint32_t>(dirtyIndex),
-                        1, &*set, mDynamicOffsetCounts[dirtyIndex], dynamicOffset);
+        // Clear subresources that are not render attachments. Render attachments will be
+        // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
+        // subresource has not been initialized before the render pass.
+        scope.textureUsages[i].Iterate(
+            [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+                if (usage & ~wgpu::TextureUsage::RenderAttachment) {
+                    texture->EnsureSubresourceContentInitialized(recordingContext, range);
                 }
-                AfterApply();
-            }
-        };
+            });
+        texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i], &imageBarriers,
+                                        &srcStages, &dstStages);
+    }
 
-        // Records the necessary barriers for a synchronization scope using the resource usage
-        // data pre-computed in the frontend. Also performs lazy initialization if required.
-        void TransitionAndClearForSyncScope(Device* device,
+    if (bufferBarriers.size() || imageBarriers.size()) {
+        device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                      nullptr, bufferBarriers.size(), bufferBarriers.data(),
+                                      imageBarriers.size(), imageBarriers.data());
+    }
+}
+
+MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
+                                 Device* device,
+                                 BeginRenderPassCmd* renderPass) {
+    VkCommandBuffer commands = recordingContext->commandBuffer;
+
+    // Query a VkRenderPass from the cache
+    VkRenderPass renderPassVK = VK_NULL_HANDLE;
+    {
+        RenderPassCacheQuery query;
+
+        for (ColorAttachmentIndex i :
+             IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+            const auto& attachmentInfo = renderPass->colorAttachments[i];
+
+            bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
+
+            query.SetColor(i, attachmentInfo.view->GetFormat().format, attachmentInfo.loadOp,
+                           attachmentInfo.storeOp, hasResolveTarget);
+        }
+
+        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+            const auto& attachmentInfo = renderPass->depthStencilAttachment;
+
+            query.SetDepthStencil(attachmentInfo.view->GetTexture()->GetFormat().format,
+                                  attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
+                                  attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
+                                  attachmentInfo.depthReadOnly || attachmentInfo.stencilReadOnly);
+        }
+
+        query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
+
+        DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
+    }
+
+    // Create a framebuffer that will be used once for the render pass and gather the clear
+    // values for the attachments at the same time.
+    std::array<VkClearValue, kMaxColorAttachments + 1> clearValues;
+    VkFramebuffer framebuffer = VK_NULL_HANDLE;
+    uint32_t attachmentCount = 0;
+    {
+        // Fill in the attachment info that will be chained in the framebuffer create info.
+        std::array<VkImageView, kMaxColorAttachments * 2 + 1> attachments;
+
+        for (ColorAttachmentIndex i :
+             IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+            auto& attachmentInfo = renderPass->colorAttachments[i];
+            TextureView* view = ToBackend(attachmentInfo.view.Get());
+            if (view == nullptr) {
+                continue;
+            }
+
+            attachments[attachmentCount] = view->GetHandle();
+
+            switch (view->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+                case wgpu::TextureComponentType::Float: {
+                    const std::array<float, 4> appliedClearColor =
+                        ConvertToFloatColor(attachmentInfo.clearColor);
+                    for (uint32_t i = 0; i < 4; ++i) {
+                        clearValues[attachmentCount].color.float32[i] = appliedClearColor[i];
+                    }
+                    break;
+                }
+                case wgpu::TextureComponentType::Uint: {
+                    const std::array<uint32_t, 4> appliedClearColor =
+                        ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
+                    for (uint32_t i = 0; i < 4; ++i) {
+                        clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
+                    }
+                    break;
+                }
+                case wgpu::TextureComponentType::Sint: {
+                    const std::array<int32_t, 4> appliedClearColor =
+                        ConvertToSignedIntegerColor(attachmentInfo.clearColor);
+                    for (uint32_t i = 0; i < 4; ++i) {
+                        clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
+                    }
+                    break;
+                }
+
+                case wgpu::TextureComponentType::DepthComparison:
+                    UNREACHABLE();
+            }
+            attachmentCount++;
+        }
+
+        if (renderPass->attachmentState->HasDepthStencilAttachment()) {
+            auto& attachmentInfo = renderPass->depthStencilAttachment;
+            TextureView* view = ToBackend(attachmentInfo.view.Get());
+
+            attachments[attachmentCount] = view->GetHandle();
+
+            clearValues[attachmentCount].depthStencil.depth = attachmentInfo.clearDepth;
+            clearValues[attachmentCount].depthStencil.stencil = attachmentInfo.clearStencil;
+
+            attachmentCount++;
+        }
+
+        for (ColorAttachmentIndex i :
+             IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
+            if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
+                TextureView* view = ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
+
+                attachments[attachmentCount] = view->GetHandle();
+
+                attachmentCount++;
+            }
+        }
+
+        // Chain attachments and create the framebuffer
+        VkFramebufferCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = 0;
+        createInfo.renderPass = renderPassVK;
+        createInfo.attachmentCount = attachmentCount;
+        createInfo.pAttachments = AsVkArray(attachments.data());
+        createInfo.width = renderPass->width;
+        createInfo.height = renderPass->height;
+        createInfo.layers = 1;
+
+        DAWN_TRY(CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
+                                                             nullptr, &*framebuffer),
+                                "CreateFramebuffer"));
+
+        // We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
+        // commands currently being recorded are finished.
+        device->GetFencedDeleter()->DeleteWhenUnused(framebuffer);
+    }
+
+    VkRenderPassBeginInfo beginInfo;
+    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+    beginInfo.pNext = nullptr;
+    beginInfo.renderPass = renderPassVK;
+    beginInfo.framebuffer = framebuffer;
+    beginInfo.renderArea.offset.x = 0;
+    beginInfo.renderArea.offset.y = 0;
+    beginInfo.renderArea.extent.width = renderPass->width;
+    beginInfo.renderArea.extent.height = renderPass->height;
+    beginInfo.clearValueCount = attachmentCount;
+    beginInfo.pClearValues = clearValues.data();
+
+    device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
+
+    return {};
+}
+
+// Reset the query sets used on render pass because the reset command must be called outside
+// render pass.
+void ResetUsedQuerySetsOnRenderPass(Device* device,
+                                    VkCommandBuffer commands,
+                                    QuerySetBase* querySet,
+                                    const std::vector<bool>& availability) {
+    ASSERT(availability.size() == querySet->GetQueryAvailability().size());
+
+    auto currentIt = availability.begin();
+    auto lastIt = availability.end();
+    // Traverse the used queries which availability are true.
+    while (currentIt != lastIt) {
+        auto firstTrueIt = std::find(currentIt, lastIt, true);
+        // No used queries need to be reset
+        if (firstTrueIt == lastIt) {
+            break;
+        }
+
+        auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+        uint32_t queryIndex = std::distance(availability.begin(), firstTrueIt);
+        uint32_t queryCount = std::distance(firstTrueIt, nextFalseIt);
+
+        // Reset the queries between firstTrueIt and nextFalseIt (which is at most
+        // lastIt)
+        device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), queryIndex,
+                                     queryCount);
+
+        // Set current iterator to next false
+        currentIt = nextFalseIt;
+    }
+}
+
+void RecordWriteTimestampCmd(CommandRecordingContext* recordingContext,
+                             Device* device,
+                             WriteTimestampCmd* cmd) {
+    VkCommandBuffer commands = recordingContext->commandBuffer;
+    QuerySet* querySet = ToBackend(cmd->querySet.Get());
+
+    device->fn.CmdWriteTimestamp(commands, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                                 querySet->GetHandle(), cmd->queryIndex);
+}
+
+void RecordResolveQuerySetCmd(VkCommandBuffer commands,
+                              Device* device,
+                              QuerySet* querySet,
+                              uint32_t firstQuery,
+                              uint32_t queryCount,
+                              Buffer* destination,
+                              uint64_t destinationOffset) {
+    const std::vector<bool>& availability = querySet->GetQueryAvailability();
+
+    auto currentIt = availability.begin() + firstQuery;
+    auto lastIt = availability.begin() + firstQuery + queryCount;
+
+    // Traverse available queries in the range of [firstQuery, firstQuery +  queryCount - 1]
+    while (currentIt != lastIt) {
+        auto firstTrueIt = std::find(currentIt, lastIt, true);
+        // No available query found for resolving
+        if (firstTrueIt == lastIt) {
+            break;
+        }
+        auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+
+        // The query index of firstTrueIt where the resolving starts
+        uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
+        // The queries count between firstTrueIt and nextFalseIt need to be resolved
+        uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+
+        // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
+        uint32_t resolveDestinationOffset =
+            destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+
+        // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
+        device->fn.CmdCopyQueryPoolResults(commands, querySet->GetHandle(), resolveQueryIndex,
+                                           resolveQueryCount, destination->GetHandle(),
+                                           resolveDestinationOffset, sizeof(uint64_t),
+                                           VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+
+        // Set current iterator to next false
+        currentIt = nextFalseIt;
+    }
+}
+
+}  // anonymous namespace
+
+// static
+Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
+                                         const CommandBufferDescriptor* descriptor) {
+    return AcquireRef(new CommandBuffer(encoder, descriptor));
+}
+
+CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
+    : CommandBufferBase(encoder, descriptor) {}
+
+MaybeError CommandBuffer::RecordCopyImageWithTemporaryBuffer(
+    CommandRecordingContext* recordingContext,
+    const TextureCopy& srcCopy,
+    const TextureCopy& dstCopy,
+    const Extent3D& copySize) {
+    ASSERT(srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
+    ASSERT(srcCopy.aspect == dstCopy.aspect);
+    dawn::native::Format format = srcCopy.texture->GetFormat();
+    const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
+    ASSERT(copySize.width % blockInfo.width == 0);
+    uint32_t widthInBlocks = copySize.width / blockInfo.width;
+    ASSERT(copySize.height % blockInfo.height == 0);
+    uint32_t heightInBlocks = copySize.height / blockInfo.height;
+
+    // Create the temporary buffer. Note that We don't need to respect WebGPU's 256 alignment
+    // because it isn't a hard constraint in Vulkan.
+    uint64_t tempBufferSize =
+        widthInBlocks * heightInBlocks * copySize.depthOrArrayLayers * blockInfo.byteSize;
+    BufferDescriptor tempBufferDescriptor;
+    tempBufferDescriptor.size = tempBufferSize;
+    tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+
+    Device* device = ToBackend(GetDevice());
+    Ref<BufferBase> tempBufferBase;
+    DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
+    Buffer* tempBuffer = ToBackend(tempBufferBase.Get());
+
+    BufferCopy tempBufferCopy;
+    tempBufferCopy.buffer = tempBuffer;
+    tempBufferCopy.rowsPerImage = heightInBlocks;
+    tempBufferCopy.offset = 0;
+    tempBufferCopy.bytesPerRow = copySize.width / blockInfo.width * blockInfo.byteSize;
+
+    VkCommandBuffer commands = recordingContext->commandBuffer;
+    VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
+    VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
+
+    tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+    VkBufferImageCopy srcToTempBufferRegion =
+        ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
+
+    // The Dawn CopySrc usage is always mapped to GENERAL
+    device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                    tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
+
+    tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+    VkBufferImageCopy tempBufferToDstRegion =
+        ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
+
+    // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+    // copy command.
+    device->fn.CmdCopyBufferToImage(commands, tempBuffer->GetHandle(), dstImage,
+                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+                                    &tempBufferToDstRegion);
+
+    recordingContext->tempBuffers.emplace_back(tempBuffer);
+
+    return {};
+}
+
+MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
+    Device* device = ToBackend(GetDevice());
+    VkCommandBuffer commands = recordingContext->commandBuffer;
+
+    // Records the necessary barriers for the resource usage pre-computed by the frontend.
+    // And resets the used query sets which are rewritten on the render pass.
+    auto PrepareResourcesForRenderPass = [](Device* device,
                                             CommandRecordingContext* recordingContext,
-                                            const SyncScopeResourceUsage& scope) {
-            std::vector<VkBufferMemoryBarrier> bufferBarriers;
-            std::vector<VkImageMemoryBarrier> imageBarriers;
-            VkPipelineStageFlags srcStages = 0;
-            VkPipelineStageFlags dstStages = 0;
+                                            const RenderPassResourceUsage& usages) {
+        TransitionAndClearForSyncScope(device, recordingContext, usages);
 
-            for (size_t i = 0; i < scope.buffers.size(); ++i) {
-                Buffer* buffer = ToBackend(scope.buffers[i]);
-                buffer->EnsureDataInitialized(recordingContext);
-
-                VkBufferMemoryBarrier bufferBarrier;
-                if (buffer->TransitionUsageAndGetResourceBarrier(
-                        scope.bufferUsages[i], &bufferBarrier, &srcStages, &dstStages)) {
-                    bufferBarriers.push_back(bufferBarrier);
-                }
-            }
-
-            for (size_t i = 0; i < scope.textures.size(); ++i) {
-                Texture* texture = ToBackend(scope.textures[i]);
-
-                // Clear subresources that are not render attachments. Render attachments will be
-                // cleared in RecordBeginRenderPass by setting the loadop to clear when the texture
-                // subresource has not been initialized before the render pass.
-                scope.textureUsages[i].Iterate(
-                    [&](const SubresourceRange& range, wgpu::TextureUsage usage) {
-                        if (usage & ~wgpu::TextureUsage::RenderAttachment) {
-                            texture->EnsureSubresourceContentInitialized(recordingContext, range);
-                        }
-                    });
-                texture->TransitionUsageForPass(recordingContext, scope.textureUsages[i],
-                                                &imageBarriers, &srcStages, &dstStages);
-            }
-
-            if (bufferBarriers.size() || imageBarriers.size()) {
-                device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages,
-                                              0, 0, nullptr, bufferBarriers.size(),
-                                              bufferBarriers.data(), imageBarriers.size(),
-                                              imageBarriers.data());
-            }
+        // Reset all query set used on current render pass together before beginning render pass
+        // because the reset command must be called outside render pass
+        for (size_t i = 0; i < usages.querySets.size(); ++i) {
+            ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
+                                           usages.querySets[i], usages.queryAvailabilities[i]);
         }
+    };
 
-        MaybeError RecordBeginRenderPass(CommandRecordingContext* recordingContext,
-                                         Device* device,
-                                         BeginRenderPassCmd* renderPass) {
-            VkCommandBuffer commands = recordingContext->commandBuffer;
+    size_t nextComputePassNumber = 0;
+    size_t nextRenderPassNumber = 0;
 
-            // Query a VkRenderPass from the cache
-            VkRenderPass renderPassVK = VK_NULL_HANDLE;
-            {
-                RenderPassCacheQuery query;
-
-                for (ColorAttachmentIndex i :
-                     IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                    const auto& attachmentInfo = renderPass->colorAttachments[i];
-
-                    bool hasResolveTarget = attachmentInfo.resolveTarget != nullptr;
-
-                    query.SetColor(i, attachmentInfo.view->GetFormat().format,
-                                   attachmentInfo.loadOp, attachmentInfo.storeOp, hasResolveTarget);
-                }
-
-                if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-                    const auto& attachmentInfo = renderPass->depthStencilAttachment;
-
-                    query.SetDepthStencil(
-                        attachmentInfo.view->GetTexture()->GetFormat().format,
-                        attachmentInfo.depthLoadOp, attachmentInfo.depthStoreOp,
-                        attachmentInfo.stencilLoadOp, attachmentInfo.stencilStoreOp,
-                        attachmentInfo.depthReadOnly || attachmentInfo.stencilReadOnly);
-                }
-
-                query.SetSampleCount(renderPass->attachmentState->GetSampleCount());
-
-                DAWN_TRY_ASSIGN(renderPassVK, device->GetRenderPassCache()->GetRenderPass(query));
-            }
-
-            // Create a framebuffer that will be used once for the render pass and gather the clear
-            // values for the attachments at the same time.
-            std::array<VkClearValue, kMaxColorAttachments + 1> clearValues;
-            VkFramebuffer framebuffer = VK_NULL_HANDLE;
-            uint32_t attachmentCount = 0;
-            {
-                // Fill in the attachment info that will be chained in the framebuffer create info.
-                std::array<VkImageView, kMaxColorAttachments * 2 + 1> attachments;
-
-                for (ColorAttachmentIndex i :
-                     IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                    auto& attachmentInfo = renderPass->colorAttachments[i];
-                    TextureView* view = ToBackend(attachmentInfo.view.Get());
-                    if (view == nullptr) {
-                        continue;
-                    }
-
-                    attachments[attachmentCount] = view->GetHandle();
-
-                    switch (view->GetFormat().GetAspectInfo(Aspect::Color).baseType) {
-                        case wgpu::TextureComponentType::Float: {
-                            const std::array<float, 4> appliedClearColor =
-                                ConvertToFloatColor(attachmentInfo.clearColor);
-                            for (uint32_t i = 0; i < 4; ++i) {
-                                clearValues[attachmentCount].color.float32[i] =
-                                    appliedClearColor[i];
-                            }
-                            break;
-                        }
-                        case wgpu::TextureComponentType::Uint: {
-                            const std::array<uint32_t, 4> appliedClearColor =
-                                ConvertToUnsignedIntegerColor(attachmentInfo.clearColor);
-                            for (uint32_t i = 0; i < 4; ++i) {
-                                clearValues[attachmentCount].color.uint32[i] = appliedClearColor[i];
-                            }
-                            break;
-                        }
-                        case wgpu::TextureComponentType::Sint: {
-                            const std::array<int32_t, 4> appliedClearColor =
-                                ConvertToSignedIntegerColor(attachmentInfo.clearColor);
-                            for (uint32_t i = 0; i < 4; ++i) {
-                                clearValues[attachmentCount].color.int32[i] = appliedClearColor[i];
-                            }
-                            break;
-                        }
-
-                        case wgpu::TextureComponentType::DepthComparison:
-                            UNREACHABLE();
-                    }
-                    attachmentCount++;
-                }
-
-                if (renderPass->attachmentState->HasDepthStencilAttachment()) {
-                    auto& attachmentInfo = renderPass->depthStencilAttachment;
-                    TextureView* view = ToBackend(attachmentInfo.view.Get());
-
-                    attachments[attachmentCount] = view->GetHandle();
-
-                    clearValues[attachmentCount].depthStencil.depth = attachmentInfo.clearDepth;
-                    clearValues[attachmentCount].depthStencil.stencil = attachmentInfo.clearStencil;
-
-                    attachmentCount++;
-                }
-
-                for (ColorAttachmentIndex i :
-                     IterateBitSet(renderPass->attachmentState->GetColorAttachmentsMask())) {
-                    if (renderPass->colorAttachments[i].resolveTarget != nullptr) {
-                        TextureView* view =
-                            ToBackend(renderPass->colorAttachments[i].resolveTarget.Get());
-
-                        attachments[attachmentCount] = view->GetHandle();
-
-                        attachmentCount++;
-                    }
-                }
-
-                // Chain attachments and create the framebuffer
-                VkFramebufferCreateInfo createInfo;
-                createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
-                createInfo.pNext = nullptr;
-                createInfo.flags = 0;
-                createInfo.renderPass = renderPassVK;
-                createInfo.attachmentCount = attachmentCount;
-                createInfo.pAttachments = AsVkArray(attachments.data());
-                createInfo.width = renderPass->width;
-                createInfo.height = renderPass->height;
-                createInfo.layers = 1;
-
-                DAWN_TRY(
-                    CheckVkSuccess(device->fn.CreateFramebuffer(device->GetVkDevice(), &createInfo,
-                                                                nullptr, &*framebuffer),
-                                   "CreateFramebuffer"));
-
-                // We don't reuse VkFramebuffers so mark the framebuffer for deletion as soon as the
-                // commands currently being recorded are finished.
-                device->GetFencedDeleter()->DeleteWhenUnused(framebuffer);
-            }
-
-            VkRenderPassBeginInfo beginInfo;
-            beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
-            beginInfo.pNext = nullptr;
-            beginInfo.renderPass = renderPassVK;
-            beginInfo.framebuffer = framebuffer;
-            beginInfo.renderArea.offset.x = 0;
-            beginInfo.renderArea.offset.y = 0;
-            beginInfo.renderArea.extent.width = renderPass->width;
-            beginInfo.renderArea.extent.height = renderPass->height;
-            beginInfo.clearValueCount = attachmentCount;
-            beginInfo.pClearValues = clearValues.data();
-
-            device->fn.CmdBeginRenderPass(commands, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
-
-            return {};
-        }
-
-        // Reset the query sets used on render pass because the reset command must be called outside
-        // render pass.
-        void ResetUsedQuerySetsOnRenderPass(Device* device,
-                                            VkCommandBuffer commands,
-                                            QuerySetBase* querySet,
-                                            const std::vector<bool>& availability) {
-            ASSERT(availability.size() == querySet->GetQueryAvailability().size());
-
-            auto currentIt = availability.begin();
-            auto lastIt = availability.end();
-            // Traverse the used queries which availability are true.
-            while (currentIt != lastIt) {
-                auto firstTrueIt = std::find(currentIt, lastIt, true);
-                // No used queries need to be reset
-                if (firstTrueIt == lastIt) {
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::CopyBufferToBuffer: {
+                CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
+                if (copy->size == 0) {
+                    // Skip no-op copies.
                     break;
                 }
 
-                auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+                Buffer* srcBuffer = ToBackend(copy->source.Get());
+                Buffer* dstBuffer = ToBackend(copy->destination.Get());
 
-                uint32_t queryIndex = std::distance(availability.begin(), firstTrueIt);
-                uint32_t queryCount = std::distance(firstTrueIt, nextFalseIt);
+                srcBuffer->EnsureDataInitialized(recordingContext);
+                dstBuffer->EnsureDataInitializedAsDestination(recordingContext,
+                                                              copy->destinationOffset, copy->size);
 
-                // Reset the queries between firstTrueIt and nextFalseIt (which is at most
-                // lastIt)
-                device->fn.CmdResetQueryPool(commands, ToBackend(querySet)->GetHandle(), queryIndex,
-                                             queryCount);
+                srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+                dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
 
-                // Set current iterator to next false
-                currentIt = nextFalseIt;
+                VkBufferCopy region;
+                region.srcOffset = copy->sourceOffset;
+                region.dstOffset = copy->destinationOffset;
+                region.size = copy->size;
+
+                VkBuffer srcHandle = srcBuffer->GetHandle();
+                VkBuffer dstHandle = dstBuffer->GetHandle();
+                device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
+                break;
             }
-        }
 
-        void RecordWriteTimestampCmd(CommandRecordingContext* recordingContext,
-                                     Device* device,
-                                     WriteTimestampCmd* cmd) {
-            VkCommandBuffer commands = recordingContext->commandBuffer;
-            QuerySet* querySet = ToBackend(cmd->querySet.Get());
-
-            device->fn.CmdWriteTimestamp(commands, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-                                         querySet->GetHandle(), cmd->queryIndex);
-        }
-
-        void RecordResolveQuerySetCmd(VkCommandBuffer commands,
-                                      Device* device,
-                                      QuerySet* querySet,
-                                      uint32_t firstQuery,
-                                      uint32_t queryCount,
-                                      Buffer* destination,
-                                      uint64_t destinationOffset) {
-            const std::vector<bool>& availability = querySet->GetQueryAvailability();
-
-            auto currentIt = availability.begin() + firstQuery;
-            auto lastIt = availability.begin() + firstQuery + queryCount;
-
-            // Traverse available queries in the range of [firstQuery, firstQuery +  queryCount - 1]
-            while (currentIt != lastIt) {
-                auto firstTrueIt = std::find(currentIt, lastIt, true);
-                // No available query found for resolving
-                if (firstTrueIt == lastIt) {
-                    break;
+            case Command::CopyBufferToTexture: {
+                CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
                 }
-                auto nextFalseIt = std::find(firstTrueIt, lastIt, false);
+                auto& src = copy->source;
+                auto& dst = copy->destination;
 
-                // The query index of firstTrueIt where the resolving starts
-                uint32_t resolveQueryIndex = std::distance(availability.begin(), firstTrueIt);
-                // The queries count between firstTrueIt and nextFalseIt need to be resolved
-                uint32_t resolveQueryCount = std::distance(firstTrueIt, nextFalseIt);
+                ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
 
-                // Calculate destinationOffset based on the current resolveQueryIndex and firstQuery
-                uint32_t resolveDestinationOffset =
-                    destinationOffset + (resolveQueryIndex - firstQuery) * sizeof(uint64_t);
+                VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, dst, copy->copySize);
+                VkImageSubresourceLayers subresource = region.imageSubresource;
 
-                // Resolve the queries between firstTrueIt and nextFalseIt (which is at most lastIt)
-                device->fn.CmdCopyQueryPoolResults(
-                    commands, querySet->GetHandle(), resolveQueryIndex, resolveQueryCount,
-                    destination->GetHandle(), resolveDestinationOffset, sizeof(uint64_t),
-                    VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+                SubresourceRange range =
+                    GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
 
-                // Set current iterator to next false
-                currentIt = nextFalseIt;
-            }
-        }
-
-    }  // anonymous namespace
-
-    // static
-    Ref<CommandBuffer> CommandBuffer::Create(CommandEncoder* encoder,
-                                             const CommandBufferDescriptor* descriptor) {
-        return AcquireRef(new CommandBuffer(encoder, descriptor));
-    }
-
-    CommandBuffer::CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor)
-        : CommandBufferBase(encoder, descriptor) {
-    }
-
-    MaybeError CommandBuffer::RecordCopyImageWithTemporaryBuffer(
-        CommandRecordingContext* recordingContext,
-        const TextureCopy& srcCopy,
-        const TextureCopy& dstCopy,
-        const Extent3D& copySize) {
-        ASSERT(srcCopy.texture->GetFormat().CopyCompatibleWith(dstCopy.texture->GetFormat()));
-        ASSERT(srcCopy.aspect == dstCopy.aspect);
-        dawn::native::Format format = srcCopy.texture->GetFormat();
-        const TexelBlockInfo& blockInfo = format.GetAspectInfo(srcCopy.aspect).block;
-        ASSERT(copySize.width % blockInfo.width == 0);
-        uint32_t widthInBlocks = copySize.width / blockInfo.width;
-        ASSERT(copySize.height % blockInfo.height == 0);
-        uint32_t heightInBlocks = copySize.height / blockInfo.height;
-
-        // Create the temporary buffer. Note that We don't need to respect WebGPU's 256 alignment
-        // because it isn't a hard constraint in Vulkan.
-        uint64_t tempBufferSize =
-            widthInBlocks * heightInBlocks * copySize.depthOrArrayLayers * blockInfo.byteSize;
-        BufferDescriptor tempBufferDescriptor;
-        tempBufferDescriptor.size = tempBufferSize;
-        tempBufferDescriptor.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
-
-        Device* device = ToBackend(GetDevice());
-        Ref<BufferBase> tempBufferBase;
-        DAWN_TRY_ASSIGN(tempBufferBase, device->CreateBuffer(&tempBufferDescriptor));
-        Buffer* tempBuffer = ToBackend(tempBufferBase.Get());
-
-        BufferCopy tempBufferCopy;
-        tempBufferCopy.buffer = tempBuffer;
-        tempBufferCopy.rowsPerImage = heightInBlocks;
-        tempBufferCopy.offset = 0;
-        tempBufferCopy.bytesPerRow = copySize.width / blockInfo.width * blockInfo.byteSize;
-
-        VkCommandBuffer commands = recordingContext->commandBuffer;
-        VkImage srcImage = ToBackend(srcCopy.texture)->GetHandle();
-        VkImage dstImage = ToBackend(dstCopy.texture)->GetHandle();
-
-        tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-        VkBufferImageCopy srcToTempBufferRegion =
-            ComputeBufferImageCopyRegion(tempBufferCopy, srcCopy, copySize);
-
-        // The Dawn CopySrc usage is always mapped to GENERAL
-        device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
-                                        tempBuffer->GetHandle(), 1, &srcToTempBufferRegion);
-
-        tempBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
-        VkBufferImageCopy tempBufferToDstRegion =
-            ComputeBufferImageCopyRegion(tempBufferCopy, dstCopy, copySize);
-
-        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
-        // copy command.
-        device->fn.CmdCopyBufferToImage(commands, tempBuffer->GetHandle(), dstImage,
-                                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
-                                        &tempBufferToDstRegion);
-
-        recordingContext->tempBuffers.emplace_back(tempBuffer);
-
-        return {};
-    }
-
-    MaybeError CommandBuffer::RecordCommands(CommandRecordingContext* recordingContext) {
-        Device* device = ToBackend(GetDevice());
-        VkCommandBuffer commands = recordingContext->commandBuffer;
-
-        // Records the necessary barriers for the resource usage pre-computed by the frontend.
-        // And resets the used query sets which are rewritten on the render pass.
-        auto PrepareResourcesForRenderPass = [](Device* device,
-                                                CommandRecordingContext* recordingContext,
-                                                const RenderPassResourceUsage& usages) {
-            TransitionAndClearForSyncScope(device, recordingContext, usages);
-
-            // Reset all query set used on current render pass together before beginning render pass
-            // because the reset command must be called outside render pass
-            for (size_t i = 0; i < usages.querySets.size(); ++i) {
-                ResetUsedQuerySetsOnRenderPass(device, recordingContext->commandBuffer,
-                                               usages.querySets[i], usages.queryAvailabilities[i]);
-            }
-        };
-
-        size_t nextComputePassNumber = 0;
-        size_t nextRenderPassNumber = 0;
-
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::CopyBufferToBuffer: {
-                    CopyBufferToBufferCmd* copy = mCommands.NextCommand<CopyBufferToBufferCmd>();
-                    if (copy->size == 0) {
-                        // Skip no-op copies.
-                        break;
-                    }
-
-                    Buffer* srcBuffer = ToBackend(copy->source.Get());
-                    Buffer* dstBuffer = ToBackend(copy->destination.Get());
-
-                    srcBuffer->EnsureDataInitialized(recordingContext);
-                    dstBuffer->EnsureDataInitializedAsDestination(
-                        recordingContext, copy->destinationOffset, copy->size);
-
-                    srcBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
-                    dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
-                    VkBufferCopy region;
-                    region.srcOffset = copy->sourceOffset;
-                    region.dstOffset = copy->destinationOffset;
-                    region.size = copy->size;
-
-                    VkBuffer srcHandle = srcBuffer->GetHandle();
-                    VkBuffer dstHandle = dstBuffer->GetHandle();
-                    device->fn.CmdCopyBuffer(commands, srcHandle, dstHandle, 1, &region);
-                    break;
-                }
-
-                case Command::CopyBufferToTexture: {
-                    CopyBufferToTextureCmd* copy = mCommands.NextCommand<CopyBufferToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    auto& src = copy->source;
-                    auto& dst = copy->destination;
-
-                    ToBackend(src.buffer)->EnsureDataInitialized(recordingContext);
-
-                    VkBufferImageCopy region =
-                        ComputeBufferImageCopyRegion(src, dst, copy->copySize);
-                    VkImageSubresourceLayers subresource = region.imageSubresource;
-
-                    SubresourceRange range =
-                        GetSubresourcesAffectedByCopy(copy->destination, copy->copySize);
-
-                    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
-                                                      subresource.mipLevel)) {
-                        // Since texture has been overwritten, it has been "initialized"
-                        dst.texture->SetIsSubresourceContentInitialized(true, range);
-                    } else {
-                        ToBackend(dst.texture)
-                            ->EnsureSubresourceContentInitialized(recordingContext, range);
-                    }
-                    ToBackend(src.buffer)
-                        ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+                if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                  subresource.mipLevel)) {
+                    // Since texture has been overwritten, it has been "initialized"
+                    dst.texture->SetIsSubresourceContentInitialized(true, range);
+                } else {
                     ToBackend(dst.texture)
-                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
-                    VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
+                        ->EnsureSubresourceContentInitialized(recordingContext, range);
+                }
+                ToBackend(src.buffer)
+                    ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopySrc);
+                ToBackend(dst.texture)
+                    ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+                VkBuffer srcBuffer = ToBackend(src.buffer)->GetHandle();
+                VkImage dstImage = ToBackend(dst.texture)->GetHandle();
+
+                // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+                // copy command.
+                device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
+                                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+                break;
+            }
+
+            case Command::CopyTextureToBuffer: {
+                CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                auto& src = copy->source;
+                auto& dst = copy->destination;
+
+                ToBackend(dst.buffer)->EnsureDataInitializedAsDestination(recordingContext, copy);
+
+                VkBufferImageCopy region = ComputeBufferImageCopyRegion(dst, src, copy->copySize);
+
+                SubresourceRange range =
+                    GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
+
+                ToBackend(src.texture)
+                    ->EnsureSubresourceContentInitialized(recordingContext, range);
+
+                ToBackend(src.texture)
+                    ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
+                ToBackend(dst.buffer)
+                    ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                VkImage srcImage = ToBackend(src.texture)->GetHandle();
+                VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
+                // The Dawn CopySrc usage is always mapped to GENERAL
+                device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                                dstBuffer, 1, &region);
+                break;
+            }
+
+            case Command::CopyTextureToTexture: {
+                CopyTextureToTextureCmd* copy = mCommands.NextCommand<CopyTextureToTextureCmd>();
+                if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
+                    copy->copySize.depthOrArrayLayers == 0) {
+                    // Skip no-op copies.
+                    continue;
+                }
+                TextureCopy& src = copy->source;
+                TextureCopy& dst = copy->destination;
+                SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
+                SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
+
+                ToBackend(src.texture)
+                    ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
+                if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
+                                                  dst.mipLevel)) {
+                    // Since destination texture has been overwritten, it has been "initialized"
+                    dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
+                } else {
+                    ToBackend(dst.texture)
+                        ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
+                }
+
+                if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
+                    // When there are overlapped subresources, the layout of the overlapped
+                    // subresources should all be GENERAL instead of what we set now. Currently
+                    // it is not allowed to copy with overlapped subresources, but we still
+                    // add the ASSERT here as a reminder for this possible misuse.
+                    ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
+                                              copy->copySize.depthOrArrayLayers));
+                }
+
+                ToBackend(src.texture)
+                    ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, srcRange);
+                ToBackend(dst.texture)
+                    ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, dstRange);
+
+                // In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
+                // because as Vulkan SPEC always validates image copies with the virtual size of
+                // the image subresource, when the extent that fits in the copy region of one
+                // subresource but does not fit in the one of another subresource, we will fail
+                // to find a valid extent to satisfy the requirements on both source and
+                // destination image subresource. For example, when the source is the first
+                // level of a 16x16 texture in BC format, and the destination is the third level
+                // of a 60x60 texture in the same format, neither 16x16 nor 15x15 is valid as
+                // the extent of vkCmdCopyImage.
+                // Our workaround for this issue is replacing the texture-to-texture copy with
+                // one texture-to-buffer copy and one buffer-to-texture copy.
+                bool copyUsingTemporaryBuffer =
+                    device->IsToggleEnabled(
+                        Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy) &&
+                    src.texture->GetFormat().isCompressed &&
+                    !HasSameTextureCopyExtent(src, dst, copy->copySize);
+
+                if (!copyUsingTemporaryBuffer) {
+                    VkImage srcImage = ToBackend(src.texture)->GetHandle();
                     VkImage dstImage = ToBackend(dst.texture)->GetHandle();
 
-                    // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
-                    // copy command.
-                    device->fn.CmdCopyBufferToImage(commands, srcBuffer, dstImage,
-                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
-                                                    &region);
+                    for (Aspect aspect : IterateEnumMask(src.texture->GetFormat().aspects)) {
+                        ASSERT(dst.texture->GetFormat().aspects & aspect);
+                        VkImageCopy region =
+                            ComputeImageCopyRegion(src, dst, copy->copySize, aspect);
+
+                        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after
+                        // the copy command.
+                        device->fn.CmdCopyImage(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
+                                                dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+                                                &region);
+                    }
+                } else {
+                    DAWN_TRY(RecordCopyImageWithTemporaryBuffer(recordingContext, src, dst,
+                                                                copy->copySize));
+                }
+
+                break;
+            }
+
+            case Command::ClearBuffer: {
+                ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
+                if (cmd->size == 0) {
+                    // Skip no-op fills.
                     break;
                 }
 
-                case Command::CopyTextureToBuffer: {
-                    CopyTextureToBufferCmd* copy = mCommands.NextCommand<CopyTextureToBufferCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    auto& src = copy->source;
-                    auto& dst = copy->destination;
+                Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
+                bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
+                    recordingContext, cmd->offset, cmd->size);
 
-                    ToBackend(dst.buffer)
-                        ->EnsureDataInitializedAsDestination(recordingContext, copy);
-
-                    VkBufferImageCopy region =
-                        ComputeBufferImageCopyRegion(dst, src, copy->copySize);
-
-                    SubresourceRange range =
-                        GetSubresourcesAffectedByCopy(copy->source, copy->copySize);
-
-                    ToBackend(src.texture)
-                        ->EnsureSubresourceContentInitialized(recordingContext, range);
-
-                    ToBackend(src.texture)
-                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc, range);
-                    ToBackend(dst.buffer)
-                        ->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
-                    VkImage srcImage = ToBackend(src.texture)->GetHandle();
-                    VkBuffer dstBuffer = ToBackend(dst.buffer)->GetHandle();
-                    // The Dawn CopySrc usage is always mapped to GENERAL
-                    device->fn.CmdCopyImageToBuffer(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
-                                                    dstBuffer, 1, &region);
-                    break;
-                }
-
-                case Command::CopyTextureToTexture: {
-                    CopyTextureToTextureCmd* copy =
-                        mCommands.NextCommand<CopyTextureToTextureCmd>();
-                    if (copy->copySize.width == 0 || copy->copySize.height == 0 ||
-                        copy->copySize.depthOrArrayLayers == 0) {
-                        // Skip no-op copies.
-                        continue;
-                    }
-                    TextureCopy& src = copy->source;
-                    TextureCopy& dst = copy->destination;
-                    SubresourceRange srcRange = GetSubresourcesAffectedByCopy(src, copy->copySize);
-                    SubresourceRange dstRange = GetSubresourcesAffectedByCopy(dst, copy->copySize);
-
-                    ToBackend(src.texture)
-                        ->EnsureSubresourceContentInitialized(recordingContext, srcRange);
-                    if (IsCompleteSubresourceCopiedTo(dst.texture.Get(), copy->copySize,
-                                                      dst.mipLevel)) {
-                        // Since destination texture has been overwritten, it has been "initialized"
-                        dst.texture->SetIsSubresourceContentInitialized(true, dstRange);
-                    } else {
-                        ToBackend(dst.texture)
-                            ->EnsureSubresourceContentInitialized(recordingContext, dstRange);
-                    }
-
-                    if (src.texture.Get() == dst.texture.Get() && src.mipLevel == dst.mipLevel) {
-                        // When there are overlapped subresources, the layout of the overlapped
-                        // subresources should all be GENERAL instead of what we set now. Currently
-                        // it is not allowed to copy with overlapped subresources, but we still
-                        // add the ASSERT here as a reminder for this possible misuse.
-                        ASSERT(!IsRangeOverlapped(src.origin.z, dst.origin.z,
-                                                  copy->copySize.depthOrArrayLayers));
-                    }
-
-                    ToBackend(src.texture)
-                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
-                                             srcRange);
-                    ToBackend(dst.texture)
-                        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
-                                             dstRange);
-
-                    // In some situations we cannot do texture-to-texture copies with vkCmdCopyImage
-                    // because as Vulkan SPEC always validates image copies with the virtual size of
-                    // the image subresource, when the extent that fits in the copy region of one
-                    // subresource but does not fit in the one of another subresource, we will fail
-                    // to find a valid extent to satisfy the requirements on both source and
-                    // destination image subresource. For example, when the source is the first
-                    // level of a 16x16 texture in BC format, and the destination is the third level
-                    // of a 60x60 texture in the same format, neither 16x16 nor 15x15 is valid as
-                    // the extent of vkCmdCopyImage.
-                    // Our workaround for this issue is replacing the texture-to-texture copy with
-                    // one texture-to-buffer copy and one buffer-to-texture copy.
-                    bool copyUsingTemporaryBuffer =
-                        device->IsToggleEnabled(
-                            Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy) &&
-                        src.texture->GetFormat().isCompressed &&
-                        !HasSameTextureCopyExtent(src, dst, copy->copySize);
-
-                    if (!copyUsingTemporaryBuffer) {
-                        VkImage srcImage = ToBackend(src.texture)->GetHandle();
-                        VkImage dstImage = ToBackend(dst.texture)->GetHandle();
-
-                        for (Aspect aspect : IterateEnumMask(src.texture->GetFormat().aspects)) {
-                            ASSERT(dst.texture->GetFormat().aspects & aspect);
-                            VkImageCopy region =
-                                ComputeImageCopyRegion(src, dst, copy->copySize, aspect);
-
-                            // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after
-                            // the copy command.
-                            device->fn.CmdCopyImage(commands, srcImage, VK_IMAGE_LAYOUT_GENERAL,
-                                                    dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                                                    1, &region);
-                        }
-                    } else {
-                        DAWN_TRY(RecordCopyImageWithTemporaryBuffer(recordingContext, src, dst,
-                                                                    copy->copySize));
-                    }
-
-                    break;
-                }
-
-                case Command::ClearBuffer: {
-                    ClearBufferCmd* cmd = mCommands.NextCommand<ClearBufferCmd>();
-                    if (cmd->size == 0) {
-                        // Skip no-op fills.
-                        break;
-                    }
-
-                    Buffer* dstBuffer = ToBackend(cmd->buffer.Get());
-                    bool clearedToZero = dstBuffer->EnsureDataInitializedAsDestination(
-                        recordingContext, cmd->offset, cmd->size);
-
-                    if (!clearedToZero) {
-                        dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-                        device->fn.CmdFillBuffer(recordingContext->commandBuffer,
-                                                 dstBuffer->GetHandle(), cmd->offset, cmd->size,
-                                                 0u);
-                    }
-
-                    break;
-                }
-
-                case Command::BeginRenderPass: {
-                    BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
-
-                    PrepareResourcesForRenderPass(
-                        device, recordingContext,
-                        GetResourceUsages().renderPasses[nextRenderPassNumber]);
-
-                    LazyClearRenderPassAttachments(cmd);
-                    DAWN_TRY(RecordRenderPass(recordingContext, cmd));
-
-                    nextRenderPassNumber++;
-                    break;
-                }
-
-                case Command::BeginComputePass: {
-                    mCommands.NextCommand<BeginComputePassCmd>();
-
-                    DAWN_TRY(RecordComputePass(
-                        recordingContext,
-                        GetResourceUsages().computePasses[nextComputePassNumber]));
-
-                    nextComputePassNumber++;
-                    break;
-                }
-
-                case Command::ResolveQuerySet: {
-                    ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
-                    QuerySet* querySet = ToBackend(cmd->querySet.Get());
-                    Buffer* destination = ToBackend(cmd->destination.Get());
-
-                    destination->EnsureDataInitializedAsDestination(
-                        recordingContext, cmd->destinationOffset,
-                        cmd->queryCount * sizeof(uint64_t));
-
-                    // vkCmdCopyQueryPoolResults only can retrieve available queries because
-                    // VK_QUERY_RESULT_WAIT_BIT is set. In order to resolve the unavailable queries
-                    // as 0s, we need to clear the resolving region of the destination buffer to 0s.
-                    auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
-                    auto endIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery +
-                                 cmd->queryCount;
-                    bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
-                    if (hasUnavailableQueries) {
-                        destination->TransitionUsageNow(recordingContext,
-                                                        wgpu::BufferUsage::CopyDst);
-                        device->fn.CmdFillBuffer(commands, destination->GetHandle(),
-                                                 cmd->destinationOffset,
-                                                 cmd->queryCount * sizeof(uint64_t), 0u);
-                    }
-
-                    destination->TransitionUsageNow(recordingContext,
-                                                    wgpu::BufferUsage::QueryResolve);
-
-                    RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
-                                             cmd->queryCount, destination, cmd->destinationOffset);
-
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
-                    // The query must be reset between uses.
-                    device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
-                                                 cmd->queryIndex, 1);
-
-                    RecordWriteTimestampCmd(recordingContext, device, cmd);
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
-                        const char* label = mCommands.NextData<char>(cmd->length + 1);
-                        VkDebugUtilsLabelEXT utilsLabel;
-                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
-                        utilsLabel.pNext = nullptr;
-                        utilsLabel.pLabelName = label;
-                        // Default color to black
-                        utilsLabel.color[0] = 0.0;
-                        utilsLabel.color[1] = 0.0;
-                        utilsLabel.color[2] = 0.0;
-                        utilsLabel.color[3] = 1.0;
-                        device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
-                    } else {
-                        SkipCommand(&mCommands, Command::InsertDebugMarker);
-                    }
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        mCommands.NextCommand<PopDebugGroupCmd>();
-                        device->fn.CmdEndDebugUtilsLabelEXT(commands);
-                    } else {
-                        SkipCommand(&mCommands, Command::PopDebugGroup);
-                    }
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
-                        const char* label = mCommands.NextData<char>(cmd->length + 1);
-                        VkDebugUtilsLabelEXT utilsLabel;
-                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
-                        utilsLabel.pNext = nullptr;
-                        utilsLabel.pLabelName = label;
-                        // Default color to black
-                        utilsLabel.color[0] = 0.0;
-                        utilsLabel.color[1] = 0.0;
-                        utilsLabel.color[2] = 0.0;
-                        utilsLabel.color[3] = 1.0;
-                        device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
-                    } else {
-                        SkipCommand(&mCommands, Command::PushDebugGroup);
-                    }
-                    break;
-                }
-
-                case Command::WriteBuffer: {
-                    WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
-                    const uint64_t offset = write->offset;
-                    const uint64_t size = write->size;
-                    if (size == 0) {
-                        continue;
-                    }
-
-                    Buffer* dstBuffer = ToBackend(write->buffer.Get());
-                    uint8_t* data = mCommands.NextData<uint8_t>(size);
-                    Device* device = ToBackend(GetDevice());
-
-                    UploadHandle uploadHandle;
-                    DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
-                                                      size, device->GetPendingCommandSerial(),
-                                                      kCopyBufferToBufferOffsetAlignment));
-                    ASSERT(uploadHandle.mappedBuffer != nullptr);
-                    memcpy(uploadHandle.mappedBuffer, data, size);
-
-                    dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
-
+                if (!clearedToZero) {
                     dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
-                    VkBufferCopy copy;
-                    copy.srcOffset = uploadHandle.startOffset;
-                    copy.dstOffset = offset;
-                    copy.size = size;
-
-                    device->fn.CmdCopyBuffer(
-                        commands, ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
-                        dstBuffer->GetHandle(), 1, &copy);
-                    break;
+                    device->fn.CmdFillBuffer(recordingContext->commandBuffer,
+                                             dstBuffer->GetHandle(), cmd->offset, cmd->size, 0u);
                 }
 
-                default:
-                    break;
+                break;
             }
-        }
 
-        return {};
+            case Command::BeginRenderPass: {
+                BeginRenderPassCmd* cmd = mCommands.NextCommand<BeginRenderPassCmd>();
+
+                PrepareResourcesForRenderPass(
+                    device, recordingContext,
+                    GetResourceUsages().renderPasses[nextRenderPassNumber]);
+
+                LazyClearRenderPassAttachments(cmd);
+                DAWN_TRY(RecordRenderPass(recordingContext, cmd));
+
+                nextRenderPassNumber++;
+                break;
+            }
+
+            case Command::BeginComputePass: {
+                mCommands.NextCommand<BeginComputePassCmd>();
+
+                DAWN_TRY(RecordComputePass(
+                    recordingContext, GetResourceUsages().computePasses[nextComputePassNumber]));
+
+                nextComputePassNumber++;
+                break;
+            }
+
+            case Command::ResolveQuerySet: {
+                ResolveQuerySetCmd* cmd = mCommands.NextCommand<ResolveQuerySetCmd>();
+                QuerySet* querySet = ToBackend(cmd->querySet.Get());
+                Buffer* destination = ToBackend(cmd->destination.Get());
+
+                destination->EnsureDataInitializedAsDestination(
+                    recordingContext, cmd->destinationOffset, cmd->queryCount * sizeof(uint64_t));
+
+                // vkCmdCopyQueryPoolResults only can retrieve available queries because
+                // VK_QUERY_RESULT_WAIT_BIT is set. In order to resolve the unavailable queries
+                // as 0s, we need to clear the resolving region of the destination buffer to 0s.
+                auto startIt = querySet->GetQueryAvailability().begin() + cmd->firstQuery;
+                auto endIt =
+                    querySet->GetQueryAvailability().begin() + cmd->firstQuery + cmd->queryCount;
+                bool hasUnavailableQueries = std::find(startIt, endIt, false) != endIt;
+                if (hasUnavailableQueries) {
+                    destination->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+                    device->fn.CmdFillBuffer(commands, destination->GetHandle(),
+                                             cmd->destinationOffset,
+                                             cmd->queryCount * sizeof(uint64_t), 0u);
+                }
+
+                destination->TransitionUsageNow(recordingContext, wgpu::BufferUsage::QueryResolve);
+
+                RecordResolveQuerySetCmd(commands, device, querySet, cmd->firstQuery,
+                                         cmd->queryCount, destination, cmd->destinationOffset);
+
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                // The query must be reset between uses.
+                device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+                                             cmd->queryIndex, 1);
+
+                RecordWriteTimestampCmd(recordingContext, device, cmd);
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+                    VkDebugUtilsLabelEXT utilsLabel;
+                    utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                    utilsLabel.pNext = nullptr;
+                    utilsLabel.pLabelName = label;
+                    // Default color to black
+                    utilsLabel.color[0] = 0.0;
+                    utilsLabel.color[1] = 0.0;
+                    utilsLabel.color[2] = 0.0;
+                    utilsLabel.color[3] = 1.0;
+                    device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                } else {
+                    SkipCommand(&mCommands, Command::InsertDebugMarker);
+                }
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    mCommands.NextCommand<PopDebugGroupCmd>();
+                    device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                } else {
+                    SkipCommand(&mCommands, Command::PopDebugGroup);
+                }
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+                    VkDebugUtilsLabelEXT utilsLabel;
+                    utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                    utilsLabel.pNext = nullptr;
+                    utilsLabel.pLabelName = label;
+                    // Default color to black
+                    utilsLabel.color[0] = 0.0;
+                    utilsLabel.color[1] = 0.0;
+                    utilsLabel.color[2] = 0.0;
+                    utilsLabel.color[3] = 1.0;
+                    device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                } else {
+                    SkipCommand(&mCommands, Command::PushDebugGroup);
+                }
+                break;
+            }
+
+            case Command::WriteBuffer: {
+                WriteBufferCmd* write = mCommands.NextCommand<WriteBufferCmd>();
+                const uint64_t offset = write->offset;
+                const uint64_t size = write->size;
+                if (size == 0) {
+                    continue;
+                }
+
+                Buffer* dstBuffer = ToBackend(write->buffer.Get());
+                uint8_t* data = mCommands.NextData<uint8_t>(size);
+                Device* device = ToBackend(GetDevice());
+
+                UploadHandle uploadHandle;
+                DAWN_TRY_ASSIGN(uploadHandle, device->GetDynamicUploader()->Allocate(
+                                                  size, device->GetPendingCommandSerial(),
+                                                  kCopyBufferToBufferOffsetAlignment));
+                ASSERT(uploadHandle.mappedBuffer != nullptr);
+                memcpy(uploadHandle.mappedBuffer, data, size);
+
+                dstBuffer->EnsureDataInitializedAsDestination(recordingContext, offset, size);
+
+                dstBuffer->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+                VkBufferCopy copy;
+                copy.srcOffset = uploadHandle.startOffset;
+                copy.dstOffset = offset;
+                copy.size = size;
+
+                device->fn.CmdCopyBuffer(commands,
+                                         ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
+                                         dstBuffer->GetHandle(), 1, &copy);
+                break;
+            }
+
+            default:
+                break;
+        }
     }
 
-    MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
-                                                const ComputePassResourceUsage& resourceUsages) {
-        Device* device = ToBackend(GetDevice());
-        VkCommandBuffer commands = recordingContext->commandBuffer;
+    return {};
+}
 
-        uint64_t currentDispatch = 0;
-        DescriptorSetTracker descriptorSets = {};
+MaybeError CommandBuffer::RecordComputePass(CommandRecordingContext* recordingContext,
+                                            const ComputePassResourceUsage& resourceUsages) {
+    Device* device = ToBackend(GetDevice());
+    VkCommandBuffer commands = recordingContext->commandBuffer;
 
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::EndComputePass: {
-                    mCommands.NextCommand<EndComputePassCmd>();
-                    return {};
-                }
+    uint64_t currentDispatch = 0;
+    DescriptorSetTracker descriptorSets = {};
 
-                case Command::Dispatch: {
-                    DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
-
-                    TransitionAndClearForSyncScope(device, recordingContext,
-                                                   resourceUsages.dispatchUsages[currentDispatch]);
-                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
-
-                    device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
-                    currentDispatch++;
-                    break;
-                }
-
-                case Command::DispatchIndirect: {
-                    DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
-                    VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
-
-                    TransitionAndClearForSyncScope(device, recordingContext,
-                                                   resourceUsages.dispatchUsages[currentDispatch]);
-                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
-
-                    device->fn.CmdDispatchIndirect(
-                        commands, indirectBuffer,
-                        static_cast<VkDeviceSize>(dispatch->indirectOffset));
-                    currentDispatch++;
-                    break;
-                }
-
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
-
-                    BindGroup* bindGroup = ToBackend(cmd->group.Get());
-                    uint32_t* dynamicOffsets = nullptr;
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-
-                    descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
-                                                  dynamicOffsets);
-                    break;
-                }
-
-                case Command::SetComputePipeline: {
-                    SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
-                    ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
-                    device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
-                                               pipeline->GetHandle());
-                    descriptorSets.OnSetPipeline(pipeline);
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
-                        const char* label = mCommands.NextData<char>(cmd->length + 1);
-                        VkDebugUtilsLabelEXT utilsLabel;
-                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
-                        utilsLabel.pNext = nullptr;
-                        utilsLabel.pLabelName = label;
-                        // Default color to black
-                        utilsLabel.color[0] = 0.0;
-                        utilsLabel.color[1] = 0.0;
-                        utilsLabel.color[2] = 0.0;
-                        utilsLabel.color[3] = 1.0;
-                        device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
-                    } else {
-                        SkipCommand(&mCommands, Command::InsertDebugMarker);
-                    }
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        mCommands.NextCommand<PopDebugGroupCmd>();
-                        device->fn.CmdEndDebugUtilsLabelEXT(commands);
-                    } else {
-                        SkipCommand(&mCommands, Command::PopDebugGroup);
-                    }
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
-                        const char* label = mCommands.NextData<char>(cmd->length + 1);
-                        VkDebugUtilsLabelEXT utilsLabel;
-                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
-                        utilsLabel.pNext = nullptr;
-                        utilsLabel.pLabelName = label;
-                        // Default color to black
-                        utilsLabel.color[0] = 0.0;
-                        utilsLabel.color[1] = 0.0;
-                        utilsLabel.color[2] = 0.0;
-                        utilsLabel.color[3] = 1.0;
-                        device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
-                    } else {
-                        SkipCommand(&mCommands, Command::PushDebugGroup);
-                    }
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
-                    // The query must be reset between uses.
-                    device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
-                                                 cmd->queryIndex, 1);
-
-                    RecordWriteTimestampCmd(recordingContext, device, cmd);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::EndComputePass: {
+                mCommands.NextCommand<EndComputePassCmd>();
+                return {};
             }
-        }
 
-        // EndComputePass should have been called
-        UNREACHABLE();
+            case Command::Dispatch: {
+                DispatchCmd* dispatch = mCommands.NextCommand<DispatchCmd>();
+
+                TransitionAndClearForSyncScope(device, recordingContext,
+                                               resourceUsages.dispatchUsages[currentDispatch]);
+                descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
+                device->fn.CmdDispatch(commands, dispatch->x, dispatch->y, dispatch->z);
+                currentDispatch++;
+                break;
+            }
+
+            case Command::DispatchIndirect: {
+                DispatchIndirectCmd* dispatch = mCommands.NextCommand<DispatchIndirectCmd>();
+                VkBuffer indirectBuffer = ToBackend(dispatch->indirectBuffer)->GetHandle();
+
+                TransitionAndClearForSyncScope(device, recordingContext,
+                                               resourceUsages.dispatchUsages[currentDispatch]);
+                descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_COMPUTE);
+
+                device->fn.CmdDispatchIndirect(commands, indirectBuffer,
+                                               static_cast<VkDeviceSize>(dispatch->indirectOffset));
+                currentDispatch++;
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = mCommands.NextCommand<SetBindGroupCmd>();
+
+                BindGroup* bindGroup = ToBackend(cmd->group.Get());
+                uint32_t* dynamicOffsets = nullptr;
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = mCommands.NextData<uint32_t>(cmd->dynamicOffsetCount);
+                }
+
+                descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+                                              dynamicOffsets);
+                break;
+            }
+
+            case Command::SetComputePipeline: {
+                SetComputePipelineCmd* cmd = mCommands.NextCommand<SetComputePipelineCmd>();
+                ComputePipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_COMPUTE,
+                                           pipeline->GetHandle());
+                descriptorSets.OnSetPipeline(pipeline);
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    InsertDebugMarkerCmd* cmd = mCommands.NextCommand<InsertDebugMarkerCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+                    VkDebugUtilsLabelEXT utilsLabel;
+                    utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                    utilsLabel.pNext = nullptr;
+                    utilsLabel.pLabelName = label;
+                    // Default color to black
+                    utilsLabel.color[0] = 0.0;
+                    utilsLabel.color[1] = 0.0;
+                    utilsLabel.color[2] = 0.0;
+                    utilsLabel.color[3] = 1.0;
+                    device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                } else {
+                    SkipCommand(&mCommands, Command::InsertDebugMarker);
+                }
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    mCommands.NextCommand<PopDebugGroupCmd>();
+                    device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                } else {
+                    SkipCommand(&mCommands, Command::PopDebugGroup);
+                }
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    PushDebugGroupCmd* cmd = mCommands.NextCommand<PushDebugGroupCmd>();
+                    const char* label = mCommands.NextData<char>(cmd->length + 1);
+                    VkDebugUtilsLabelEXT utilsLabel;
+                    utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                    utilsLabel.pNext = nullptr;
+                    utilsLabel.pLabelName = label;
+                    // Default color to black
+                    utilsLabel.color[0] = 0.0;
+                    utilsLabel.color[1] = 0.0;
+                    utilsLabel.color[2] = 0.0;
+                    utilsLabel.color[3] = 1.0;
+                    device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                } else {
+                    SkipCommand(&mCommands, Command::PushDebugGroup);
+                }
+                break;
+            }
+
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
+
+                // The query must be reset between uses.
+                device->fn.CmdResetQueryPool(commands, ToBackend(cmd->querySet)->GetHandle(),
+                                             cmd->queryIndex, 1);
+
+                RecordWriteTimestampCmd(recordingContext, device, cmd);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+        }
     }
 
-    MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
-                                               BeginRenderPassCmd* renderPassCmd) {
-        Device* device = ToBackend(GetDevice());
-        VkCommandBuffer commands = recordingContext->commandBuffer;
+    // EndComputePass should have been called
+    UNREACHABLE();
+}
 
-        DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
+MaybeError CommandBuffer::RecordRenderPass(CommandRecordingContext* recordingContext,
+                                           BeginRenderPassCmd* renderPassCmd) {
+    Device* device = ToBackend(GetDevice());
+    VkCommandBuffer commands = recordingContext->commandBuffer;
 
-        // Set the default value for the dynamic state
-        {
-            device->fn.CmdSetLineWidth(commands, 1.0f);
-            device->fn.CmdSetDepthBounds(commands, 0.0f, 1.0f);
+    DAWN_TRY(RecordBeginRenderPass(recordingContext, device, renderPassCmd));
 
-            device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK, 0);
+    // Set the default value for the dynamic state
+    {
+        device->fn.CmdSetLineWidth(commands, 1.0f);
+        device->fn.CmdSetDepthBounds(commands, 0.0f, 1.0f);
 
-            float blendConstants[4] = {
-                0.0f,
-                0.0f,
-                0.0f,
-                0.0f,
-            };
-            device->fn.CmdSetBlendConstants(commands, blendConstants);
+        device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK, 0);
 
-            // The viewport and scissor default to cover all of the attachments
-            VkViewport viewport;
-            viewport.x = 0.0f;
-            viewport.y = static_cast<float>(renderPassCmd->height);
-            viewport.width = static_cast<float>(renderPassCmd->width);
-            viewport.height = -static_cast<float>(renderPassCmd->height);
-            viewport.minDepth = 0.0f;
-            viewport.maxDepth = 1.0f;
-            device->fn.CmdSetViewport(commands, 0, 1, &viewport);
-
-            VkRect2D scissorRect;
-            scissorRect.offset.x = 0;
-            scissorRect.offset.y = 0;
-            scissorRect.extent.width = renderPassCmd->width;
-            scissorRect.extent.height = renderPassCmd->height;
-            device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
-        }
-
-        DescriptorSetTracker descriptorSets = {};
-        RenderPipeline* lastPipeline = nullptr;
-
-        auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
-            switch (type) {
-                case Command::Draw: {
-                    DrawCmd* draw = iter->NextCommand<DrawCmd>();
-
-                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
-                    device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
-                                       draw->firstVertex, draw->firstInstance);
-                    break;
-                }
-
-                case Command::DrawIndexed: {
-                    DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
-
-                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
-                    device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
-                                              draw->firstIndex, draw->baseVertex,
-                                              draw->firstInstance);
-                    break;
-                }
-
-                case Command::DrawIndirect: {
-                    DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
-                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
-
-                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
-                    device->fn.CmdDrawIndirect(commands, buffer->GetHandle(),
-                                               static_cast<VkDeviceSize>(draw->indirectOffset), 1,
-                                               0);
-                    break;
-                }
-
-                case Command::DrawIndexedIndirect: {
-                    DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
-                    Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
-                    ASSERT(buffer != nullptr);
-
-                    descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
-                    device->fn.CmdDrawIndexedIndirect(
-                        commands, buffer->GetHandle(),
-                        static_cast<VkDeviceSize>(draw->indirectOffset), 1, 0);
-                    break;
-                }
-
-                case Command::InsertDebugMarker: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
-                        const char* label = iter->NextData<char>(cmd->length + 1);
-                        VkDebugUtilsLabelEXT utilsLabel;
-                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
-                        utilsLabel.pNext = nullptr;
-                        utilsLabel.pLabelName = label;
-                        // Default color to black
-                        utilsLabel.color[0] = 0.0;
-                        utilsLabel.color[1] = 0.0;
-                        utilsLabel.color[2] = 0.0;
-                        utilsLabel.color[3] = 1.0;
-                        device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
-                    } else {
-                        SkipCommand(iter, Command::InsertDebugMarker);
-                    }
-                    break;
-                }
-
-                case Command::PopDebugGroup: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        iter->NextCommand<PopDebugGroupCmd>();
-                        device->fn.CmdEndDebugUtilsLabelEXT(commands);
-                    } else {
-                        SkipCommand(iter, Command::PopDebugGroup);
-                    }
-                    break;
-                }
-
-                case Command::PushDebugGroup: {
-                    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-                        PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
-                        const char* label = iter->NextData<char>(cmd->length + 1);
-                        VkDebugUtilsLabelEXT utilsLabel;
-                        utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
-                        utilsLabel.pNext = nullptr;
-                        utilsLabel.pLabelName = label;
-                        // Default color to black
-                        utilsLabel.color[0] = 0.0;
-                        utilsLabel.color[1] = 0.0;
-                        utilsLabel.color[2] = 0.0;
-                        utilsLabel.color[3] = 1.0;
-                        device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
-                    } else {
-                        SkipCommand(iter, Command::PushDebugGroup);
-                    }
-                    break;
-                }
-
-                case Command::SetBindGroup: {
-                    SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
-                    BindGroup* bindGroup = ToBackend(cmd->group.Get());
-                    uint32_t* dynamicOffsets = nullptr;
-                    if (cmd->dynamicOffsetCount > 0) {
-                        dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
-                    }
-
-                    descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
-                                                  dynamicOffsets);
-                    break;
-                }
-
-                case Command::SetIndexBuffer: {
-                    SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
-                    VkBuffer indexBuffer = ToBackend(cmd->buffer)->GetHandle();
-
-                    device->fn.CmdBindIndexBuffer(commands, indexBuffer, cmd->offset,
-                                                  VulkanIndexType(cmd->format));
-                    break;
-                }
-
-                case Command::SetRenderPipeline: {
-                    SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
-                    RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
-
-                    device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_GRAPHICS,
-                                               pipeline->GetHandle());
-                    lastPipeline = pipeline;
-
-                    descriptorSets.OnSetPipeline(pipeline);
-                    break;
-                }
-
-                case Command::SetVertexBuffer: {
-                    SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
-                    VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
-                    VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
-
-                    device->fn.CmdBindVertexBuffers(commands, static_cast<uint8_t>(cmd->slot), 1,
-                                                    &*buffer, &offset);
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-                    break;
-            }
+        float blendConstants[4] = {
+            0.0f,
+            0.0f,
+            0.0f,
+            0.0f,
         };
+        device->fn.CmdSetBlendConstants(commands, blendConstants);
 
-        Command type;
-        while (mCommands.NextCommandId(&type)) {
-            switch (type) {
-                case Command::EndRenderPass: {
-                    mCommands.NextCommand<EndRenderPassCmd>();
-                    device->fn.CmdEndRenderPass(commands);
-                    return {};
+        // The viewport and scissor default to cover all of the attachments
+        VkViewport viewport;
+        viewport.x = 0.0f;
+        viewport.y = static_cast<float>(renderPassCmd->height);
+        viewport.width = static_cast<float>(renderPassCmd->width);
+        viewport.height = -static_cast<float>(renderPassCmd->height);
+        viewport.minDepth = 0.0f;
+        viewport.maxDepth = 1.0f;
+        device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+
+        VkRect2D scissorRect;
+        scissorRect.offset.x = 0;
+        scissorRect.offset.y = 0;
+        scissorRect.extent.width = renderPassCmd->width;
+        scissorRect.extent.height = renderPassCmd->height;
+        device->fn.CmdSetScissor(commands, 0, 1, &scissorRect);
+    }
+
+    DescriptorSetTracker descriptorSets = {};
+    RenderPipeline* lastPipeline = nullptr;
+
+    auto EncodeRenderBundleCommand = [&](CommandIterator* iter, Command type) {
+        switch (type) {
+            case Command::Draw: {
+                DrawCmd* draw = iter->NextCommand<DrawCmd>();
+
+                descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                device->fn.CmdDraw(commands, draw->vertexCount, draw->instanceCount,
+                                   draw->firstVertex, draw->firstInstance);
+                break;
+            }
+
+            case Command::DrawIndexed: {
+                DrawIndexedCmd* draw = iter->NextCommand<DrawIndexedCmd>();
+
+                descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                device->fn.CmdDrawIndexed(commands, draw->indexCount, draw->instanceCount,
+                                          draw->firstIndex, draw->baseVertex, draw->firstInstance);
+                break;
+            }
+
+            case Command::DrawIndirect: {
+                DrawIndirectCmd* draw = iter->NextCommand<DrawIndirectCmd>();
+                Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+
+                descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                device->fn.CmdDrawIndirect(commands, buffer->GetHandle(),
+                                           static_cast<VkDeviceSize>(draw->indirectOffset), 1, 0);
+                break;
+            }
+
+            case Command::DrawIndexedIndirect: {
+                DrawIndexedIndirectCmd* draw = iter->NextCommand<DrawIndexedIndirectCmd>();
+                Buffer* buffer = ToBackend(draw->indirectBuffer.Get());
+                ASSERT(buffer != nullptr);
+
+                descriptorSets.Apply(device, recordingContext, VK_PIPELINE_BIND_POINT_GRAPHICS);
+                device->fn.CmdDrawIndexedIndirect(commands, buffer->GetHandle(),
+                                                  static_cast<VkDeviceSize>(draw->indirectOffset),
+                                                  1, 0);
+                break;
+            }
+
+            case Command::InsertDebugMarker: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    InsertDebugMarkerCmd* cmd = iter->NextCommand<InsertDebugMarkerCmd>();
+                    const char* label = iter->NextData<char>(cmd->length + 1);
+                    VkDebugUtilsLabelEXT utilsLabel;
+                    utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                    utilsLabel.pNext = nullptr;
+                    utilsLabel.pLabelName = label;
+                    // Default color to black
+                    utilsLabel.color[0] = 0.0;
+                    utilsLabel.color[1] = 0.0;
+                    utilsLabel.color[2] = 0.0;
+                    utilsLabel.color[3] = 1.0;
+                    device->fn.CmdInsertDebugUtilsLabelEXT(commands, &utilsLabel);
+                } else {
+                    SkipCommand(iter, Command::InsertDebugMarker);
+                }
+                break;
+            }
+
+            case Command::PopDebugGroup: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    iter->NextCommand<PopDebugGroupCmd>();
+                    device->fn.CmdEndDebugUtilsLabelEXT(commands);
+                } else {
+                    SkipCommand(iter, Command::PopDebugGroup);
+                }
+                break;
+            }
+
+            case Command::PushDebugGroup: {
+                if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+                    PushDebugGroupCmd* cmd = iter->NextCommand<PushDebugGroupCmd>();
+                    const char* label = iter->NextData<char>(cmd->length + 1);
+                    VkDebugUtilsLabelEXT utilsLabel;
+                    utilsLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
+                    utilsLabel.pNext = nullptr;
+                    utilsLabel.pLabelName = label;
+                    // Default color to black
+                    utilsLabel.color[0] = 0.0;
+                    utilsLabel.color[1] = 0.0;
+                    utilsLabel.color[2] = 0.0;
+                    utilsLabel.color[3] = 1.0;
+                    device->fn.CmdBeginDebugUtilsLabelEXT(commands, &utilsLabel);
+                } else {
+                    SkipCommand(iter, Command::PushDebugGroup);
+                }
+                break;
+            }
+
+            case Command::SetBindGroup: {
+                SetBindGroupCmd* cmd = iter->NextCommand<SetBindGroupCmd>();
+                BindGroup* bindGroup = ToBackend(cmd->group.Get());
+                uint32_t* dynamicOffsets = nullptr;
+                if (cmd->dynamicOffsetCount > 0) {
+                    dynamicOffsets = iter->NextData<uint32_t>(cmd->dynamicOffsetCount);
                 }
 
-                case Command::SetBlendConstant: {
-                    SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
-                    const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
-                    device->fn.CmdSetBlendConstants(commands, blendConstants.data());
-                    break;
+                descriptorSets.OnSetBindGroup(cmd->index, bindGroup, cmd->dynamicOffsetCount,
+                                              dynamicOffsets);
+                break;
+            }
+
+            case Command::SetIndexBuffer: {
+                SetIndexBufferCmd* cmd = iter->NextCommand<SetIndexBufferCmd>();
+                VkBuffer indexBuffer = ToBackend(cmd->buffer)->GetHandle();
+
+                device->fn.CmdBindIndexBuffer(commands, indexBuffer, cmd->offset,
+                                              VulkanIndexType(cmd->format));
+                break;
+            }
+
+            case Command::SetRenderPipeline: {
+                SetRenderPipelineCmd* cmd = iter->NextCommand<SetRenderPipelineCmd>();
+                RenderPipeline* pipeline = ToBackend(cmd->pipeline).Get();
+
+                device->fn.CmdBindPipeline(commands, VK_PIPELINE_BIND_POINT_GRAPHICS,
+                                           pipeline->GetHandle());
+                lastPipeline = pipeline;
+
+                descriptorSets.OnSetPipeline(pipeline);
+                break;
+            }
+
+            case Command::SetVertexBuffer: {
+                SetVertexBufferCmd* cmd = iter->NextCommand<SetVertexBufferCmd>();
+                VkBuffer buffer = ToBackend(cmd->buffer)->GetHandle();
+                VkDeviceSize offset = static_cast<VkDeviceSize>(cmd->offset);
+
+                device->fn.CmdBindVertexBuffers(commands, static_cast<uint8_t>(cmd->slot), 1,
+                                                &*buffer, &offset);
+                break;
+            }
+
+            default:
+                UNREACHABLE();
+                break;
+        }
+    };
+
+    Command type;
+    while (mCommands.NextCommandId(&type)) {
+        switch (type) {
+            case Command::EndRenderPass: {
+                mCommands.NextCommand<EndRenderPassCmd>();
+                device->fn.CmdEndRenderPass(commands);
+                return {};
+            }
+
+            case Command::SetBlendConstant: {
+                SetBlendConstantCmd* cmd = mCommands.NextCommand<SetBlendConstantCmd>();
+                const std::array<float, 4> blendConstants = ConvertToFloatColor(cmd->color);
+                device->fn.CmdSetBlendConstants(commands, blendConstants.data());
+                break;
+            }
+
+            case Command::SetStencilReference: {
+                SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
+                device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
+                                                  cmd->reference);
+                break;
+            }
+
+            case Command::SetViewport: {
+                SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
+                VkViewport viewport;
+                viewport.x = cmd->x;
+                viewport.y = cmd->y + cmd->height;
+                viewport.width = cmd->width;
+                viewport.height = -cmd->height;
+                viewport.minDepth = cmd->minDepth;
+                viewport.maxDepth = cmd->maxDepth;
+
+                // Vulkan disallows width = 0, but VK_KHR_maintenance1 which we require allows
+                // height = 0 so use that to do an empty viewport.
+                if (viewport.width == 0) {
+                    viewport.height = 0;
+
+                    // Set the viewport x range to a range that's always valid.
+                    viewport.x = 0;
+                    viewport.width = 1;
                 }
 
-                case Command::SetStencilReference: {
-                    SetStencilReferenceCmd* cmd = mCommands.NextCommand<SetStencilReferenceCmd>();
-                    device->fn.CmdSetStencilReference(commands, VK_STENCIL_FRONT_AND_BACK,
-                                                      cmd->reference);
-                    break;
-                }
+                device->fn.CmdSetViewport(commands, 0, 1, &viewport);
+                break;
+            }
 
-                case Command::SetViewport: {
-                    SetViewportCmd* cmd = mCommands.NextCommand<SetViewportCmd>();
-                    VkViewport viewport;
-                    viewport.x = cmd->x;
-                    viewport.y = cmd->y + cmd->height;
-                    viewport.width = cmd->width;
-                    viewport.height = -cmd->height;
-                    viewport.minDepth = cmd->minDepth;
-                    viewport.maxDepth = cmd->maxDepth;
+            case Command::SetScissorRect: {
+                SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
+                VkRect2D rect;
+                rect.offset.x = cmd->x;
+                rect.offset.y = cmd->y;
+                rect.extent.width = cmd->width;
+                rect.extent.height = cmd->height;
 
-                    // Vulkan disallows width = 0, but VK_KHR_maintenance1 which we require allows
-                    // height = 0 so use that to do an empty viewport.
-                    if (viewport.width == 0) {
-                        viewport.height = 0;
+                device->fn.CmdSetScissor(commands, 0, 1, &rect);
+                break;
+            }
 
-                        // Set the viewport x range to a range that's always valid.
-                        viewport.x = 0;
-                        viewport.width = 1;
+            case Command::ExecuteBundles: {
+                ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
+                auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+
+                for (uint32_t i = 0; i < cmd->count; ++i) {
+                    CommandIterator* iter = bundles[i]->GetCommands();
+                    iter->Reset();
+                    while (iter->NextCommandId(&type)) {
+                        EncodeRenderBundleCommand(iter, type);
                     }
-
-                    device->fn.CmdSetViewport(commands, 0, 1, &viewport);
-                    break;
                 }
+                break;
+            }
 
-                case Command::SetScissorRect: {
-                    SetScissorRectCmd* cmd = mCommands.NextCommand<SetScissorRectCmd>();
-                    VkRect2D rect;
-                    rect.offset.x = cmd->x;
-                    rect.offset.y = cmd->y;
-                    rect.extent.width = cmd->width;
-                    rect.extent.height = cmd->height;
+            case Command::BeginOcclusionQuery: {
+                BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
 
-                    device->fn.CmdSetScissor(commands, 0, 1, &rect);
-                    break;
-                }
+                device->fn.CmdBeginQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+                                         cmd->queryIndex, 0);
+                break;
+            }
 
-                case Command::ExecuteBundles: {
-                    ExecuteBundlesCmd* cmd = mCommands.NextCommand<ExecuteBundlesCmd>();
-                    auto bundles = mCommands.NextData<Ref<RenderBundleBase>>(cmd->count);
+            case Command::EndOcclusionQuery: {
+                EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
 
-                    for (uint32_t i = 0; i < cmd->count; ++i) {
-                        CommandIterator* iter = bundles[i]->GetCommands();
-                        iter->Reset();
-                        while (iter->NextCommandId(&type)) {
-                            EncodeRenderBundleCommand(iter, type);
-                        }
-                    }
-                    break;
-                }
+                device->fn.CmdEndQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
+                                       cmd->queryIndex);
+                break;
+            }
 
-                case Command::BeginOcclusionQuery: {
-                    BeginOcclusionQueryCmd* cmd = mCommands.NextCommand<BeginOcclusionQueryCmd>();
+            case Command::WriteTimestamp: {
+                WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
 
-                    device->fn.CmdBeginQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
-                                             cmd->queryIndex, 0);
-                    break;
-                }
+                RecordWriteTimestampCmd(recordingContext, device, cmd);
+                break;
+            }
 
-                case Command::EndOcclusionQuery: {
-                    EndOcclusionQueryCmd* cmd = mCommands.NextCommand<EndOcclusionQueryCmd>();
-
-                    device->fn.CmdEndQuery(commands, ToBackend(cmd->querySet.Get())->GetHandle(),
-                                           cmd->queryIndex);
-                    break;
-                }
-
-                case Command::WriteTimestamp: {
-                    WriteTimestampCmd* cmd = mCommands.NextCommand<WriteTimestampCmd>();
-
-                    RecordWriteTimestampCmd(recordingContext, device, cmd);
-                    break;
-                }
-
-                default: {
-                    EncodeRenderBundleCommand(&mCommands, type);
-                    break;
-                }
+            default: {
+                EncodeRenderBundleCommand(&mCommands, type);
+                break;
             }
         }
-
-        // EndRenderPass should have been called
-        UNREACHABLE();
     }
 
+    // EndRenderPass should have been called
+    UNREACHABLE();
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/CommandBufferVk.h b/src/dawn/native/vulkan/CommandBufferVk.h
index e4ec410..dbb7fdc 100644
--- a/src/dawn/native/vulkan/CommandBufferVk.h
+++ b/src/dawn/native/vulkan/CommandBufferVk.h
@@ -21,34 +21,34 @@
 #include "dawn/common/vulkan_platform.h"
 
 namespace dawn::native {
-    struct BeginRenderPassCmd;
-    struct TextureCopy;
+struct BeginRenderPassCmd;
+struct TextureCopy;
 }  // namespace dawn::native
 
 namespace dawn::native::vulkan {
 
-    struct CommandRecordingContext;
-    class Device;
+struct CommandRecordingContext;
+class Device;
 
-    class CommandBuffer final : public CommandBufferBase {
-      public:
-        static Ref<CommandBuffer> Create(CommandEncoder* encoder,
-                                         const CommandBufferDescriptor* descriptor);
+class CommandBuffer final : public CommandBufferBase {
+  public:
+    static Ref<CommandBuffer> Create(CommandEncoder* encoder,
+                                     const CommandBufferDescriptor* descriptor);
 
-        MaybeError RecordCommands(CommandRecordingContext* recordingContext);
+    MaybeError RecordCommands(CommandRecordingContext* recordingContext);
 
-      private:
-        CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
+  private:
+    CommandBuffer(CommandEncoder* encoder, const CommandBufferDescriptor* descriptor);
 
-        MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
-                                     const ComputePassResourceUsage& resourceUsages);
-        MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
-                                    BeginRenderPassCmd* renderPass);
-        MaybeError RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
-                                                      const TextureCopy& srcCopy,
-                                                      const TextureCopy& dstCopy,
-                                                      const Extent3D& copySize);
-    };
+    MaybeError RecordComputePass(CommandRecordingContext* recordingContext,
+                                 const ComputePassResourceUsage& resourceUsages);
+    MaybeError RecordRenderPass(CommandRecordingContext* recordingContext,
+                                BeginRenderPassCmd* renderPass);
+    MaybeError RecordCopyImageWithTemporaryBuffer(CommandRecordingContext* recordingContext,
+                                                  const TextureCopy& srcCopy,
+                                                  const TextureCopy& dstCopy,
+                                                  const Extent3D& copySize);
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/CommandRecordingContext.h b/src/dawn/native/vulkan/CommandRecordingContext.h
index 8399ba1..b5ced10 100644
--- a/src/dawn/native/vulkan/CommandRecordingContext.h
+++ b/src/dawn/native/vulkan/CommandRecordingContext.h
@@ -20,21 +20,21 @@
 #include "dawn/native/vulkan/BufferVk.h"
 
 namespace dawn::native::vulkan {
-    // Used to track operations that are handled after recording.
-    // Currently only tracks semaphores, but may be used to do barrier coalescing in the future.
-    struct CommandRecordingContext {
-        VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
-        std::vector<VkSemaphore> waitSemaphores = {};
-        std::vector<VkSemaphore> signalSemaphores = {};
+// Used to track operations that are handled after recording.
+// Currently only tracks semaphores, but may be used to do barrier coalescing in the future.
+struct CommandRecordingContext {
+    VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
+    std::vector<VkSemaphore> waitSemaphores = {};
+    std::vector<VkSemaphore> signalSemaphores = {};
 
-        // The internal buffers used in the workaround of texture-to-texture copies with compressed
-        // formats.
-        std::vector<Ref<Buffer>> tempBuffers;
+    // The internal buffers used in the workaround of texture-to-texture copies with compressed
+    // formats.
+    std::vector<Ref<Buffer>> tempBuffers;
 
-        // For Device state tracking only.
-        VkCommandPool commandPool = VK_NULL_HANDLE;
-        bool used = false;
-    };
+    // For Device state tracking only.
+    VkCommandPool commandPool = VK_NULL_HANDLE;
+    bool used = false;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/ComputePipelineVk.cpp b/src/dawn/native/vulkan/ComputePipelineVk.cpp
index 037e330..97e22ee 100644
--- a/src/dawn/native/vulkan/ComputePipelineVk.cpp
+++ b/src/dawn/native/vulkan/ComputePipelineVk.cpp
@@ -28,98 +28,97 @@
 
 namespace dawn::native::vulkan {
 
-    // static
-    Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
-        Device* device,
-        const ComputePipelineDescriptor* descriptor) {
-        return AcquireRef(new ComputePipeline(device, descriptor));
+// static
+Ref<ComputePipeline> ComputePipeline::CreateUninitialized(
+    Device* device,
+    const ComputePipelineDescriptor* descriptor) {
+    return AcquireRef(new ComputePipeline(device, descriptor));
+}
+
+MaybeError ComputePipeline::Initialize() {
+    VkComputePipelineCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.layout = ToBackend(GetLayout())->GetHandle();
+    createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
+    createInfo.basePipelineIndex = -1;
+
+    createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    createInfo.stage.pNext = nullptr;
+    createInfo.stage.flags = 0;
+    createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
+    // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
+    const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
+    ShaderModule* module = ToBackend(computeStage.module.Get());
+    PipelineLayout* layout = ToBackend(GetLayout());
+    const ShaderModule::Spirv* spirv;
+    DAWN_TRY_ASSIGN((std::tie(createInfo.stage.module, spirv)),
+                    module->GetHandleAndSpirv(computeStage.entryPoint.c_str(), layout));
+
+    createInfo.stage.pName = computeStage.entryPoint.c_str();
+
+    std::vector<OverridableConstantScalar> specializationDataEntries;
+    std::vector<VkSpecializationMapEntry> specializationMapEntries;
+    VkSpecializationInfo specializationInfo{};
+    createInfo.stage.pSpecializationInfo = GetVkSpecializationInfo(
+        computeStage, &specializationInfo, &specializationDataEntries, &specializationMapEntries);
+
+    Device* device = ToBackend(GetDevice());
+
+    PNextChainBuilder stageExtChain(&createInfo.stage);
+
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
+    uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
+    if (computeSubgroupSize != 0u) {
+        ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
+        subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
+        stageExtChain.Add(
+            &subgroupSizeInfo,
+            VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
     }
 
-    MaybeError ComputePipeline::Initialize() {
-        VkComputePipelineCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.layout = ToBackend(GetLayout())->GetHandle();
-        createInfo.basePipelineHandle = ::VK_NULL_HANDLE;
-        createInfo.basePipelineIndex = -1;
+    // Record cache key information now since the createInfo is not stored.
+    GetCacheKey()
+        ->Record(createInfo, static_cast<const ComputePipeline*>(this)->GetLayout())
+        .RecordIterable(*spirv);
 
-        createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-        createInfo.stage.pNext = nullptr;
-        createInfo.stage.flags = 0;
-        createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
-        // Generate a new VkShaderModule with BindingRemapper tint transform for each pipeline
-        const ProgrammableStage& computeStage = GetStage(SingleShaderStage::Compute);
-        ShaderModule* module = ToBackend(computeStage.module.Get());
-        PipelineLayout* layout = ToBackend(GetLayout());
-        const ShaderModule::Spirv* spirv;
-        DAWN_TRY_ASSIGN((std::tie(createInfo.stage.module, spirv)),
-                        module->GetHandleAndSpirv(computeStage.entryPoint.c_str(), layout));
+    DAWN_TRY(
+        CheckVkSuccess(device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
+                                                         &createInfo, nullptr, &*mHandle),
+                       "CreateComputePipeline"));
 
-        createInfo.stage.pName = computeStage.entryPoint.c_str();
+    SetLabelImpl();
 
-        std::vector<OverridableConstantScalar> specializationDataEntries;
-        std::vector<VkSpecializationMapEntry> specializationMapEntries;
-        VkSpecializationInfo specializationInfo{};
-        createInfo.stage.pSpecializationInfo =
-            GetVkSpecializationInfo(computeStage, &specializationInfo, &specializationDataEntries,
-                                    &specializationMapEntries);
+    return {};
+}
 
-        Device* device = ToBackend(GetDevice());
+void ComputePipeline::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_ComputePipeline", GetLabel());
+}
 
-        PNextChainBuilder stageExtChain(&createInfo.stage);
+ComputePipeline::~ComputePipeline() = default;
 
-        VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeInfo = {};
-        uint32_t computeSubgroupSize = device->GetComputeSubgroupSize();
-        if (computeSubgroupSize != 0u) {
-            ASSERT(device->GetDeviceInfo().HasExt(DeviceExt::SubgroupSizeControl));
-            subgroupSizeInfo.requiredSubgroupSize = computeSubgroupSize;
-            stageExtChain.Add(
-                &subgroupSizeInfo,
-                VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT);
-        }
+void ComputePipeline::DestroyImpl() {
+    ComputePipelineBase::DestroyImpl();
 
-        // Record cache key information now since the createInfo is not stored.
-        GetCacheKey()
-            ->Record(createInfo, static_cast<const ComputePipeline*>(this)->GetLayout())
-            .RecordIterable(*spirv);
-
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.CreateComputePipelines(device->GetVkDevice(), ::VK_NULL_HANDLE, 1,
-                                              &createInfo, nullptr, &*mHandle),
-            "CreateComputePipeline"));
-
-        SetLabelImpl();
-
-        return {};
+    if (mHandle != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
     }
+}
 
-    void ComputePipeline::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_ComputePipeline", GetLabel());
-    }
+VkPipeline ComputePipeline::GetHandle() const {
+    return mHandle;
+}
 
-    ComputePipeline::~ComputePipeline() = default;
-
-    void ComputePipeline::DestroyImpl() {
-        ComputePipelineBase::DestroyImpl();
-
-        if (mHandle != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            mHandle = VK_NULL_HANDLE;
-        }
-    }
-
-    VkPipeline ComputePipeline::GetHandle() const {
-        return mHandle;
-    }
-
-    void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
-                                          WGPUCreateComputePipelineAsyncCallback callback,
-                                          void* userdata) {
-        std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
-            std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
-                                                             userdata);
-        CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
-    }
+void ComputePipeline::InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                      WGPUCreateComputePipelineAsyncCallback callback,
+                                      void* userdata) {
+    std::unique_ptr<CreateComputePipelineAsyncTask> asyncTask =
+        std::make_unique<CreateComputePipelineAsyncTask>(std::move(computePipeline), callback,
+                                                         userdata);
+    CreateComputePipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ComputePipelineVk.h b/src/dawn/native/vulkan/ComputePipelineVk.h
index c2b7d83..2159db8 100644
--- a/src/dawn/native/vulkan/ComputePipelineVk.h
+++ b/src/dawn/native/vulkan/ComputePipelineVk.h
@@ -22,31 +22,30 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class ComputePipeline final : public ComputePipelineBase {
-      public:
-        static Ref<ComputePipeline> CreateUninitialized(
-            Device* device,
-            const ComputePipelineDescriptor* descriptor);
-        static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
-                                    WGPUCreateComputePipelineAsyncCallback callback,
-                                    void* userdata);
+class ComputePipeline final : public ComputePipelineBase {
+  public:
+    static Ref<ComputePipeline> CreateUninitialized(Device* device,
+                                                    const ComputePipelineDescriptor* descriptor);
+    static void InitializeAsync(Ref<ComputePipelineBase> computePipeline,
+                                WGPUCreateComputePipelineAsyncCallback callback,
+                                void* userdata);
 
-        VkPipeline GetHandle() const;
+    VkPipeline GetHandle() const;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-      private:
-        ~ComputePipeline() override;
-        void DestroyImpl() override;
-        using ComputePipelineBase::ComputePipelineBase;
+  private:
+    ~ComputePipeline() override;
+    void DestroyImpl() override;
+    using ComputePipelineBase::ComputePipelineBase;
 
-        VkPipeline mHandle = VK_NULL_HANDLE;
-    };
+    VkPipeline mHandle = VK_NULL_HANDLE;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/DescriptorSetAllocation.h b/src/dawn/native/vulkan/DescriptorSetAllocation.h
index ffe543e..ad0c6f8 100644
--- a/src/dawn/native/vulkan/DescriptorSetAllocation.h
+++ b/src/dawn/native/vulkan/DescriptorSetAllocation.h
@@ -19,12 +19,12 @@
 
 namespace dawn::native::vulkan {
 
-    // Contains a descriptor set along with data necessary to track its allocation.
-    struct DescriptorSetAllocation {
-        VkDescriptorSet set = VK_NULL_HANDLE;
-        uint32_t poolIndex;
-        uint16_t setIndex;
-    };
+// Contains a descriptor set along with data necessary to track its allocation.
+struct DescriptorSetAllocation {
+    VkDescriptorSet set = VK_NULL_HANDLE;
+    uint32_t poolIndex;
+    uint16_t setIndex;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/DescriptorSetAllocator.cpp b/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
index 8063e58..422ff78 100644
--- a/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
+++ b/src/dawn/native/vulkan/DescriptorSetAllocator.cpp
@@ -23,168 +23,167 @@
 
 namespace dawn::native::vulkan {
 
-    // TODO(enga): Figure out this value.
-    static constexpr uint32_t kMaxDescriptorsPerPool = 512;
+// TODO(enga): Figure out this value.
+static constexpr uint32_t kMaxDescriptorsPerPool = 512;
 
-    // static
-    Ref<DescriptorSetAllocator> DescriptorSetAllocator::Create(
-        BindGroupLayout* layout,
-        std::map<VkDescriptorType, uint32_t> descriptorCountPerType) {
-        return AcquireRef(new DescriptorSetAllocator(layout, descriptorCountPerType));
+// static
+Ref<DescriptorSetAllocator> DescriptorSetAllocator::Create(
+    BindGroupLayout* layout,
+    std::map<VkDescriptorType, uint32_t> descriptorCountPerType) {
+    return AcquireRef(new DescriptorSetAllocator(layout, descriptorCountPerType));
+}
+
+DescriptorSetAllocator::DescriptorSetAllocator(
+    BindGroupLayout* layout,
+    std::map<VkDescriptorType, uint32_t> descriptorCountPerType)
+    : ObjectBase(layout->GetDevice()), mLayout(layout) {
+    ASSERT(layout != nullptr);
+
+    // Compute the total number of descriptors for this layout.
+    uint32_t totalDescriptorCount = 0;
+    mPoolSizes.reserve(descriptorCountPerType.size());
+    for (const auto& [type, count] : descriptorCountPerType) {
+        ASSERT(count > 0);
+        totalDescriptorCount += count;
+        mPoolSizes.push_back(VkDescriptorPoolSize{type, count});
     }
 
-    DescriptorSetAllocator::DescriptorSetAllocator(
-        BindGroupLayout* layout,
-        std::map<VkDescriptorType, uint32_t> descriptorCountPerType)
-        : ObjectBase(layout->GetDevice()), mLayout(layout) {
-        ASSERT(layout != nullptr);
+    if (totalDescriptorCount == 0) {
+        // Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
+        // number of pools, each of which has non-zero descriptor counts.
+        // Since the descriptor set layout is empty, we should be able to allocate
+        // |kMaxDescriptorsPerPool| sets from this 1-sized descriptor pool.
+        // The type of this descriptor pool doesn't matter because it is never used.
+        mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
+        mMaxSets = kMaxDescriptorsPerPool;
+    } else {
+        ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
+        static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool);
 
-        // Compute the total number of descriptors for this layout.
-        uint32_t totalDescriptorCount = 0;
-        mPoolSizes.reserve(descriptorCountPerType.size());
-        for (const auto& [type, count] : descriptorCountPerType) {
-            ASSERT(count > 0);
-            totalDescriptorCount += count;
-            mPoolSizes.push_back(VkDescriptorPoolSize{type, count});
-        }
+        // Compute the total number of descriptors sets that fits given the max.
+        mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
+        ASSERT(mMaxSets > 0);
 
-        if (totalDescriptorCount == 0) {
-            // Vulkan requires that valid usage of vkCreateDescriptorPool must have a non-zero
-            // number of pools, each of which has non-zero descriptor counts.
-            // Since the descriptor set layout is empty, we should be able to allocate
-            // |kMaxDescriptorsPerPool| sets from this 1-sized descriptor pool.
-            // The type of this descriptor pool doesn't matter because it is never used.
-            mPoolSizes.push_back(VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1});
-            mMaxSets = kMaxDescriptorsPerPool;
-        } else {
-            ASSERT(totalDescriptorCount <= kMaxBindingsPerPipelineLayout);
-            static_assert(kMaxBindingsPerPipelineLayout <= kMaxDescriptorsPerPool);
-
-            // Compute the total number of descriptors sets that fits given the max.
-            mMaxSets = kMaxDescriptorsPerPool / totalDescriptorCount;
-            ASSERT(mMaxSets > 0);
-
-            // Grow the number of desciptors in the pool to fit the computed |mMaxSets|.
-            for (auto& poolSize : mPoolSizes) {
-                poolSize.descriptorCount *= mMaxSets;
-            }
+        // Grow the number of desciptors in the pool to fit the computed |mMaxSets|.
+        for (auto& poolSize : mPoolSizes) {
+            poolSize.descriptorCount *= mMaxSets;
         }
     }
+}
 
-    DescriptorSetAllocator::~DescriptorSetAllocator() {
-        for (auto& pool : mDescriptorPools) {
-            ASSERT(pool.freeSetIndices.size() == mMaxSets);
-            if (pool.vkPool != VK_NULL_HANDLE) {
-                Device* device = ToBackend(GetDevice());
-                device->GetFencedDeleter()->DeleteWhenUnused(pool.vkPool);
-            }
+DescriptorSetAllocator::~DescriptorSetAllocator() {
+    for (auto& pool : mDescriptorPools) {
+        ASSERT(pool.freeSetIndices.size() == mMaxSets);
+        if (pool.vkPool != VK_NULL_HANDLE) {
+            Device* device = ToBackend(GetDevice());
+            device->GetFencedDeleter()->DeleteWhenUnused(pool.vkPool);
         }
     }
+}
 
-    ResultOrError<DescriptorSetAllocation> DescriptorSetAllocator::Allocate() {
-        if (mAvailableDescriptorPoolIndices.empty()) {
-            DAWN_TRY(AllocateDescriptorPool());
-        }
-
-        ASSERT(!mAvailableDescriptorPoolIndices.empty());
-
-        const PoolIndex poolIndex = mAvailableDescriptorPoolIndices.back();
-        DescriptorPool* pool = &mDescriptorPools[poolIndex];
-
-        ASSERT(!pool->freeSetIndices.empty());
-
-        SetIndex setIndex = pool->freeSetIndices.back();
-        pool->freeSetIndices.pop_back();
-
-        if (pool->freeSetIndices.empty()) {
-            mAvailableDescriptorPoolIndices.pop_back();
-        }
-
-        return DescriptorSetAllocation{pool->sets[setIndex], poolIndex, setIndex};
+ResultOrError<DescriptorSetAllocation> DescriptorSetAllocator::Allocate() {
+    if (mAvailableDescriptorPoolIndices.empty()) {
+        DAWN_TRY(AllocateDescriptorPool());
     }
 
-    void DescriptorSetAllocator::Deallocate(DescriptorSetAllocation* allocationInfo) {
-        ASSERT(allocationInfo != nullptr);
-        ASSERT(allocationInfo->set != VK_NULL_HANDLE);
+    ASSERT(!mAvailableDescriptorPoolIndices.empty());
 
-        // We can't reuse the descriptor set right away because the Vulkan spec says in the
-        // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
-        // host execution of the command and the end of the draw/dispatch.
-        Device* device = ToBackend(GetDevice());
-        const ExecutionSerial serial = device->GetPendingCommandSerial();
-        mPendingDeallocations.Enqueue({allocationInfo->poolIndex, allocationInfo->setIndex},
-                                      serial);
+    const PoolIndex poolIndex = mAvailableDescriptorPoolIndices.back();
+    DescriptorPool* pool = &mDescriptorPools[poolIndex];
 
-        if (mLastDeallocationSerial != serial) {
-            device->EnqueueDeferredDeallocation(this);
-            mLastDeallocationSerial = serial;
-        }
+    ASSERT(!pool->freeSetIndices.empty());
 
-        // Clear the content of allocation so that use after frees are more visible.
-        *allocationInfo = {};
+    SetIndex setIndex = pool->freeSetIndices.back();
+    pool->freeSetIndices.pop_back();
+
+    if (pool->freeSetIndices.empty()) {
+        mAvailableDescriptorPoolIndices.pop_back();
     }
 
-    void DescriptorSetAllocator::FinishDeallocation(ExecutionSerial completedSerial) {
-        for (const Deallocation& dealloc : mPendingDeallocations.IterateUpTo(completedSerial)) {
-            ASSERT(dealloc.poolIndex < mDescriptorPools.size());
+    return DescriptorSetAllocation{pool->sets[setIndex], poolIndex, setIndex};
+}
 
-            auto& freeSetIndices = mDescriptorPools[dealloc.poolIndex].freeSetIndices;
-            if (freeSetIndices.empty()) {
-                mAvailableDescriptorPoolIndices.emplace_back(dealloc.poolIndex);
-            }
-            freeSetIndices.emplace_back(dealloc.setIndex);
-        }
-        mPendingDeallocations.ClearUpTo(completedSerial);
+void DescriptorSetAllocator::Deallocate(DescriptorSetAllocation* allocationInfo) {
+    ASSERT(allocationInfo != nullptr);
+    ASSERT(allocationInfo->set != VK_NULL_HANDLE);
+
+    // We can't reuse the descriptor set right away because the Vulkan spec says in the
+    // documentation for vkCmdBindDescriptorSets that the set may be consumed any time between
+    // host execution of the command and the end of the draw/dispatch.
+    Device* device = ToBackend(GetDevice());
+    const ExecutionSerial serial = device->GetPendingCommandSerial();
+    mPendingDeallocations.Enqueue({allocationInfo->poolIndex, allocationInfo->setIndex}, serial);
+
+    if (mLastDeallocationSerial != serial) {
+        device->EnqueueDeferredDeallocation(this);
+        mLastDeallocationSerial = serial;
     }
 
-    MaybeError DescriptorSetAllocator::AllocateDescriptorPool() {
-        VkDescriptorPoolCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.maxSets = mMaxSets;
-        createInfo.poolSizeCount = static_cast<PoolIndex>(mPoolSizes.size());
-        createInfo.pPoolSizes = mPoolSizes.data();
+    // Clear the content of allocation so that use after frees are more visible.
+    *allocationInfo = {};
+}
 
-        Device* device = ToBackend(GetDevice());
+void DescriptorSetAllocator::FinishDeallocation(ExecutionSerial completedSerial) {
+    for (const Deallocation& dealloc : mPendingDeallocations.IterateUpTo(completedSerial)) {
+        ASSERT(dealloc.poolIndex < mDescriptorPools.size());
 
-        VkDescriptorPool descriptorPool;
-        DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
-                                                                nullptr, &*descriptorPool),
-                                "CreateDescriptorPool"));
-
-        std::vector<VkDescriptorSetLayout> layouts(mMaxSets, mLayout->GetHandle());
-
-        VkDescriptorSetAllocateInfo allocateInfo;
-        allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
-        allocateInfo.pNext = nullptr;
-        allocateInfo.descriptorPool = descriptorPool;
-        allocateInfo.descriptorSetCount = mMaxSets;
-        allocateInfo.pSetLayouts = AsVkArray(layouts.data());
-
-        std::vector<VkDescriptorSet> sets(mMaxSets);
-        MaybeError result =
-            CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
-                                                             AsVkArray(sets.data())),
-                           "AllocateDescriptorSets");
-        if (result.IsError()) {
-            // On an error we can destroy the pool immediately because no command references it.
-            device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
-            DAWN_TRY(std::move(result));
+        auto& freeSetIndices = mDescriptorPools[dealloc.poolIndex].freeSetIndices;
+        if (freeSetIndices.empty()) {
+            mAvailableDescriptorPoolIndices.emplace_back(dealloc.poolIndex);
         }
-
-        std::vector<SetIndex> freeSetIndices;
-        freeSetIndices.reserve(mMaxSets);
-
-        for (SetIndex i = 0; i < mMaxSets; ++i) {
-            freeSetIndices.push_back(i);
-        }
-
-        mAvailableDescriptorPoolIndices.push_back(mDescriptorPools.size());
-        mDescriptorPools.emplace_back(
-            DescriptorPool{descriptorPool, std::move(sets), std::move(freeSetIndices)});
-
-        return {};
+        freeSetIndices.emplace_back(dealloc.setIndex);
     }
+    mPendingDeallocations.ClearUpTo(completedSerial);
+}
+
+MaybeError DescriptorSetAllocator::AllocateDescriptorPool() {
+    VkDescriptorPoolCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.maxSets = mMaxSets;
+    createInfo.poolSizeCount = static_cast<PoolIndex>(mPoolSizes.size());
+    createInfo.pPoolSizes = mPoolSizes.data();
+
+    Device* device = ToBackend(GetDevice());
+
+    VkDescriptorPool descriptorPool;
+    DAWN_TRY(CheckVkSuccess(device->fn.CreateDescriptorPool(device->GetVkDevice(), &createInfo,
+                                                            nullptr, &*descriptorPool),
+                            "CreateDescriptorPool"));
+
+    std::vector<VkDescriptorSetLayout> layouts(mMaxSets, mLayout->GetHandle());
+
+    VkDescriptorSetAllocateInfo allocateInfo;
+    allocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+    allocateInfo.pNext = nullptr;
+    allocateInfo.descriptorPool = descriptorPool;
+    allocateInfo.descriptorSetCount = mMaxSets;
+    allocateInfo.pSetLayouts = AsVkArray(layouts.data());
+
+    std::vector<VkDescriptorSet> sets(mMaxSets);
+    MaybeError result =
+        CheckVkSuccess(device->fn.AllocateDescriptorSets(device->GetVkDevice(), &allocateInfo,
+                                                         AsVkArray(sets.data())),
+                       "AllocateDescriptorSets");
+    if (result.IsError()) {
+        // On an error we can destroy the pool immediately because no command references it.
+        device->fn.DestroyDescriptorPool(device->GetVkDevice(), descriptorPool, nullptr);
+        DAWN_TRY(std::move(result));
+    }
+
+    std::vector<SetIndex> freeSetIndices;
+    freeSetIndices.reserve(mMaxSets);
+
+    for (SetIndex i = 0; i < mMaxSets; ++i) {
+        freeSetIndices.push_back(i);
+    }
+
+    mAvailableDescriptorPoolIndices.push_back(mDescriptorPools.size());
+    mDescriptorPools.emplace_back(
+        DescriptorPool{descriptorPool, std::move(sets), std::move(freeSetIndices)});
+
+    return {};
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/DescriptorSetAllocator.h b/src/dawn/native/vulkan/DescriptorSetAllocator.h
index f0212c6..c9060fe 100644
--- a/src/dawn/native/vulkan/DescriptorSetAllocator.h
+++ b/src/dawn/native/vulkan/DescriptorSetAllocator.h
@@ -27,50 +27,50 @@
 
 namespace dawn::native::vulkan {
 
-    class BindGroupLayout;
+class BindGroupLayout;
 
-    class DescriptorSetAllocator : public ObjectBase {
-        using PoolIndex = uint32_t;
-        using SetIndex = uint16_t;
+class DescriptorSetAllocator : public ObjectBase {
+    using PoolIndex = uint32_t;
+    using SetIndex = uint16_t;
 
-      public:
-        static Ref<DescriptorSetAllocator> Create(
-            BindGroupLayout* layout,
-            std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+  public:
+    static Ref<DescriptorSetAllocator> Create(
+        BindGroupLayout* layout,
+        std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
 
-        ResultOrError<DescriptorSetAllocation> Allocate();
-        void Deallocate(DescriptorSetAllocation* allocationInfo);
-        void FinishDeallocation(ExecutionSerial completedSerial);
+    ResultOrError<DescriptorSetAllocation> Allocate();
+    void Deallocate(DescriptorSetAllocation* allocationInfo);
+    void FinishDeallocation(ExecutionSerial completedSerial);
 
-      private:
-        DescriptorSetAllocator(BindGroupLayout* layout,
-                               std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
-        ~DescriptorSetAllocator();
+  private:
+    DescriptorSetAllocator(BindGroupLayout* layout,
+                           std::map<VkDescriptorType, uint32_t> descriptorCountPerType);
+    ~DescriptorSetAllocator();
 
-        MaybeError AllocateDescriptorPool();
+    MaybeError AllocateDescriptorPool();
 
-        BindGroupLayout* mLayout;
+    BindGroupLayout* mLayout;
 
-        std::vector<VkDescriptorPoolSize> mPoolSizes;
-        SetIndex mMaxSets;
+    std::vector<VkDescriptorPoolSize> mPoolSizes;
+    SetIndex mMaxSets;
 
-        struct DescriptorPool {
-            VkDescriptorPool vkPool;
-            std::vector<VkDescriptorSet> sets;
-            std::vector<SetIndex> freeSetIndices;
-        };
-
-        std::vector<PoolIndex> mAvailableDescriptorPoolIndices;
-        std::vector<DescriptorPool> mDescriptorPools;
-
-        struct Deallocation {
-            PoolIndex poolIndex;
-            SetIndex setIndex;
-        };
-        SerialQueue<ExecutionSerial, Deallocation> mPendingDeallocations;
-        ExecutionSerial mLastDeallocationSerial = ExecutionSerial(0);
+    struct DescriptorPool {
+        VkDescriptorPool vkPool;
+        std::vector<VkDescriptorSet> sets;
+        std::vector<SetIndex> freeSetIndices;
     };
 
+    std::vector<PoolIndex> mAvailableDescriptorPoolIndices;
+    std::vector<DescriptorPool> mDescriptorPools;
+
+    struct Deallocation {
+        PoolIndex poolIndex;
+        SetIndex setIndex;
+    };
+    SerialQueue<ExecutionSerial, Deallocation> mPendingDeallocations;
+    ExecutionSerial mLastDeallocationSerial = ExecutionSerial(0);
+};
+
 }  // namespace dawn::native::vulkan
 
 #endif  // SRC_DAWN_NATIVE_VULKAN_DESCRIPTORSETALLOCATOR_H_
diff --git a/src/dawn/native/vulkan/DeviceVk.cpp b/src/dawn/native/vulkan/DeviceVk.cpp
index c3ded04..4e54253 100644
--- a/src/dawn/native/vulkan/DeviceVk.cpp
+++ b/src/dawn/native/vulkan/DeviceVk.cpp
@@ -44,1016 +44,1002 @@
 
 namespace dawn::native::vulkan {
 
-    // static
-    ResultOrError<Ref<Device>> Device::Create(Adapter* adapter,
-                                              const DeviceDescriptor* descriptor) {
-        Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
-        DAWN_TRY(device->Initialize(descriptor));
-        return device;
+// static
+ResultOrError<Ref<Device>> Device::Create(Adapter* adapter, const DeviceDescriptor* descriptor) {
+    Ref<Device> device = AcquireRef(new Device(adapter, descriptor));
+    DAWN_TRY(device->Initialize(descriptor));
+    return device;
+}
+
+Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
+    : DeviceBase(adapter, descriptor) {
+    InitTogglesFromDriver();
+}
+
+MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
+    // Copy the adapter's device info to the device so that we can change the "knobs"
+    mDeviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+
+    // Initialize the "instance" procs of our local function table.
+    VulkanFunctions* functions = GetMutableFunctions();
+    *functions = ToBackend(GetAdapter())->GetVulkanInstance()->GetFunctions();
+
+    // Two things are crucial if device initialization fails: the function pointers to destroy
+    // objects, and the fence deleter that calls these functions. Do not do anything before
+    // these two are set up, so that a failed initialization doesn't cause a crash in
+    // DestroyImpl()
+    {
+        VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
+
+        VulkanDeviceKnobs usedDeviceKnobs = {};
+        DAWN_TRY_ASSIGN(usedDeviceKnobs, CreateDevice(physicalDevice));
+        *static_cast<VulkanDeviceKnobs*>(&mDeviceInfo) = usedDeviceKnobs;
+
+        DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
+
+        // The queue can be loaded before the fenced deleter because their lifetime is tied to
+        // the device.
+        GatherQueueFromDevice();
+
+        mDeleter = std::make_unique<FencedDeleter>(this);
     }
 
-    Device::Device(Adapter* adapter, const DeviceDescriptor* descriptor)
-        : DeviceBase(adapter, descriptor) {
-        InitTogglesFromDriver();
+    mRenderPassCache = std::make_unique<RenderPassCache>(this);
+    mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
+
+    mExternalMemoryService = std::make_unique<external_memory::Service>(this);
+    mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
+
+    DAWN_TRY(PrepareRecordingContext());
+
+    // The environment can request to various options for depth-stencil formats that could be
+    // unavailable. Override the decision if it is not applicable.
+    ApplyDepthStencilFormatToggles();
+
+    // The environment can only request to use VK_KHR_zero_initialize_workgroup_memory when the
+    // extension is available. Override the decision if it is no applicable.
+    ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
+
+    SetLabelImpl();
+
+    return DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue));
+}
+
+Device::~Device() {
+    Destroy();
+}
+
+ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
+    const BindGroupDescriptor* descriptor) {
+    return BindGroup::Create(this, descriptor);
+}
+ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
+    const BindGroupLayoutDescriptor* descriptor,
+    PipelineCompatibilityToken pipelineCompatibilityToken) {
+    return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
+}
+ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
+    return Buffer::Create(this, descriptor);
+}
+ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
+    CommandEncoder* encoder,
+    const CommandBufferDescriptor* descriptor) {
+    return CommandBuffer::Create(encoder, descriptor);
+}
+Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
+    const ComputePipelineDescriptor* descriptor) {
+    return ComputePipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
+    const PipelineLayoutDescriptor* descriptor) {
+    return PipelineLayout::Create(this, descriptor);
+}
+ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(const QuerySetDescriptor* descriptor) {
+    return QuerySet::Create(this, descriptor);
+}
+Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
+    const RenderPipelineDescriptor* descriptor) {
+    return RenderPipeline::CreateUninitialized(this, descriptor);
+}
+ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
+    return Sampler::Create(this, descriptor);
+}
+ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
+    const ShaderModuleDescriptor* descriptor,
+    ShaderModuleParseResult* parseResult) {
+    return ShaderModule::Create(this, descriptor, parseResult);
+}
+ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
+    const SwapChainDescriptor* descriptor) {
+    return OldSwapChain::Create(this, descriptor);
+}
+ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
+    Surface* surface,
+    NewSwapChainBase* previousSwapChain,
+    const SwapChainDescriptor* descriptor) {
+    return SwapChain::Create(this, surface, previousSwapChain, descriptor);
+}
+ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
+    return Texture::Create(this, descriptor);
+}
+ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
+    TextureBase* texture,
+    const TextureViewDescriptor* descriptor) {
+    return TextureView::Create(texture, descriptor);
+}
+void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                                WGPUCreateComputePipelineAsyncCallback callback,
+                                                void* userdata) {
+    ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
+}
+void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                               WGPUCreateRenderPipelineAsyncCallback callback,
+                                               void* userdata) {
+    RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
+}
+
+MaybeError Device::TickImpl() {
+    RecycleCompletedCommands();
+
+    ExecutionSerial completedSerial = GetCompletedCommandSerial();
+
+    for (Ref<DescriptorSetAllocator>& allocator :
+         mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+        allocator->FinishDeallocation(completedSerial);
     }
 
-    MaybeError Device::Initialize(const DeviceDescriptor* descriptor) {
-        // Copy the adapter's device info to the device so that we can change the "knobs"
-        mDeviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+    mResourceMemoryAllocator->Tick(completedSerial);
+    mDeleter->Tick(completedSerial);
+    mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
 
-        // Initialize the "instance" procs of our local function table.
-        VulkanFunctions* functions = GetMutableFunctions();
-        *functions = ToBackend(GetAdapter())->GetVulkanInstance()->GetFunctions();
-
-        // Two things are crucial if device initialization fails: the function pointers to destroy
-        // objects, and the fence deleter that calls these functions. Do not do anything before
-        // these two are set up, so that a failed initialization doesn't cause a crash in
-        // DestroyImpl()
-        {
-            VkPhysicalDevice physicalDevice = ToBackend(GetAdapter())->GetPhysicalDevice();
-
-            VulkanDeviceKnobs usedDeviceKnobs = {};
-            DAWN_TRY_ASSIGN(usedDeviceKnobs, CreateDevice(physicalDevice));
-            *static_cast<VulkanDeviceKnobs*>(&mDeviceInfo) = usedDeviceKnobs;
-
-            DAWN_TRY(functions->LoadDeviceProcs(mVkDevice, mDeviceInfo));
-
-            // The queue can be loaded before the fenced deleter because their lifetime is tied to
-            // the device.
-            GatherQueueFromDevice();
-
-            mDeleter = std::make_unique<FencedDeleter>(this);
-        }
-
-        mRenderPassCache = std::make_unique<RenderPassCache>(this);
-        mResourceMemoryAllocator = std::make_unique<ResourceMemoryAllocator>(this);
-
-        mExternalMemoryService = std::make_unique<external_memory::Service>(this);
-        mExternalSemaphoreService = std::make_unique<external_semaphore::Service>(this);
-
-        DAWN_TRY(PrepareRecordingContext());
-
-        // The environment can request to various options for depth-stencil formats that could be
-        // unavailable. Override the decision if it is not applicable.
-        ApplyDepthStencilFormatToggles();
-
-        // The environment can only request to use VK_KHR_zero_initialize_workgroup_memory when the
-        // extension is available. Override the decision if it is no applicable.
-        ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
-
-        SetLabelImpl();
-
-        return DeviceBase::Initialize(Queue::Create(this, &descriptor->defaultQueue));
+    if (mRecordingContext.used) {
+        DAWN_TRY(SubmitPendingCommands());
     }
 
-    Device::~Device() {
-        Destroy();
-    }
+    return {};
+}
 
-    ResultOrError<Ref<BindGroupBase>> Device::CreateBindGroupImpl(
-        const BindGroupDescriptor* descriptor) {
-        return BindGroup::Create(this, descriptor);
-    }
-    ResultOrError<Ref<BindGroupLayoutBase>> Device::CreateBindGroupLayoutImpl(
-        const BindGroupLayoutDescriptor* descriptor,
-        PipelineCompatibilityToken pipelineCompatibilityToken) {
-        return BindGroupLayout::Create(this, descriptor, pipelineCompatibilityToken);
-    }
-    ResultOrError<Ref<BufferBase>> Device::CreateBufferImpl(const BufferDescriptor* descriptor) {
-        return Buffer::Create(this, descriptor);
-    }
-    ResultOrError<Ref<CommandBufferBase>> Device::CreateCommandBuffer(
-        CommandEncoder* encoder,
-        const CommandBufferDescriptor* descriptor) {
-        return CommandBuffer::Create(encoder, descriptor);
-    }
-    Ref<ComputePipelineBase> Device::CreateUninitializedComputePipelineImpl(
-        const ComputePipelineDescriptor* descriptor) {
-        return ComputePipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<PipelineLayoutBase>> Device::CreatePipelineLayoutImpl(
-        const PipelineLayoutDescriptor* descriptor) {
-        return PipelineLayout::Create(this, descriptor);
-    }
-    ResultOrError<Ref<QuerySetBase>> Device::CreateQuerySetImpl(
-        const QuerySetDescriptor* descriptor) {
-        return QuerySet::Create(this, descriptor);
-    }
-    Ref<RenderPipelineBase> Device::CreateUninitializedRenderPipelineImpl(
-        const RenderPipelineDescriptor* descriptor) {
-        return RenderPipeline::CreateUninitialized(this, descriptor);
-    }
-    ResultOrError<Ref<SamplerBase>> Device::CreateSamplerImpl(const SamplerDescriptor* descriptor) {
-        return Sampler::Create(this, descriptor);
-    }
-    ResultOrError<Ref<ShaderModuleBase>> Device::CreateShaderModuleImpl(
-        const ShaderModuleDescriptor* descriptor,
-        ShaderModuleParseResult* parseResult) {
-        return ShaderModule::Create(this, descriptor, parseResult);
-    }
-    ResultOrError<Ref<SwapChainBase>> Device::CreateSwapChainImpl(
-        const SwapChainDescriptor* descriptor) {
-        return OldSwapChain::Create(this, descriptor);
-    }
-    ResultOrError<Ref<NewSwapChainBase>> Device::CreateSwapChainImpl(
-        Surface* surface,
-        NewSwapChainBase* previousSwapChain,
-        const SwapChainDescriptor* descriptor) {
-        return SwapChain::Create(this, surface, previousSwapChain, descriptor);
-    }
-    ResultOrError<Ref<TextureBase>> Device::CreateTextureImpl(const TextureDescriptor* descriptor) {
-        return Texture::Create(this, descriptor);
-    }
-    ResultOrError<Ref<TextureViewBase>> Device::CreateTextureViewImpl(
-        TextureBase* texture,
-        const TextureViewDescriptor* descriptor) {
-        return TextureView::Create(texture, descriptor);
-    }
-    void Device::InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
-                                                    WGPUCreateComputePipelineAsyncCallback callback,
-                                                    void* userdata) {
-        ComputePipeline::InitializeAsync(std::move(computePipeline), callback, userdata);
-    }
-    void Device::InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
-                                                   WGPUCreateRenderPipelineAsyncCallback callback,
-                                                   void* userdata) {
-        RenderPipeline::InitializeAsync(std::move(renderPipeline), callback, userdata);
-    }
+VkInstance Device::GetVkInstance() const {
+    return ToBackend(GetAdapter())->GetVulkanInstance()->GetVkInstance();
+}
+const VulkanDeviceInfo& Device::GetDeviceInfo() const {
+    return mDeviceInfo;
+}
 
-    MaybeError Device::TickImpl() {
-        RecycleCompletedCommands();
+const VulkanGlobalInfo& Device::GetGlobalInfo() const {
+    return ToBackend(GetAdapter())->GetVulkanInstance()->GetGlobalInfo();
+}
 
-        ExecutionSerial completedSerial = GetCompletedCommandSerial();
+VkDevice Device::GetVkDevice() const {
+    return mVkDevice;
+}
 
-        for (Ref<DescriptorSetAllocator>& allocator :
-             mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
-            allocator->FinishDeallocation(completedSerial);
-        }
+uint32_t Device::GetGraphicsQueueFamily() const {
+    return mQueueFamily;
+}
 
-        mResourceMemoryAllocator->Tick(completedSerial);
-        mDeleter->Tick(completedSerial);
-        mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+VkQueue Device::GetQueue() const {
+    return mQueue;
+}
 
-        if (mRecordingContext.used) {
-            DAWN_TRY(SubmitPendingCommands());
-        }
+FencedDeleter* Device::GetFencedDeleter() const {
+    return mDeleter.get();
+}
 
+RenderPassCache* Device::GetRenderPassCache() const {
+    return mRenderPassCache.get();
+}
+
+ResourceMemoryAllocator* Device::GetResourceMemoryAllocator() const {
+    return mResourceMemoryAllocator.get();
+}
+
+void Device::EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator) {
+    mDescriptorAllocatorsPendingDeallocation.Enqueue(allocator, GetPendingCommandSerial());
+}
+
+CommandRecordingContext* Device::GetPendingRecordingContext() {
+    ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
+    mRecordingContext.used = true;
+    return &mRecordingContext;
+}
+
+MaybeError Device::SubmitPendingCommands() {
+    if (!mRecordingContext.used) {
         return {};
     }
 
-    VkInstance Device::GetVkInstance() const {
-        return ToBackend(GetAdapter())->GetVulkanInstance()->GetVkInstance();
+    DAWN_TRY(
+        CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer), "vkEndCommandBuffer"));
+
+    std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
+                                                    VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+
+    VkSubmitInfo submitInfo;
+    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+    submitInfo.pNext = nullptr;
+    submitInfo.waitSemaphoreCount = static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
+    submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
+    submitInfo.pWaitDstStageMask = dstStageMasks.data();
+    submitInfo.commandBufferCount = 1;
+    submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
+    submitInfo.signalSemaphoreCount =
+        static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
+    submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
+
+    VkFence fence = VK_NULL_HANDLE;
+    DAWN_TRY_ASSIGN(fence, GetUnusedFence());
+    DAWN_TRY_WITH_CLEANUP(
+        CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"), {
+            // If submitting to the queue fails, move the fence back into the unused fence
+            // list, as if it were never acquired. Not doing so would leak the fence since
+            // it would be neither in the unused list nor in the in-flight list.
+            mUnusedFences.push_back(fence);
+        });
+
+    // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
+    // soon as the current submission is finished.
+    for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+        mDeleter->DeleteWhenUnused(semaphore);
     }
-    const VulkanDeviceInfo& Device::GetDeviceInfo() const {
-        return mDeviceInfo;
+    for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+        mDeleter->DeleteWhenUnused(semaphore);
     }
 
-    const VulkanGlobalInfo& Device::GetGlobalInfo() const {
-        return ToBackend(GetAdapter())->GetVulkanInstance()->GetGlobalInfo();
-    }
+    IncrementLastSubmittedCommandSerial();
+    ExecutionSerial lastSubmittedSerial = GetLastSubmittedCommandSerial();
+    mFencesInFlight.emplace(fence, lastSubmittedSerial);
 
-    VkDevice Device::GetVkDevice() const {
-        return mVkDevice;
-    }
+    CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
+                                              mRecordingContext.commandBuffer};
+    mCommandsInFlight.Enqueue(submittedCommands, lastSubmittedSerial);
+    mRecordingContext = CommandRecordingContext();
+    DAWN_TRY(PrepareRecordingContext());
 
-    uint32_t Device::GetGraphicsQueueFamily() const {
-        return mQueueFamily;
-    }
+    return {};
+}
 
-    VkQueue Device::GetQueue() const {
-        return mQueue;
-    }
+ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
+    VulkanDeviceKnobs usedKnobs = {};
 
-    FencedDeleter* Device::GetFencedDeleter() const {
-        return mDeleter.get();
-    }
+    // Default to asking for all avilable known extensions.
+    usedKnobs.extensions = mDeviceInfo.extensions;
 
-    RenderPassCache* Device::GetRenderPassCache() const {
-        return mRenderPassCache.get();
-    }
+    // However only request the extensions that haven't been promoted in the device's apiVersion
+    std::vector<const char*> extensionNames;
+    for (DeviceExt ext : IterateBitSet(usedKnobs.extensions)) {
+        const DeviceExtInfo& info = GetDeviceExtInfo(ext);
 
-    ResourceMemoryAllocator* Device::GetResourceMemoryAllocator() const {
-        return mResourceMemoryAllocator.get();
-    }
-
-    void Device::EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator) {
-        mDescriptorAllocatorsPendingDeallocation.Enqueue(allocator, GetPendingCommandSerial());
-    }
-
-    CommandRecordingContext* Device::GetPendingRecordingContext() {
-        ASSERT(mRecordingContext.commandBuffer != VK_NULL_HANDLE);
-        mRecordingContext.used = true;
-        return &mRecordingContext;
-    }
-
-    MaybeError Device::SubmitPendingCommands() {
-        if (!mRecordingContext.used) {
-            return {};
+        if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
+            extensionNames.push_back(info.name);
         }
-
-        DAWN_TRY(CheckVkSuccess(fn.EndCommandBuffer(mRecordingContext.commandBuffer),
-                                "vkEndCommandBuffer"));
-
-        std::vector<VkPipelineStageFlags> dstStageMasks(mRecordingContext.waitSemaphores.size(),
-                                                        VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
-
-        VkSubmitInfo submitInfo;
-        submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
-        submitInfo.pNext = nullptr;
-        submitInfo.waitSemaphoreCount =
-            static_cast<uint32_t>(mRecordingContext.waitSemaphores.size());
-        submitInfo.pWaitSemaphores = AsVkArray(mRecordingContext.waitSemaphores.data());
-        submitInfo.pWaitDstStageMask = dstStageMasks.data();
-        submitInfo.commandBufferCount = 1;
-        submitInfo.pCommandBuffers = &mRecordingContext.commandBuffer;
-        submitInfo.signalSemaphoreCount =
-            static_cast<uint32_t>(mRecordingContext.signalSemaphores.size());
-        submitInfo.pSignalSemaphores = AsVkArray(mRecordingContext.signalSemaphores.data());
-
-        VkFence fence = VK_NULL_HANDLE;
-        DAWN_TRY_ASSIGN(fence, GetUnusedFence());
-        DAWN_TRY_WITH_CLEANUP(
-            CheckVkSuccess(fn.QueueSubmit(mQueue, 1, &submitInfo, fence), "vkQueueSubmit"), {
-                // If submitting to the queue fails, move the fence back into the unused fence
-                // list, as if it were never acquired. Not doing so would leak the fence since
-                // it would be neither in the unused list nor in the in-flight list.
-                mUnusedFences.push_back(fence);
-            });
-
-        // Enqueue the semaphores before incrementing the serial, so that they can be deleted as
-        // soon as the current submission is finished.
-        for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
-            mDeleter->DeleteWhenUnused(semaphore);
-        }
-        for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
-            mDeleter->DeleteWhenUnused(semaphore);
-        }
-
-        IncrementLastSubmittedCommandSerial();
-        ExecutionSerial lastSubmittedSerial = GetLastSubmittedCommandSerial();
-        mFencesInFlight.emplace(fence, lastSubmittedSerial);
-
-        CommandPoolAndBuffer submittedCommands = {mRecordingContext.commandPool,
-                                                  mRecordingContext.commandBuffer};
-        mCommandsInFlight.Enqueue(submittedCommands, lastSubmittedSerial);
-        mRecordingContext = CommandRecordingContext();
-        DAWN_TRY(PrepareRecordingContext());
-
-        return {};
     }
 
-    ResultOrError<VulkanDeviceKnobs> Device::CreateDevice(VkPhysicalDevice physicalDevice) {
-        VulkanDeviceKnobs usedKnobs = {};
+    // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct, which
+    // is supported by the VK_EXT_get_physical_properties2 instance extension, which was
+    // promoted as a core API in Vulkan 1.1.
+    //
+    // Prepare a VkPhysicalDeviceFeatures2 struct for this use case, it will only be populated
+    // if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
+    VkPhysicalDeviceFeatures2 features2 = {};
+    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+    features2.pNext = nullptr;
+    PNextChainBuilder featuresChain(&features2);
 
-        // Default to asking for all avilable known extensions.
-        usedKnobs.extensions = mDeviceInfo.extensions;
+    // Required for core WebGPU features.
+    usedKnobs.features.depthBiasClamp = VK_TRUE;
+    usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
+    usedKnobs.features.fullDrawIndexUint32 = VK_TRUE;
+    usedKnobs.features.imageCubeArray = VK_TRUE;
+    usedKnobs.features.independentBlend = VK_TRUE;
+    usedKnobs.features.sampleRateShading = VK_TRUE;
 
-        // However only request the extensions that haven't been promoted in the device's apiVersion
-        std::vector<const char*> extensionNames;
-        for (DeviceExt ext : IterateBitSet(usedKnobs.extensions)) {
-            const DeviceExtInfo& info = GetDeviceExtInfo(ext);
+    if (IsRobustnessEnabled()) {
+        usedKnobs.features.robustBufferAccess = VK_TRUE;
+    }
 
-            if (info.versionPromoted > mDeviceInfo.properties.apiVersion) {
-                extensionNames.push_back(info.name);
+    if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+        ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
+
+        // Always request all the features from VK_EXT_subgroup_size_control when available.
+        usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
+        featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
+
+        mComputeSubgroupSize = FindComputeSubgroupSize();
+    }
+
+    if (mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
+        ASSERT(usedKnobs.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory));
+
+        usedKnobs.zeroInitializeWorkgroupMemoryFeatures.sType =
+            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR;
+
+        // Always allow initializing workgroup memory with OpConstantNull when available.
+        // Note that the driver still won't initialize workgroup memory unless the workgroup
+        // variable is explicitly initialized with OpConstantNull.
+        usedKnobs.zeroInitializeWorkgroupMemoryFeatures.shaderZeroInitializeWorkgroupMemory =
+            VK_TRUE;
+        featuresChain.Add(&usedKnobs.zeroInitializeWorkgroupMemoryFeatures);
+    }
+
+    if (mDeviceInfo.features.samplerAnisotropy == VK_TRUE) {
+        usedKnobs.features.samplerAnisotropy = VK_TRUE;
+    }
+
+    if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
+        ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC == VK_TRUE);
+        usedKnobs.features.textureCompressionBC = VK_TRUE;
+    }
+
+    if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
+        ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 == VK_TRUE);
+        usedKnobs.features.textureCompressionETC2 = VK_TRUE;
+    }
+
+    if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
+        ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
+               VK_TRUE);
+        usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
+    }
+
+    if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
+        ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
+               VK_TRUE);
+        usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
+    }
+
+    if (IsFeatureEnabled(Feature::ShaderFloat16)) {
+        const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
+        ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
+               deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
+               deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
+               deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
+               deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
+
+        usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
+        usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
+        usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
+
+        featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
+                          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+        featuresChain.Add(&usedKnobs._16BitStorageFeatures,
+                          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+    }
+
+    if (IsFeatureEnabled(Feature::DepthClamping)) {
+        ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
+        usedKnobs.features.depthClamp = VK_TRUE;
+    }
+
+    // Find a universal queue family
+    {
+        // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
+        constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
+        int universalQueueFamily = -1;
+        for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
+            if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) == kUniversalFlags) {
+                universalQueueFamily = i;
+                break;
             }
         }
 
-        // Some device features can only be enabled using a VkPhysicalDeviceFeatures2 struct, which
-        // is supported by the VK_EXT_get_physical_properties2 instance extension, which was
-        // promoted as a core API in Vulkan 1.1.
-        //
-        // Prepare a VkPhysicalDeviceFeatures2 struct for this use case, it will only be populated
-        // if HasExt(DeviceExt::GetPhysicalDeviceProperties2) is true.
-        VkPhysicalDeviceFeatures2 features2 = {};
-        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
-        features2.pNext = nullptr;
-        PNextChainBuilder featuresChain(&features2);
-
-        // Required for core WebGPU features.
-        usedKnobs.features.depthBiasClamp = VK_TRUE;
-        usedKnobs.features.fragmentStoresAndAtomics = VK_TRUE;
-        usedKnobs.features.fullDrawIndexUint32 = VK_TRUE;
-        usedKnobs.features.imageCubeArray = VK_TRUE;
-        usedKnobs.features.independentBlend = VK_TRUE;
-        usedKnobs.features.sampleRateShading = VK_TRUE;
-
-        if (IsRobustnessEnabled()) {
-            usedKnobs.features.robustBufferAccess = VK_TRUE;
+        if (universalQueueFamily == -1) {
+            return DAWN_INTERNAL_ERROR("No universal queue family");
         }
-
-        if (mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
-            ASSERT(usedKnobs.HasExt(DeviceExt::SubgroupSizeControl));
-
-            // Always request all the features from VK_EXT_subgroup_size_control when available.
-            usedKnobs.subgroupSizeControlFeatures = mDeviceInfo.subgroupSizeControlFeatures;
-            featuresChain.Add(&usedKnobs.subgroupSizeControlFeatures);
-
-            mComputeSubgroupSize = FindComputeSubgroupSize();
-        }
-
-        if (mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
-            ASSERT(usedKnobs.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory));
-
-            usedKnobs.zeroInitializeWorkgroupMemoryFeatures.sType =
-                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR;
-
-            // Always allow initializing workgroup memory with OpConstantNull when available.
-            // Note that the driver still won't initialize workgroup memory unless the workgroup
-            // variable is explicitly initialized with OpConstantNull.
-            usedKnobs.zeroInitializeWorkgroupMemoryFeatures.shaderZeroInitializeWorkgroupMemory =
-                VK_TRUE;
-            featuresChain.Add(&usedKnobs.zeroInitializeWorkgroupMemoryFeatures);
-        }
-
-        if (mDeviceInfo.features.samplerAnisotropy == VK_TRUE) {
-            usedKnobs.features.samplerAnisotropy = VK_TRUE;
-        }
-
-        if (IsFeatureEnabled(Feature::TextureCompressionBC)) {
-            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionBC ==
-                   VK_TRUE);
-            usedKnobs.features.textureCompressionBC = VK_TRUE;
-        }
-
-        if (IsFeatureEnabled(Feature::TextureCompressionETC2)) {
-            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionETC2 ==
-                   VK_TRUE);
-            usedKnobs.features.textureCompressionETC2 = VK_TRUE;
-        }
-
-        if (IsFeatureEnabled(Feature::TextureCompressionASTC)) {
-            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.textureCompressionASTC_LDR ==
-                   VK_TRUE);
-            usedKnobs.features.textureCompressionASTC_LDR = VK_TRUE;
-        }
-
-        if (IsFeatureEnabled(Feature::PipelineStatisticsQuery)) {
-            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.pipelineStatisticsQuery ==
-                   VK_TRUE);
-            usedKnobs.features.pipelineStatisticsQuery = VK_TRUE;
-        }
-
-        if (IsFeatureEnabled(Feature::ShaderFloat16)) {
-            const VulkanDeviceInfo& deviceInfo = ToBackend(GetAdapter())->GetDeviceInfo();
-            ASSERT(deviceInfo.HasExt(DeviceExt::ShaderFloat16Int8) &&
-                   deviceInfo.shaderFloat16Int8Features.shaderFloat16 == VK_TRUE &&
-                   deviceInfo.HasExt(DeviceExt::_16BitStorage) &&
-                   deviceInfo._16BitStorageFeatures.storageBuffer16BitAccess == VK_TRUE &&
-                   deviceInfo._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess == VK_TRUE);
-
-            usedKnobs.shaderFloat16Int8Features.shaderFloat16 = VK_TRUE;
-            usedKnobs._16BitStorageFeatures.storageBuffer16BitAccess = VK_TRUE;
-            usedKnobs._16BitStorageFeatures.uniformAndStorageBuffer16BitAccess = VK_TRUE;
-
-            featuresChain.Add(&usedKnobs.shaderFloat16Int8Features,
-                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
-            featuresChain.Add(&usedKnobs._16BitStorageFeatures,
-                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
-        }
-
-        if (IsFeatureEnabled(Feature::DepthClamping)) {
-            ASSERT(ToBackend(GetAdapter())->GetDeviceInfo().features.depthClamp == VK_TRUE);
-            usedKnobs.features.depthClamp = VK_TRUE;
-        }
-
-        // Find a universal queue family
-        {
-            // Note that GRAPHICS and COMPUTE imply TRANSFER so we don't need to check for it.
-            constexpr uint32_t kUniversalFlags = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
-            int universalQueueFamily = -1;
-            for (unsigned int i = 0; i < mDeviceInfo.queueFamilies.size(); ++i) {
-                if ((mDeviceInfo.queueFamilies[i].queueFlags & kUniversalFlags) ==
-                    kUniversalFlags) {
-                    universalQueueFamily = i;
-                    break;
-                }
-            }
-
-            if (universalQueueFamily == -1) {
-                return DAWN_INTERNAL_ERROR("No universal queue family");
-            }
-            mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
-        }
-
-        // Choose to create a single universal queue
-        std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
-        float zero = 0.0f;
-        {
-            VkDeviceQueueCreateInfo queueCreateInfo;
-            queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
-            queueCreateInfo.pNext = nullptr;
-            queueCreateInfo.flags = 0;
-            queueCreateInfo.queueFamilyIndex = static_cast<uint32_t>(mQueueFamily);
-            queueCreateInfo.queueCount = 1;
-            queueCreateInfo.pQueuePriorities = &zero;
-
-            queuesToRequest.push_back(queueCreateInfo);
-        }
-
-        VkDeviceCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
-        createInfo.pQueueCreateInfos = queuesToRequest.data();
-        createInfo.enabledLayerCount = 0;
-        createInfo.ppEnabledLayerNames = nullptr;
-        createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
-        createInfo.ppEnabledExtensionNames = extensionNames.data();
-
-        // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
-        // covered by VkPhysicalDeviceFeatures can be enabled.
-        if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
-            features2.features = usedKnobs.features;
-            createInfo.pNext = &features2;
-            createInfo.pEnabledFeatures = nullptr;
-        } else {
-            ASSERT(features2.pNext == nullptr);
-            createInfo.pEnabledFeatures = &usedKnobs.features;
-        }
-
-        DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
-                                "vkCreateDevice"));
-
-        return usedKnobs;
+        mQueueFamily = static_cast<uint32_t>(universalQueueFamily);
     }
 
-    uint32_t Device::FindComputeSubgroupSize() const {
-        if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
-            return 0;
-        }
+    // Choose to create a single universal queue
+    std::vector<VkDeviceQueueCreateInfo> queuesToRequest;
+    float zero = 0.0f;
+    {
+        VkDeviceQueueCreateInfo queueCreateInfo;
+        queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+        queueCreateInfo.pNext = nullptr;
+        queueCreateInfo.flags = 0;
+        queueCreateInfo.queueFamilyIndex = static_cast<uint32_t>(mQueueFamily);
+        queueCreateInfo.queueCount = 1;
+        queueCreateInfo.pQueuePriorities = &zero;
 
-        const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
-            mDeviceInfo.subgroupSizeControlProperties;
-
-        if (ext.minSubgroupSize == ext.maxSubgroupSize) {
-            return 0;
-        }
-
-        // At the moment, only Intel devices support varying subgroup sizes and 16, which is the
-        // next value after the minimum of 8, is the sweet spot according to [1]. Hence the
-        // following heuristics, which may need to be adjusted in the future for other
-        // architectures, or if a specific API is added to let client code select the size..
-        //
-        // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
-        uint32_t subgroupSize = ext.minSubgroupSize * 2;
-        if (subgroupSize <= ext.maxSubgroupSize) {
-            return subgroupSize;
-        } else {
-            return ext.minSubgroupSize;
-        }
+        queuesToRequest.push_back(queueCreateInfo);
     }
 
-    void Device::GatherQueueFromDevice() {
-        fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
+    VkDeviceCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.queueCreateInfoCount = static_cast<uint32_t>(queuesToRequest.size());
+    createInfo.pQueueCreateInfos = queuesToRequest.data();
+    createInfo.enabledLayerCount = 0;
+    createInfo.ppEnabledLayerNames = nullptr;
+    createInfo.enabledExtensionCount = static_cast<uint32_t>(extensionNames.size());
+    createInfo.ppEnabledExtensionNames = extensionNames.data();
+
+    // When we have DeviceExt::GetPhysicalDeviceProperties2, use features2 so that features not
+    // covered by VkPhysicalDeviceFeatures can be enabled.
+    if (mDeviceInfo.HasExt(DeviceExt::GetPhysicalDeviceProperties2)) {
+        features2.features = usedKnobs.features;
+        createInfo.pNext = &features2;
+        createInfo.pEnabledFeatures = nullptr;
+    } else {
+        ASSERT(features2.pNext == nullptr);
+        createInfo.pEnabledFeatures = &usedKnobs.features;
     }
 
-    // Note that this function is called before mDeviceInfo is initialized.
-    void Device::InitTogglesFromDriver() {
-        // TODO(crbug.com/dawn/857): tighten this workaround when this issue is fixed in both
-        // Vulkan SPEC and drivers.
-        SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
+    DAWN_TRY(CheckVkSuccess(fn.CreateDevice(physicalDevice, &createInfo, nullptr, &mVkDevice),
+                            "vkCreateDevice"));
 
-        // By default try to use D32S8 for Depth24PlusStencil8
-        SetToggle(Toggle::VulkanUseD32S8, true);
+    return usedKnobs;
+}
 
-        // By default try to initialize workgroup memory with OpConstantNull according to the Vulkan
-        // extension VK_KHR_zero_initialize_workgroup_memory.
-        SetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, true);
-
-        // By default try to use S8 if available.
-        SetToggle(Toggle::VulkanUseS8, true);
+uint32_t Device::FindComputeSubgroupSize() const {
+    if (!mDeviceInfo.HasExt(DeviceExt::SubgroupSizeControl)) {
+        return 0;
     }
 
-    void Device::ApplyDepthStencilFormatToggles() {
-        bool supportsD32s8 =
-            ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
-        bool supportsD24s8 =
-            ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
-        bool supportsS8 = ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_S8_UINT);
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& ext =
+        mDeviceInfo.subgroupSizeControlProperties;
 
-        ASSERT(supportsD32s8 || supportsD24s8);
-
-        if (!supportsD24s8) {
-            ForceSetToggle(Toggle::VulkanUseD32S8, true);
-        }
-        if (!supportsD32s8) {
-            ForceSetToggle(Toggle::VulkanUseD32S8, false);
-        }
-        if (!supportsS8) {
-            ForceSetToggle(Toggle::VulkanUseS8, false);
-        }
+    if (ext.minSubgroupSize == ext.maxSubgroupSize) {
+        return 0;
     }
 
-    void Device::ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle() {
-        if (!mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
-            ForceSetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, false);
-        }
+    // At the moment, only Intel devices support varying subgroup sizes and 16, which is the
+    // next value after the minimum of 8, is the sweet spot according to [1]. Hence the
+    // following heuristics, which may need to be adjusted in the future for other
+    // architectures, or if a specific API is added to let client code select the size.
+    //
+    // [1] https://bugs.freedesktop.org/show_bug.cgi?id=108875
+    uint32_t subgroupSize = ext.minSubgroupSize * 2;
+    if (subgroupSize <= ext.maxSubgroupSize) {
+        return subgroupSize;
+    } else {
+        return ext.minSubgroupSize;
     }
+}
 
-    VulkanFunctions* Device::GetMutableFunctions() {
-        return const_cast<VulkanFunctions*>(&fn);
+void Device::GatherQueueFromDevice() {
+    fn.GetDeviceQueue(mVkDevice, mQueueFamily, 0, &mQueue);
+}
+
+// Note that this function is called before mDeviceInfo is initialized.
+void Device::InitTogglesFromDriver() {
+    // TODO(crbug.com/dawn/857): tighten this workaround when this issue is fixed in both
+    // Vulkan SPEC and drivers.
+    SetToggle(Toggle::UseTemporaryBufferInCompressedTextureToTextureCopy, true);
+
+    // By default try to use D32S8 for Depth24PlusStencil8
+    SetToggle(Toggle::VulkanUseD32S8, true);
+
+    // By default try to initialize workgroup memory with OpConstantNull according to the Vulkan
+    // extension VK_KHR_zero_initialize_workgroup_memory.
+    SetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, true);
+
+    // By default try to use S8 if available.
+    SetToggle(Toggle::VulkanUseS8, true);
+}
+
+void Device::ApplyDepthStencilFormatToggles() {
+    bool supportsD32s8 =
+        ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D32_SFLOAT_S8_UINT);
+    bool supportsD24s8 =
+        ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT);
+    bool supportsS8 = ToBackend(GetAdapter())->IsDepthStencilFormatSupported(VK_FORMAT_S8_UINT);
+
+    ASSERT(supportsD32s8 || supportsD24s8);
+
+    if (!supportsD24s8) {
+        ForceSetToggle(Toggle::VulkanUseD32S8, true);
     }
+    if (!supportsD32s8) {
+        ForceSetToggle(Toggle::VulkanUseD32S8, false);
+    }
+    if (!supportsS8) {
+        ForceSetToggle(Toggle::VulkanUseS8, false);
+    }
+}
 
-    ResultOrError<VkFence> Device::GetUnusedFence() {
-        if (!mUnusedFences.empty()) {
-            VkFence fence = mUnusedFences.back();
-            DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
+void Device::ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle() {
+    if (!mDeviceInfo.HasExt(DeviceExt::ZeroInitializeWorkgroupMemory)) {
+        ForceSetToggle(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension, false);
+    }
+}
 
-            mUnusedFences.pop_back();
-            return fence;
-        }
+VulkanFunctions* Device::GetMutableFunctions() {
+    return const_cast<VulkanFunctions*>(&fn);
+}
 
-        VkFenceCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
+ResultOrError<VkFence> Device::GetUnusedFence() {
+    if (!mUnusedFences.empty()) {
+        VkFence fence = mUnusedFences.back();
+        DAWN_TRY(CheckVkSuccess(fn.ResetFences(mVkDevice, 1, &*fence), "vkResetFences"));
 
-        VkFence fence = VK_NULL_HANDLE;
-        DAWN_TRY(CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence),
-                                "vkCreateFence"));
-
+        mUnusedFences.pop_back();
         return fence;
     }
 
-    ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
-        ExecutionSerial fenceSerial(0);
-        while (!mFencesInFlight.empty()) {
-            VkFence fence = mFencesInFlight.front().first;
-            ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
-            VkResult result = VkResult::WrapUnsafe(
-                INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
+    VkFenceCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
 
-            // Fence are added in order, so we can stop searching as soon
-            // as we see one that's not ready.
-            if (result == VK_NOT_READY) {
-                return fenceSerial;
-            } else {
-                DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
-            }
+    VkFence fence = VK_NULL_HANDLE;
+    DAWN_TRY(
+        CheckVkSuccess(fn.CreateFence(mVkDevice, &createInfo, nullptr, &*fence), "vkCreateFence"));
 
-            // Update fenceSerial since fence is ready.
-            fenceSerial = tentativeSerial;
+    return fence;
+}
 
-            mUnusedFences.push_back(fence);
+ResultOrError<ExecutionSerial> Device::CheckAndUpdateCompletedSerials() {
+    ExecutionSerial fenceSerial(0);
+    while (!mFencesInFlight.empty()) {
+        VkFence fence = mFencesInFlight.front().first;
+        ExecutionSerial tentativeSerial = mFencesInFlight.front().second;
+        VkResult result = VkResult::WrapUnsafe(
+            INJECT_ERROR_OR_RUN(fn.GetFenceStatus(mVkDevice, fence), VK_ERROR_DEVICE_LOST));
 
-            ASSERT(fenceSerial > GetCompletedCommandSerial());
-            mFencesInFlight.pop();
-        }
-        return fenceSerial;
-    }
-
-    MaybeError Device::PrepareRecordingContext() {
-        ASSERT(!mRecordingContext.used);
-        ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
-        ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
-
-        // First try to recycle unused command pools.
-        if (!mUnusedCommands.empty()) {
-            CommandPoolAndBuffer commands = mUnusedCommands.back();
-            mUnusedCommands.pop_back();
-            DAWN_TRY_WITH_CLEANUP(CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0),
-                                                 "vkResetCommandPool"),
-                                  {
-                                      // vkResetCommandPool failed (it may return out-of-memory).
-                                      // Free the commands in the cleanup step before returning to
-                                      // reclaim memory.
-
-                                      // The VkCommandBuffer memory should be wholly owned by the
-                                      // pool and freed when it is destroyed, but that's not the
-                                      // case in some drivers and they leak memory. So we call
-                                      // FreeCommandBuffers before DestroyCommandPool to be safe.
-                                      // TODO(enga): Only do this on a known list of bad drivers.
-                                      fn.FreeCommandBuffers(mVkDevice, commands.pool, 1,
-                                                            &commands.commandBuffer);
-                                      fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
-                                  });
-
-            mRecordingContext.commandBuffer = commands.commandBuffer;
-            mRecordingContext.commandPool = commands.pool;
+        // Fences are added in order, so we can stop searching as soon
+        // as we see one that's not ready.
+        if (result == VK_NOT_READY) {
+            return fenceSerial;
         } else {
-            // Create a new command pool for our commands and allocate the command buffer.
-            VkCommandPoolCreateInfo createInfo;
-            createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
-            createInfo.pNext = nullptr;
-            createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
-            createInfo.queueFamilyIndex = mQueueFamily;
-
-            DAWN_TRY(CheckVkSuccess(fn.CreateCommandPool(mVkDevice, &createInfo, nullptr,
-                                                         &*mRecordingContext.commandPool),
-                                    "vkCreateCommandPool"));
-
-            VkCommandBufferAllocateInfo allocateInfo;
-            allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
-            allocateInfo.pNext = nullptr;
-            allocateInfo.commandPool = mRecordingContext.commandPool;
-            allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
-            allocateInfo.commandBufferCount = 1;
-
-            DAWN_TRY(CheckVkSuccess(fn.AllocateCommandBuffers(mVkDevice, &allocateInfo,
-                                                              &mRecordingContext.commandBuffer),
-                                    "vkAllocateCommandBuffers"));
+            DAWN_TRY(CheckVkSuccess(::VkResult(result), "GetFenceStatus"));
         }
 
-        // Start the recording of commands in the command buffer.
-        VkCommandBufferBeginInfo beginInfo;
-        beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
-        beginInfo.pNext = nullptr;
-        beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
-        beginInfo.pInheritanceInfo = nullptr;
+        // Update fenceSerial since fence is ready.
+        fenceSerial = tentativeSerial;
 
-        return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
-                              "vkBeginCommandBuffer");
+        mUnusedFences.push_back(fence);
+
+        ASSERT(fenceSerial > GetCompletedCommandSerial());
+        mFencesInFlight.pop();
+    }
+    return fenceSerial;
+}
+
+MaybeError Device::PrepareRecordingContext() {
+    ASSERT(!mRecordingContext.used);
+    ASSERT(mRecordingContext.commandBuffer == VK_NULL_HANDLE);
+    ASSERT(mRecordingContext.commandPool == VK_NULL_HANDLE);
+
+    // First try to recycle unused command pools.
+    if (!mUnusedCommands.empty()) {
+        CommandPoolAndBuffer commands = mUnusedCommands.back();
+        mUnusedCommands.pop_back();
+        DAWN_TRY_WITH_CLEANUP(
+            CheckVkSuccess(fn.ResetCommandPool(mVkDevice, commands.pool, 0), "vkResetCommandPool"),
+            {
+                // vkResetCommandPool failed (it may return out-of-memory).
+                // Free the commands in the cleanup step before returning to
+                // reclaim memory.
+
+                // The VkCommandBuffer memory should be wholly owned by the
+                // pool and freed when it is destroyed, but that's not the
+                // case in some drivers and they leak memory. So we call
+                // FreeCommandBuffers before DestroyCommandPool to be safe.
+                // TODO(enga): Only do this on a known list of bad drivers.
+                fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
+                fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+            });
+
+        mRecordingContext.commandBuffer = commands.commandBuffer;
+        mRecordingContext.commandPool = commands.pool;
+    } else {
+        // Create a new command pool for our commands and allocate the command buffer.
+        VkCommandPoolCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+        createInfo.pNext = nullptr;
+        createInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
+        createInfo.queueFamilyIndex = mQueueFamily;
+
+        DAWN_TRY(CheckVkSuccess(
+            fn.CreateCommandPool(mVkDevice, &createInfo, nullptr, &*mRecordingContext.commandPool),
+            "vkCreateCommandPool"));
+
+        VkCommandBufferAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+        allocateInfo.pNext = nullptr;
+        allocateInfo.commandPool = mRecordingContext.commandPool;
+        allocateInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+        allocateInfo.commandBufferCount = 1;
+
+        DAWN_TRY(CheckVkSuccess(
+            fn.AllocateCommandBuffers(mVkDevice, &allocateInfo, &mRecordingContext.commandBuffer),
+            "vkAllocateCommandBuffers"));
     }
 
-    void Device::RecycleCompletedCommands() {
-        for (auto& commands : mCommandsInFlight.IterateUpTo(GetCompletedCommandSerial())) {
-            mUnusedCommands.push_back(commands);
-        }
-        mCommandsInFlight.ClearUpTo(GetCompletedCommandSerial());
+    // Start the recording of commands in the command buffer.
+    VkCommandBufferBeginInfo beginInfo;
+    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+    beginInfo.pNext = nullptr;
+    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+    beginInfo.pInheritanceInfo = nullptr;
+
+    return CheckVkSuccess(fn.BeginCommandBuffer(mRecordingContext.commandBuffer, &beginInfo),
+                          "vkBeginCommandBuffer");
+}
+
+void Device::RecycleCompletedCommands() {
+    for (auto& commands : mCommandsInFlight.IterateUpTo(GetCompletedCommandSerial())) {
+        mUnusedCommands.push_back(commands);
+    }
+    mCommandsInFlight.ClearUpTo(GetCompletedCommandSerial());
+}
+
+ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
+    std::unique_ptr<StagingBufferBase> stagingBuffer = std::make_unique<StagingBuffer>(size, this);
+    DAWN_TRY(stagingBuffer->Initialize());
+    return std::move(stagingBuffer);
+}
+
+MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
+                                           uint64_t sourceOffset,
+                                           BufferBase* destination,
+                                           uint64_t destinationOffset,
+                                           uint64_t size) {
+    // It is a validation error to do a 0-sized copy in Vulkan, check it is skipped prior to
+    // calling this function.
+    ASSERT(size != 0);
+
+    CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+    ToBackend(destination)
+        ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
+
+    // There is no need of a barrier to make host writes available and visible to the copy
+    // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+    // does an implicit availability, visibility and domain operation.
+
+    // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+    // buffer.
+    ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
+
+    VkBufferCopy copy;
+    copy.srcOffset = sourceOffset;
+    copy.dstOffset = destinationOffset;
+    copy.size = size;
+
+    this->fn.CmdCopyBuffer(recordingContext->commandBuffer, ToBackend(source)->GetBufferHandle(),
+                           ToBackend(destination)->GetHandle(), 1, &copy);
+
+    return {};
+}
+
+MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
+                                            const TextureDataLayout& src,
+                                            TextureCopy* dst,
+                                            const Extent3D& copySizePixels) {
+    // There is no need of a barrier to make host writes available and visible to the copy
+    // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
+    // does an implicit availability, visibility and domain operation.
+
+    CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+
+    VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
+    VkImageSubresourceLayers subresource = region.imageSubresource;
+
+    SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
+
+    if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels, subresource.mipLevel)) {
+        // Since texture has been overwritten, it has been "initialized"
+        dst->texture->SetIsSubresourceContentInitialized(true, range);
+    } else {
+        ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+    }
+    // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
+    // texture.
+    ToBackend(dst->texture)
+        ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+    VkImage dstImage = ToBackend(dst->texture)->GetHandle();
+
+    // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
+    // copy command.
+    this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+                                  ToBackend(source)->GetBufferHandle(), dstImage,
+                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+    return {};
+}
+
+MaybeError Device::ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+                                       ExternalMemoryHandle memoryHandle,
+                                       VkImage image,
+                                       const std::vector<ExternalSemaphoreHandle>& waitHandles,
+                                       VkSemaphore* outSignalSemaphore,
+                                       VkDeviceMemory* outAllocation,
+                                       std::vector<VkSemaphore>* outWaitSemaphores) {
+    const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+    const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
+    FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
+
+    wgpu::TextureUsage usage = textureDescriptor->usage;
+    if (internalUsageDesc != nullptr) {
+        usage |= internalUsageDesc->internalUsage;
     }
 
-    ResultOrError<std::unique_ptr<StagingBufferBase>> Device::CreateStagingBuffer(size_t size) {
-        std::unique_ptr<StagingBufferBase> stagingBuffer =
-            std::make_unique<StagingBuffer>(size, this);
-        DAWN_TRY(stagingBuffer->Initialize());
-        return std::move(stagingBuffer);
+    // Check services support this combination of handle type / image info
+    DAWN_INVALID_IF(!mExternalSemaphoreService->Supported(),
+                    "External semaphore usage not supported");
+
+    DAWN_INVALID_IF(!mExternalMemoryService->SupportsImportMemory(
+                        VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
+                        VK_IMAGE_TILING_OPTIMAL,
+                        VulkanImageUsage(usage, GetValidInternalFormat(textureDescriptor->format)),
+                        VK_IMAGE_CREATE_ALIAS_BIT_KHR),
+                    "External memory usage not supported");
+
+    // Create an external semaphore to signal when the texture is done being used
+    DAWN_TRY_ASSIGN(*outSignalSemaphore, mExternalSemaphoreService->CreateExportableSemaphore());
+
+    // Import the external image's memory
+    external_memory::MemoryImportParams importParams;
+    DAWN_TRY_ASSIGN(importParams, mExternalMemoryService->GetMemoryImportParams(descriptor, image));
+    DAWN_TRY_ASSIGN(*outAllocation,
+                    mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
+
+    // Import semaphores we have to wait on before using the texture
+    for (const ExternalSemaphoreHandle& handle : waitHandles) {
+        VkSemaphore semaphore = VK_NULL_HANDLE;
+        DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
+        outWaitSemaphores->push_back(semaphore);
     }
 
-    MaybeError Device::CopyFromStagingToBuffer(StagingBufferBase* source,
-                                               uint64_t sourceOffset,
-                                               BufferBase* destination,
-                                               uint64_t destinationOffset,
-                                               uint64_t size) {
-        // It is a validation error to do a 0-sized copy in Vulkan, check it is skipped prior to
-        // calling this function.
-        ASSERT(size != 0);
+    return {};
+}
 
-        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
+bool Device::SignalAndExportExternalTexture(
+    Texture* texture,
+    VkImageLayout desiredLayout,
+    ExternalImageExportInfoVk* info,
+    std::vector<ExternalSemaphoreHandle>* semaphoreHandles) {
+    return !ConsumedError([&]() -> MaybeError {
+        DAWN_TRY(ValidateObject(texture));
 
-        ToBackend(destination)
-            ->EnsureDataInitializedAsDestination(recordingContext, destinationOffset, size);
+        VkSemaphore signalSemaphore;
+        VkImageLayout releasedOldLayout;
+        VkImageLayout releasedNewLayout;
+        DAWN_TRY(texture->ExportExternalTexture(desiredLayout, &signalSemaphore, &releasedOldLayout,
+                                                &releasedNewLayout));
 
-        // There is no need of a barrier to make host writes available and visible to the copy
-        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
-        // does an implicit availability, visibility and domain operation.
-
-        // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
-        // buffer.
-        ToBackend(destination)->TransitionUsageNow(recordingContext, wgpu::BufferUsage::CopyDst);
-
-        VkBufferCopy copy;
-        copy.srcOffset = sourceOffset;
-        copy.dstOffset = destinationOffset;
-        copy.size = size;
-
-        this->fn.CmdCopyBuffer(recordingContext->commandBuffer,
-                               ToBackend(source)->GetBufferHandle(),
-                               ToBackend(destination)->GetHandle(), 1, &copy);
+        ExternalSemaphoreHandle semaphoreHandle;
+        DAWN_TRY_ASSIGN(semaphoreHandle,
+                        mExternalSemaphoreService->ExportSemaphore(signalSemaphore));
+        semaphoreHandles->push_back(semaphoreHandle);
+        info->releasedOldLayout = releasedOldLayout;
+        info->releasedNewLayout = releasedNewLayout;
+        info->isInitialized =
+            texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
 
         return {};
+    }());
+}
+
+TextureBase* Device::CreateTextureWrappingVulkanImage(
+    const ExternalImageDescriptorVk* descriptor,
+    ExternalMemoryHandle memoryHandle,
+    const std::vector<ExternalSemaphoreHandle>& waitHandles) {
+    const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+
+    // Initial validation
+    if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
+        return nullptr;
+    }
+    if (ConsumedError(ValidateVulkanImageCanBeWrapped(this, textureDescriptor),
+                      "validating that a Vulkan image can be wrapped with %s.",
+                      textureDescriptor)) {
+        return nullptr;
     }
 
-    MaybeError Device::CopyFromStagingToTexture(const StagingBufferBase* source,
-                                                const TextureDataLayout& src,
-                                                TextureCopy* dst,
-                                                const Extent3D& copySizePixels) {
-        // There is no need of a barrier to make host writes available and visible to the copy
-        // operation for HOST_COHERENT memory. The Vulkan spec for vkQueueSubmit describes that it
-        // does an implicit availability, visibility and domain operation.
+    VkSemaphore signalSemaphore = VK_NULL_HANDLE;
+    VkDeviceMemory allocation = VK_NULL_HANDLE;
+    std::vector<VkSemaphore> waitSemaphores;
+    waitSemaphores.reserve(waitHandles.size());
 
-        CommandRecordingContext* recordingContext = GetPendingRecordingContext();
-
-        VkBufferImageCopy region = ComputeBufferImageCopyRegion(src, *dst, copySizePixels);
-        VkImageSubresourceLayers subresource = region.imageSubresource;
-
-        SubresourceRange range = GetSubresourcesAffectedByCopy(*dst, copySizePixels);
-
-        if (IsCompleteSubresourceCopiedTo(dst->texture.Get(), copySizePixels,
-                                          subresource.mipLevel)) {
-            // Since texture has been overwritten, it has been "initialized"
-            dst->texture->SetIsSubresourceContentInitialized(true, range);
-        } else {
-            ToBackend(dst->texture)->EnsureSubresourceContentInitialized(recordingContext, range);
+    // Cleanup in case of a failure, the image creation doesn't acquire the external objects
+    // if a failure happens.
+    Texture* result = nullptr;
+    // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
+    if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
+                                                  mExternalMemoryService.get()),
+                      &result) ||
+        ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
+                                          waitHandles, &signalSemaphore, &allocation,
+                                          &waitSemaphores)) ||
+        ConsumedError(
+            result->BindExternalMemory(descriptor, signalSemaphore, allocation, waitSemaphores))) {
+        // Delete the Texture if it was created
+        if (result != nullptr) {
+            result->Release();
         }
-        // Insert pipeline barrier to ensure correct ordering with previous memory operations on the
-        // texture.
-        ToBackend(dst->texture)
-            ->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
-        VkImage dstImage = ToBackend(dst->texture)->GetHandle();
 
-        // Dawn guarantees dstImage be in the TRANSFER_DST_OPTIMAL layout after the
-        // copy command.
-        this->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
-                                      ToBackend(source)->GetBufferHandle(), dstImage,
-                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
-        return {};
+        // Clear the signal semaphore
+        fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
+
+        // Clear image memory
+        fn.FreeMemory(GetVkDevice(), allocation, nullptr);
+
+        // Clear any wait semaphores we were able to import
+        for (VkSemaphore semaphore : waitSemaphores) {
+            fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
+        }
+        return nullptr;
     }
 
-    MaybeError Device::ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
-                                           ExternalMemoryHandle memoryHandle,
-                                           VkImage image,
-                                           const std::vector<ExternalSemaphoreHandle>& waitHandles,
-                                           VkSemaphore* outSignalSemaphore,
-                                           VkDeviceMemory* outAllocation,
-                                           std::vector<VkSemaphore>* outWaitSemaphores) {
-        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
+    return result;
+}
 
-        const DawnTextureInternalUsageDescriptor* internalUsageDesc = nullptr;
-        FindInChain(textureDescriptor->nextInChain, &internalUsageDesc);
+uint32_t Device::GetComputeSubgroupSize() const {
+    return mComputeSubgroupSize;
+}
 
-        wgpu::TextureUsage usage = textureDescriptor->usage;
-        if (internalUsageDesc != nullptr) {
-            usage |= internalUsageDesc->internalUsage;
-        }
-
-        // Check services support this combination of handle type / image info
-        DAWN_INVALID_IF(!mExternalSemaphoreService->Supported(),
-                        "External semaphore usage not supported");
-
-        DAWN_INVALID_IF(
-            !mExternalMemoryService->SupportsImportMemory(
-                VulkanImageFormat(this, textureDescriptor->format), VK_IMAGE_TYPE_2D,
-                VK_IMAGE_TILING_OPTIMAL,
-                VulkanImageUsage(usage, GetValidInternalFormat(textureDescriptor->format)),
-                VK_IMAGE_CREATE_ALIAS_BIT_KHR),
-            "External memory usage not supported");
-
-        // Create an external semaphore to signal when the texture is done being used
-        DAWN_TRY_ASSIGN(*outSignalSemaphore,
-                        mExternalSemaphoreService->CreateExportableSemaphore());
-
-        // Import the external image's memory
-        external_memory::MemoryImportParams importParams;
-        DAWN_TRY_ASSIGN(importParams,
-                        mExternalMemoryService->GetMemoryImportParams(descriptor, image));
-        DAWN_TRY_ASSIGN(*outAllocation,
-                        mExternalMemoryService->ImportMemory(memoryHandle, importParams, image));
-
-        // Import semaphores we have to wait on before using the texture
-        for (const ExternalSemaphoreHandle& handle : waitHandles) {
-            VkSemaphore semaphore = VK_NULL_HANDLE;
-            DAWN_TRY_ASSIGN(semaphore, mExternalSemaphoreService->ImportSemaphore(handle));
-            outWaitSemaphores->push_back(semaphore);
-        }
-
-        return {};
+MaybeError Device::WaitForIdleForDestruction() {
+    // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+    // Move the mRecordingContext.used to mUnusedCommands so it can be cleaned up in
+    // ShutDownImpl
+    if (mRecordingContext.used) {
+        CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
+                                         mRecordingContext.commandBuffer};
+        mUnusedCommands.push_back(commands);
+        mRecordingContext = CommandRecordingContext();
     }
 
-    bool Device::SignalAndExportExternalTexture(
-        Texture* texture,
-        VkImageLayout desiredLayout,
-        ExternalImageExportInfoVk* info,
-        std::vector<ExternalSemaphoreHandle>* semaphoreHandles) {
-        return !ConsumedError([&]() -> MaybeError {
-            DAWN_TRY(ValidateObject(texture));
+    VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
+    // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
+    // about, Device lost, which means workloads running on the GPU are no longer accessible
+    // (so they are as good as waited on) or success.
+    DAWN_UNUSED(waitIdleResult);
 
-            VkSemaphore signalSemaphore;
-            VkImageLayout releasedOldLayout;
-            VkImageLayout releasedNewLayout;
-            DAWN_TRY(texture->ExportExternalTexture(desiredLayout, &signalSemaphore,
-                                                    &releasedOldLayout, &releasedNewLayout));
+    // Make sure all fences are complete by explicitly waiting on them all
+    while (!mFencesInFlight.empty()) {
+        VkFence fence = mFencesInFlight.front().first;
+        ExecutionSerial fenceSerial = mFencesInFlight.front().second;
+        ASSERT(fenceSerial > GetCompletedCommandSerial());
 
-            ExternalSemaphoreHandle semaphoreHandle;
-            DAWN_TRY_ASSIGN(semaphoreHandle,
-                            mExternalSemaphoreService->ExportSemaphore(signalSemaphore));
-            semaphoreHandles->push_back(semaphoreHandle);
-            info->releasedOldLayout = releasedOldLayout;
-            info->releasedNewLayout = releasedNewLayout;
-            info->isInitialized =
-                texture->IsSubresourceContentInitialized(texture->GetAllSubresources());
-
-            return {};
-        }());
-    }
-
-    TextureBase* Device::CreateTextureWrappingVulkanImage(
-        const ExternalImageDescriptorVk* descriptor,
-        ExternalMemoryHandle memoryHandle,
-        const std::vector<ExternalSemaphoreHandle>& waitHandles) {
-        const TextureDescriptor* textureDescriptor = FromAPI(descriptor->cTextureDescriptor);
-
-        // Initial validation
-        if (ConsumedError(ValidateTextureDescriptor(this, textureDescriptor))) {
-            return nullptr;
-        }
-        if (ConsumedError(ValidateVulkanImageCanBeWrapped(this, textureDescriptor),
-                          "validating that a Vulkan image can be wrapped with %s.",
-                          textureDescriptor)) {
-            return nullptr;
-        }
-
-        VkSemaphore signalSemaphore = VK_NULL_HANDLE;
-        VkDeviceMemory allocation = VK_NULL_HANDLE;
-        std::vector<VkSemaphore> waitSemaphores;
-        waitSemaphores.reserve(waitHandles.size());
-
-        // Cleanup in case of a failure, the image creation doesn't acquire the external objects
-        // if a failure happems.
-        Texture* result = nullptr;
-        // TODO(crbug.com/1026480): Consolidate this into a single CreateFromExternal call.
-        if (ConsumedError(Texture::CreateFromExternal(this, descriptor, textureDescriptor,
-                                                      mExternalMemoryService.get()),
-                          &result) ||
-            ConsumedError(ImportExternalImage(descriptor, memoryHandle, result->GetHandle(),
-                                              waitHandles, &signalSemaphore, &allocation,
-                                              &waitSemaphores)) ||
-            ConsumedError(result->BindExternalMemory(descriptor, signalSemaphore, allocation,
-                                                     waitSemaphores))) {
-            // Delete the Texture if it was created
-            if (result != nullptr) {
-                result->Release();
+        VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
+        do {
+            // If WaitForIdleForDestruction is called while we are Disconnected, it means that
+            // the device lost came from the ErrorInjector and we need to wait without allowing
+            // any more error to be injected. This is because the device lost was "fake" and
+            // commands might still be running.
+            if (GetState() == State::Disconnected) {
+                result =
+                    VkResult::WrapUnsafe(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
+                continue;
             }
 
-            // Clear the signal semaphore
-            fn.DestroySemaphore(GetVkDevice(), signalSemaphore, nullptr);
+            result = VkResult::WrapUnsafe(INJECT_ERROR_OR_RUN(
+                fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX), VK_ERROR_DEVICE_LOST));
+        } while (result == VK_TIMEOUT);
+        // Ignore errors from vkWaitForFences: it can be either OOM which we can't do anything
+        // about (and we need to keep going with the destruction of all fences), or device
+        // loss, which means the workload on the GPU is no longer accessible and we can
+        // safely destroy the fence.
 
-            // Clear image memory
-            fn.FreeMemory(GetVkDevice(), allocation, nullptr);
+        fn.DestroyFence(mVkDevice, fence, nullptr);
+        mFencesInFlight.pop();
+    }
+    return {};
+}
 
-            // Clear any wait semaphores we were able to import
-            for (VkSemaphore semaphore : waitSemaphores) {
-                fn.DestroySemaphore(GetVkDevice(), semaphore, nullptr);
-            }
-            return nullptr;
-        }
+void Device::DestroyImpl() {
+    ASSERT(GetState() == State::Disconnected);
 
-        return result;
+    // We failed during initialization so early that we don't even have a VkDevice. There is
+    // nothing to do.
+    if (mVkDevice == VK_NULL_HANDLE) {
+        return;
     }
 
-    uint32_t Device::GetComputeSubgroupSize() const {
-        return mComputeSubgroupSize;
-    }
-
-    MaybeError Device::WaitForIdleForDestruction() {
-        // Immediately tag the recording context as unused so we don't try to submit it in Tick.
-        // Move the mRecordingContext.used to mUnusedCommands so it can be cleaned up in
-        // ShutDownImpl
-        if (mRecordingContext.used) {
-            CommandPoolAndBuffer commands = {mRecordingContext.commandPool,
-                                             mRecordingContext.commandBuffer};
-            mUnusedCommands.push_back(commands);
-            mRecordingContext = CommandRecordingContext();
-        }
-
-        VkResult waitIdleResult = VkResult::WrapUnsafe(fn.QueueWaitIdle(mQueue));
-        // Ignore the result of QueueWaitIdle: it can return OOM which we can't really do anything
-        // about, Device lost, which means workloads running on the GPU are no longer accessible
-        // (so they are as good as waited on) or success.
-        DAWN_UNUSED(waitIdleResult);
-
-        // Make sure all fences are complete by explicitly waiting on them all
-        while (!mFencesInFlight.empty()) {
-            VkFence fence = mFencesInFlight.front().first;
-            ExecutionSerial fenceSerial = mFencesInFlight.front().second;
-            ASSERT(fenceSerial > GetCompletedCommandSerial());
-
-            VkResult result = VkResult::WrapUnsafe(VK_TIMEOUT);
-            do {
-                // If WaitForIdleForDesctruction is called while we are Disconnected, it means that
-                // the device lost came from the ErrorInjector and we need to wait without allowing
-                // any more error to be injected. This is because the device lost was "fake" and
-                // commands might still be running.
-                if (GetState() == State::Disconnected) {
-                    result = VkResult::WrapUnsafe(
-                        fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX));
-                    continue;
-                }
-
-                result = VkResult::WrapUnsafe(
-                    INJECT_ERROR_OR_RUN(fn.WaitForFences(mVkDevice, 1, &*fence, true, UINT64_MAX),
-                                        VK_ERROR_DEVICE_LOST));
-            } while (result == VK_TIMEOUT);
-            // Ignore errors from vkWaitForFences: it can be either OOM which we can't do anything
-            // about (and we need to keep going with the destruction of all fences), or device
-            // loss, which means the workload on the GPU is no longer accessible and we can
-            // safely destroy the fence.
-
-            fn.DestroyFence(mVkDevice, fence, nullptr);
-            mFencesInFlight.pop();
-        }
-        return {};
-    }
-
-    void Device::DestroyImpl() {
-        ASSERT(GetState() == State::Disconnected);
-
-        // We failed during initialization so early that we don't even have a VkDevice. There is
-        // nothing to do.
-        if (mVkDevice == VK_NULL_HANDLE) {
-            return;
-        }
-
-        // The deleter is the second thing we initialize. If it is not present, it means that
-        // only the VkDevice was created and nothing else. Destroy the device and do nothing else
-        // because the function pointers might not have been loaded (and there is nothing to
-        // destroy anyway).
-        if (mDeleter == nullptr) {
-            fn.DestroyDevice(mVkDevice, nullptr);
-            mVkDevice = VK_NULL_HANDLE;
-            return;
-        }
-
-        // Enough of the Device's initialization happened that we can now do regular robust
-        // deinitialization.
-
-        // Immediately tag the recording context as unused so we don't try to submit it in Tick.
-        mRecordingContext.used = false;
-        if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
-            // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
-            // destroyed, but that's not the case in some drivers and the leak memory.
-            // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
-            // TODO(enga): Only do this on a known list of bad drivers.
-            fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
-                                  &mRecordingContext.commandBuffer);
-            fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
-        }
-
-        for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
-            fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
-        }
-        mRecordingContext.waitSemaphores.clear();
-
-        for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
-            fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
-        }
-        mRecordingContext.signalSemaphores.clear();
-
-        // Some commands might still be marked as in-flight if we shut down because of a device
-        // loss. Recycle them as unused so that we free them below.
-        RecycleCompletedCommands();
-        ASSERT(mCommandsInFlight.Empty());
-
-        for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
-            // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
-            // destroyed, but that's not the case in some drivers and the leak memory.
-            // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
-            // TODO(enga): Only do this on a known list of bad drivers.
-            fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
-            fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
-        }
-        mUnusedCommands.clear();
-
-        // Some fences might still be marked as in-flight if we shut down because of a device loss.
-        // Delete them since at this point all commands are complete.
-        while (!mFencesInFlight.empty()) {
-            fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
-            mFencesInFlight.pop();
-        }
-
-        for (VkFence fence : mUnusedFences) {
-            fn.DestroyFence(mVkDevice, fence, nullptr);
-        }
-        mUnusedFences.clear();
-
-        ExecutionSerial completedSerial = GetCompletedCommandSerial();
-        for (Ref<DescriptorSetAllocator>& allocator :
-             mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
-            allocator->FinishDeallocation(completedSerial);
-        }
-
-        // Releasing the uploader enqueues buffers to be released.
-        // Call Tick() again to clear them before releasing the deleter.
-        mResourceMemoryAllocator->Tick(completedSerial);
-        mDeleter->Tick(completedSerial);
-        mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
-
-        // Allow recycled memory to be deleted.
-        mResourceMemoryAllocator->DestroyPool();
-
-        // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
-        // to them are guaranteed to be finished executing.
-        mRenderPassCache = nullptr;
-
-        // We need handle deleting all child objects by calling Tick() again with a large serial to
-        // force all operations to look as if they were completed, and delete all objects before
-        // destroying the Deleter and vkDevice.
-        ASSERT(mDeleter != nullptr);
-        mDeleter->Tick(kMaxExecutionSerial);
-        mDeleter = nullptr;
-
-        // VkQueues are destroyed when the VkDevice is destroyed
-        // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
-        // child objects have been deleted.
-        ASSERT(mVkDevice != VK_NULL_HANDLE);
+    // The deleter is the second thing we initialize. If it is not present, it means that
+    // only the VkDevice was created and nothing else. Destroy the device and do nothing else
+    // because the function pointers might not have been loaded (and there is nothing to
+    // destroy anyway).
+    if (mDeleter == nullptr) {
         fn.DestroyDevice(mVkDevice, nullptr);
         mVkDevice = VK_NULL_HANDLE;
+        return;
     }
 
-    uint32_t Device::GetOptimalBytesPerRowAlignment() const {
-        return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
+    // Enough of the Device's initialization happened that we can now do regular robust
+    // deinitialization.
+
+    // Immediately tag the recording context as unused so we don't try to submit it in Tick.
+    mRecordingContext.used = false;
+    if (mRecordingContext.commandPool != VK_NULL_HANDLE) {
+        // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+        // destroyed, but that's not the case in some drivers and they leak memory.
+        // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+        // TODO(enga): Only do this on a known list of bad drivers.
+        fn.FreeCommandBuffers(mVkDevice, mRecordingContext.commandPool, 1,
+                              &mRecordingContext.commandBuffer);
+        fn.DestroyCommandPool(mVkDevice, mRecordingContext.commandPool, nullptr);
     }
 
-    uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
-        return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
+    for (VkSemaphore semaphore : mRecordingContext.waitSemaphores) {
+        fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+    }
+    mRecordingContext.waitSemaphores.clear();
+
+    for (VkSemaphore semaphore : mRecordingContext.signalSemaphores) {
+        fn.DestroySemaphore(mVkDevice, semaphore, nullptr);
+    }
+    mRecordingContext.signalSemaphores.clear();
+
+    // Some commands might still be marked as in-flight if we shut down because of a device
+    // loss. Recycle them as unused so that we free them below.
+    RecycleCompletedCommands();
+    ASSERT(mCommandsInFlight.Empty());
+
+    for (const CommandPoolAndBuffer& commands : mUnusedCommands) {
+        // The VkCommandBuffer memory should be wholly owned by the pool and freed when it is
+        // destroyed, but that's not the case in some drivers and they leak memory.
+        // So we call FreeCommandBuffers before DestroyCommandPool to be safe.
+        // TODO(enga): Only do this on a known list of bad drivers.
+        fn.FreeCommandBuffers(mVkDevice, commands.pool, 1, &commands.commandBuffer);
+        fn.DestroyCommandPool(mVkDevice, commands.pool, nullptr);
+    }
+    mUnusedCommands.clear();
+
+    // Some fences might still be marked as in-flight if we shut down because of a device loss.
+    // Delete them since at this point all commands are complete.
+    while (!mFencesInFlight.empty()) {
+        fn.DestroyFence(mVkDevice, *mFencesInFlight.front().first, nullptr);
+        mFencesInFlight.pop();
     }
 
-    float Device::GetTimestampPeriodInNS() const {
-        return mDeviceInfo.properties.limits.timestampPeriod;
+    for (VkFence fence : mUnusedFences) {
+        fn.DestroyFence(mVkDevice, fence, nullptr);
+    }
+    mUnusedFences.clear();
+
+    ExecutionSerial completedSerial = GetCompletedCommandSerial();
+    for (Ref<DescriptorSetAllocator>& allocator :
+         mDescriptorAllocatorsPendingDeallocation.IterateUpTo(completedSerial)) {
+        allocator->FinishDeallocation(completedSerial);
     }
 
-    void Device::SetLabelImpl() {
-        SetDebugName(this, VK_OBJECT_TYPE_DEVICE, mVkDevice, "Dawn_Device", GetLabel());
-    }
+    // Releasing the uploader enqueues buffers to be released.
+    // Call Tick() again to clear them before releasing the deleter.
+    mResourceMemoryAllocator->Tick(completedSerial);
+    mDeleter->Tick(completedSerial);
+    mDescriptorAllocatorsPendingDeallocation.ClearUpTo(completedSerial);
+
+    // Allow recycled memory to be deleted.
+    mResourceMemoryAllocator->DestroyPool();
+
+    // The VkRenderPasses in the cache can be destroyed immediately since all commands referring
+    // to them are guaranteed to be finished executing.
+    mRenderPassCache = nullptr;
+
+    // We need to handle deleting all child objects by calling Tick() again with a large serial to
+    // force all operations to look as if they were completed, and delete all objects before
+    // destroying the Deleter and vkDevice.
+    ASSERT(mDeleter != nullptr);
+    mDeleter->Tick(kMaxExecutionSerial);
+    mDeleter = nullptr;
+
+    // VkQueues are destroyed when the VkDevice is destroyed
+    // The VkDevice is needed to destroy child objects, so it must be destroyed last after all
+    // child objects have been deleted.
+    ASSERT(mVkDevice != VK_NULL_HANDLE);
+    fn.DestroyDevice(mVkDevice, nullptr);
+    mVkDevice = VK_NULL_HANDLE;
+}
+
+uint32_t Device::GetOptimalBytesPerRowAlignment() const {
+    return mDeviceInfo.properties.limits.optimalBufferCopyRowPitchAlignment;
+}
+
+uint64_t Device::GetOptimalBufferToTextureCopyOffsetAlignment() const {
+    return mDeviceInfo.properties.limits.optimalBufferCopyOffsetAlignment;
+}
+
+float Device::GetTimestampPeriodInNS() const {
+    return mDeviceInfo.properties.limits.timestampPeriod;
+}
+
+void Device::SetLabelImpl() {
+    SetDebugName(this, VK_OBJECT_TYPE_DEVICE, mVkDevice, "Dawn_Device", GetLabel());
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/DeviceVk.h b/src/dawn/native/vulkan/DeviceVk.h
index 0d1ec93..6c398c3 100644
--- a/src/dawn/native/vulkan/DeviceVk.h
+++ b/src/dawn/native/vulkan/DeviceVk.h
@@ -20,10 +20,10 @@
 #include <utility>
 #include <vector>
 
-#include "dawn/native/dawn_platform.h"
 #include "dawn/common/SerialQueue.h"
 #include "dawn/native/Commands.h"
 #include "dawn/native/Device.h"
+#include "dawn/native/dawn_platform.h"
 #include "dawn/native/vulkan/CommandRecordingContext.h"
 #include "dawn/native/vulkan/DescriptorSetAllocator.h"
 #include "dawn/native/vulkan/Forward.h"
@@ -35,182 +35,178 @@
 
 namespace dawn::native::vulkan {
 
-    class Adapter;
-    class BindGroupLayout;
-    class BufferUploader;
-    class FencedDeleter;
-    class RenderPassCache;
-    class ResourceMemoryAllocator;
+class Adapter;
+class BindGroupLayout;
+class BufferUploader;
+class FencedDeleter;
+class RenderPassCache;
+class ResourceMemoryAllocator;
 
-    class Device final : public DeviceBase {
-      public:
-        static ResultOrError<Ref<Device>> Create(Adapter* adapter,
-                                                 const DeviceDescriptor* descriptor);
-        ~Device() override;
+class Device final : public DeviceBase {
+  public:
+    static ResultOrError<Ref<Device>> Create(Adapter* adapter, const DeviceDescriptor* descriptor);
+    ~Device() override;
 
-        MaybeError Initialize(const DeviceDescriptor* descriptor);
+    MaybeError Initialize(const DeviceDescriptor* descriptor);
 
-        // Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
-        const VulkanFunctions fn;
+    // Contains all the Vulkan entry points, vkDoFoo is called via device->fn.DoFoo.
+    const VulkanFunctions fn;
 
-        VkInstance GetVkInstance() const;
-        const VulkanDeviceInfo& GetDeviceInfo() const;
-        const VulkanGlobalInfo& GetGlobalInfo() const;
-        VkDevice GetVkDevice() const;
-        uint32_t GetGraphicsQueueFamily() const;
-        VkQueue GetQueue() const;
+    VkInstance GetVkInstance() const;
+    const VulkanDeviceInfo& GetDeviceInfo() const;
+    const VulkanGlobalInfo& GetGlobalInfo() const;
+    VkDevice GetVkDevice() const;
+    uint32_t GetGraphicsQueueFamily() const;
+    VkQueue GetQueue() const;
 
-        FencedDeleter* GetFencedDeleter() const;
-        RenderPassCache* GetRenderPassCache() const;
-        ResourceMemoryAllocator* GetResourceMemoryAllocator() const;
+    FencedDeleter* GetFencedDeleter() const;
+    RenderPassCache* GetRenderPassCache() const;
+    ResourceMemoryAllocator* GetResourceMemoryAllocator() const;
 
-        CommandRecordingContext* GetPendingRecordingContext();
-        MaybeError SubmitPendingCommands();
+    CommandRecordingContext* GetPendingRecordingContext();
+    MaybeError SubmitPendingCommands();
 
-        void EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator);
+    void EnqueueDeferredDeallocation(DescriptorSetAllocator* allocator);
 
-        // Dawn Native API
+    // Dawn Native API
 
-        TextureBase* CreateTextureWrappingVulkanImage(
-            const ExternalImageDescriptorVk* descriptor,
-            ExternalMemoryHandle memoryHandle,
-            const std::vector<ExternalSemaphoreHandle>& waitHandles);
-        bool SignalAndExportExternalTexture(Texture* texture,
-                                            VkImageLayout desiredLayout,
-                                            ExternalImageExportInfoVk* info,
-                                            std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
+    TextureBase* CreateTextureWrappingVulkanImage(
+        const ExternalImageDescriptorVk* descriptor,
+        ExternalMemoryHandle memoryHandle,
+        const std::vector<ExternalSemaphoreHandle>& waitHandles);
+    bool SignalAndExportExternalTexture(Texture* texture,
+                                        VkImageLayout desiredLayout,
+                                        ExternalImageExportInfoVk* info,
+                                        std::vector<ExternalSemaphoreHandle>* semaphoreHandle);
 
-        ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
-            CommandEncoder* encoder,
-            const CommandBufferDescriptor* descriptor) override;
+    ResultOrError<Ref<CommandBufferBase>> CreateCommandBuffer(
+        CommandEncoder* encoder,
+        const CommandBufferDescriptor* descriptor) override;
 
-        MaybeError TickImpl() override;
+    MaybeError TickImpl() override;
 
-        ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
-        MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
-                                           uint64_t sourceOffset,
-                                           BufferBase* destination,
-                                           uint64_t destinationOffset,
-                                           uint64_t size) override;
-        MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
-                                            const TextureDataLayout& src,
-                                            TextureCopy* dst,
-                                            const Extent3D& copySizePixels) override;
+    ResultOrError<std::unique_ptr<StagingBufferBase>> CreateStagingBuffer(size_t size) override;
+    MaybeError CopyFromStagingToBuffer(StagingBufferBase* source,
+                                       uint64_t sourceOffset,
+                                       BufferBase* destination,
+                                       uint64_t destinationOffset,
+                                       uint64_t size) override;
+    MaybeError CopyFromStagingToTexture(const StagingBufferBase* source,
+                                        const TextureDataLayout& src,
+                                        TextureCopy* dst,
+                                        const Extent3D& copySizePixels) override;
 
-        // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
-        // needs to be set.
-        uint32_t GetComputeSubgroupSize() const;
+    // Return the fixed subgroup size to use for compute shaders on this device or 0 if none
+    // needs to be set.
+    uint32_t GetComputeSubgroupSize() const;
 
-        uint32_t GetOptimalBytesPerRowAlignment() const override;
-        uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
+    uint32_t GetOptimalBytesPerRowAlignment() const override;
+    uint64_t GetOptimalBufferToTextureCopyOffsetAlignment() const override;
 
-        float GetTimestampPeriodInNS() const override;
+    float GetTimestampPeriodInNS() const override;
 
-        void SetLabelImpl() override;
+    void SetLabelImpl() override;
 
-      private:
-        Device(Adapter* adapter, const DeviceDescriptor* descriptor);
+  private:
+    Device(Adapter* adapter, const DeviceDescriptor* descriptor);
 
-        ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
-            const BindGroupDescriptor* descriptor) override;
-        ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
-            const BindGroupLayoutDescriptor* descriptor,
-            PipelineCompatibilityToken pipelineCompatibilityToken) override;
-        ResultOrError<Ref<BufferBase>> CreateBufferImpl(
-            const BufferDescriptor* descriptor) override;
-        ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
-            const PipelineLayoutDescriptor* descriptor) override;
-        ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
-            const QuerySetDescriptor* descriptor) override;
-        ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(
-            const SamplerDescriptor* descriptor) override;
-        ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
-            const ShaderModuleDescriptor* descriptor,
-            ShaderModuleParseResult* parseResult) override;
-        ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
-            Surface* surface,
-            NewSwapChainBase* previousSwapChain,
-            const SwapChainDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureBase>> CreateTextureImpl(
-            const TextureDescriptor* descriptor) override;
-        ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
-            TextureBase* texture,
-            const TextureViewDescriptor* descriptor) override;
-        Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
-            const ComputePipelineDescriptor* descriptor) override;
-        Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
-            const RenderPipelineDescriptor* descriptor) override;
-        void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
-                                                WGPUCreateComputePipelineAsyncCallback callback,
-                                                void* userdata) override;
-        void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
-                                               WGPUCreateRenderPipelineAsyncCallback callback,
-                                               void* userdata) override;
+    ResultOrError<Ref<BindGroupBase>> CreateBindGroupImpl(
+        const BindGroupDescriptor* descriptor) override;
+    ResultOrError<Ref<BindGroupLayoutBase>> CreateBindGroupLayoutImpl(
+        const BindGroupLayoutDescriptor* descriptor,
+        PipelineCompatibilityToken pipelineCompatibilityToken) override;
+    ResultOrError<Ref<BufferBase>> CreateBufferImpl(const BufferDescriptor* descriptor) override;
+    ResultOrError<Ref<PipelineLayoutBase>> CreatePipelineLayoutImpl(
+        const PipelineLayoutDescriptor* descriptor) override;
+    ResultOrError<Ref<QuerySetBase>> CreateQuerySetImpl(
+        const QuerySetDescriptor* descriptor) override;
+    ResultOrError<Ref<SamplerBase>> CreateSamplerImpl(const SamplerDescriptor* descriptor) override;
+    ResultOrError<Ref<ShaderModuleBase>> CreateShaderModuleImpl(
+        const ShaderModuleDescriptor* descriptor,
+        ShaderModuleParseResult* parseResult) override;
+    ResultOrError<Ref<SwapChainBase>> CreateSwapChainImpl(
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<NewSwapChainBase>> CreateSwapChainImpl(
+        Surface* surface,
+        NewSwapChainBase* previousSwapChain,
+        const SwapChainDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureBase>> CreateTextureImpl(const TextureDescriptor* descriptor) override;
+    ResultOrError<Ref<TextureViewBase>> CreateTextureViewImpl(
+        TextureBase* texture,
+        const TextureViewDescriptor* descriptor) override;
+    Ref<ComputePipelineBase> CreateUninitializedComputePipelineImpl(
+        const ComputePipelineDescriptor* descriptor) override;
+    Ref<RenderPipelineBase> CreateUninitializedRenderPipelineImpl(
+        const RenderPipelineDescriptor* descriptor) override;
+    void InitializeComputePipelineAsyncImpl(Ref<ComputePipelineBase> computePipeline,
+                                            WGPUCreateComputePipelineAsyncCallback callback,
+                                            void* userdata) override;
+    void InitializeRenderPipelineAsyncImpl(Ref<RenderPipelineBase> renderPipeline,
+                                           WGPUCreateRenderPipelineAsyncCallback callback,
+                                           void* userdata) override;
 
-        ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
-        void GatherQueueFromDevice();
+    ResultOrError<VulkanDeviceKnobs> CreateDevice(VkPhysicalDevice physicalDevice);
+    void GatherQueueFromDevice();
 
-        uint32_t FindComputeSubgroupSize() const;
-        void InitTogglesFromDriver();
-        void ApplyDepthStencilFormatToggles();
-        void ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
+    uint32_t FindComputeSubgroupSize() const;
+    void InitTogglesFromDriver();
+    void ApplyDepthStencilFormatToggles();
+    void ApplyUseZeroInitializeWorkgroupMemoryExtensionToggle();
 
-        void DestroyImpl() override;
-        MaybeError WaitForIdleForDestruction() override;
+    void DestroyImpl() override;
+    MaybeError WaitForIdleForDestruction() override;
 
-        // To make it easier to use fn it is a public const member. However
-        // the Device is allowed to mutate them through these private methods.
-        VulkanFunctions* GetMutableFunctions();
+    // To make it easier to use fn it is a public const member. However
+    // the Device is allowed to mutate them through these private methods.
+    VulkanFunctions* GetMutableFunctions();
 
-        VulkanDeviceInfo mDeviceInfo = {};
-        VkDevice mVkDevice = VK_NULL_HANDLE;
-        uint32_t mQueueFamily = 0;
-        VkQueue mQueue = VK_NULL_HANDLE;
-        uint32_t mComputeSubgroupSize = 0;
+    VulkanDeviceInfo mDeviceInfo = {};
+    VkDevice mVkDevice = VK_NULL_HANDLE;
+    uint32_t mQueueFamily = 0;
+    VkQueue mQueue = VK_NULL_HANDLE;
+    uint32_t mComputeSubgroupSize = 0;
 
-        SerialQueue<ExecutionSerial, Ref<DescriptorSetAllocator>>
-            mDescriptorAllocatorsPendingDeallocation;
-        std::unique_ptr<FencedDeleter> mDeleter;
-        std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
-        std::unique_ptr<RenderPassCache> mRenderPassCache;
+    SerialQueue<ExecutionSerial, Ref<DescriptorSetAllocator>>
+        mDescriptorAllocatorsPendingDeallocation;
+    std::unique_ptr<FencedDeleter> mDeleter;
+    std::unique_ptr<ResourceMemoryAllocator> mResourceMemoryAllocator;
+    std::unique_ptr<RenderPassCache> mRenderPassCache;
 
-        std::unique_ptr<external_memory::Service> mExternalMemoryService;
-        std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
+    std::unique_ptr<external_memory::Service> mExternalMemoryService;
+    std::unique_ptr<external_semaphore::Service> mExternalSemaphoreService;
 
-        ResultOrError<VkFence> GetUnusedFence();
-        ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
+    ResultOrError<VkFence> GetUnusedFence();
+    ResultOrError<ExecutionSerial> CheckAndUpdateCompletedSerials() override;
 
-        // We track which operations are in flight on the GPU with an increasing serial.
-        // This works only because we have a single queue. Each submit to a queue is associated
-        // to a serial and a fence, such that when the fence is "ready" we know the operations
-        // have finished.
-        std::queue<std::pair<VkFence, ExecutionSerial>> mFencesInFlight;
-        // Fences in the unused list aren't reset yet.
-        std::vector<VkFence> mUnusedFences;
+    // We track which operations are in flight on the GPU with an increasing serial.
+    // This works only because we have a single queue. Each submit to a queue is associated
+    // to a serial and a fence, such that when the fence is "ready" we know the operations
+    // have finished.
+    std::queue<std::pair<VkFence, ExecutionSerial>> mFencesInFlight;
+    // Fences in the unused list aren't reset yet.
+    std::vector<VkFence> mUnusedFences;
 
-        MaybeError PrepareRecordingContext();
-        void RecycleCompletedCommands();
+    MaybeError PrepareRecordingContext();
+    void RecycleCompletedCommands();
 
-        struct CommandPoolAndBuffer {
-            VkCommandPool pool = VK_NULL_HANDLE;
-            VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
-        };
-        SerialQueue<ExecutionSerial, CommandPoolAndBuffer> mCommandsInFlight;
-        // Command pools in the unused list haven't been reset yet.
-        std::vector<CommandPoolAndBuffer> mUnusedCommands;
-        // There is always a valid recording context stored in mRecordingContext
-        CommandRecordingContext mRecordingContext;
-
-        MaybeError ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
-                                       ExternalMemoryHandle memoryHandle,
-                                       VkImage image,
-                                       const std::vector<ExternalSemaphoreHandle>& waitHandles,
-                                       VkSemaphore* outSignalSemaphore,
-                                       VkDeviceMemory* outAllocation,
-                                       std::vector<VkSemaphore>* outWaitSemaphores);
+    struct CommandPoolAndBuffer {
+        VkCommandPool pool = VK_NULL_HANDLE;
+        VkCommandBuffer commandBuffer = VK_NULL_HANDLE;
     };
+    SerialQueue<ExecutionSerial, CommandPoolAndBuffer> mCommandsInFlight;
+    // Command pools in the unused list haven't been reset yet.
+    std::vector<CommandPoolAndBuffer> mUnusedCommands;
+    // There is always a valid recording context stored in mRecordingContext
+    CommandRecordingContext mRecordingContext;
+
+    MaybeError ImportExternalImage(const ExternalImageDescriptorVk* descriptor,
+                                   ExternalMemoryHandle memoryHandle,
+                                   VkImage image,
+                                   const std::vector<ExternalSemaphoreHandle>& waitHandles,
+                                   VkSemaphore* outSignalSemaphore,
+                                   VkDeviceMemory* outAllocation,
+                                   std::vector<VkSemaphore>* outWaitSemaphores);
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/ExternalHandle.h b/src/dawn/native/vulkan/ExternalHandle.h
index d5a607e..309262d 100644
--- a/src/dawn/native/vulkan/ExternalHandle.h
+++ b/src/dawn/native/vulkan/ExternalHandle.h
@@ -20,19 +20,19 @@
 namespace dawn::native::vulkan {
 
 #if DAWN_PLATFORM_LINUX
-    // File descriptor
-    using ExternalMemoryHandle = int;
-    // File descriptor
-    using ExternalSemaphoreHandle = int;
+// File descriptor
+using ExternalMemoryHandle = int;
+// File descriptor
+using ExternalSemaphoreHandle = int;
 #elif DAWN_PLATFORM_FUCHSIA
-    // Really a Zircon vmo handle.
-    using ExternalMemoryHandle = zx_handle_t;
-    // Really a Zircon event handle.
-    using ExternalSemaphoreHandle = zx_handle_t;
+// Really a Zircon vmo handle.
+using ExternalMemoryHandle = zx_handle_t;
+// Really a Zircon event handle.
+using ExternalSemaphoreHandle = zx_handle_t;
 #else
-    // Generic types so that the Null service can compile, not used for real handles
-    using ExternalMemoryHandle = void*;
-    using ExternalSemaphoreHandle = void*;
+// Generic types so that the Null service can compile, not used for real handles
+using ExternalMemoryHandle = void*;
+using ExternalSemaphoreHandle = void*;
 #endif
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/FencedDeleter.cpp b/src/dawn/native/vulkan/FencedDeleter.cpp
index 09c91b4..167e588 100644
--- a/src/dawn/native/vulkan/FencedDeleter.cpp
+++ b/src/dawn/native/vulkan/FencedDeleter.cpp
@@ -18,166 +18,165 @@
 
 namespace dawn::native::vulkan {
 
-    FencedDeleter::FencedDeleter(Device* device) : mDevice(device) {
+FencedDeleter::FencedDeleter(Device* device) : mDevice(device) {}
+
+FencedDeleter::~FencedDeleter() {
+    ASSERT(mBuffersToDelete.Empty());
+    ASSERT(mDescriptorPoolsToDelete.Empty());
+    ASSERT(mFramebuffersToDelete.Empty());
+    ASSERT(mImagesToDelete.Empty());
+    ASSERT(mImageViewsToDelete.Empty());
+    ASSERT(mMemoriesToDelete.Empty());
+    ASSERT(mPipelinesToDelete.Empty());
+    ASSERT(mPipelineLayoutsToDelete.Empty());
+    ASSERT(mQueryPoolsToDelete.Empty());
+    ASSERT(mRenderPassesToDelete.Empty());
+    ASSERT(mSamplersToDelete.Empty());
+    ASSERT(mSemaphoresToDelete.Empty());
+    ASSERT(mShaderModulesToDelete.Empty());
+    ASSERT(mSurfacesToDelete.Empty());
+    ASSERT(mSwapChainsToDelete.Empty());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkBuffer buffer) {
+    mBuffersToDelete.Enqueue(buffer, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkDescriptorPool pool) {
+    mDescriptorPoolsToDelete.Enqueue(pool, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkDeviceMemory memory) {
+    mMemoriesToDelete.Enqueue(memory, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkFramebuffer framebuffer) {
+    mFramebuffersToDelete.Enqueue(framebuffer, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkImage image) {
+    mImagesToDelete.Enqueue(image, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkImageView view) {
+    mImageViewsToDelete.Enqueue(view, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkPipeline pipeline) {
+    mPipelinesToDelete.Enqueue(pipeline, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkPipelineLayout layout) {
+    mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
+    mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
+    mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSampler sampler) {
+    mSamplersToDelete.Enqueue(sampler, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSemaphore semaphore) {
+    mSemaphoresToDelete.Enqueue(semaphore, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkShaderModule module) {
+    mShaderModulesToDelete.Enqueue(module, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSurfaceKHR surface) {
+    mSurfacesToDelete.Enqueue(surface, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::DeleteWhenUnused(VkSwapchainKHR swapChain) {
+    mSwapChainsToDelete.Enqueue(swapChain, mDevice->GetPendingCommandSerial());
+}
+
+void FencedDeleter::Tick(ExecutionSerial completedSerial) {
+    VkDevice vkDevice = mDevice->GetVkDevice();
+    VkInstance instance = mDevice->GetVkInstance();
+
+    // Buffers and images must be deleted before memories because it is invalid to free memory
+    // that still has resources bound to it.
+    for (VkBuffer buffer : mBuffersToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyBuffer(vkDevice, buffer, nullptr);
     }
-
-    FencedDeleter::~FencedDeleter() {
-        ASSERT(mBuffersToDelete.Empty());
-        ASSERT(mDescriptorPoolsToDelete.Empty());
-        ASSERT(mFramebuffersToDelete.Empty());
-        ASSERT(mImagesToDelete.Empty());
-        ASSERT(mImageViewsToDelete.Empty());
-        ASSERT(mMemoriesToDelete.Empty());
-        ASSERT(mPipelinesToDelete.Empty());
-        ASSERT(mPipelineLayoutsToDelete.Empty());
-        ASSERT(mQueryPoolsToDelete.Empty());
-        ASSERT(mRenderPassesToDelete.Empty());
-        ASSERT(mSamplersToDelete.Empty());
-        ASSERT(mSemaphoresToDelete.Empty());
-        ASSERT(mShaderModulesToDelete.Empty());
-        ASSERT(mSurfacesToDelete.Empty());
-        ASSERT(mSwapChainsToDelete.Empty());
+    mBuffersToDelete.ClearUpTo(completedSerial);
+    for (VkImage image : mImagesToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyImage(vkDevice, image, nullptr);
     }
+    mImagesToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkBuffer buffer) {
-        mBuffersToDelete.Enqueue(buffer, mDevice->GetPendingCommandSerial());
+    for (VkDeviceMemory memory : mMemoriesToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.FreeMemory(vkDevice, memory, nullptr);
     }
+    mMemoriesToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkDescriptorPool pool) {
-        mDescriptorPoolsToDelete.Enqueue(pool, mDevice->GetPendingCommandSerial());
+    for (VkPipelineLayout layout : mPipelineLayoutsToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyPipelineLayout(vkDevice, layout, nullptr);
     }
+    mPipelineLayoutsToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkDeviceMemory memory) {
-        mMemoriesToDelete.Enqueue(memory, mDevice->GetPendingCommandSerial());
+    for (VkRenderPass renderPass : mRenderPassesToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyRenderPass(vkDevice, renderPass, nullptr);
     }
+    mRenderPassesToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkFramebuffer framebuffer) {
-        mFramebuffersToDelete.Enqueue(framebuffer, mDevice->GetPendingCommandSerial());
+    for (VkFramebuffer framebuffer : mFramebuffersToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyFramebuffer(vkDevice, framebuffer, nullptr);
     }
+    mFramebuffersToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkImage image) {
-        mImagesToDelete.Enqueue(image, mDevice->GetPendingCommandSerial());
+    for (VkImageView view : mImageViewsToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyImageView(vkDevice, view, nullptr);
     }
+    mImageViewsToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkImageView view) {
-        mImageViewsToDelete.Enqueue(view, mDevice->GetPendingCommandSerial());
+    for (VkShaderModule module : mShaderModulesToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyShaderModule(vkDevice, module, nullptr);
     }
+    mShaderModulesToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkPipeline pipeline) {
-        mPipelinesToDelete.Enqueue(pipeline, mDevice->GetPendingCommandSerial());
+    for (VkPipeline pipeline : mPipelinesToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyPipeline(vkDevice, pipeline, nullptr);
     }
+    mPipelinesToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkPipelineLayout layout) {
-        mPipelineLayoutsToDelete.Enqueue(layout, mDevice->GetPendingCommandSerial());
+    // Vulkan swapchains must be destroyed before their corresponding VkSurface
+    for (VkSwapchainKHR swapChain : mSwapChainsToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroySwapchainKHR(vkDevice, swapChain, nullptr);
     }
-
-    void FencedDeleter::DeleteWhenUnused(VkQueryPool querypool) {
-        mQueryPoolsToDelete.Enqueue(querypool, mDevice->GetPendingCommandSerial());
+    mSwapChainsToDelete.ClearUpTo(completedSerial);
+    for (VkSurfaceKHR surface : mSurfacesToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroySurfaceKHR(instance, surface, nullptr);
     }
+    mSurfacesToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkRenderPass renderPass) {
-        mRenderPassesToDelete.Enqueue(renderPass, mDevice->GetPendingCommandSerial());
+    for (VkSemaphore semaphore : mSemaphoresToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroySemaphore(vkDevice, semaphore, nullptr);
     }
+    mSemaphoresToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkSampler sampler) {
-        mSamplersToDelete.Enqueue(sampler, mDevice->GetPendingCommandSerial());
+    for (VkDescriptorPool pool : mDescriptorPoolsToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyDescriptorPool(vkDevice, pool, nullptr);
     }
+    mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkSemaphore semaphore) {
-        mSemaphoresToDelete.Enqueue(semaphore, mDevice->GetPendingCommandSerial());
+    for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
     }
+    mQueryPoolsToDelete.ClearUpTo(completedSerial);
 
-    void FencedDeleter::DeleteWhenUnused(VkShaderModule module) {
-        mShaderModulesToDelete.Enqueue(module, mDevice->GetPendingCommandSerial());
+    for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
+        mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
     }
-
-    void FencedDeleter::DeleteWhenUnused(VkSurfaceKHR surface) {
-        mSurfacesToDelete.Enqueue(surface, mDevice->GetPendingCommandSerial());
-    }
-
-    void FencedDeleter::DeleteWhenUnused(VkSwapchainKHR swapChain) {
-        mSwapChainsToDelete.Enqueue(swapChain, mDevice->GetPendingCommandSerial());
-    }
-
-    void FencedDeleter::Tick(ExecutionSerial completedSerial) {
-        VkDevice vkDevice = mDevice->GetVkDevice();
-        VkInstance instance = mDevice->GetVkInstance();
-
-        // Buffers and images must be deleted before memories because it is invalid to free memory
-        // that still have resources bound to it.
-        for (VkBuffer buffer : mBuffersToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyBuffer(vkDevice, buffer, nullptr);
-        }
-        mBuffersToDelete.ClearUpTo(completedSerial);
-        for (VkImage image : mImagesToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyImage(vkDevice, image, nullptr);
-        }
-        mImagesToDelete.ClearUpTo(completedSerial);
-
-        for (VkDeviceMemory memory : mMemoriesToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.FreeMemory(vkDevice, memory, nullptr);
-        }
-        mMemoriesToDelete.ClearUpTo(completedSerial);
-
-        for (VkPipelineLayout layout : mPipelineLayoutsToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyPipelineLayout(vkDevice, layout, nullptr);
-        }
-        mPipelineLayoutsToDelete.ClearUpTo(completedSerial);
-
-        for (VkRenderPass renderPass : mRenderPassesToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyRenderPass(vkDevice, renderPass, nullptr);
-        }
-        mRenderPassesToDelete.ClearUpTo(completedSerial);
-
-        for (VkFramebuffer framebuffer : mFramebuffersToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyFramebuffer(vkDevice, framebuffer, nullptr);
-        }
-        mFramebuffersToDelete.ClearUpTo(completedSerial);
-
-        for (VkImageView view : mImageViewsToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyImageView(vkDevice, view, nullptr);
-        }
-        mImageViewsToDelete.ClearUpTo(completedSerial);
-
-        for (VkShaderModule module : mShaderModulesToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyShaderModule(vkDevice, module, nullptr);
-        }
-        mShaderModulesToDelete.ClearUpTo(completedSerial);
-
-        for (VkPipeline pipeline : mPipelinesToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyPipeline(vkDevice, pipeline, nullptr);
-        }
-        mPipelinesToDelete.ClearUpTo(completedSerial);
-
-        // Vulkan swapchains must be destroyed before their corresponding VkSurface
-        for (VkSwapchainKHR swapChain : mSwapChainsToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroySwapchainKHR(vkDevice, swapChain, nullptr);
-        }
-        mSwapChainsToDelete.ClearUpTo(completedSerial);
-        for (VkSurfaceKHR surface : mSurfacesToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroySurfaceKHR(instance, surface, nullptr);
-        }
-        mSurfacesToDelete.ClearUpTo(completedSerial);
-
-        for (VkSemaphore semaphore : mSemaphoresToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroySemaphore(vkDevice, semaphore, nullptr);
-        }
-        mSemaphoresToDelete.ClearUpTo(completedSerial);
-
-        for (VkDescriptorPool pool : mDescriptorPoolsToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyDescriptorPool(vkDevice, pool, nullptr);
-        }
-        mDescriptorPoolsToDelete.ClearUpTo(completedSerial);
-
-        for (VkQueryPool pool : mQueryPoolsToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroyQueryPool(vkDevice, pool, nullptr);
-        }
-        mQueryPoolsToDelete.ClearUpTo(completedSerial);
-
-        for (VkSampler sampler : mSamplersToDelete.IterateUpTo(completedSerial)) {
-            mDevice->fn.DestroySampler(vkDevice, sampler, nullptr);
-        }
-        mSamplersToDelete.ClearUpTo(completedSerial);
-    }
+    mSamplersToDelete.ClearUpTo(completedSerial);
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/FencedDeleter.h b/src/dawn/native/vulkan/FencedDeleter.h
index aefea7e..4c90615 100644
--- a/src/dawn/native/vulkan/FencedDeleter.h
+++ b/src/dawn/native/vulkan/FencedDeleter.h
@@ -21,49 +21,49 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class FencedDeleter {
-      public:
-        explicit FencedDeleter(Device* device);
-        ~FencedDeleter();
+class FencedDeleter {
+  public:
+    explicit FencedDeleter(Device* device);
+    ~FencedDeleter();
 
-        void DeleteWhenUnused(VkBuffer buffer);
-        void DeleteWhenUnused(VkDescriptorPool pool);
-        void DeleteWhenUnused(VkDeviceMemory memory);
-        void DeleteWhenUnused(VkFramebuffer framebuffer);
-        void DeleteWhenUnused(VkImage image);
-        void DeleteWhenUnused(VkImageView view);
-        void DeleteWhenUnused(VkPipelineLayout layout);
-        void DeleteWhenUnused(VkRenderPass renderPass);
-        void DeleteWhenUnused(VkPipeline pipeline);
-        void DeleteWhenUnused(VkQueryPool querypool);
-        void DeleteWhenUnused(VkSampler sampler);
-        void DeleteWhenUnused(VkSemaphore semaphore);
-        void DeleteWhenUnused(VkShaderModule module);
-        void DeleteWhenUnused(VkSurfaceKHR surface);
-        void DeleteWhenUnused(VkSwapchainKHR swapChain);
+    void DeleteWhenUnused(VkBuffer buffer);
+    void DeleteWhenUnused(VkDescriptorPool pool);
+    void DeleteWhenUnused(VkDeviceMemory memory);
+    void DeleteWhenUnused(VkFramebuffer framebuffer);
+    void DeleteWhenUnused(VkImage image);
+    void DeleteWhenUnused(VkImageView view);
+    void DeleteWhenUnused(VkPipelineLayout layout);
+    void DeleteWhenUnused(VkRenderPass renderPass);
+    void DeleteWhenUnused(VkPipeline pipeline);
+    void DeleteWhenUnused(VkQueryPool querypool);
+    void DeleteWhenUnused(VkSampler sampler);
+    void DeleteWhenUnused(VkSemaphore semaphore);
+    void DeleteWhenUnused(VkShaderModule module);
+    void DeleteWhenUnused(VkSurfaceKHR surface);
+    void DeleteWhenUnused(VkSwapchainKHR swapChain);
 
-        void Tick(ExecutionSerial completedSerial);
+    void Tick(ExecutionSerial completedSerial);
 
-      private:
-        Device* mDevice = nullptr;
-        SerialQueue<ExecutionSerial, VkBuffer> mBuffersToDelete;
-        SerialQueue<ExecutionSerial, VkDescriptorPool> mDescriptorPoolsToDelete;
-        SerialQueue<ExecutionSerial, VkDeviceMemory> mMemoriesToDelete;
-        SerialQueue<ExecutionSerial, VkFramebuffer> mFramebuffersToDelete;
-        SerialQueue<ExecutionSerial, VkImage> mImagesToDelete;
-        SerialQueue<ExecutionSerial, VkImageView> mImageViewsToDelete;
-        SerialQueue<ExecutionSerial, VkPipeline> mPipelinesToDelete;
-        SerialQueue<ExecutionSerial, VkPipelineLayout> mPipelineLayoutsToDelete;
-        SerialQueue<ExecutionSerial, VkQueryPool> mQueryPoolsToDelete;
-        SerialQueue<ExecutionSerial, VkRenderPass> mRenderPassesToDelete;
-        SerialQueue<ExecutionSerial, VkSampler> mSamplersToDelete;
-        SerialQueue<ExecutionSerial, VkSemaphore> mSemaphoresToDelete;
-        SerialQueue<ExecutionSerial, VkShaderModule> mShaderModulesToDelete;
-        SerialQueue<ExecutionSerial, VkSurfaceKHR> mSurfacesToDelete;
-        SerialQueue<ExecutionSerial, VkSwapchainKHR> mSwapChainsToDelete;
-    };
+  private:
+    Device* mDevice = nullptr;
+    SerialQueue<ExecutionSerial, VkBuffer> mBuffersToDelete;
+    SerialQueue<ExecutionSerial, VkDescriptorPool> mDescriptorPoolsToDelete;
+    SerialQueue<ExecutionSerial, VkDeviceMemory> mMemoriesToDelete;
+    SerialQueue<ExecutionSerial, VkFramebuffer> mFramebuffersToDelete;
+    SerialQueue<ExecutionSerial, VkImage> mImagesToDelete;
+    SerialQueue<ExecutionSerial, VkImageView> mImageViewsToDelete;
+    SerialQueue<ExecutionSerial, VkPipeline> mPipelinesToDelete;
+    SerialQueue<ExecutionSerial, VkPipelineLayout> mPipelineLayoutsToDelete;
+    SerialQueue<ExecutionSerial, VkQueryPool> mQueryPoolsToDelete;
+    SerialQueue<ExecutionSerial, VkRenderPass> mRenderPassesToDelete;
+    SerialQueue<ExecutionSerial, VkSampler> mSamplersToDelete;
+    SerialQueue<ExecutionSerial, VkSemaphore> mSemaphoresToDelete;
+    SerialQueue<ExecutionSerial, VkShaderModule> mShaderModulesToDelete;
+    SerialQueue<ExecutionSerial, VkSurfaceKHR> mSurfacesToDelete;
+    SerialQueue<ExecutionSerial, VkSwapchainKHR> mSwapChainsToDelete;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/Forward.h b/src/dawn/native/vulkan/Forward.h
index e6ac30b..49c0c32 100644
--- a/src/dawn/native/vulkan/Forward.h
+++ b/src/dawn/native/vulkan/Forward.h
@@ -19,50 +19,50 @@
 
 namespace dawn::native::vulkan {
 
-    class Adapter;
-    class BindGroup;
-    class BindGroupLayout;
-    class Buffer;
-    class CommandBuffer;
-    class ComputePipeline;
-    class Device;
-    class PipelineLayout;
-    class QuerySet;
-    class Queue;
-    class RenderPipeline;
-    class ResourceHeap;
-    class Sampler;
-    class ShaderModule;
-    class StagingBuffer;
-    class SwapChain;
-    class Texture;
-    class TextureView;
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandBuffer;
+class ComputePipeline;
+class Device;
+class PipelineLayout;
+class QuerySet;
+class Queue;
+class RenderPipeline;
+class ResourceHeap;
+class Sampler;
+class ShaderModule;
+class StagingBuffer;
+class SwapChain;
+class Texture;
+class TextureView;
 
-    struct VulkanBackendTraits {
-        using AdapterType = Adapter;
-        using BindGroupType = BindGroup;
-        using BindGroupLayoutType = BindGroupLayout;
-        using BufferType = Buffer;
-        using CommandBufferType = CommandBuffer;
-        using ComputePipelineType = ComputePipeline;
-        using DeviceType = Device;
-        using PipelineLayoutType = PipelineLayout;
-        using QuerySetType = QuerySet;
-        using QueueType = Queue;
-        using RenderPipelineType = RenderPipeline;
-        using ResourceHeapType = ResourceHeap;
-        using SamplerType = Sampler;
-        using ShaderModuleType = ShaderModule;
-        using StagingBufferType = StagingBuffer;
-        using SwapChainType = SwapChain;
-        using TextureType = Texture;
-        using TextureViewType = TextureView;
-    };
+struct VulkanBackendTraits {
+    using AdapterType = Adapter;
+    using BindGroupType = BindGroup;
+    using BindGroupLayoutType = BindGroupLayout;
+    using BufferType = Buffer;
+    using CommandBufferType = CommandBuffer;
+    using ComputePipelineType = ComputePipeline;
+    using DeviceType = Device;
+    using PipelineLayoutType = PipelineLayout;
+    using QuerySetType = QuerySet;
+    using QueueType = Queue;
+    using RenderPipelineType = RenderPipeline;
+    using ResourceHeapType = ResourceHeap;
+    using SamplerType = Sampler;
+    using ShaderModuleType = ShaderModule;
+    using StagingBufferType = StagingBuffer;
+    using SwapChainType = SwapChain;
+    using TextureType = Texture;
+    using TextureViewType = TextureView;
+};
 
-    template <typename T>
-    auto ToBackend(T&& common) -> decltype(ToBackendBase<VulkanBackendTraits>(common)) {
-        return ToBackendBase<VulkanBackendTraits>(common);
-    }
+template <typename T>
+auto ToBackend(T&& common) -> decltype(ToBackendBase<VulkanBackendTraits>(common)) {
+    return ToBackendBase<VulkanBackendTraits>(common);
+}
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp b/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
index d73d3df..375db0e 100644
--- a/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
+++ b/src/dawn/native/vulkan/NativeSwapChainImplVk.cpp
@@ -22,204 +22,204 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
+namespace {
 
-        bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
-                                   bool turnOffVsync,
-                                   VkPresentModeKHR* presentMode) {
-            if (turnOffVsync) {
-                for (const auto& availablePresentMode : availablePresentModes) {
-                    if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
-                        *presentMode = availablePresentMode;
-                        return true;
-                    }
-                }
-                return false;
+bool chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes,
+                           bool turnOffVsync,
+                           VkPresentModeKHR* presentMode) {
+    if (turnOffVsync) {
+        for (const auto& availablePresentMode : availablePresentModes) {
+            if (availablePresentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
+                *presentMode = availablePresentMode;
+                return true;
             }
-
-            *presentMode = VK_PRESENT_MODE_FIFO_KHR;
-            return true;
         }
-
-        bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
-                                 NativeSwapChainImpl::ChosenConfig* config,
-                                 bool turnOffVsync) {
-            VkPresentModeKHR presentMode;
-            if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
-                return false;
-            }
-            // TODO(crbug.com/dawn/269): For now this is hardcoded to what works with one NVIDIA
-            // driver. Need to generalize
-            config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
-            config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
-            config->format = wgpu::TextureFormat::BGRA8Unorm;
-            config->minImageCount = 3;
-            // TODO(crbug.com/dawn/269): This is upside down compared to what we want, at least
-            // on Linux
-            config->preTransform = info.capabilities.currentTransform;
-            config->presentMode = presentMode;
-            config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
-
-            return true;
-        }
-    }  // anonymous namespace
-
-    NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
-        : mSurface(surface), mDevice(device) {
-        // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
-        // will return a correct result before a SwapChain is created.
-        UpdateSurfaceConfig();
+        return false;
     }
 
-    NativeSwapChainImpl::~NativeSwapChainImpl() {
-        if (mSwapChain != VK_NULL_HANDLE) {
-            mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
-            mSwapChain = VK_NULL_HANDLE;
-        }
-        if (mSurface != VK_NULL_HANDLE) {
-            mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
-            mSurface = VK_NULL_HANDLE;
-        }
+    *presentMode = VK_PRESENT_MODE_FIFO_KHR;
+    return true;
+}
+
+bool ChooseSurfaceConfig(const VulkanSurfaceInfo& info,
+                         NativeSwapChainImpl::ChosenConfig* config,
+                         bool turnOffVsync) {
+    VkPresentModeKHR presentMode;
+    if (!chooseSwapPresentMode(info.presentModes, turnOffVsync, &presentMode)) {
+        return false;
+    }
+    // TODO(crbug.com/dawn/269): For now this is hardcoded to what works with one NVIDIA
+    // driver. Need to generalize
+    config->nativeFormat = VK_FORMAT_B8G8R8A8_UNORM;
+    config->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+    config->format = wgpu::TextureFormat::BGRA8Unorm;
+    config->minImageCount = 3;
+    // TODO(crbug.com/dawn/269): This is upside down compared to what we want, at least
+    // on Linux
+    config->preTransform = info.capabilities.currentTransform;
+    config->presentMode = presentMode;
+    config->compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+
+    return true;
+}
+}  // anonymous namespace
+
+NativeSwapChainImpl::NativeSwapChainImpl(Device* device, VkSurfaceKHR surface)
+    : mSurface(surface), mDevice(device) {
+    // Call this immediately, so that BackendBinding::GetPreferredSwapChainTextureFormat
+    // will return a correct result before a SwapChain is created.
+    UpdateSurfaceConfig();
+}
+
+NativeSwapChainImpl::~NativeSwapChainImpl() {
+    if (mSwapChain != VK_NULL_HANDLE) {
+        mDevice->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+        mSwapChain = VK_NULL_HANDLE;
+    }
+    if (mSurface != VK_NULL_HANDLE) {
+        mDevice->GetFencedDeleter()->DeleteWhenUnused(mSurface);
+        mSurface = VK_NULL_HANDLE;
+    }
+}
+
+void NativeSwapChainImpl::UpdateSurfaceConfig() {
+    if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
+                               &mInfo)) {
+        ASSERT(false);
     }
 
-    void NativeSwapChainImpl::UpdateSurfaceConfig() {
-        if (mDevice->ConsumedError(GatherSurfaceInfo(*ToBackend(mDevice->GetAdapter()), mSurface),
-                                   &mInfo)) {
-            ASSERT(false);
-        }
+    if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
+        ASSERT(false);
+    }
+}
 
-        if (!ChooseSurfaceConfig(mInfo, &mConfig, mDevice->IsToggleEnabled(Toggle::TurnOffVsync))) {
-            ASSERT(false);
-        }
+void NativeSwapChainImpl::Init(DawnWSIContextVulkan* /*context*/) {
+    UpdateSurfaceConfig();
+}
+
+DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
+                                                  WGPUTextureUsage usage,
+                                                  uint32_t width,
+                                                  uint32_t height) {
+    UpdateSurfaceConfig();
+
+    ASSERT(mInfo.capabilities.minImageExtent.width <= width);
+    ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
+    ASSERT(mInfo.capabilities.minImageExtent.height <= height);
+    ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
+
+    ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
+    // TODO(crbug.com/dawn/269): need to check usage works too
+
+    // Create the swapchain with the configuration we chose
+    VkSwapchainKHR oldSwapchain = mSwapChain;
+    VkSwapchainCreateInfoKHR createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.surface = mSurface;
+    createInfo.minImageCount = mConfig.minImageCount;
+    createInfo.imageFormat = mConfig.nativeFormat;
+    createInfo.imageColorSpace = mConfig.colorSpace;
+    createInfo.imageExtent.width = width;
+    createInfo.imageExtent.height = height;
+    createInfo.imageArrayLayers = 1;
+    createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
+                                             mDevice->GetValidInternalFormat(mConfig.format));
+    createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    createInfo.queueFamilyIndexCount = 0;
+    createInfo.pQueueFamilyIndices = nullptr;
+    createInfo.preTransform = mConfig.preTransform;
+    createInfo.compositeAlpha = mConfig.compositeAlpha;
+    createInfo.presentMode = mConfig.presentMode;
+    createInfo.clipped = false;
+    createInfo.oldSwapchain = oldSwapchain;
+
+    if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
+                                       &*mSwapChain) != VK_SUCCESS) {
+        ASSERT(false);
     }
 
-    void NativeSwapChainImpl::Init(DawnWSIContextVulkan* /*context*/) {
-        UpdateSurfaceConfig();
+    // Gather the swapchain's images. Implementations are allowed to return more images than the
+    // number we asked for.
+    uint32_t count = 0;
+    if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count, nullptr) !=
+        VK_SUCCESS) {
+        ASSERT(false);
     }
 
-    DawnSwapChainError NativeSwapChainImpl::Configure(WGPUTextureFormat format,
-                                                      WGPUTextureUsage usage,
-                                                      uint32_t width,
-                                                      uint32_t height) {
-        UpdateSurfaceConfig();
+    ASSERT(count >= mConfig.minImageCount);
+    mSwapChainImages.resize(count);
+    if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
+                                          AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
+        ASSERT(false);
+    }
 
-        ASSERT(mInfo.capabilities.minImageExtent.width <= width);
-        ASSERT(mInfo.capabilities.maxImageExtent.width >= width);
-        ASSERT(mInfo.capabilities.minImageExtent.height <= height);
-        ASSERT(mInfo.capabilities.maxImageExtent.height >= height);
+    if (oldSwapchain != VK_NULL_HANDLE) {
+        mDevice->GetFencedDeleter()->DeleteWhenUnused(oldSwapchain);
+    }
 
-        ASSERT(format == static_cast<WGPUTextureFormat>(GetPreferredFormat()));
-        // TODO(crbug.com/dawn/269): need to check usage works too
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-        // Create the swapchain with the configuration we chose
-        VkSwapchainKHR oldSwapchain = mSwapChain;
-        VkSwapchainCreateInfoKHR createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+    // Transiently create a semaphore that will be signaled when the presentation engine is done
+    // with the swapchain image. Further operations on the image will wait for this semaphore.
+    VkSemaphore semaphore = VK_NULL_HANDLE;
+    {
+        VkSemaphoreCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
         createInfo.pNext = nullptr;
         createInfo.flags = 0;
-        createInfo.surface = mSurface;
-        createInfo.minImageCount = mConfig.minImageCount;
-        createInfo.imageFormat = mConfig.nativeFormat;
-        createInfo.imageColorSpace = mConfig.colorSpace;
-        createInfo.imageExtent.width = width;
-        createInfo.imageExtent.height = height;
-        createInfo.imageArrayLayers = 1;
-        createInfo.imageUsage = VulkanImageUsage(static_cast<wgpu::TextureUsage>(usage),
-                                                 mDevice->GetValidInternalFormat(mConfig.format));
-        createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
-        createInfo.queueFamilyIndexCount = 0;
-        createInfo.pQueueFamilyIndices = nullptr;
-        createInfo.preTransform = mConfig.preTransform;
-        createInfo.compositeAlpha = mConfig.compositeAlpha;
-        createInfo.presentMode = mConfig.presentMode;
-        createInfo.clipped = false;
-        createInfo.oldSwapchain = oldSwapchain;
-
-        if (mDevice->fn.CreateSwapchainKHR(mDevice->GetVkDevice(), &createInfo, nullptr,
-                                           &*mSwapChain) != VK_SUCCESS) {
+        if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
+                                        &*semaphore) != VK_SUCCESS) {
             ASSERT(false);
         }
-
-        // Gather the swapchain's images. Implementations are allowed to return more images than the
-        // number we asked for.
-        uint32_t count = 0;
-        if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
-                                              nullptr) != VK_SUCCESS) {
-            ASSERT(false);
-        }
-
-        ASSERT(count >= mConfig.minImageCount);
-        mSwapChainImages.resize(count);
-        if (mDevice->fn.GetSwapchainImagesKHR(mDevice->GetVkDevice(), mSwapChain, &count,
-                                              AsVkArray(mSwapChainImages.data())) != VK_SUCCESS) {
-            ASSERT(false);
-        }
-
-        if (oldSwapchain != VK_NULL_HANDLE) {
-            mDevice->GetFencedDeleter()->DeleteWhenUnused(oldSwapchain);
-        }
-
-        return DAWN_SWAP_CHAIN_NO_ERROR;
     }
 
-    DawnSwapChainError NativeSwapChainImpl::GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
-        // Transiently create a semaphore that will be signaled when the presentation engine is done
-        // with the swapchain image. Further operations on the image will wait for this semaphore.
-        VkSemaphore semaphore = VK_NULL_HANDLE;
-        {
-            VkSemaphoreCreateInfo createInfo;
-            createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-            createInfo.pNext = nullptr;
-            createInfo.flags = 0;
-            if (mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &createInfo, nullptr,
-                                            &*semaphore) != VK_SUCCESS) {
-                ASSERT(false);
-            }
-        }
+    if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
+                                        std::numeric_limits<uint64_t>::max(), semaphore, VkFence{},
+                                        &mLastImageIndex) != VK_SUCCESS) {
+        ASSERT(false);
+    }
 
-        if (mDevice->fn.AcquireNextImageKHR(mDevice->GetVkDevice(), mSwapChain,
-                                            std::numeric_limits<uint64_t>::max(), semaphore,
-                                            VkFence{}, &mLastImageIndex) != VK_SUCCESS) {
-            ASSERT(false);
-        }
-
-        nextTexture->texture.u64 =
+    nextTexture->texture.u64 =
 #if defined(DAWN_PLATFORM_64_BIT)
-            reinterpret_cast<uint64_t>
+        reinterpret_cast<uint64_t>
 #endif
-            (*mSwapChainImages[mLastImageIndex]);
-        mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+        (*mSwapChainImages[mLastImageIndex]);
+    mDevice->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
 
-        return DAWN_SWAP_CHAIN_NO_ERROR;
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
+
+DawnSwapChainError NativeSwapChainImpl::Present() {
+    // This assumes that the image has already been transitioned to the PRESENT layout and
+    // writes were made available to the stage.
+
+    // Assuming that the present queue is the same as the graphics queue, the proper
+    // synchronization has already been done on the queue so we don't need to wait on any
+    // semaphores.
+    VkPresentInfoKHR presentInfo;
+    presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+    presentInfo.pNext = nullptr;
+    presentInfo.waitSemaphoreCount = 0;
+    presentInfo.pWaitSemaphores = nullptr;
+    presentInfo.swapchainCount = 1;
+    presentInfo.pSwapchains = &*mSwapChain;
+    presentInfo.pImageIndices = &mLastImageIndex;
+    presentInfo.pResults = nullptr;
+
+    VkQueue queue = mDevice->GetQueue();
+    if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
+        ASSERT(false);
     }
 
-    DawnSwapChainError NativeSwapChainImpl::Present() {
-        // This assumes that the image has already been transitioned to the PRESENT layout and
-        // writes were made available to the stage.
+    return DAWN_SWAP_CHAIN_NO_ERROR;
+}
 
-        // Assuming that the present queue is the same as the graphics queue, the proper
-        // synchronization has already been done on the queue so we don't need to wait on any
-        // semaphores.
-        VkPresentInfoKHR presentInfo;
-        presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
-        presentInfo.pNext = nullptr;
-        presentInfo.waitSemaphoreCount = 0;
-        presentInfo.pWaitSemaphores = nullptr;
-        presentInfo.swapchainCount = 1;
-        presentInfo.pSwapchains = &*mSwapChain;
-        presentInfo.pImageIndices = &mLastImageIndex;
-        presentInfo.pResults = nullptr;
-
-        VkQueue queue = mDevice->GetQueue();
-        if (mDevice->fn.QueuePresentKHR(queue, &presentInfo) != VK_SUCCESS) {
-            ASSERT(false);
-        }
-
-        return DAWN_SWAP_CHAIN_NO_ERROR;
-    }
-
-    wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
-        return mConfig.format;
-    }
+wgpu::TextureFormat NativeSwapChainImpl::GetPreferredFormat() const {
+    return mConfig.format;
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/NativeSwapChainImplVk.h b/src/dawn/native/vulkan/NativeSwapChainImplVk.h
index f639e68..db22472 100644
--- a/src/dawn/native/vulkan/NativeSwapChainImplVk.h
+++ b/src/dawn/native/vulkan/NativeSwapChainImplVk.h
@@ -23,50 +23,50 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class NativeSwapChainImpl {
-      public:
-        using WSIContext = DawnWSIContextVulkan;
+class NativeSwapChainImpl {
+  public:
+    using WSIContext = DawnWSIContextVulkan;
 
-        NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
-        ~NativeSwapChainImpl();
+    NativeSwapChainImpl(Device* device, VkSurfaceKHR surface);
+    ~NativeSwapChainImpl();
 
-        void Init(DawnWSIContextVulkan* context);
-        DawnSwapChainError Configure(WGPUTextureFormat format,
-                                     WGPUTextureUsage,
-                                     uint32_t width,
-                                     uint32_t height);
-        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
-        DawnSwapChainError Present();
+    void Init(DawnWSIContextVulkan* context);
+    DawnSwapChainError Configure(WGPUTextureFormat format,
+                                 WGPUTextureUsage,
+                                 uint32_t width,
+                                 uint32_t height);
+    DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture);
+    DawnSwapChainError Present();
 
-        wgpu::TextureFormat GetPreferredFormat() const;
+    wgpu::TextureFormat GetPreferredFormat() const;
 
-        struct ChosenConfig {
-            VkFormat nativeFormat;
-            wgpu::TextureFormat format;
-            VkColorSpaceKHR colorSpace;
-            VkSurfaceTransformFlagBitsKHR preTransform;
-            uint32_t minImageCount;
-            VkPresentModeKHR presentMode;
-            VkCompositeAlphaFlagBitsKHR compositeAlpha;
-        };
-
-      private:
-        void UpdateSurfaceConfig();
-
-        VkSurfaceKHR mSurface = VK_NULL_HANDLE;
-        VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
-        std::vector<VkImage> mSwapChainImages;
-        uint32_t mLastImageIndex = 0;
-
-        VulkanSurfaceInfo mInfo;
-
-        ChosenConfig mConfig;
-
-        Device* mDevice = nullptr;
+    struct ChosenConfig {
+        VkFormat nativeFormat;
+        wgpu::TextureFormat format;
+        VkColorSpaceKHR colorSpace;
+        VkSurfaceTransformFlagBitsKHR preTransform;
+        uint32_t minImageCount;
+        VkPresentModeKHR presentMode;
+        VkCompositeAlphaFlagBitsKHR compositeAlpha;
     };
 
+  private:
+    void UpdateSurfaceConfig();
+
+    VkSurfaceKHR mSurface = VK_NULL_HANDLE;
+    VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
+    std::vector<VkImage> mSwapChainImages;
+    uint32_t mLastImageIndex = 0;
+
+    VulkanSurfaceInfo mInfo;
+
+    ChosenConfig mConfig;
+
+    Device* mDevice = nullptr;
+};
+
 }  // namespace dawn::native::vulkan
 
 #endif  // SRC_DAWN_NATIVE_VULKAN_NATIVESWAPCHAINIMPLVK_H_
diff --git a/src/dawn/native/vulkan/PipelineLayoutVk.cpp b/src/dawn/native/vulkan/PipelineLayoutVk.cpp
index 560731b..222a545 100644
--- a/src/dawn/native/vulkan/PipelineLayoutVk.cpp
+++ b/src/dawn/native/vulkan/PipelineLayoutVk.cpp
@@ -23,67 +23,67 @@
 
 namespace dawn::native::vulkan {
 
-    // static
-    ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
-        Device* device,
-        const PipelineLayoutDescriptor* descriptor) {
-        Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
-        DAWN_TRY(layout->Initialize());
-        return layout;
+// static
+ResultOrError<Ref<PipelineLayout>> PipelineLayout::Create(
+    Device* device,
+    const PipelineLayoutDescriptor* descriptor) {
+    Ref<PipelineLayout> layout = AcquireRef(new PipelineLayout(device, descriptor));
+    DAWN_TRY(layout->Initialize());
+    return layout;
+}
+
+MaybeError PipelineLayout::Initialize() {
+    // Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
+    // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
+    // this constraints at the Dawn level?
+    uint32_t numSetLayouts = 0;
+    std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
+    std::array<const CachedObject*, kMaxBindGroups> cachedObjects;
+    for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
+        const BindGroupLayoutBase* bindGroupLayout = GetBindGroupLayout(setIndex);
+        setLayouts[numSetLayouts] = ToBackend(bindGroupLayout)->GetHandle();
+        cachedObjects[numSetLayouts] = bindGroupLayout;
+        numSetLayouts++;
     }
 
-    MaybeError PipelineLayout::Initialize() {
-        // Compute the array of VkDescriptorSetLayouts that will be chained in the create info.
-        // TODO(crbug.com/dawn/277) Vulkan doesn't allow holes in this array, should we expose
-        // this constraints at the Dawn level?
-        uint32_t numSetLayouts = 0;
-        std::array<VkDescriptorSetLayout, kMaxBindGroups> setLayouts;
-        std::array<const CachedObject*, kMaxBindGroups> cachedObjects;
-        for (BindGroupIndex setIndex : IterateBitSet(GetBindGroupLayoutsMask())) {
-            const BindGroupLayoutBase* bindGroupLayout = GetBindGroupLayout(setIndex);
-            setLayouts[numSetLayouts] = ToBackend(bindGroupLayout)->GetHandle();
-            cachedObjects[numSetLayouts] = bindGroupLayout;
-            numSetLayouts++;
-        }
+    VkPipelineLayoutCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.setLayoutCount = numSetLayouts;
+    createInfo.pSetLayouts = AsVkArray(setLayouts.data());
+    createInfo.pushConstantRangeCount = 0;
+    createInfo.pPushConstantRanges = nullptr;
 
-        VkPipelineLayoutCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.setLayoutCount = numSetLayouts;
-        createInfo.pSetLayouts = AsVkArray(setLayouts.data());
-        createInfo.pushConstantRangeCount = 0;
-        createInfo.pPushConstantRanges = nullptr;
+    // Record cache key information now since the createInfo is not stored.
+    GetCacheKey()->RecordIterable(cachedObjects.data(), numSetLayouts).Record(createInfo);
 
-        // Record cache key information now since the createInfo is not stored.
-        GetCacheKey()->RecordIterable(cachedObjects.data(), numSetLayouts).Record(createInfo);
+    Device* device = ToBackend(GetDevice());
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+        "CreatePipelineLayout"));
 
-        Device* device = ToBackend(GetDevice());
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.CreatePipelineLayout(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
-            "CreatePipelineLayout"));
+    SetLabelImpl();
 
-        SetLabelImpl();
+    return {};
+}
 
-        return {};
+PipelineLayout::~PipelineLayout() = default;
+
+void PipelineLayout::DestroyImpl() {
+    PipelineLayoutBase::DestroyImpl();
+    if (mHandle != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
     }
+}
 
-    PipelineLayout::~PipelineLayout() = default;
+VkPipelineLayout PipelineLayout::GetHandle() const {
+    return mHandle;
+}
 
-    void PipelineLayout::DestroyImpl() {
-        PipelineLayoutBase::DestroyImpl();
-        if (mHandle != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            mHandle = VK_NULL_HANDLE;
-        }
-    }
-
-    VkPipelineLayout PipelineLayout::GetHandle() const {
-        return mHandle;
-    }
-
-    void PipelineLayout::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_PipelineLayout", GetLabel());
-    }
+void PipelineLayout::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_PipelineLayout", GetLabel());
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/PipelineLayoutVk.h b/src/dawn/native/vulkan/PipelineLayoutVk.h
index 26f9f4e..ca157f8 100644
--- a/src/dawn/native/vulkan/PipelineLayoutVk.h
+++ b/src/dawn/native/vulkan/PipelineLayoutVk.h
@@ -22,28 +22,27 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class PipelineLayout final : public PipelineLayoutBase {
-      public:
-        static ResultOrError<Ref<PipelineLayout>> Create(
-            Device* device,
-            const PipelineLayoutDescriptor* descriptor);
+class PipelineLayout final : public PipelineLayoutBase {
+  public:
+    static ResultOrError<Ref<PipelineLayout>> Create(Device* device,
+                                                     const PipelineLayoutDescriptor* descriptor);
 
-        VkPipelineLayout GetHandle() const;
+    VkPipelineLayout GetHandle() const;
 
-      private:
-        ~PipelineLayout() override;
-        void DestroyImpl() override;
+  private:
+    ~PipelineLayout() override;
+    void DestroyImpl() override;
 
-        using PipelineLayoutBase::PipelineLayoutBase;
-        MaybeError Initialize();
+    using PipelineLayoutBase::PipelineLayoutBase;
+    MaybeError Initialize();
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-        VkPipelineLayout mHandle = VK_NULL_HANDLE;
-    };
+    VkPipelineLayout mHandle = VK_NULL_HANDLE;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/QuerySetVk.cpp b/src/dawn/native/vulkan/QuerySetVk.cpp
index 45d153c..aa1aac2 100644
--- a/src/dawn/native/vulkan/QuerySetVk.cpp
+++ b/src/dawn/native/vulkan/QuerySetVk.cpp
@@ -25,95 +25,91 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
-        VkQueryType VulkanQueryType(wgpu::QueryType type) {
-            switch (type) {
-                case wgpu::QueryType::Occlusion:
-                    return VK_QUERY_TYPE_OCCLUSION;
-                case wgpu::QueryType::PipelineStatistics:
-                    return VK_QUERY_TYPE_PIPELINE_STATISTICS;
-                case wgpu::QueryType::Timestamp:
-                    return VK_QUERY_TYPE_TIMESTAMP;
-            }
-            UNREACHABLE();
-        }
-
-        VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
-            std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
-            VkQueryPipelineStatisticFlags pipelineStatistics = 0;
-            for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
-                switch (pipelineStatisticsSet[i]) {
-                    case wgpu::PipelineStatisticName::ClipperInvocations:
-                        pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
-                        break;
-                    case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
-                        pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
-                        break;
-                    case wgpu::PipelineStatisticName::ComputeShaderInvocations:
-                        pipelineStatistics |=
-                            VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
-                        break;
-                    case wgpu::PipelineStatisticName::FragmentShaderInvocations:
-                        pipelineStatistics |=
-                            VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
-                        break;
-                    case wgpu::PipelineStatisticName::VertexShaderInvocations:
-                        pipelineStatistics |=
-                            VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
-                        break;
-                }
-            }
-
-            return pipelineStatistics;
-        }
-    }  // anonymous namespace
-
-    // static
-    ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
-                                                  const QuerySetDescriptor* descriptor) {
-        Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
-        DAWN_TRY(queryset->Initialize());
-        return queryset;
+namespace {
+VkQueryType VulkanQueryType(wgpu::QueryType type) {
+    switch (type) {
+        case wgpu::QueryType::Occlusion:
+            return VK_QUERY_TYPE_OCCLUSION;
+        case wgpu::QueryType::PipelineStatistics:
+            return VK_QUERY_TYPE_PIPELINE_STATISTICS;
+        case wgpu::QueryType::Timestamp:
+            return VK_QUERY_TYPE_TIMESTAMP;
     }
+    UNREACHABLE();
+}
 
-    MaybeError QuerySet::Initialize() {
-        VkQueryPoolCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
-        createInfo.pNext = NULL;
-        createInfo.flags = 0;
-        createInfo.queryType = VulkanQueryType(GetQueryType());
-        createInfo.queryCount = std::max(GetQueryCount(), uint32_t(1u));
-        if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
-            createInfo.pipelineStatistics =
-                VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
-        }
-
-        Device* device = ToBackend(GetDevice());
-        DAWN_TRY(CheckVkOOMThenSuccess(
-            device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
-            "vkCreateQueryPool"));
-
-        SetLabelImpl();
-
-        return {};
-    }
-
-    VkQueryPool QuerySet::GetHandle() const {
-        return mHandle;
-    }
-
-    QuerySet::~QuerySet() = default;
-
-    void QuerySet::DestroyImpl() {
-        QuerySetBase::DestroyImpl();
-        if (mHandle != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            mHandle = VK_NULL_HANDLE;
+VkQueryPipelineStatisticFlags VulkanQueryPipelineStatisticFlags(
+    std::vector<wgpu::PipelineStatisticName> pipelineStatisticsSet) {
+    VkQueryPipelineStatisticFlags pipelineStatistics = 0;
+    for (size_t i = 0; i < pipelineStatisticsSet.size(); ++i) {
+        switch (pipelineStatisticsSet[i]) {
+            case wgpu::PipelineStatisticName::ClipperInvocations:
+                pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
+                break;
+            case wgpu::PipelineStatisticName::ClipperPrimitivesOut:
+                pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT;
+                break;
+            case wgpu::PipelineStatisticName::ComputeShaderInvocations:
+                pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT;
+                break;
+            case wgpu::PipelineStatisticName::FragmentShaderInvocations:
+                pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
+                break;
+            case wgpu::PipelineStatisticName::VertexShaderInvocations:
+                pipelineStatistics |= VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
+                break;
         }
     }
 
-    void QuerySet::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_QuerySet", GetLabel());
+    return pipelineStatistics;
+}
+}  // anonymous namespace
+
+// static
+ResultOrError<Ref<QuerySet>> QuerySet::Create(Device* device,
+                                              const QuerySetDescriptor* descriptor) {
+    Ref<QuerySet> queryset = AcquireRef(new QuerySet(device, descriptor));
+    DAWN_TRY(queryset->Initialize());
+    return queryset;
+}
+
+MaybeError QuerySet::Initialize() {
+    VkQueryPoolCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
+    createInfo.pNext = NULL;
+    createInfo.flags = 0;
+    createInfo.queryType = VulkanQueryType(GetQueryType());
+    createInfo.queryCount = std::max(GetQueryCount(), uint32_t(1u));
+    if (GetQueryType() == wgpu::QueryType::PipelineStatistics) {
+        createInfo.pipelineStatistics = VulkanQueryPipelineStatisticFlags(GetPipelineStatistics());
     }
 
+    Device* device = ToBackend(GetDevice());
+    DAWN_TRY(CheckVkOOMThenSuccess(
+        device->fn.CreateQueryPool(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+        "vkCreateQueryPool"));
+
+    SetLabelImpl();
+
+    return {};
+}
+
+VkQueryPool QuerySet::GetHandle() const {
+    return mHandle;
+}
+
+QuerySet::~QuerySet() = default;
+
+void QuerySet::DestroyImpl() {
+    QuerySetBase::DestroyImpl();
+    if (mHandle != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
+    }
+}
+
+void QuerySet::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_QuerySet", GetLabel());
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/QuerySetVk.h b/src/dawn/native/vulkan/QuerySetVk.h
index d0a3c93..dfee351 100644
--- a/src/dawn/native/vulkan/QuerySetVk.h
+++ b/src/dawn/native/vulkan/QuerySetVk.h
@@ -21,26 +21,26 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class QuerySet final : public QuerySetBase {
-      public:
-        static ResultOrError<Ref<QuerySet>> Create(Device* device,
-                                                   const QuerySetDescriptor* descriptor);
+class QuerySet final : public QuerySetBase {
+  public:
+    static ResultOrError<Ref<QuerySet>> Create(Device* device,
+                                               const QuerySetDescriptor* descriptor);
 
-        VkQueryPool GetHandle() const;
+    VkQueryPool GetHandle() const;
 
-      private:
-        ~QuerySet() override;
-        using QuerySetBase::QuerySetBase;
-        MaybeError Initialize();
+  private:
+    ~QuerySet() override;
+    using QuerySetBase::QuerySetBase;
+    MaybeError Initialize();
 
-        // Dawn API
-        void DestroyImpl() override;
-        void SetLabelImpl() override;
+    // Dawn API
+    void DestroyImpl() override;
+    void SetLabelImpl() override;
 
-        VkQueryPool mHandle = VK_NULL_HANDLE;
-    };
+    VkQueryPool mHandle = VK_NULL_HANDLE;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/QueueVk.cpp b/src/dawn/native/vulkan/QueueVk.cpp
index 2166be1..b0e40a7 100644
--- a/src/dawn/native/vulkan/QueueVk.cpp
+++ b/src/dawn/native/vulkan/QueueVk.cpp
@@ -28,47 +28,43 @@
 
 namespace dawn::native::vulkan {
 
-    // static
-    Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
-        Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
-        queue->Initialize();
-        return queue;
+// static
+Ref<Queue> Queue::Create(Device* device, const QueueDescriptor* descriptor) {
+    Ref<Queue> queue = AcquireRef(new Queue(device, descriptor));
+    queue->Initialize();
+    return queue;
+}
+
+Queue::Queue(Device* device, const QueueDescriptor* descriptor) : QueueBase(device, descriptor) {}
+
+Queue::~Queue() {}
+
+void Queue::Initialize() {
+    SetLabelImpl();
+}
+
+MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
+    Device* device = ToBackend(GetDevice());
+
+    DAWN_TRY(device->Tick());
+
+    TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
+    CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+    for (uint32_t i = 0; i < commandCount; ++i) {
+        DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
     }
+    TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
 
-    Queue::Queue(Device* device, const QueueDescriptor* descriptor)
-        : QueueBase(device, descriptor) {
-    }
+    DAWN_TRY(device->SubmitPendingCommands());
 
-    Queue::~Queue() {
-    }
+    return {};
+}
 
-    void Queue::Initialize() {
-        SetLabelImpl();
-    }
-
-    MaybeError Queue::SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) {
-        Device* device = ToBackend(GetDevice());
-
-        DAWN_TRY(device->Tick());
-
-        TRACE_EVENT_BEGIN0(GetDevice()->GetPlatform(), Recording,
-                           "CommandBufferVk::RecordCommands");
-        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-        for (uint32_t i = 0; i < commandCount; ++i) {
-            DAWN_TRY(ToBackend(commands[i])->RecordCommands(recordingContext));
-        }
-        TRACE_EVENT_END0(GetDevice()->GetPlatform(), Recording, "CommandBufferVk::RecordCommands");
-
-        DAWN_TRY(device->SubmitPendingCommands());
-
-        return {};
-    }
-
-    void Queue::SetLabelImpl() {
-        Device* device = ToBackend(GetDevice());
-        // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
-        // so it doesn't always change the default queue's label.
-        SetDebugName(device, VK_OBJECT_TYPE_QUEUE, device->GetQueue(), "Dawn_Queue", GetLabel());
-    }
+void Queue::SetLabelImpl() {
+    Device* device = ToBackend(GetDevice());
+    // TODO(crbug.com/dawn/1344): When we start using multiple queues this needs to be adjusted
+    // so it doesn't always change the default queue's label.
+    SetDebugName(device, VK_OBJECT_TYPE_QUEUE, device->GetQueue(), "Dawn_Queue", GetLabel());
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/QueueVk.h b/src/dawn/native/vulkan/QueueVk.h
index 2bca3be..470efd7 100644
--- a/src/dawn/native/vulkan/QueueVk.h
+++ b/src/dawn/native/vulkan/QueueVk.h
@@ -19,24 +19,24 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class Queue final : public QueueBase {
-      public:
-        static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
+class Queue final : public QueueBase {
+  public:
+    static Ref<Queue> Create(Device* device, const QueueDescriptor* descriptor);
 
-      private:
-        Queue(Device* device, const QueueDescriptor* descriptor);
-        ~Queue() override;
-        using QueueBase::QueueBase;
+  private:
+    Queue(Device* device, const QueueDescriptor* descriptor);
+    ~Queue() override;
+    using QueueBase::QueueBase;
 
-        void Initialize();
+    void Initialize();
 
-        MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
+    MaybeError SubmitImpl(uint32_t commandCount, CommandBufferBase* const* commands) override;
 
-        // Dawn API
-        void SetLabelImpl() override;
-    };
+    // Dawn API
+    void SetLabelImpl() override;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/RenderPassCache.cpp b/src/dawn/native/vulkan/RenderPassCache.cpp
index f1735ee..f948a4e 100644
--- a/src/dawn/native/vulkan/RenderPassCache.cpp
+++ b/src/dawn/native/vulkan/RenderPassCache.cpp
@@ -22,281 +22,279 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
-        VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
-            switch (op) {
-                case wgpu::LoadOp::Load:
-                    return VK_ATTACHMENT_LOAD_OP_LOAD;
-                case wgpu::LoadOp::Clear:
-                    return VK_ATTACHMENT_LOAD_OP_CLEAR;
-                case wgpu::LoadOp::Undefined:
-                    UNREACHABLE();
-                    break;
-            }
+namespace {
+VkAttachmentLoadOp VulkanAttachmentLoadOp(wgpu::LoadOp op) {
+    switch (op) {
+        case wgpu::LoadOp::Load:
+            return VK_ATTACHMENT_LOAD_OP_LOAD;
+        case wgpu::LoadOp::Clear:
+            return VK_ATTACHMENT_LOAD_OP_CLEAR;
+        case wgpu::LoadOp::Undefined:
             UNREACHABLE();
-        }
+            break;
+    }
+    UNREACHABLE();
+}
 
-        VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
-            // TODO(crbug.com/dawn/485): return STORE_OP_STORE_NONE_QCOM if the device has required
-            // extension.
-            switch (op) {
-                case wgpu::StoreOp::Store:
-                    return VK_ATTACHMENT_STORE_OP_STORE;
-                case wgpu::StoreOp::Discard:
-                    return VK_ATTACHMENT_STORE_OP_DONT_CARE;
-                case wgpu::StoreOp::Undefined:
-                    UNREACHABLE();
-                    break;
-            }
+VkAttachmentStoreOp VulkanAttachmentStoreOp(wgpu::StoreOp op) {
+    // TODO(crbug.com/dawn/485): return STORE_OP_STORE_NONE_QCOM if the device has required
+    // extension.
+    switch (op) {
+        case wgpu::StoreOp::Store:
+            return VK_ATTACHMENT_STORE_OP_STORE;
+        case wgpu::StoreOp::Discard:
+            return VK_ATTACHMENT_STORE_OP_DONT_CARE;
+        case wgpu::StoreOp::Undefined:
             UNREACHABLE();
-        }
-    }  // anonymous namespace
+            break;
+    }
+    UNREACHABLE();
+}
+}  // anonymous namespace
 
-    // RenderPassCacheQuery
+// RenderPassCacheQuery
 
-    void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
-                                        wgpu::TextureFormat format,
-                                        wgpu::LoadOp loadOp,
-                                        wgpu::StoreOp storeOp,
-                                        bool hasResolveTarget) {
-        colorMask.set(index);
-        colorFormats[index] = format;
-        colorLoadOp[index] = loadOp;
-        colorStoreOp[index] = storeOp;
-        resolveTargetMask[index] = hasResolveTarget;
+void RenderPassCacheQuery::SetColor(ColorAttachmentIndex index,
+                                    wgpu::TextureFormat format,
+                                    wgpu::LoadOp loadOp,
+                                    wgpu::StoreOp storeOp,
+                                    bool hasResolveTarget) {
+    colorMask.set(index);
+    colorFormats[index] = format;
+    colorLoadOp[index] = loadOp;
+    colorStoreOp[index] = storeOp;
+    resolveTargetMask[index] = hasResolveTarget;
+}
+
+void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
+                                           wgpu::LoadOp depthLoadOpIn,
+                                           wgpu::StoreOp depthStoreOpIn,
+                                           wgpu::LoadOp stencilLoadOpIn,
+                                           wgpu::StoreOp stencilStoreOpIn,
+                                           bool readOnly) {
+    hasDepthStencil = true;
+    depthStencilFormat = format;
+    depthLoadOp = depthLoadOpIn;
+    depthStoreOp = depthStoreOpIn;
+    stencilLoadOp = stencilLoadOpIn;
+    stencilStoreOp = stencilStoreOpIn;
+    readOnlyDepthStencil = readOnly;
+}
+
+void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
+    this->sampleCount = sampleCount;
+}
+
+// RenderPassCache
+
+RenderPassCache::RenderPassCache(Device* device) : mDevice(device) {}
+
+RenderPassCache::~RenderPassCache() {
+    std::lock_guard<std::mutex> lock(mMutex);
+    for (auto [_, renderPass] : mCache) {
+        mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), renderPass, nullptr);
     }
 
-    void RenderPassCacheQuery::SetDepthStencil(wgpu::TextureFormat format,
-                                               wgpu::LoadOp depthLoadOpIn,
-                                               wgpu::StoreOp depthStoreOpIn,
-                                               wgpu::LoadOp stencilLoadOpIn,
-                                               wgpu::StoreOp stencilStoreOpIn,
-                                               bool readOnly) {
-        hasDepthStencil = true;
-        depthStencilFormat = format;
-        depthLoadOp = depthLoadOpIn;
-        depthStoreOp = depthStoreOpIn;
-        stencilLoadOp = stencilLoadOpIn;
-        stencilStoreOp = stencilStoreOpIn;
-        readOnlyDepthStencil = readOnly;
+    mCache.clear();
+}
+
+ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    auto it = mCache.find(query);
+    if (it != mCache.end()) {
+        return VkRenderPass(it->second);
     }
 
-    void RenderPassCacheQuery::SetSampleCount(uint32_t sampleCount) {
-        this->sampleCount = sampleCount;
+    VkRenderPass renderPass;
+    DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
+    mCache.emplace(query, renderPass);
+    return renderPass;
+}
+
+ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
+    const RenderPassCacheQuery& query) const {
+    // The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
+    // Precompute them as they must be pointer-chained in VkSubpassDescription.
+    // Note that both colorAttachmentRefs and resolveAttachmentRefs can be sparse with holes
+    // filled with VK_ATTACHMENT_UNUSED.
+    ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
+        colorAttachmentRefs;
+    ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
+        resolveAttachmentRefs;
+    VkAttachmentReference depthStencilAttachmentRef;
+
+    for (ColorAttachmentIndex i(uint8_t(0)); i < kMaxColorAttachmentsTyped; i++) {
+        colorAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
+        resolveAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
+        // The Khronos Vulkan validation layer will complain if not set
+        colorAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        resolveAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
     }
 
-    // RenderPassCache
+    // Contains the attachment description that will be chained in the create info
+    // The order of all attachments in attachmentDescs is "color-depthstencil-resolve".
+    constexpr uint8_t kMaxAttachmentCount = kMaxColorAttachments * 2 + 1;
+    std::array<VkAttachmentDescription, kMaxAttachmentCount> attachmentDescs = {};
 
-    RenderPassCache::RenderPassCache(Device* device) : mDevice(device) {
+    VkSampleCountFlagBits vkSampleCount = VulkanSampleCount(query.sampleCount);
+
+    uint32_t attachmentCount = 0;
+    ColorAttachmentIndex highestColorAttachmentIndexPlusOne(static_cast<uint8_t>(0));
+    for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+        auto& attachmentRef = colorAttachmentRefs[i];
+        auto& attachmentDesc = attachmentDescs[attachmentCount];
+
+        attachmentRef.attachment = attachmentCount;
+        attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+        attachmentDesc.flags = 0;
+        attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+        attachmentDesc.samples = vkSampleCount;
+        attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
+        attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
+        attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+        attachmentCount++;
+        highestColorAttachmentIndexPlusOne =
+            ColorAttachmentIndex(static_cast<uint8_t>(static_cast<uint8_t>(i) + 1u));
     }
 
-    RenderPassCache::~RenderPassCache() {
-        std::lock_guard<std::mutex> lock(mMutex);
-        for (auto [_, renderPass] : mCache) {
-            mDevice->fn.DestroyRenderPass(mDevice->GetVkDevice(), renderPass, nullptr);
-        }
+    VkAttachmentReference* depthStencilAttachment = nullptr;
+    if (query.hasDepthStencil) {
+        auto& attachmentDesc = attachmentDescs[attachmentCount];
 
-        mCache.clear();
+        depthStencilAttachment = &depthStencilAttachmentRef;
+
+        depthStencilAttachmentRef.attachment = attachmentCount;
+        depthStencilAttachmentRef.layout = query.readOnlyDepthStencil
+                                               ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
+                                               : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+        attachmentDesc.flags = 0;
+        attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
+        attachmentDesc.samples = vkSampleCount;
+
+        attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
+        attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
+        attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
+        attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
+
+        // There is only one subpass, so it is safe to set both initialLayout and finalLayout to
+        // the only subpass's layout.
+        attachmentDesc.initialLayout = depthStencilAttachmentRef.layout;
+        attachmentDesc.finalLayout = depthStencilAttachmentRef.layout;
+
+        attachmentCount++;
     }
 
-    ResultOrError<VkRenderPass> RenderPassCache::GetRenderPass(const RenderPassCacheQuery& query) {
-        std::lock_guard<std::mutex> lock(mMutex);
-        auto it = mCache.find(query);
-        if (it != mCache.end()) {
-            return VkRenderPass(it->second);
-        }
+    for (ColorAttachmentIndex i : IterateBitSet(query.resolveTargetMask)) {
+        auto& attachmentRef = resolveAttachmentRefs[i];
+        auto& attachmentDesc = attachmentDescs[attachmentCount];
 
-        VkRenderPass renderPass;
-        DAWN_TRY_ASSIGN(renderPass, CreateRenderPassForQuery(query));
-        mCache.emplace(query, renderPass);
-        return renderPass;
+        attachmentRef.attachment = attachmentCount;
+        attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+        attachmentDesc.flags = 0;
+        attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
+        attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
+        attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+        attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+        attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+        attachmentCount++;
     }
 
-    ResultOrError<VkRenderPass> RenderPassCache::CreateRenderPassForQuery(
-        const RenderPassCacheQuery& query) const {
-        // The Vulkan subpasses want to know the layout of the attachments with VkAttachmentRef.
-        // Precompute them as they must be pointer-chained in VkSubpassDescription.
-        // Note that both colorAttachmentRefs and resolveAttachmentRefs can be sparse with holes
-        // filled with VK_ATTACHMENT_UNUSED.
-        ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
-            colorAttachmentRefs;
-        ityp::array<ColorAttachmentIndex, VkAttachmentReference, kMaxColorAttachments>
-            resolveAttachmentRefs;
-        VkAttachmentReference depthStencilAttachmentRef;
+    // Create the VkSubpassDescription that will be chained in the VkRenderPassCreateInfo
+    VkSubpassDescription subpassDesc;
+    subpassDesc.flags = 0;
+    subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    subpassDesc.inputAttachmentCount = 0;
+    subpassDesc.pInputAttachments = nullptr;
+    subpassDesc.colorAttachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+    subpassDesc.pColorAttachments = colorAttachmentRefs.data();
+    subpassDesc.pResolveAttachments = resolveAttachmentRefs.data();
+    subpassDesc.pDepthStencilAttachment = depthStencilAttachment;
+    subpassDesc.preserveAttachmentCount = 0;
+    subpassDesc.pPreserveAttachments = nullptr;
 
-        for (ColorAttachmentIndex i(uint8_t(0)); i < kMaxColorAttachmentsTyped; i++) {
-            colorAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
-            resolveAttachmentRefs[i].attachment = VK_ATTACHMENT_UNUSED;
-            // The Khronos Vulkan validation layer will complain if not set
-            colorAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-            resolveAttachmentRefs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-        }
+    // Chain everything in VkRenderPassCreateInfo
+    VkRenderPassCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.attachmentCount = attachmentCount;
+    createInfo.pAttachments = attachmentDescs.data();
+    createInfo.subpassCount = 1;
+    createInfo.pSubpasses = &subpassDesc;
+    createInfo.dependencyCount = 0;
+    createInfo.pDependencies = nullptr;
 
-        // Contains the attachment description that will be chained in the create info
-        // The order of all attachments in attachmentDescs is "color-depthstencil-resolve".
-        constexpr uint8_t kMaxAttachmentCount = kMaxColorAttachments * 2 + 1;
-        std::array<VkAttachmentDescription, kMaxAttachmentCount> attachmentDescs = {};
+    // Create the render pass from the zillion parameters
+    VkRenderPass renderPass;
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo, nullptr, &*renderPass),
+        "CreateRenderPass"));
+    return renderPass;
+}
 
-        VkSampleCountFlagBits vkSampleCount = VulkanSampleCount(query.sampleCount);
+// RenderPassCache
 
-        uint32_t attachmentCount = 0;
-        ColorAttachmentIndex highestColorAttachmentIndexPlusOne(static_cast<uint8_t>(0));
-        for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
-            auto& attachmentRef = colorAttachmentRefs[i];
-            auto& attachmentDesc = attachmentDescs[attachmentCount];
+size_t RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& query) const {
+    size_t hash = Hash(query.colorMask);
 
-            attachmentRef.attachment = attachmentCount;
-            attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+    HashCombine(&hash, Hash(query.resolveTargetMask));
 
-            attachmentDesc.flags = 0;
-            attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
-            attachmentDesc.samples = vkSampleCount;
-            attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.colorLoadOp[i]);
-            attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.colorStoreOp[i]);
-            attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-            attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
-            attachmentCount++;
-            highestColorAttachmentIndexPlusOne =
-                ColorAttachmentIndex(static_cast<uint8_t>(static_cast<uint8_t>(i) + 1u));
-        }
-
-        VkAttachmentReference* depthStencilAttachment = nullptr;
-        if (query.hasDepthStencil) {
-            auto& attachmentDesc = attachmentDescs[attachmentCount];
-
-            depthStencilAttachment = &depthStencilAttachmentRef;
-
-            depthStencilAttachmentRef.attachment = attachmentCount;
-            depthStencilAttachmentRef.layout =
-                query.readOnlyDepthStencil ? VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
-                                           : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-
-            attachmentDesc.flags = 0;
-            attachmentDesc.format = VulkanImageFormat(mDevice, query.depthStencilFormat);
-            attachmentDesc.samples = vkSampleCount;
-
-            attachmentDesc.loadOp = VulkanAttachmentLoadOp(query.depthLoadOp);
-            attachmentDesc.storeOp = VulkanAttachmentStoreOp(query.depthStoreOp);
-            attachmentDesc.stencilLoadOp = VulkanAttachmentLoadOp(query.stencilLoadOp);
-            attachmentDesc.stencilStoreOp = VulkanAttachmentStoreOp(query.stencilStoreOp);
-
-            // There is only one subpass, so it is safe to set both initialLayout and finalLayout to
-            // the only subpass's layout.
-            attachmentDesc.initialLayout = depthStencilAttachmentRef.layout;
-            attachmentDesc.finalLayout = depthStencilAttachmentRef.layout;
-
-            attachmentCount++;
-        }
-
-        for (ColorAttachmentIndex i : IterateBitSet(query.resolveTargetMask)) {
-            auto& attachmentRef = resolveAttachmentRefs[i];
-            auto& attachmentDesc = attachmentDescs[attachmentCount];
-
-            attachmentRef.attachment = attachmentCount;
-            attachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
-            attachmentDesc.flags = 0;
-            attachmentDesc.format = VulkanImageFormat(mDevice, query.colorFormats[i]);
-            attachmentDesc.samples = VK_SAMPLE_COUNT_1_BIT;
-            attachmentDesc.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
-            attachmentDesc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
-            attachmentDesc.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-            attachmentDesc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
-            attachmentCount++;
-        }
-
-        // Create the VkSubpassDescription that will be chained in the VkRenderPassCreateInfo
-        VkSubpassDescription subpassDesc;
-        subpassDesc.flags = 0;
-        subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
-        subpassDesc.inputAttachmentCount = 0;
-        subpassDesc.pInputAttachments = nullptr;
-        subpassDesc.colorAttachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
-        subpassDesc.pColorAttachments = colorAttachmentRefs.data();
-        subpassDesc.pResolveAttachments = resolveAttachmentRefs.data();
-        subpassDesc.pDepthStencilAttachment = depthStencilAttachment;
-        subpassDesc.preserveAttachmentCount = 0;
-        subpassDesc.pPreserveAttachments = nullptr;
-
-        // Chain everything in VkRenderPassCreateInfo
-        VkRenderPassCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.attachmentCount = attachmentCount;
-        createInfo.pAttachments = attachmentDescs.data();
-        createInfo.subpassCount = 1;
-        createInfo.pSubpasses = &subpassDesc;
-        createInfo.dependencyCount = 0;
-        createInfo.pDependencies = nullptr;
-
-        // Create the render pass from the zillion parameters
-        VkRenderPass renderPass;
-        DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateRenderPass(mDevice->GetVkDevice(), &createInfo,
-                                                             nullptr, &*renderPass),
-                                "CreateRenderPass"));
-        return renderPass;
+    for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
+        HashCombine(&hash, query.colorFormats[i], query.colorLoadOp[i], query.colorStoreOp[i]);
     }
 
-    // RenderPassCache
-
-    size_t RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& query) const {
-        size_t hash = Hash(query.colorMask);
-
-        HashCombine(&hash, Hash(query.resolveTargetMask));
-
-        for (ColorAttachmentIndex i : IterateBitSet(query.colorMask)) {
-            HashCombine(&hash, query.colorFormats[i], query.colorLoadOp[i], query.colorStoreOp[i]);
-        }
-
-        HashCombine(&hash, query.hasDepthStencil);
-        if (query.hasDepthStencil) {
-            HashCombine(&hash, query.depthStencilFormat, query.depthLoadOp, query.depthStoreOp,
-                        query.stencilLoadOp, query.stencilStoreOp, query.readOnlyDepthStencil);
-        }
-
-        HashCombine(&hash, query.sampleCount);
-
-        return hash;
+    HashCombine(&hash, query.hasDepthStencil);
+    if (query.hasDepthStencil) {
+        HashCombine(&hash, query.depthStencilFormat, query.depthLoadOp, query.depthStoreOp,
+                    query.stencilLoadOp, query.stencilStoreOp, query.readOnlyDepthStencil);
     }
 
-    bool RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& a,
-                                                 const RenderPassCacheQuery& b) const {
-        if (a.colorMask != b.colorMask) {
+    HashCombine(&hash, query.sampleCount);
+
+    return hash;
+}
+
+bool RenderPassCache::CacheFuncs::operator()(const RenderPassCacheQuery& a,
+                                             const RenderPassCacheQuery& b) const {
+    if (a.colorMask != b.colorMask) {
+        return false;
+    }
+
+    if (a.resolveTargetMask != b.resolveTargetMask) {
+        return false;
+    }
+
+    if (a.sampleCount != b.sampleCount) {
+        return false;
+    }
+
+    for (ColorAttachmentIndex i : IterateBitSet(a.colorMask)) {
+        if ((a.colorFormats[i] != b.colorFormats[i]) || (a.colorLoadOp[i] != b.colorLoadOp[i]) ||
+            (a.colorStoreOp[i] != b.colorStoreOp[i])) {
             return false;
         }
-
-        if (a.resolveTargetMask != b.resolveTargetMask) {
-            return false;
-        }
-
-        if (a.sampleCount != b.sampleCount) {
-            return false;
-        }
-
-        for (ColorAttachmentIndex i : IterateBitSet(a.colorMask)) {
-            if ((a.colorFormats[i] != b.colorFormats[i]) ||
-                (a.colorLoadOp[i] != b.colorLoadOp[i]) ||
-                (a.colorStoreOp[i] != b.colorStoreOp[i])) {
-                return false;
-            }
-        }
-
-        if (a.hasDepthStencil != b.hasDepthStencil) {
-            return false;
-        }
-
-        if (a.hasDepthStencil) {
-            if ((a.depthStencilFormat != b.depthStencilFormat) ||
-                (a.depthLoadOp != b.depthLoadOp) || (a.stencilLoadOp != b.stencilLoadOp) ||
-                (a.depthStoreOp != b.depthStoreOp) || (a.stencilStoreOp != b.stencilStoreOp) ||
-                (a.readOnlyDepthStencil != b.readOnlyDepthStencil)) {
-                return false;
-            }
-        }
-
-        return true;
     }
+
+    if (a.hasDepthStencil != b.hasDepthStencil) {
+        return false;
+    }
+
+    if (a.hasDepthStencil) {
+        if ((a.depthStencilFormat != b.depthStencilFormat) || (a.depthLoadOp != b.depthLoadOp) ||
+            (a.stencilLoadOp != b.stencilLoadOp) || (a.depthStoreOp != b.depthStoreOp) ||
+            (a.stencilStoreOp != b.stencilStoreOp) ||
+            (a.readOnlyDepthStencil != b.readOnlyDepthStencil)) {
+            return false;
+        }
+    }
+
+    return true;
+}
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/RenderPassCache.h b/src/dawn/native/vulkan/RenderPassCache.h
index 4b2635b..45a9de7 100644
--- a/src/dawn/native/vulkan/RenderPassCache.h
+++ b/src/dawn/native/vulkan/RenderPassCache.h
@@ -30,76 +30,74 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    // This is a key to query the RenderPassCache, it can be sparse meaning that only the
-    // information for bits set in colorMask or hasDepthStencil need to be provided and the rest can
-    // be uninintialized.
-    struct RenderPassCacheQuery {
-        // Use these helpers to build the query, they make sure all relevant data is initialized and
-        // masks set.
-        void SetColor(ColorAttachmentIndex index,
-                      wgpu::TextureFormat format,
-                      wgpu::LoadOp loadOp,
-                      wgpu::StoreOp storeOp,
-                      bool hasResolveTarget);
-        void SetDepthStencil(wgpu::TextureFormat format,
-                             wgpu::LoadOp depthLoadOp,
-                             wgpu::StoreOp depthStoreOp,
-                             wgpu::LoadOp stencilLoadOp,
-                             wgpu::StoreOp stencilStoreOp,
-                             bool readOnly);
-        void SetSampleCount(uint32_t sampleCount);
+// This is a key to query the RenderPassCache, it can be sparse meaning that only the
+// information for bits set in colorMask or hasDepthStencil need to be provided and the rest can
+// be uninitialized.
+struct RenderPassCacheQuery {
+    // Use these helpers to build the query, they make sure all relevant data is initialized and
+    // masks set.
+    void SetColor(ColorAttachmentIndex index,
+                  wgpu::TextureFormat format,
+                  wgpu::LoadOp loadOp,
+                  wgpu::StoreOp storeOp,
+                  bool hasResolveTarget);
+    void SetDepthStencil(wgpu::TextureFormat format,
+                         wgpu::LoadOp depthLoadOp,
+                         wgpu::StoreOp depthStoreOp,
+                         wgpu::LoadOp stencilLoadOp,
+                         wgpu::StoreOp stencilStoreOp,
+                         bool readOnly);
+    void SetSampleCount(uint32_t sampleCount);
 
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
-        ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
-        ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
-        ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
-        ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> colorMask;
+    ityp::bitset<ColorAttachmentIndex, kMaxColorAttachments> resolveTargetMask;
+    ityp::array<ColorAttachmentIndex, wgpu::TextureFormat, kMaxColorAttachments> colorFormats;
+    ityp::array<ColorAttachmentIndex, wgpu::LoadOp, kMaxColorAttachments> colorLoadOp;
+    ityp::array<ColorAttachmentIndex, wgpu::StoreOp, kMaxColorAttachments> colorStoreOp;
 
-        bool hasDepthStencil = false;
-        wgpu::TextureFormat depthStencilFormat;
-        wgpu::LoadOp depthLoadOp;
-        wgpu::StoreOp depthStoreOp;
-        wgpu::LoadOp stencilLoadOp;
-        wgpu::StoreOp stencilStoreOp;
-        bool readOnlyDepthStencil;
+    bool hasDepthStencil = false;
+    wgpu::TextureFormat depthStencilFormat;
+    wgpu::LoadOp depthLoadOp;
+    wgpu::StoreOp depthStoreOp;
+    wgpu::LoadOp stencilLoadOp;
+    wgpu::StoreOp stencilStoreOp;
+    bool readOnlyDepthStencil;
 
-        uint32_t sampleCount;
+    uint32_t sampleCount;
+};
+
+// Caches VkRenderPasses so that we don't create duplicate ones for every RenderPipeline or
+// render pass. We always arrange the order of attachments in "color-depthstencil-resolve" order
+// when creating render pass and framebuffer so that we can always make sure the order of
+// attachments in the rendering pipeline matches the one of the framebuffer.
+// All the operations on RenderPassCache are guaranteed to be thread-safe.
+// TODO(cwallez@chromium.org): Make it an LRU cache somehow?
+class RenderPassCache {
+  public:
+    explicit RenderPassCache(Device* device);
+    ~RenderPassCache();
+
+    ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
+
+  private:
+    // Does the actual VkRenderPass creation on a cache miss.
+    ResultOrError<VkRenderPass> CreateRenderPassForQuery(const RenderPassCacheQuery& query) const;
+
+    // Implements the functors necessary to use RenderPassCacheQueries as unordered_map
+    // keys.
+    struct CacheFuncs {
+        size_t operator()(const RenderPassCacheQuery& query) const;
+        bool operator()(const RenderPassCacheQuery& a, const RenderPassCacheQuery& b) const;
     };
+    using Cache = std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
 
-    // Caches VkRenderPasses so that we don't create duplicate ones for every RenderPipeline or
-    // render pass. We always arrange the order of attachments in "color-depthstencil-resolve" order
-    // when creating render pass and framebuffer so that we can always make sure the order of
-    // attachments in the rendering pipeline matches the one of the framebuffer.
-    // All the operations on RenderPassCache are guaranteed to be thread-safe.
-    // TODO(cwallez@chromium.org): Make it an LRU cache somehow?
-    class RenderPassCache {
-      public:
-        explicit RenderPassCache(Device* device);
-        ~RenderPassCache();
+    Device* mDevice = nullptr;
 
-        ResultOrError<VkRenderPass> GetRenderPass(const RenderPassCacheQuery& query);
-
-      private:
-        // Does the actual VkRenderPass creation on a cache miss.
-        ResultOrError<VkRenderPass> CreateRenderPassForQuery(
-            const RenderPassCacheQuery& query) const;
-
-        // Implements the functors necessary for to use RenderPassCacheQueries as unordered_map
-        // keys.
-        struct CacheFuncs {
-            size_t operator()(const RenderPassCacheQuery& query) const;
-            bool operator()(const RenderPassCacheQuery& a, const RenderPassCacheQuery& b) const;
-        };
-        using Cache =
-            std::unordered_map<RenderPassCacheQuery, VkRenderPass, CacheFuncs, CacheFuncs>;
-
-        Device* mDevice = nullptr;
-
-        std::mutex mMutex;
-        Cache mCache;
-    };
+    std::mutex mMutex;
+    Cache mCache;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/RenderPipelineVk.cpp b/src/dawn/native/vulkan/RenderPipelineVk.cpp
index febc7b6..b8822f8 100644
--- a/src/dawn/native/vulkan/RenderPipelineVk.cpp
+++ b/src/dawn/native/vulkan/RenderPipelineVk.cpp
@@ -30,620 +30,613 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
+namespace {
 
-        VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
-            switch (stepMode) {
-                case wgpu::VertexStepMode::Vertex:
-                    return VK_VERTEX_INPUT_RATE_VERTEX;
-                case wgpu::VertexStepMode::Instance:
-                    return VK_VERTEX_INPUT_RATE_INSTANCE;
-            }
+VkVertexInputRate VulkanInputRate(wgpu::VertexStepMode stepMode) {
+    switch (stepMode) {
+        case wgpu::VertexStepMode::Vertex:
+            return VK_VERTEX_INPUT_RATE_VERTEX;
+        case wgpu::VertexStepMode::Instance:
+            return VK_VERTEX_INPUT_RATE_INSTANCE;
+    }
+    UNREACHABLE();
+}
+
+VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Uint8x2:
+            return VK_FORMAT_R8G8_UINT;
+        case wgpu::VertexFormat::Uint8x4:
+            return VK_FORMAT_R8G8B8A8_UINT;
+        case wgpu::VertexFormat::Sint8x2:
+            return VK_FORMAT_R8G8_SINT;
+        case wgpu::VertexFormat::Sint8x4:
+            return VK_FORMAT_R8G8B8A8_SINT;
+        case wgpu::VertexFormat::Unorm8x2:
+            return VK_FORMAT_R8G8_UNORM;
+        case wgpu::VertexFormat::Unorm8x4:
+            return VK_FORMAT_R8G8B8A8_UNORM;
+        case wgpu::VertexFormat::Snorm8x2:
+            return VK_FORMAT_R8G8_SNORM;
+        case wgpu::VertexFormat::Snorm8x4:
+            return VK_FORMAT_R8G8B8A8_SNORM;
+        case wgpu::VertexFormat::Uint16x2:
+            return VK_FORMAT_R16G16_UINT;
+        case wgpu::VertexFormat::Uint16x4:
+            return VK_FORMAT_R16G16B16A16_UINT;
+        case wgpu::VertexFormat::Sint16x2:
+            return VK_FORMAT_R16G16_SINT;
+        case wgpu::VertexFormat::Sint16x4:
+            return VK_FORMAT_R16G16B16A16_SINT;
+        case wgpu::VertexFormat::Unorm16x2:
+            return VK_FORMAT_R16G16_UNORM;
+        case wgpu::VertexFormat::Unorm16x4:
+            return VK_FORMAT_R16G16B16A16_UNORM;
+        case wgpu::VertexFormat::Snorm16x2:
+            return VK_FORMAT_R16G16_SNORM;
+        case wgpu::VertexFormat::Snorm16x4:
+            return VK_FORMAT_R16G16B16A16_SNORM;
+        case wgpu::VertexFormat::Float16x2:
+            return VK_FORMAT_R16G16_SFLOAT;
+        case wgpu::VertexFormat::Float16x4:
+            return VK_FORMAT_R16G16B16A16_SFLOAT;
+        case wgpu::VertexFormat::Float32:
+            return VK_FORMAT_R32_SFLOAT;
+        case wgpu::VertexFormat::Float32x2:
+            return VK_FORMAT_R32G32_SFLOAT;
+        case wgpu::VertexFormat::Float32x3:
+            return VK_FORMAT_R32G32B32_SFLOAT;
+        case wgpu::VertexFormat::Float32x4:
+            return VK_FORMAT_R32G32B32A32_SFLOAT;
+        case wgpu::VertexFormat::Uint32:
+            return VK_FORMAT_R32_UINT;
+        case wgpu::VertexFormat::Uint32x2:
+            return VK_FORMAT_R32G32_UINT;
+        case wgpu::VertexFormat::Uint32x3:
+            return VK_FORMAT_R32G32B32_UINT;
+        case wgpu::VertexFormat::Uint32x4:
+            return VK_FORMAT_R32G32B32A32_UINT;
+        case wgpu::VertexFormat::Sint32:
+            return VK_FORMAT_R32_SINT;
+        case wgpu::VertexFormat::Sint32x2:
+            return VK_FORMAT_R32G32_SINT;
+        case wgpu::VertexFormat::Sint32x3:
+            return VK_FORMAT_R32G32B32_SINT;
+        case wgpu::VertexFormat::Sint32x4:
+            return VK_FORMAT_R32G32B32A32_SINT;
+        default:
             UNREACHABLE();
-        }
+    }
+}
 
-        VkFormat VulkanVertexFormat(wgpu::VertexFormat format) {
-            switch (format) {
-                case wgpu::VertexFormat::Uint8x2:
-                    return VK_FORMAT_R8G8_UINT;
-                case wgpu::VertexFormat::Uint8x4:
-                    return VK_FORMAT_R8G8B8A8_UINT;
-                case wgpu::VertexFormat::Sint8x2:
-                    return VK_FORMAT_R8G8_SINT;
-                case wgpu::VertexFormat::Sint8x4:
-                    return VK_FORMAT_R8G8B8A8_SINT;
-                case wgpu::VertexFormat::Unorm8x2:
-                    return VK_FORMAT_R8G8_UNORM;
-                case wgpu::VertexFormat::Unorm8x4:
-                    return VK_FORMAT_R8G8B8A8_UNORM;
-                case wgpu::VertexFormat::Snorm8x2:
-                    return VK_FORMAT_R8G8_SNORM;
-                case wgpu::VertexFormat::Snorm8x4:
-                    return VK_FORMAT_R8G8B8A8_SNORM;
-                case wgpu::VertexFormat::Uint16x2:
-                    return VK_FORMAT_R16G16_UINT;
-                case wgpu::VertexFormat::Uint16x4:
-                    return VK_FORMAT_R16G16B16A16_UINT;
-                case wgpu::VertexFormat::Sint16x2:
-                    return VK_FORMAT_R16G16_SINT;
-                case wgpu::VertexFormat::Sint16x4:
-                    return VK_FORMAT_R16G16B16A16_SINT;
-                case wgpu::VertexFormat::Unorm16x2:
-                    return VK_FORMAT_R16G16_UNORM;
-                case wgpu::VertexFormat::Unorm16x4:
-                    return VK_FORMAT_R16G16B16A16_UNORM;
-                case wgpu::VertexFormat::Snorm16x2:
-                    return VK_FORMAT_R16G16_SNORM;
-                case wgpu::VertexFormat::Snorm16x4:
-                    return VK_FORMAT_R16G16B16A16_SNORM;
-                case wgpu::VertexFormat::Float16x2:
-                    return VK_FORMAT_R16G16_SFLOAT;
-                case wgpu::VertexFormat::Float16x4:
-                    return VK_FORMAT_R16G16B16A16_SFLOAT;
-                case wgpu::VertexFormat::Float32:
-                    return VK_FORMAT_R32_SFLOAT;
-                case wgpu::VertexFormat::Float32x2:
-                    return VK_FORMAT_R32G32_SFLOAT;
-                case wgpu::VertexFormat::Float32x3:
-                    return VK_FORMAT_R32G32B32_SFLOAT;
-                case wgpu::VertexFormat::Float32x4:
-                    return VK_FORMAT_R32G32B32A32_SFLOAT;
-                case wgpu::VertexFormat::Uint32:
-                    return VK_FORMAT_R32_UINT;
-                case wgpu::VertexFormat::Uint32x2:
-                    return VK_FORMAT_R32G32_UINT;
-                case wgpu::VertexFormat::Uint32x3:
-                    return VK_FORMAT_R32G32B32_UINT;
-                case wgpu::VertexFormat::Uint32x4:
-                    return VK_FORMAT_R32G32B32A32_UINT;
-                case wgpu::VertexFormat::Sint32:
-                    return VK_FORMAT_R32_SINT;
-                case wgpu::VertexFormat::Sint32x2:
-                    return VK_FORMAT_R32G32_SINT;
-                case wgpu::VertexFormat::Sint32x3:
-                    return VK_FORMAT_R32G32B32_SINT;
-                case wgpu::VertexFormat::Sint32x4:
-                    return VK_FORMAT_R32G32B32A32_SINT;
-                default:
-                    UNREACHABLE();
+VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
+    switch (topology) {
+        case wgpu::PrimitiveTopology::PointList:
+            return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+        case wgpu::PrimitiveTopology::LineList:
+            return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+        case wgpu::PrimitiveTopology::LineStrip:
+            return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+        case wgpu::PrimitiveTopology::TriangleList:
+            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+        case wgpu::PrimitiveTopology::TriangleStrip:
+            return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+    }
+    UNREACHABLE();
+}
+
+bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
+    // Primitive restart is always enabled in WebGPU but Vulkan validation rules ask that
+    // primitive restart be only enabled on primitive topologies that support restarting.
+    switch (topology) {
+        case wgpu::PrimitiveTopology::PointList:
+        case wgpu::PrimitiveTopology::LineList:
+        case wgpu::PrimitiveTopology::TriangleList:
+            return false;
+        case wgpu::PrimitiveTopology::LineStrip:
+        case wgpu::PrimitiveTopology::TriangleStrip:
+            return true;
+    }
+    UNREACHABLE();
+}
+
+VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
+    switch (face) {
+        case wgpu::FrontFace::CCW:
+            return VK_FRONT_FACE_COUNTER_CLOCKWISE;
+        case wgpu::FrontFace::CW:
+            return VK_FRONT_FACE_CLOCKWISE;
+    }
+    UNREACHABLE();
+}
+
+VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
+    switch (mode) {
+        case wgpu::CullMode::None:
+            return VK_CULL_MODE_NONE;
+        case wgpu::CullMode::Front:
+            return VK_CULL_MODE_FRONT_BIT;
+        case wgpu::CullMode::Back:
+            return VK_CULL_MODE_BACK_BIT;
+    }
+    UNREACHABLE();
+}
+
+VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
+    switch (factor) {
+        case wgpu::BlendFactor::Zero:
+            return VK_BLEND_FACTOR_ZERO;
+        case wgpu::BlendFactor::One:
+            return VK_BLEND_FACTOR_ONE;
+        case wgpu::BlendFactor::Src:
+            return VK_BLEND_FACTOR_SRC_COLOR;
+        case wgpu::BlendFactor::OneMinusSrc:
+            return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
+        case wgpu::BlendFactor::SrcAlpha:
+            return VK_BLEND_FACTOR_SRC_ALPHA;
+        case wgpu::BlendFactor::OneMinusSrcAlpha:
+            return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+        case wgpu::BlendFactor::Dst:
+            return VK_BLEND_FACTOR_DST_COLOR;
+        case wgpu::BlendFactor::OneMinusDst:
+            return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+        case wgpu::BlendFactor::DstAlpha:
+            return VK_BLEND_FACTOR_DST_ALPHA;
+        case wgpu::BlendFactor::OneMinusDstAlpha:
+            return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+        case wgpu::BlendFactor::SrcAlphaSaturated:
+            return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
+        case wgpu::BlendFactor::Constant:
+            return VK_BLEND_FACTOR_CONSTANT_COLOR;
+        case wgpu::BlendFactor::OneMinusConstant:
+            return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+    }
+    UNREACHABLE();
+}
+
+VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
+    switch (operation) {
+        case wgpu::BlendOperation::Add:
+            return VK_BLEND_OP_ADD;
+        case wgpu::BlendOperation::Subtract:
+            return VK_BLEND_OP_SUBTRACT;
+        case wgpu::BlendOperation::ReverseSubtract:
+            return VK_BLEND_OP_REVERSE_SUBTRACT;
+        case wgpu::BlendOperation::Min:
+            return VK_BLEND_OP_MIN;
+        case wgpu::BlendOperation::Max:
+            return VK_BLEND_OP_MAX;
+    }
+    UNREACHABLE();
+}
+
+VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
+                                           bool isDeclaredInFragmentShader) {
+    // Vulkan and Dawn color write masks match, static assert it and return the mask
+    static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
+                  VK_COLOR_COMPONENT_R_BIT);
+    static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
+                  VK_COLOR_COMPONENT_G_BIT);
+    static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
+                  VK_COLOR_COMPONENT_B_BIT);
+    static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
+                  VK_COLOR_COMPONENT_A_BIT);
+
+    // According to Vulkan SPEC (Chapter 14.3): "The input values to blending or color
+    // attachment writes are undefined for components which do not correspond to a fragment
+    // shader outputs", we set the color write mask to 0 to prevent such undefined values
+    // being written into the color attachments.
+    return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
+                                      : static_cast<VkColorComponentFlags>(0);
+}
+
+VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
+                                                     bool isDeclaredInFragmentShader) {
+    VkPipelineColorBlendAttachmentState attachment;
+    attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
+    if (attachment.blendEnable) {
+        attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
+        attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
+        attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
+        attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
+        attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
+        attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
+    } else {
+        // Swiftshader's Vulkan implementation appears to expect these values to be valid
+        // even when blending is not enabled.
+        attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+        attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+        attachment.colorBlendOp = VK_BLEND_OP_ADD;
+        attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+        attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+        attachment.alphaBlendOp = VK_BLEND_OP_ADD;
+    }
+    attachment.colorWriteMask = VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
+    return attachment;
+}
+
+VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
+    switch (op) {
+        case wgpu::StencilOperation::Keep:
+            return VK_STENCIL_OP_KEEP;
+        case wgpu::StencilOperation::Zero:
+            return VK_STENCIL_OP_ZERO;
+        case wgpu::StencilOperation::Replace:
+            return VK_STENCIL_OP_REPLACE;
+        case wgpu::StencilOperation::IncrementClamp:
+            return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
+        case wgpu::StencilOperation::DecrementClamp:
+            return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+        case wgpu::StencilOperation::Invert:
+            return VK_STENCIL_OP_INVERT;
+        case wgpu::StencilOperation::IncrementWrap:
+            return VK_STENCIL_OP_INCREMENT_AND_WRAP;
+        case wgpu::StencilOperation::DecrementWrap:
+            return VK_STENCIL_OP_DECREMENT_AND_WRAP;
+    }
+    UNREACHABLE();
+}
+
+VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(const DepthStencilState* descriptor) {
+    VkPipelineDepthStencilStateCreateInfo depthStencilState;
+    depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+    depthStencilState.pNext = nullptr;
+    depthStencilState.flags = 0;
+
+    // Depth writes only occur if depth is enabled
+    depthStencilState.depthTestEnable =
+        (descriptor->depthCompare == wgpu::CompareFunction::Always &&
+         !descriptor->depthWriteEnabled)
+            ? VK_FALSE
+            : VK_TRUE;
+    depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
+    depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
+    depthStencilState.depthBoundsTestEnable = false;
+    depthStencilState.minDepthBounds = 0.0f;
+    depthStencilState.maxDepthBounds = 1.0f;
+
+    depthStencilState.stencilTestEnable = StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;
+
+    depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
+    depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
+    depthStencilState.front.depthFailOp = VulkanStencilOp(descriptor->stencilFront.depthFailOp);
+    depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);
+
+    depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
+    depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
+    depthStencilState.back.depthFailOp = VulkanStencilOp(descriptor->stencilBack.depthFailOp);
+    depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);
+
+    // Dawn doesn't have separate front and back stencil masks.
+    depthStencilState.front.compareMask = descriptor->stencilReadMask;
+    depthStencilState.back.compareMask = descriptor->stencilReadMask;
+    depthStencilState.front.writeMask = descriptor->stencilWriteMask;
+    depthStencilState.back.writeMask = descriptor->stencilWriteMask;
+
+    // The stencil reference is always dynamic
+    depthStencilState.front.reference = 0;
+    depthStencilState.back.reference = 0;
+
+    return depthStencilState;
+}
+
+}  // anonymous namespace
+
+// static
+Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
+    Device* device,
+    const RenderPipelineDescriptor* descriptor) {
+    return AcquireRef(new RenderPipeline(device, descriptor));
+}
+
+MaybeError RenderPipeline::Initialize() {
+    Device* device = ToBackend(GetDevice());
+    PipelineLayout* layout = ToBackend(GetLayout());
+
+    // There are at most 2 shader stages in render pipeline, i.e. vertex and fragment
+    std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
+    std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
+    std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
+    std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
+    uint32_t stageCount = 0;
+
+    for (auto stage : IterateStages(this->GetStageMask())) {
+        VkPipelineShaderStageCreateInfo shaderStage;
+
+        const ProgrammableStage& programmableStage = GetStage(stage);
+        ShaderModule* module = ToBackend(programmableStage.module.Get());
+        const ShaderModule::Spirv* spirv;
+        DAWN_TRY_ASSIGN(std::tie(shaderStage.module, spirv),
+                        module->GetHandleAndSpirv(programmableStage.entryPoint.c_str(), layout));
+
+        shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+        shaderStage.pNext = nullptr;
+        shaderStage.flags = 0;
+        shaderStage.pSpecializationInfo = nullptr;
+        shaderStage.pName = programmableStage.entryPoint.c_str();
+
+        switch (stage) {
+            case dawn::native::SingleShaderStage::Vertex: {
+                shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
+                break;
+            }
+            case dawn::native::SingleShaderStage::Fragment: {
+                shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+                break;
+            }
+            default: {
+                // For render pipeline only Vertex and Fragment stage is possible
+                DAWN_UNREACHABLE();
+                break;
             }
         }
 
-        VkPrimitiveTopology VulkanPrimitiveTopology(wgpu::PrimitiveTopology topology) {
-            switch (topology) {
-                case wgpu::PrimitiveTopology::PointList:
-                    return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
-                case wgpu::PrimitiveTopology::LineList:
-                    return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
-                case wgpu::PrimitiveTopology::LineStrip:
-                    return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
-                case wgpu::PrimitiveTopology::TriangleList:
-                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
-                case wgpu::PrimitiveTopology::TriangleStrip:
-                    return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
-            }
-            UNREACHABLE();
-        }
+        shaderStage.pSpecializationInfo =
+            GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
+                                    &specializationDataEntriesPerStages[stageCount],
+                                    &specializationMapEntriesPerStages[stageCount]);
 
-        bool ShouldEnablePrimitiveRestart(wgpu::PrimitiveTopology topology) {
-            // Primitive restart is always enabled in WebGPU but Vulkan validation rules ask that
-            // primitive restart be only enabled on primitive topologies that support restarting.
-            switch (topology) {
-                case wgpu::PrimitiveTopology::PointList:
-                case wgpu::PrimitiveTopology::LineList:
-                case wgpu::PrimitiveTopology::TriangleList:
-                    return false;
-                case wgpu::PrimitiveTopology::LineStrip:
-                case wgpu::PrimitiveTopology::TriangleStrip:
-                    return true;
-            }
-            UNREACHABLE();
-        }
+        DAWN_ASSERT(stageCount < 2);
+        shaderStages[stageCount] = shaderStage;
+        stageCount++;
 
-        VkFrontFace VulkanFrontFace(wgpu::FrontFace face) {
-            switch (face) {
-                case wgpu::FrontFace::CCW:
-                    return VK_FRONT_FACE_COUNTER_CLOCKWISE;
-                case wgpu::FrontFace::CW:
-                    return VK_FRONT_FACE_CLOCKWISE;
-            }
-            UNREACHABLE();
-        }
-
-        VkCullModeFlagBits VulkanCullMode(wgpu::CullMode mode) {
-            switch (mode) {
-                case wgpu::CullMode::None:
-                    return VK_CULL_MODE_NONE;
-                case wgpu::CullMode::Front:
-                    return VK_CULL_MODE_FRONT_BIT;
-                case wgpu::CullMode::Back:
-                    return VK_CULL_MODE_BACK_BIT;
-            }
-            UNREACHABLE();
-        }
-
-        VkBlendFactor VulkanBlendFactor(wgpu::BlendFactor factor) {
-            switch (factor) {
-                case wgpu::BlendFactor::Zero:
-                    return VK_BLEND_FACTOR_ZERO;
-                case wgpu::BlendFactor::One:
-                    return VK_BLEND_FACTOR_ONE;
-                case wgpu::BlendFactor::Src:
-                    return VK_BLEND_FACTOR_SRC_COLOR;
-                case wgpu::BlendFactor::OneMinusSrc:
-                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
-                case wgpu::BlendFactor::SrcAlpha:
-                    return VK_BLEND_FACTOR_SRC_ALPHA;
-                case wgpu::BlendFactor::OneMinusSrcAlpha:
-                    return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
-                case wgpu::BlendFactor::Dst:
-                    return VK_BLEND_FACTOR_DST_COLOR;
-                case wgpu::BlendFactor::OneMinusDst:
-                    return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
-                case wgpu::BlendFactor::DstAlpha:
-                    return VK_BLEND_FACTOR_DST_ALPHA;
-                case wgpu::BlendFactor::OneMinusDstAlpha:
-                    return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
-                case wgpu::BlendFactor::SrcAlphaSaturated:
-                    return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
-                case wgpu::BlendFactor::Constant:
-                    return VK_BLEND_FACTOR_CONSTANT_COLOR;
-                case wgpu::BlendFactor::OneMinusConstant:
-                    return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
-            }
-            UNREACHABLE();
-        }
-
-        VkBlendOp VulkanBlendOperation(wgpu::BlendOperation operation) {
-            switch (operation) {
-                case wgpu::BlendOperation::Add:
-                    return VK_BLEND_OP_ADD;
-                case wgpu::BlendOperation::Subtract:
-                    return VK_BLEND_OP_SUBTRACT;
-                case wgpu::BlendOperation::ReverseSubtract:
-                    return VK_BLEND_OP_REVERSE_SUBTRACT;
-                case wgpu::BlendOperation::Min:
-                    return VK_BLEND_OP_MIN;
-                case wgpu::BlendOperation::Max:
-                    return VK_BLEND_OP_MAX;
-            }
-            UNREACHABLE();
-        }
-
-        VkColorComponentFlags VulkanColorWriteMask(wgpu::ColorWriteMask mask,
-                                                   bool isDeclaredInFragmentShader) {
-            // Vulkan and Dawn color write masks match, static assert it and return the mask
-            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Red) ==
-                          VK_COLOR_COMPONENT_R_BIT);
-            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Green) ==
-                          VK_COLOR_COMPONENT_G_BIT);
-            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Blue) ==
-                          VK_COLOR_COMPONENT_B_BIT);
-            static_assert(static_cast<VkColorComponentFlagBits>(wgpu::ColorWriteMask::Alpha) ==
-                          VK_COLOR_COMPONENT_A_BIT);
-
-            // According to Vulkan SPEC (Chapter 14.3): "The input values to blending or color
-            // attachment writes are undefined for components which do not correspond to a fragment
-            // shader outputs", we set the color write mask to 0 to prevent such undefined values
-            // being written into the color attachments.
-            return isDeclaredInFragmentShader ? static_cast<VkColorComponentFlags>(mask)
-                                              : static_cast<VkColorComponentFlags>(0);
-        }
-
-        VkPipelineColorBlendAttachmentState ComputeColorDesc(const ColorTargetState* state,
-                                                             bool isDeclaredInFragmentShader) {
-            VkPipelineColorBlendAttachmentState attachment;
-            attachment.blendEnable = state->blend != nullptr ? VK_TRUE : VK_FALSE;
-            if (attachment.blendEnable) {
-                attachment.srcColorBlendFactor = VulkanBlendFactor(state->blend->color.srcFactor);
-                attachment.dstColorBlendFactor = VulkanBlendFactor(state->blend->color.dstFactor);
-                attachment.colorBlendOp = VulkanBlendOperation(state->blend->color.operation);
-                attachment.srcAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.srcFactor);
-                attachment.dstAlphaBlendFactor = VulkanBlendFactor(state->blend->alpha.dstFactor);
-                attachment.alphaBlendOp = VulkanBlendOperation(state->blend->alpha.operation);
-            } else {
-                // Swiftshader's Vulkan implementation appears to expect these values to be valid
-                // even when blending is not enabled.
-                attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
-                attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
-                attachment.colorBlendOp = VK_BLEND_OP_ADD;
-                attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
-                attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
-                attachment.alphaBlendOp = VK_BLEND_OP_ADD;
-            }
-            attachment.colorWriteMask =
-                VulkanColorWriteMask(state->writeMask, isDeclaredInFragmentShader);
-            return attachment;
-        }
-
-        VkStencilOp VulkanStencilOp(wgpu::StencilOperation op) {
-            switch (op) {
-                case wgpu::StencilOperation::Keep:
-                    return VK_STENCIL_OP_KEEP;
-                case wgpu::StencilOperation::Zero:
-                    return VK_STENCIL_OP_ZERO;
-                case wgpu::StencilOperation::Replace:
-                    return VK_STENCIL_OP_REPLACE;
-                case wgpu::StencilOperation::IncrementClamp:
-                    return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
-                case wgpu::StencilOperation::DecrementClamp:
-                    return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
-                case wgpu::StencilOperation::Invert:
-                    return VK_STENCIL_OP_INVERT;
-                case wgpu::StencilOperation::IncrementWrap:
-                    return VK_STENCIL_OP_INCREMENT_AND_WRAP;
-                case wgpu::StencilOperation::DecrementWrap:
-                    return VK_STENCIL_OP_DECREMENT_AND_WRAP;
-            }
-            UNREACHABLE();
-        }
-
-        VkPipelineDepthStencilStateCreateInfo ComputeDepthStencilDesc(
-            const DepthStencilState* descriptor) {
-            VkPipelineDepthStencilStateCreateInfo depthStencilState;
-            depthStencilState.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
-            depthStencilState.pNext = nullptr;
-            depthStencilState.flags = 0;
-
-            // Depth writes only occur if depth is enabled
-            depthStencilState.depthTestEnable =
-                (descriptor->depthCompare == wgpu::CompareFunction::Always &&
-                 !descriptor->depthWriteEnabled)
-                    ? VK_FALSE
-                    : VK_TRUE;
-            depthStencilState.depthWriteEnable = descriptor->depthWriteEnabled ? VK_TRUE : VK_FALSE;
-            depthStencilState.depthCompareOp = ToVulkanCompareOp(descriptor->depthCompare);
-            depthStencilState.depthBoundsTestEnable = false;
-            depthStencilState.minDepthBounds = 0.0f;
-            depthStencilState.maxDepthBounds = 1.0f;
-
-            depthStencilState.stencilTestEnable =
-                StencilTestEnabled(descriptor) ? VK_TRUE : VK_FALSE;
-
-            depthStencilState.front.failOp = VulkanStencilOp(descriptor->stencilFront.failOp);
-            depthStencilState.front.passOp = VulkanStencilOp(descriptor->stencilFront.passOp);
-            depthStencilState.front.depthFailOp =
-                VulkanStencilOp(descriptor->stencilFront.depthFailOp);
-            depthStencilState.front.compareOp = ToVulkanCompareOp(descriptor->stencilFront.compare);
-
-            depthStencilState.back.failOp = VulkanStencilOp(descriptor->stencilBack.failOp);
-            depthStencilState.back.passOp = VulkanStencilOp(descriptor->stencilBack.passOp);
-            depthStencilState.back.depthFailOp =
-                VulkanStencilOp(descriptor->stencilBack.depthFailOp);
-            depthStencilState.back.compareOp = ToVulkanCompareOp(descriptor->stencilBack.compare);
-
-            // Dawn doesn't have separate front and back stencil masks.
-            depthStencilState.front.compareMask = descriptor->stencilReadMask;
-            depthStencilState.back.compareMask = descriptor->stencilReadMask;
-            depthStencilState.front.writeMask = descriptor->stencilWriteMask;
-            depthStencilState.back.writeMask = descriptor->stencilWriteMask;
-
-            // The stencil reference is always dynamic
-            depthStencilState.front.reference = 0;
-            depthStencilState.back.reference = 0;
-
-            return depthStencilState;
-        }
-
-    }  // anonymous namespace
-
-    // static
-    Ref<RenderPipeline> RenderPipeline::CreateUninitialized(
-        Device* device,
-        const RenderPipelineDescriptor* descriptor) {
-        return AcquireRef(new RenderPipeline(device, descriptor));
+        // Record cache key for each shader since it will become inaccessible later on.
+        GetCacheKey()->Record(stage).RecordIterable(*spirv);
     }
 
-    MaybeError RenderPipeline::Initialize() {
-        Device* device = ToBackend(GetDevice());
-        PipelineLayout* layout = ToBackend(GetLayout());
+    PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
+    VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
+        ComputeVertexInputDesc(&tempAllocations);
 
-        // There are at most 2 shader stages in render pipeline, i.e. vertex and fragment
-        std::array<VkPipelineShaderStageCreateInfo, 2> shaderStages;
-        std::array<std::vector<OverridableConstantScalar>, 2> specializationDataEntriesPerStages;
-        std::array<std::vector<VkSpecializationMapEntry>, 2> specializationMapEntriesPerStages;
-        std::array<VkSpecializationInfo, 2> specializationInfoPerStages;
-        uint32_t stageCount = 0;
+    VkPipelineInputAssemblyStateCreateInfo inputAssembly;
+    inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+    inputAssembly.pNext = nullptr;
+    inputAssembly.flags = 0;
+    inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
+    inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
 
-        for (auto stage : IterateStages(this->GetStageMask())) {
-            VkPipelineShaderStageCreateInfo shaderStage;
+    // A placeholder viewport/scissor info. The validation layers force us to provide at least
+    // one scissor and one viewport here, even if we choose to make them dynamic.
+    VkViewport viewportDesc;
+    viewportDesc.x = 0.0f;
+    viewportDesc.y = 0.0f;
+    viewportDesc.width = 1.0f;
+    viewportDesc.height = 1.0f;
+    viewportDesc.minDepth = 0.0f;
+    viewportDesc.maxDepth = 1.0f;
+    VkRect2D scissorRect;
+    scissorRect.offset.x = 0;
+    scissorRect.offset.y = 0;
+    scissorRect.extent.width = 1;
+    scissorRect.extent.height = 1;
+    VkPipelineViewportStateCreateInfo viewport;
+    viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+    viewport.pNext = nullptr;
+    viewport.flags = 0;
+    viewport.viewportCount = 1;
+    viewport.pViewports = &viewportDesc;
+    viewport.scissorCount = 1;
+    viewport.pScissors = &scissorRect;
 
-            const ProgrammableStage& programmableStage = GetStage(stage);
-            ShaderModule* module = ToBackend(programmableStage.module.Get());
-            const ShaderModule::Spirv* spirv;
-            DAWN_TRY_ASSIGN(
-                std::tie(shaderStage.module, spirv),
-                module->GetHandleAndSpirv(programmableStage.entryPoint.c_str(), layout));
+    VkPipelineRasterizationStateCreateInfo rasterization;
+    rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+    rasterization.pNext = nullptr;
+    rasterization.flags = 0;
+    rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
+    rasterization.rasterizerDiscardEnable = VK_FALSE;
+    rasterization.polygonMode = VK_POLYGON_MODE_FILL;
+    rasterization.cullMode = VulkanCullMode(GetCullMode());
+    rasterization.frontFace = VulkanFrontFace(GetFrontFace());
+    rasterization.depthBiasEnable = IsDepthBiasEnabled();
+    rasterization.depthBiasConstantFactor = GetDepthBias();
+    rasterization.depthBiasClamp = GetDepthBiasClamp();
+    rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
+    rasterization.lineWidth = 1.0f;
 
-            shaderStage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-            shaderStage.pNext = nullptr;
-            shaderStage.flags = 0;
-            shaderStage.pSpecializationInfo = nullptr;
-            shaderStage.pName = programmableStage.entryPoint.c_str();
+    VkPipelineMultisampleStateCreateInfo multisample;
+    multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+    multisample.pNext = nullptr;
+    multisample.flags = 0;
+    multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
+    multisample.sampleShadingEnable = VK_FALSE;
+    multisample.minSampleShading = 0.0f;
+    // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
+    // ceil(rasterizationSamples / 32) and since we're passing a single uint32_t
+    // we have to assert that this length is indeed 1.
+    ASSERT(multisample.rasterizationSamples <= 32);
+    VkSampleMask sampleMask = GetSampleMask();
+    multisample.pSampleMask = &sampleMask;
+    multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
+    multisample.alphaToOneEnable = VK_FALSE;
 
-            switch (stage) {
-                case dawn::native::SingleShaderStage::Vertex: {
-                    shaderStage.stage = VK_SHADER_STAGE_VERTEX_BIT;
-                    break;
-                }
-                case dawn::native::SingleShaderStage::Fragment: {
-                    shaderStage.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
-                    break;
-                }
-                default: {
-                    // For render pipeline only Vertex and Fragment stage is possible
-                    DAWN_UNREACHABLE();
-                    break;
-                }
-            }
+    VkPipelineDepthStencilStateCreateInfo depthStencilState =
+        ComputeDepthStencilDesc(GetDepthStencilState());
 
-            shaderStage.pSpecializationInfo =
-                GetVkSpecializationInfo(programmableStage, &specializationInfoPerStages[stageCount],
-                                        &specializationDataEntriesPerStages[stageCount],
-                                        &specializationMapEntriesPerStages[stageCount]);
-
-            DAWN_ASSERT(stageCount < 2);
-            shaderStages[stageCount] = shaderStage;
-            stageCount++;
-
-            // Record cache key for each shader since it will become inaccessible later on.
-            GetCacheKey()->Record(stage).RecordIterable(*spirv);
+    VkPipelineColorBlendStateCreateInfo colorBlend;
+    // colorBlend may hold pointers to elements in colorBlendAttachments, so
+    // colorBlendAttachments must have the same scope as colorBlend
+    ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
+        colorBlendAttachments;
+    if (GetStageMask() & wgpu::ShaderStage::Fragment) {
+        // Initialize the "blend state info" that will be chained in the "create info" from the
+        // data pre-computed in the ColorState
+        for (auto& blend : colorBlendAttachments) {
+            blend.blendEnable = VK_FALSE;
+            blend.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+            blend.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+            blend.colorBlendOp = VK_BLEND_OP_ADD;
+            blend.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+            blend.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+            blend.alphaBlendOp = VK_BLEND_OP_ADD;
+            blend.colorWriteMask = 0;
         }
 
-        PipelineVertexInputStateCreateInfoTemporaryAllocations tempAllocations;
-        VkPipelineVertexInputStateCreateInfo vertexInputCreateInfo =
-            ComputeVertexInputDesc(&tempAllocations);
-
-        VkPipelineInputAssemblyStateCreateInfo inputAssembly;
-        inputAssembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
-        inputAssembly.pNext = nullptr;
-        inputAssembly.flags = 0;
-        inputAssembly.topology = VulkanPrimitiveTopology(GetPrimitiveTopology());
-        inputAssembly.primitiveRestartEnable = ShouldEnablePrimitiveRestart(GetPrimitiveTopology());
-
-        // A placeholder viewport/scissor info. The validation layers force use to provide at least
-        // one scissor and one viewport here, even if we choose to make them dynamic.
-        VkViewport viewportDesc;
-        viewportDesc.x = 0.0f;
-        viewportDesc.y = 0.0f;
-        viewportDesc.width = 1.0f;
-        viewportDesc.height = 1.0f;
-        viewportDesc.minDepth = 0.0f;
-        viewportDesc.maxDepth = 1.0f;
-        VkRect2D scissorRect;
-        scissorRect.offset.x = 0;
-        scissorRect.offset.y = 0;
-        scissorRect.extent.width = 1;
-        scissorRect.extent.height = 1;
-        VkPipelineViewportStateCreateInfo viewport;
-        viewport.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
-        viewport.pNext = nullptr;
-        viewport.flags = 0;
-        viewport.viewportCount = 1;
-        viewport.pViewports = &viewportDesc;
-        viewport.scissorCount = 1;
-        viewport.pScissors = &scissorRect;
-
-        VkPipelineRasterizationStateCreateInfo rasterization;
-        rasterization.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
-        rasterization.pNext = nullptr;
-        rasterization.flags = 0;
-        rasterization.depthClampEnable = ShouldClampDepth() ? VK_TRUE : VK_FALSE;
-        rasterization.rasterizerDiscardEnable = VK_FALSE;
-        rasterization.polygonMode = VK_POLYGON_MODE_FILL;
-        rasterization.cullMode = VulkanCullMode(GetCullMode());
-        rasterization.frontFace = VulkanFrontFace(GetFrontFace());
-        rasterization.depthBiasEnable = IsDepthBiasEnabled();
-        rasterization.depthBiasConstantFactor = GetDepthBias();
-        rasterization.depthBiasClamp = GetDepthBiasClamp();
-        rasterization.depthBiasSlopeFactor = GetDepthBiasSlopeScale();
-        rasterization.lineWidth = 1.0f;
-
-        VkPipelineMultisampleStateCreateInfo multisample;
-        multisample.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
-        multisample.pNext = nullptr;
-        multisample.flags = 0;
-        multisample.rasterizationSamples = VulkanSampleCount(GetSampleCount());
-        multisample.sampleShadingEnable = VK_FALSE;
-        multisample.minSampleShading = 0.0f;
-        // VkPipelineMultisampleStateCreateInfo.pSampleMask is an array of length
-        // ceil(rasterizationSamples / 32) and since we're passing a single uint32_t
-        // we have to assert that this length is indeed 1.
-        ASSERT(multisample.rasterizationSamples <= 32);
-        VkSampleMask sampleMask = GetSampleMask();
-        multisample.pSampleMask = &sampleMask;
-        multisample.alphaToCoverageEnable = IsAlphaToCoverageEnabled();
-        multisample.alphaToOneEnable = VK_FALSE;
-
-        VkPipelineDepthStencilStateCreateInfo depthStencilState =
-            ComputeDepthStencilDesc(GetDepthStencilState());
-
-        VkPipelineColorBlendStateCreateInfo colorBlend;
-        // colorBlend may hold pointers to elements in colorBlendAttachments, so it must have a
-        // definition scope as same as colorBlend
-        ityp::array<ColorAttachmentIndex, VkPipelineColorBlendAttachmentState, kMaxColorAttachments>
-            colorBlendAttachments;
-        if (GetStageMask() & wgpu::ShaderStage::Fragment) {
-            // Initialize the "blend state info" that will be chained in the "create info" from the
-            // data pre-computed in the ColorState
-            for (auto& blend : colorBlendAttachments) {
-                blend.blendEnable = VK_FALSE;
-                blend.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
-                blend.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
-                blend.colorBlendOp = VK_BLEND_OP_ADD;
-                blend.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
-                blend.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
-                blend.alphaBlendOp = VK_BLEND_OP_ADD;
-                blend.colorWriteMask = 0;
-            }
-
-            const auto& fragmentOutputsWritten =
-                GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
-            ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
-                GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
-            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
-                const ColorTargetState* target = GetColorTargetState(i);
-                colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
-            }
-
-            colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
-            colorBlend.pNext = nullptr;
-            colorBlend.flags = 0;
-            // LogicOp isn't supported so we disable it.
-            colorBlend.logicOpEnable = VK_FALSE;
-            colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
-            colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
-            colorBlend.pAttachments = colorBlendAttachments.data();
-            // The blend constant is always dynamic so we fill in a placeholder value
-            colorBlend.blendConstants[0] = 0.0f;
-            colorBlend.blendConstants[1] = 0.0f;
-            colorBlend.blendConstants[2] = 0.0f;
-            colorBlend.blendConstants[3] = 0.0f;
+        const auto& fragmentOutputsWritten =
+            GetStage(SingleShaderStage::Fragment).metadata->fragmentOutputsWritten;
+        ColorAttachmentIndex highestColorAttachmentIndexPlusOne =
+            GetHighestBitIndexPlusOne(GetColorAttachmentsMask());
+        for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+            const ColorTargetState* target = GetColorTargetState(i);
+            colorBlendAttachments[i] = ComputeColorDesc(target, fragmentOutputsWritten[i]);
         }
 
-        // Tag all state as dynamic but stencil masks and depth bias.
-        VkDynamicState dynamicStates[] = {
-            VK_DYNAMIC_STATE_VIEWPORT,     VK_DYNAMIC_STATE_SCISSOR,
-            VK_DYNAMIC_STATE_LINE_WIDTH,   VK_DYNAMIC_STATE_BLEND_CONSTANTS,
-            VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
-        };
-        VkPipelineDynamicStateCreateInfo dynamic;
-        dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
-        dynamic.pNext = nullptr;
-        dynamic.flags = 0;
-        dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
-        dynamic.pDynamicStates = dynamicStates;
+        colorBlend.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+        colorBlend.pNext = nullptr;
+        colorBlend.flags = 0;
+        // LogicOp isn't supported so we disable it.
+        colorBlend.logicOpEnable = VK_FALSE;
+        colorBlend.logicOp = VK_LOGIC_OP_CLEAR;
+        colorBlend.attachmentCount = static_cast<uint8_t>(highestColorAttachmentIndexPlusOne);
+        colorBlend.pAttachments = colorBlendAttachments.data();
+        // The blend constant is always dynamic so we fill in a placeholder value
+        colorBlend.blendConstants[0] = 0.0f;
+        colorBlend.blendConstants[1] = 0.0f;
+        colorBlend.blendConstants[2] = 0.0f;
+        colorBlend.blendConstants[3] = 0.0f;
+    }
 
-        // Get a VkRenderPass that matches the attachment formats for this pipeline, load/store ops
-        // don't matter so set them all to LoadOp::Load / StoreOp::Store. Whether the render pass
-        // has resolve target and whether depth/stencil attachment is read-only also don't matter,
-        // so set them both to false.
-        VkRenderPass renderPass = VK_NULL_HANDLE;
-        {
-            RenderPassCacheQuery query;
+    // Tag all state as dynamic but stencil masks and depth bias.
+    VkDynamicState dynamicStates[] = {
+        VK_DYNAMIC_STATE_VIEWPORT,     VK_DYNAMIC_STATE_SCISSOR,
+        VK_DYNAMIC_STATE_LINE_WIDTH,   VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+        VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+    };
+    VkPipelineDynamicStateCreateInfo dynamic;
+    dynamic.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+    dynamic.pNext = nullptr;
+    dynamic.flags = 0;
+    dynamic.dynamicStateCount = sizeof(dynamicStates) / sizeof(dynamicStates[0]);
+    dynamic.pDynamicStates = dynamicStates;
 
-            for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
-                query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load,
-                               wgpu::StoreOp::Store, false);
-            }
+    // Get a VkRenderPass that matches the attachment formats for this pipeline. Load/store ops
+    // don't matter so set them all to LoadOp::Load / StoreOp::Store. Whether the render pass
+    // has a resolve target and whether the depth/stencil attachment is read-only also don't
+    // matter, so set them both to false.
+    VkRenderPass renderPass = VK_NULL_HANDLE;
+    {
+        RenderPassCacheQuery query;
 
-            if (HasDepthStencilAttachment()) {
-                query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load,
-                                      wgpu::StoreOp::Store, wgpu::LoadOp::Load,
-                                      wgpu::StoreOp::Store, false);
-            }
-
-            query.SetSampleCount(GetSampleCount());
-
-            GetCacheKey()->Record(query);
-            DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
+        for (ColorAttachmentIndex i : IterateBitSet(GetColorAttachmentsMask())) {
+            query.SetColor(i, GetColorAttachmentFormat(i), wgpu::LoadOp::Load, wgpu::StoreOp::Store,
+                           false);
         }
 
-        // The create info chains in a bunch of things created on the stack here or inside state
-        // objects.
-        VkGraphicsPipelineCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.stageCount = stageCount;
-        createInfo.pStages = shaderStages.data();
-        createInfo.pVertexInputState = &vertexInputCreateInfo;
-        createInfo.pInputAssemblyState = &inputAssembly;
-        createInfo.pTessellationState = nullptr;
-        createInfo.pViewportState = &viewport;
-        createInfo.pRasterizationState = &rasterization;
-        createInfo.pMultisampleState = &multisample;
-        createInfo.pDepthStencilState = &depthStencilState;
-        createInfo.pColorBlendState =
-            (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
-        createInfo.pDynamicState = &dynamic;
-        createInfo.layout = ToBackend(GetLayout())->GetHandle();
-        createInfo.renderPass = renderPass;
-        createInfo.subpass = 0;
-        createInfo.basePipelineHandle = VkPipeline{};
-        createInfo.basePipelineIndex = -1;
-
-        // Record cache key information now since createInfo is not stored.
-        GetCacheKey()->Record(createInfo,
-                              static_cast<const RenderPipeline*>(this)->GetLayout()->GetCacheKey());
-
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{}, 1,
-                                               &createInfo, nullptr, &*mHandle),
-            "CreateGraphicsPipeline"));
-
-        SetLabelImpl();
-
-        return {};
-    }
-
-    void RenderPipeline::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_RenderPipeline", GetLabel());
-    }
-
-    VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
-        PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
-        // Fill in the "binding info" that will be chained in the create info
-        uint32_t bindingCount = 0;
-        for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
-            const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);
-
-            VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
-            bindingDesc->binding = static_cast<uint8_t>(slot);
-            bindingDesc->stride = bindingInfo.arrayStride;
-            bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
-
-            bindingCount++;
+        if (HasDepthStencilAttachment()) {
+            query.SetDepthStencil(GetDepthStencilFormat(), wgpu::LoadOp::Load, wgpu::StoreOp::Store,
+                                  wgpu::LoadOp::Load, wgpu::StoreOp::Store, false);
         }
 
-        // Fill in the "attribute info" that will be chained in the create info
-        uint32_t attributeCount = 0;
-        for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
-            const VertexAttributeInfo& attributeInfo = GetAttribute(loc);
+        query.SetSampleCount(GetSampleCount());
 
-            VkVertexInputAttributeDescription* attributeDesc =
-                &tempAllocations->attributes[attributeCount];
-            attributeDesc->location = static_cast<uint8_t>(loc);
-            attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
-            attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
-            attributeDesc->offset = attributeInfo.offset;
-
-            attributeCount++;
-        }
-
-        // Build the create info
-        VkPipelineVertexInputStateCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.vertexBindingDescriptionCount = bindingCount;
-        createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
-        createInfo.vertexAttributeDescriptionCount = attributeCount;
-        createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
-        return createInfo;
+        GetCacheKey()->Record(query);
+        DAWN_TRY_ASSIGN(renderPass, device->GetRenderPassCache()->GetRenderPass(query));
     }
 
-    RenderPipeline::~RenderPipeline() = default;
+    // The create info chains in a bunch of things created on the stack here or inside state
+    // objects.
+    VkGraphicsPipelineCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.stageCount = stageCount;
+    createInfo.pStages = shaderStages.data();
+    createInfo.pVertexInputState = &vertexInputCreateInfo;
+    createInfo.pInputAssemblyState = &inputAssembly;
+    createInfo.pTessellationState = nullptr;
+    createInfo.pViewportState = &viewport;
+    createInfo.pRasterizationState = &rasterization;
+    createInfo.pMultisampleState = &multisample;
+    createInfo.pDepthStencilState = &depthStencilState;
+    createInfo.pColorBlendState =
+        (GetStageMask() & wgpu::ShaderStage::Fragment) ? &colorBlend : nullptr;
+    createInfo.pDynamicState = &dynamic;
+    createInfo.layout = ToBackend(GetLayout())->GetHandle();
+    createInfo.renderPass = renderPass;
+    createInfo.subpass = 0;
+    createInfo.basePipelineHandle = VkPipeline{};
+    createInfo.basePipelineIndex = -1;
 
-    void RenderPipeline::DestroyImpl() {
-        RenderPipelineBase::DestroyImpl();
-        if (mHandle != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            mHandle = VK_NULL_HANDLE;
-        }
+    // Record cache key information now since createInfo is not stored.
+    GetCacheKey()->Record(createInfo,
+                          static_cast<const RenderPipeline*>(this)->GetLayout()->GetCacheKey());
+
+    DAWN_TRY(
+        CheckVkSuccess(device->fn.CreateGraphicsPipelines(device->GetVkDevice(), VkPipelineCache{},
+                                                          1, &createInfo, nullptr, &*mHandle),
+                       "CreateGraphicsPipeline"));
+
+    SetLabelImpl();
+
+    return {};
+}
+
+void RenderPipeline::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_RenderPipeline", GetLabel());
+}
+
+VkPipelineVertexInputStateCreateInfo RenderPipeline::ComputeVertexInputDesc(
+    PipelineVertexInputStateCreateInfoTemporaryAllocations* tempAllocations) {
+    // Fill in the "binding info" that will be chained in the create info
+    uint32_t bindingCount = 0;
+    for (VertexBufferSlot slot : IterateBitSet(GetVertexBufferSlotsUsed())) {
+        const VertexBufferInfo& bindingInfo = GetVertexBuffer(slot);
+
+        VkVertexInputBindingDescription* bindingDesc = &tempAllocations->bindings[bindingCount];
+        bindingDesc->binding = static_cast<uint8_t>(slot);
+        bindingDesc->stride = bindingInfo.arrayStride;
+        bindingDesc->inputRate = VulkanInputRate(bindingInfo.stepMode);
+
+        bindingCount++;
     }
 
-    VkPipeline RenderPipeline::GetHandle() const {
-        return mHandle;
+    // Fill in the "attribute info" that will be chained in the create info
+    uint32_t attributeCount = 0;
+    for (VertexAttributeLocation loc : IterateBitSet(GetAttributeLocationsUsed())) {
+        const VertexAttributeInfo& attributeInfo = GetAttribute(loc);
+
+        VkVertexInputAttributeDescription* attributeDesc =
+            &tempAllocations->attributes[attributeCount];
+        attributeDesc->location = static_cast<uint8_t>(loc);
+        attributeDesc->binding = static_cast<uint8_t>(attributeInfo.vertexBufferSlot);
+        attributeDesc->format = VulkanVertexFormat(attributeInfo.format);
+        attributeDesc->offset = attributeInfo.offset;
+
+        attributeCount++;
     }
 
-    void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
-                                         WGPUCreateRenderPipelineAsyncCallback callback,
-                                         void* userdata) {
-        std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
-            std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
-                                                            userdata);
-        CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+    // Build the create info
+    VkPipelineVertexInputStateCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.vertexBindingDescriptionCount = bindingCount;
+    createInfo.pVertexBindingDescriptions = tempAllocations->bindings.data();
+    createInfo.vertexAttributeDescriptionCount = attributeCount;
+    createInfo.pVertexAttributeDescriptions = tempAllocations->attributes.data();
+    return createInfo;
+}
+
+RenderPipeline::~RenderPipeline() = default;
+
+void RenderPipeline::DestroyImpl() {
+    RenderPipelineBase::DestroyImpl();
+    if (mHandle != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
     }
+}
+
+VkPipeline RenderPipeline::GetHandle() const {
+    return mHandle;
+}
+
+void RenderPipeline::InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                     WGPUCreateRenderPipelineAsyncCallback callback,
+                                     void* userdata) {
+    std::unique_ptr<CreateRenderPipelineAsyncTask> asyncTask =
+        std::make_unique<CreateRenderPipelineAsyncTask>(std::move(renderPipeline), callback,
+                                                        userdata);
+    CreateRenderPipelineAsyncTask::RunAsync(std::move(asyncTask));
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/RenderPipelineVk.h b/src/dawn/native/vulkan/RenderPipelineVk.h
index 2c99f7e..2546269 100644
--- a/src/dawn/native/vulkan/RenderPipelineVk.h
+++ b/src/dawn/native/vulkan/RenderPipelineVk.h
@@ -22,37 +22,37 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class RenderPipeline final : public RenderPipelineBase {
-      public:
-        static Ref<RenderPipeline> CreateUninitialized(Device* device,
-                                                       const RenderPipelineDescriptor* descriptor);
-        static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
-                                    WGPUCreateRenderPipelineAsyncCallback callback,
-                                    void* userdata);
+class RenderPipeline final : public RenderPipelineBase {
+  public:
+    static Ref<RenderPipeline> CreateUninitialized(Device* device,
+                                                   const RenderPipelineDescriptor* descriptor);
+    static void InitializeAsync(Ref<RenderPipelineBase> renderPipeline,
+                                WGPUCreateRenderPipelineAsyncCallback callback,
+                                void* userdata);
 
-        VkPipeline GetHandle() const;
+    VkPipeline GetHandle() const;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-      private:
-        ~RenderPipeline() override;
-        void DestroyImpl() override;
-        using RenderPipelineBase::RenderPipelineBase;
+  private:
+    ~RenderPipeline() override;
+    void DestroyImpl() override;
+    using RenderPipelineBase::RenderPipelineBase;
 
-        struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
-            std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
-            std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
-        };
-        VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
-            PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
-
-        VkPipeline mHandle = VK_NULL_HANDLE;
+    struct PipelineVertexInputStateCreateInfoTemporaryAllocations {
+        std::array<VkVertexInputBindingDescription, kMaxVertexBuffers> bindings;
+        std::array<VkVertexInputAttributeDescription, kMaxVertexAttributes> attributes;
     };
+    VkPipelineVertexInputStateCreateInfo ComputeVertexInputDesc(
+        PipelineVertexInputStateCreateInfoTemporaryAllocations* temporaryAllocations);
+
+    VkPipeline mHandle = VK_NULL_HANDLE;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/ResourceHeapVk.cpp b/src/dawn/native/vulkan/ResourceHeapVk.cpp
index 94ce7fc..e1a5d19 100644
--- a/src/dawn/native/vulkan/ResourceHeapVk.cpp
+++ b/src/dawn/native/vulkan/ResourceHeapVk.cpp
@@ -16,16 +16,15 @@
 
 namespace dawn::native::vulkan {
 
-    ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
-        : mMemory(memory), mMemoryType(memoryType) {
-    }
+ResourceHeap::ResourceHeap(VkDeviceMemory memory, size_t memoryType)
+    : mMemory(memory), mMemoryType(memoryType) {}
 
-    VkDeviceMemory ResourceHeap::GetMemory() const {
-        return mMemory;
-    }
+VkDeviceMemory ResourceHeap::GetMemory() const {
+    return mMemory;
+}
 
-    size_t ResourceHeap::GetMemoryType() const {
-        return mMemoryType;
-    }
+size_t ResourceHeap::GetMemoryType() const {
+    return mMemoryType;
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ResourceHeapVk.h b/src/dawn/native/vulkan/ResourceHeapVk.h
index b7d8068..a148334 100644
--- a/src/dawn/native/vulkan/ResourceHeapVk.h
+++ b/src/dawn/native/vulkan/ResourceHeapVk.h
@@ -20,19 +20,19 @@
 
 namespace dawn::native::vulkan {
 
-    // Wrapper for physical memory used with or without a resource object.
-    class ResourceHeap : public ResourceHeapBase {
-      public:
-        ResourceHeap(VkDeviceMemory memory, size_t memoryType);
-        ~ResourceHeap() = default;
+// Wrapper for physical memory used with or without a resource object.
+class ResourceHeap : public ResourceHeapBase {
+  public:
+    ResourceHeap(VkDeviceMemory memory, size_t memoryType);
+    ~ResourceHeap() = default;
 
-        VkDeviceMemory GetMemory() const;
-        size_t GetMemoryType() const;
+    VkDeviceMemory GetMemory() const;
+    size_t GetMemoryType() const;
 
-      private:
-        VkDeviceMemory mMemory = VK_NULL_HANDLE;
-        size_t mMemoryType = 0;
-    };
+  private:
+    VkDeviceMemory mMemory = VK_NULL_HANDLE;
+    size_t mMemoryType = 0;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
index 1ec3526..390b326 100644
--- a/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
+++ b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.cpp
@@ -27,270 +27,263 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
+namespace {
 
-        // TODO(crbug.com/dawn/849): This is a hardcoded heurstic to choose when to
-        // suballocate but it should ideally depend on the size of the memory heaps and other
-        // factors.
-        constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull;  // 4MiB
+// TODO(crbug.com/dawn/849): This is a hardcoded heuristic to choose when to
+// suballocate but it should ideally depend on the size of the memory heaps and other
+// factors.
+constexpr uint64_t kMaxSizeForSubAllocation = 4ull * 1024ull * 1024ull;  // 4MiB
 
-        // Have each bucket of the buddy system allocate at least some resource of the maximum
-        // size
-        constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
+// Have each bucket of the buddy system allocate at least some resource of the maximum
+// size
+constexpr uint64_t kBuddyHeapsSize = 2 * kMaxSizeForSubAllocation;
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    // SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
-    // service suballocation requests, but for a single Vulkan memory type.
+// SingleTypeAllocator is a combination of a BuddyMemoryAllocator and its client and can
+// service suballocation requests, but for a single Vulkan memory type.
 
-    class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
-      public:
-        SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
-            : mDevice(device),
-              mMemoryTypeIndex(memoryTypeIndex),
-              mMemoryHeapSize(memoryHeapSize),
-              mPooledMemoryAllocator(this),
-              mBuddySystem(
-                  // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
-                  // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
-                  uint64_t(1) << Log2(mMemoryHeapSize),
-                  // Take the min in the very unlikely case the memory heap is tiny.
-                  std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
-                  &mPooledMemoryAllocator) {
-            ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
-        }
-        ~SingleTypeAllocator() override = default;
+class ResourceMemoryAllocator::SingleTypeAllocator : public ResourceHeapAllocator {
+  public:
+    SingleTypeAllocator(Device* device, size_t memoryTypeIndex, VkDeviceSize memoryHeapSize)
+        : mDevice(device),
+          mMemoryTypeIndex(memoryTypeIndex),
+          mMemoryHeapSize(memoryHeapSize),
+          mPooledMemoryAllocator(this),
+          mBuddySystem(
+              // Round down to a power of 2 that's <= mMemoryHeapSize. This will always
+              // be a multiple of kBuddyHeapsSize because kBuddyHeapsSize is a power of 2.
+              uint64_t(1) << Log2(mMemoryHeapSize),
+              // Take the min in the very unlikely case the memory heap is tiny.
+              std::min(uint64_t(1) << Log2(mMemoryHeapSize), kBuddyHeapsSize),
+              &mPooledMemoryAllocator) {
+        ASSERT(IsPowerOfTwo(kBuddyHeapsSize));
+    }
+    ~SingleTypeAllocator() override = default;
 
-        void DestroyPool() {
-            mPooledMemoryAllocator.DestroyPool();
+    void DestroyPool() { mPooledMemoryAllocator.DestroyPool(); }
+
+    ResultOrError<ResourceMemoryAllocation> AllocateMemory(uint64_t size, uint64_t alignment) {
+        return mBuddySystem.Allocate(size, alignment);
+    }
+
+    void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
+        mBuddySystem.Deallocate(allocation);
+    }
+
+    // Implementation of the MemoryAllocator interface to be a client of BuddyMemoryAllocator
+
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
+        if (size > mMemoryHeapSize) {
+            return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
         }
 
-        ResultOrError<ResourceMemoryAllocation> AllocateMemory(uint64_t size, uint64_t alignment) {
-            return mBuddySystem.Allocate(size, alignment);
-        }
+        VkMemoryAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        allocateInfo.pNext = nullptr;
+        allocateInfo.allocationSize = size;
+        allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
 
-        void DeallocateMemory(const ResourceMemoryAllocation& allocation) {
-            mBuddySystem.Deallocate(allocation);
-        }
+        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
 
-        // Implementation of the MemoryAllocator interface to be a client of BuddyMemoryAllocator
+        // First check OOM that we want to surface to the application.
+        DAWN_TRY(
+            CheckVkOOMThenSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+                                                             nullptr, &*allocatedMemory),
+                                  "vkAllocateMemory"));
 
-        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-            uint64_t size) override {
-            if (size > mMemoryHeapSize) {
-                return DAWN_OUT_OF_MEMORY_ERROR("Allocation size too large");
-            }
+        ASSERT(allocatedMemory != VK_NULL_HANDLE);
+        return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
+    }
 
-            VkMemoryAllocateInfo allocateInfo;
-            allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-            allocateInfo.pNext = nullptr;
-            allocateInfo.allocationSize = size;
-            allocateInfo.memoryTypeIndex = mMemoryTypeIndex;
+    void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
+        mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
+    }
 
-            VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+  private:
+    Device* mDevice;
+    size_t mMemoryTypeIndex;
+    VkDeviceSize mMemoryHeapSize;
+    PooledResourceMemoryAllocator mPooledMemoryAllocator;
+    BuddyMemoryAllocator mBuddySystem;
+};
 
-            // First check OOM that we want to surface to the application.
-            DAWN_TRY(CheckVkOOMThenSuccess(
-                mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo, nullptr,
-                                           &*allocatedMemory),
-                "vkAllocateMemory"));
+// Implementation of ResourceMemoryAllocator
 
-            ASSERT(allocatedMemory != VK_NULL_HANDLE);
-            return {std::make_unique<ResourceHeap>(allocatedMemory, mMemoryTypeIndex)};
-        }
+ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
+    const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+    mAllocatorsPerType.reserve(info.memoryTypes.size());
 
-        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
-            mDevice->GetFencedDeleter()->DeleteWhenUnused(ToBackend(allocation.get())->GetMemory());
-        }
+    for (size_t i = 0; i < info.memoryTypes.size(); i++) {
+        mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
+            mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
+    }
+}
 
-      private:
-        Device* mDevice;
-        size_t mMemoryTypeIndex;
-        VkDeviceSize mMemoryHeapSize;
-        PooledResourceMemoryAllocator mPooledMemoryAllocator;
-        BuddyMemoryAllocator mBuddySystem;
-    };
+ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
 
-    // Implementation of ResourceMemoryAllocator
+ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
+    const VkMemoryRequirements& requirements,
+    MemoryKind kind) {
+    // The Vulkan spec guarantees at least one memory type is valid.
+    int memoryType = FindBestTypeIndex(requirements, kind);
+    ASSERT(memoryType >= 0);
 
-    ResourceMemoryAllocator::ResourceMemoryAllocator(Device* device) : mDevice(device) {
-        const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
-        mAllocatorsPerType.reserve(info.memoryTypes.size());
+    VkDeviceSize size = requirements.size;
 
-        for (size_t i = 0; i < info.memoryTypes.size(); i++) {
-            mAllocatorsPerType.emplace_back(std::make_unique<SingleTypeAllocator>(
-                mDevice, i, info.memoryHeaps[info.memoryTypes[i].heapIndex].size));
+    // Sub-allocate non-mappable resources because at the moment the mapped pointer
+    // is part of the resource and not the heap, which doesn't match the Vulkan model.
+    // TODO(crbug.com/dawn/849): allow sub-allocating mappable resources, maybe.
+    if (requirements.size < kMaxSizeForSubAllocation && kind != MemoryKind::LinearMappable &&
+        !mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
+        // When sub-allocating, Vulkan requires that we respect bufferImageGranularity. Some
+        // hardware puts information on the memory's page table entry and allocating a linear
+        // resource in the same page as a non-linear (aka opaque) resource can cause issues.
+        // Probably because some texture compression flags are stored on the page table entry,
+        // and allocating a linear resource removes these flags.
+        //
+        // Anyway, just to be safe we ask that all sub-allocated resources are allocated with at
+        // least this alignment. TODO(crbug.com/dawn/849): this is suboptimal because multiple
+        // linear (resp. opaque) resources can coexist in the same page. In particular Nvidia
+        // GPUs often use a granularity of 64k which will lead to a lot of wasted space. Revisit
+        // with a more efficient algorithm later.
+        uint64_t alignment =
+            std::max(requirements.alignment,
+                     mDevice->GetDeviceInfo().properties.limits.bufferImageGranularity);
+
+        ResourceMemoryAllocation subAllocation;
+        DAWN_TRY_ASSIGN(subAllocation, mAllocatorsPerType[memoryType]->AllocateMemory(
+                                           requirements.size, alignment));
+        if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
+            return std::move(subAllocation);
         }
     }
 
-    ResourceMemoryAllocator::~ResourceMemoryAllocator() = default;
+    // If sub-allocation failed, allocate memory just for it.
+    std::unique_ptr<ResourceHeapBase> resourceHeap;
+    DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
 
-    ResultOrError<ResourceMemoryAllocation> ResourceMemoryAllocator::Allocate(
-        const VkMemoryRequirements& requirements,
-        MemoryKind kind) {
-        // The Vulkan spec guarantees at least on memory type is valid.
-        int memoryType = FindBestTypeIndex(requirements, kind);
-        ASSERT(memoryType >= 0);
-
-        VkDeviceSize size = requirements.size;
-
-        // Sub-allocate non-mappable resources because at the moment the mapped pointer
-        // is part of the resource and not the heap, which doesn't match the Vulkan model.
-        // TODO(crbug.com/dawn/849): allow sub-allocating mappable resources, maybe.
-        if (requirements.size < kMaxSizeForSubAllocation && kind != MemoryKind::LinearMappable &&
-            !mDevice->IsToggleEnabled(Toggle::DisableResourceSuballocation)) {
-            // When sub-allocating, Vulkan requires that we respect bufferImageGranularity. Some
-            // hardware puts information on the memory's page table entry and allocating a linear
-            // resource in the same page as a non-linear (aka opaque) resource can cause issues.
-            // Probably because some texture compression flags are stored on the page table entry,
-            // and allocating a linear resource removes these flags.
-            //
-            // Anyway, just to be safe we ask that all sub-allocated resources are allocated with at
-            // least this alignment. TODO(crbug.com/dawn/849): this is suboptimal because multiple
-            // linear (resp. opaque) resources can coexist in the same page. In particular Nvidia
-            // GPUs often use a granularity of 64k which will lead to a lot of wasted spec. Revisit
-            // with a more efficient algorithm later.
-            uint64_t alignment =
-                std::max(requirements.alignment,
-                         mDevice->GetDeviceInfo().properties.limits.bufferImageGranularity);
-
-            ResourceMemoryAllocation subAllocation;
-            DAWN_TRY_ASSIGN(subAllocation, mAllocatorsPerType[memoryType]->AllocateMemory(
-                                               requirements.size, alignment));
-            if (subAllocation.GetInfo().mMethod != AllocationMethod::kInvalid) {
-                return std::move(subAllocation);
-            }
-        }
-
-        // If sub-allocation failed, allocate memory just for it.
-        std::unique_ptr<ResourceHeapBase> resourceHeap;
-        DAWN_TRY_ASSIGN(resourceHeap, mAllocatorsPerType[memoryType]->AllocateResourceHeap(size));
-
-        void* mappedPointer = nullptr;
-        if (kind == MemoryKind::LinearMappable) {
-            DAWN_TRY_WITH_CLEANUP(
-                CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
-                                                     ToBackend(resourceHeap.get())->GetMemory(), 0,
-                                                     size, 0, &mappedPointer),
-                               "vkMapMemory"),
-                {
-                    mAllocatorsPerType[memoryType]->DeallocateResourceHeap(std::move(resourceHeap));
-                });
-        }
-
-        AllocationInfo info;
-        info.mMethod = AllocationMethod::kDirect;
-        return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
-                                        static_cast<uint8_t*>(mappedPointer));
+    void* mappedPointer = nullptr;
+    if (kind == MemoryKind::LinearMappable) {
+        DAWN_TRY_WITH_CLEANUP(
+            CheckVkSuccess(mDevice->fn.MapMemory(mDevice->GetVkDevice(),
+                                                 ToBackend(resourceHeap.get())->GetMemory(), 0,
+                                                 size, 0, &mappedPointer),
+                           "vkMapMemory"),
+            { mAllocatorsPerType[memoryType]->DeallocateResourceHeap(std::move(resourceHeap)); });
     }
 
-    void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
-        switch (allocation->GetInfo().mMethod) {
-            // Some memory allocation can never be initialized, for example when wrapping
-            // swapchain VkImages with a Texture.
-            case AllocationMethod::kInvalid:
-                break;
+    AllocationInfo info;
+    info.mMethod = AllocationMethod::kDirect;
+    return ResourceMemoryAllocation(info, /*offset*/ 0, resourceHeap.release(),
+                                    static_cast<uint8_t*>(mappedPointer));
+}
 
-            // For direct allocation we can put the memory for deletion immediately and the fence
-            // deleter will make sure the resources are freed before the memory.
-            case AllocationMethod::kDirect: {
-                ResourceHeap* heap = ToBackend(allocation->GetResourceHeap());
-                allocation->Invalidate();
-                mDevice->GetFencedDeleter()->DeleteWhenUnused(heap->GetMemory());
-                delete heap;
-                break;
-            }
+void ResourceMemoryAllocator::Deallocate(ResourceMemoryAllocation* allocation) {
+    switch (allocation->GetInfo().mMethod) {
+        // Some memory allocation can never be initialized, for example when wrapping
+        // swapchain VkImages with a Texture.
+        case AllocationMethod::kInvalid:
+            break;
 
-            // Suballocations aren't freed immediately, otherwise another resource allocation could
-            // happen just after that aliases the old one and would require a barrier.
-            // TODO(crbug.com/dawn/851): Maybe we can produce the correct barriers to reduce the
-            // latency to reclaim memory.
-            case AllocationMethod::kSubAllocated:
-                mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
-                break;
-
-            default:
-                UNREACHABLE();
-                break;
+        // For direct allocation we can put the memory for deletion immediately and the fence
+        // deleter will make sure the resources are freed before the memory.
+        case AllocationMethod::kDirect: {
+            ResourceHeap* heap = ToBackend(allocation->GetResourceHeap());
+            allocation->Invalidate();
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(heap->GetMemory());
+            delete heap;
+            break;
         }
 
-        // Invalidate the underlying resource heap in case the client accidentally
-        // calls DeallocateMemory again using the same allocation.
-        allocation->Invalidate();
+        // Suballocations aren't freed immediately, otherwise another resource allocation could
+        // happen just after that aliases the old one and would require a barrier.
+        // TODO(crbug.com/dawn/851): Maybe we can produce the correct barriers to reduce the
+        // latency to reclaim memory.
+        case AllocationMethod::kSubAllocated:
+            mSubAllocationsToDelete.Enqueue(*allocation, mDevice->GetPendingCommandSerial());
+            break;
+
+        default:
+            UNREACHABLE();
+            break;
     }
 
-    void ResourceMemoryAllocator::Tick(ExecutionSerial completedSerial) {
-        for (const ResourceMemoryAllocation& allocation :
-             mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
-            ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
-            size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
+    // Invalidate the underlying resource heap in case the client accidentally
+    // calls DeallocateMemory again using the same allocation.
+    allocation->Invalidate();
+}
 
-            mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
+void ResourceMemoryAllocator::Tick(ExecutionSerial completedSerial) {
+    for (const ResourceMemoryAllocation& allocation :
+         mSubAllocationsToDelete.IterateUpTo(completedSerial)) {
+        ASSERT(allocation.GetInfo().mMethod == AllocationMethod::kSubAllocated);
+        size_t memoryType = ToBackend(allocation.GetResourceHeap())->GetMemoryType();
+
+        mAllocatorsPerType[memoryType]->DeallocateMemory(allocation);
+    }
+
+    mSubAllocationsToDelete.ClearUpTo(completedSerial);
+}
+
+int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind) {
+    const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
+    bool mappable = kind == MemoryKind::LinearMappable;
+
+    // Find a suitable memory type for this allocation
+    int bestType = -1;
+    for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
+        // Resource must support this memory type
+        if ((requirements.memoryTypeBits & (1 << i)) == 0) {
+            continue;
         }
 
-        mSubAllocationsToDelete.ClearUpTo(completedSerial);
-    }
+        // Mappable resource must be host visible
+        if (mappable &&
+            (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+            continue;
+        }
 
-    int ResourceMemoryAllocator::FindBestTypeIndex(VkMemoryRequirements requirements,
-                                                   MemoryKind kind) {
-        const VulkanDeviceInfo& info = mDevice->GetDeviceInfo();
-        bool mappable = kind == MemoryKind::LinearMappable;
+        // Mappable must also be host coherent.
+        if (mappable &&
+            (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
+            continue;
+        }
 
-        // Find a suitable memory type for this allocation
-        int bestType = -1;
-        for (size_t i = 0; i < info.memoryTypes.size(); ++i) {
-            // Resource must support this memory type
-            if ((requirements.memoryTypeBits & (1 << i)) == 0) {
-                continue;
-            }
+        // Found the first candidate memory type
+        if (bestType == -1) {
+            bestType = static_cast<int>(i);
+            continue;
+        }
 
-            // Mappable resource must be host visible
-            if (mappable &&
-                (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
-                continue;
-            }
-
-            // Mappable must also be host coherent.
-            if (mappable &&
-                (info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) {
-                continue;
-            }
-
-            // Found the first candidate memory type
-            if (bestType == -1) {
+        // For non-mappable resources, favor device local memory.
+        bool currentDeviceLocal =
+            info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+        bool bestDeviceLocal =
+            info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+        if (!mappable && (currentDeviceLocal != bestDeviceLocal)) {
+            if (currentDeviceLocal) {
                 bestType = static_cast<int>(i);
-                continue;
             }
-
-            // For non-mappable resources, favor device local memory.
-            bool currentDeviceLocal =
-                info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-            bool bestDeviceLocal =
-                info.memoryTypes[bestType].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-            if (!mappable && (currentDeviceLocal != bestDeviceLocal)) {
-                if (currentDeviceLocal) {
-                    bestType = static_cast<int>(i);
-                }
-                continue;
-            }
-
-            // All things equal favor the memory in the biggest heap
-            VkDeviceSize bestTypeHeapSize =
-                info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
-            VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
-            if (candidateHeapSize > bestTypeHeapSize) {
-                bestType = static_cast<int>(i);
-                continue;
-            }
+            continue;
         }
 
-        return bestType;
-    }
-
-    void ResourceMemoryAllocator::DestroyPool() {
-        for (auto& alloc : mAllocatorsPerType) {
-            alloc->DestroyPool();
+        // All things equal favor the memory in the biggest heap
+        VkDeviceSize bestTypeHeapSize = info.memoryHeaps[info.memoryTypes[bestType].heapIndex].size;
+        VkDeviceSize candidateHeapSize = info.memoryHeaps[info.memoryTypes[i].heapIndex].size;
+        if (candidateHeapSize > bestTypeHeapSize) {
+            bestType = static_cast<int>(i);
+            continue;
         }
     }
 
+    return bestType;
+}
+
+void ResourceMemoryAllocator::DestroyPool() {
+    for (auto& alloc : mAllocatorsPerType) {
+        alloc->DestroyPool();
+    }
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
index 9616fd5..1ece6d7 100644
--- a/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
+++ b/src/dawn/native/vulkan/ResourceMemoryAllocatorVk.h
@@ -27,39 +27,39 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    // Various kinds of memory that influence the result of the allocation. For example, to take
-    // into account mappability and Vulkan's bufferImageGranularity.
-    enum class MemoryKind {
-        Linear,
-        LinearMappable,
-        Opaque,
-    };
+// Various kinds of memory that influence the result of the allocation. For example, to take
+// into account mappability and Vulkan's bufferImageGranularity.
+enum class MemoryKind {
+    Linear,
+    LinearMappable,
+    Opaque,
+};
 
-    class ResourceMemoryAllocator {
-      public:
-        explicit ResourceMemoryAllocator(Device* device);
-        ~ResourceMemoryAllocator();
+class ResourceMemoryAllocator {
+  public:
+    explicit ResourceMemoryAllocator(Device* device);
+    ~ResourceMemoryAllocator();
 
-        ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
-                                                         MemoryKind kind);
-        void Deallocate(ResourceMemoryAllocation* allocation);
+    ResultOrError<ResourceMemoryAllocation> Allocate(const VkMemoryRequirements& requirements,
+                                                     MemoryKind kind);
+    void Deallocate(ResourceMemoryAllocation* allocation);
 
-        void DestroyPool();
+    void DestroyPool();
 
-        void Tick(ExecutionSerial completedSerial);
+    void Tick(ExecutionSerial completedSerial);
 
-        int FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind);
+    int FindBestTypeIndex(VkMemoryRequirements requirements, MemoryKind kind);
 
-      private:
-        Device* mDevice;
+  private:
+    Device* mDevice;
 
-        class SingleTypeAllocator;
-        std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
+    class SingleTypeAllocator;
+    std::vector<std::unique_ptr<SingleTypeAllocator>> mAllocatorsPerType;
 
-        SerialQueue<ExecutionSerial, ResourceMemoryAllocation> mSubAllocationsToDelete;
-    };
+    SerialQueue<ExecutionSerial, ResourceMemoryAllocation> mSubAllocationsToDelete;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/SamplerVk.cpp b/src/dawn/native/vulkan/SamplerVk.cpp
index bac9b92..fb46491 100644
--- a/src/dawn/native/vulkan/SamplerVk.cpp
+++ b/src/dawn/native/vulkan/SamplerVk.cpp
@@ -23,110 +23,109 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
-        VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
-            switch (mode) {
-                case wgpu::AddressMode::Repeat:
-                    return VK_SAMPLER_ADDRESS_MODE_REPEAT;
-                case wgpu::AddressMode::MirrorRepeat:
-                    return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
-                case wgpu::AddressMode::ClampToEdge:
-                    return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
-            }
-            UNREACHABLE();
-        }
+namespace {
+VkSamplerAddressMode VulkanSamplerAddressMode(wgpu::AddressMode mode) {
+    switch (mode) {
+        case wgpu::AddressMode::Repeat:
+            return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+        case wgpu::AddressMode::MirrorRepeat:
+            return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+        case wgpu::AddressMode::ClampToEdge:
+            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+    }
+    UNREACHABLE();
+}
 
-        VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
-            switch (filter) {
-                case wgpu::FilterMode::Linear:
-                    return VK_FILTER_LINEAR;
-                case wgpu::FilterMode::Nearest:
-                    return VK_FILTER_NEAREST;
-            }
-            UNREACHABLE();
-        }
+VkFilter VulkanSamplerFilter(wgpu::FilterMode filter) {
+    switch (filter) {
+        case wgpu::FilterMode::Linear:
+            return VK_FILTER_LINEAR;
+        case wgpu::FilterMode::Nearest:
+            return VK_FILTER_NEAREST;
+    }
+    UNREACHABLE();
+}
 
-        VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
-            switch (filter) {
-                case wgpu::FilterMode::Linear:
-                    return VK_SAMPLER_MIPMAP_MODE_LINEAR;
-                case wgpu::FilterMode::Nearest:
-                    return VK_SAMPLER_MIPMAP_MODE_NEAREST;
-            }
-            UNREACHABLE();
-        }
-    }  // anonymous namespace
+VkSamplerMipmapMode VulkanMipMapMode(wgpu::FilterMode filter) {
+    switch (filter) {
+        case wgpu::FilterMode::Linear:
+            return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+        case wgpu::FilterMode::Nearest:
+            return VK_SAMPLER_MIPMAP_MODE_NEAREST;
+    }
+    UNREACHABLE();
+}
+}  // anonymous namespace
 
-    // static
-    ResultOrError<Ref<Sampler>> Sampler::Create(Device* device,
-                                                const SamplerDescriptor* descriptor) {
-        Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
-        DAWN_TRY(sampler->Initialize(descriptor));
-        return sampler;
+// static
+ResultOrError<Ref<Sampler>> Sampler::Create(Device* device, const SamplerDescriptor* descriptor) {
+    Ref<Sampler> sampler = AcquireRef(new Sampler(device, descriptor));
+    DAWN_TRY(sampler->Initialize(descriptor));
+    return sampler;
+}
+
+MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
+    VkSamplerCreateInfo createInfo = {};
+    createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.magFilter = VulkanSamplerFilter(descriptor->magFilter);
+    createInfo.minFilter = VulkanSamplerFilter(descriptor->minFilter);
+    createInfo.mipmapMode = VulkanMipMapMode(descriptor->mipmapFilter);
+    createInfo.addressModeU = VulkanSamplerAddressMode(descriptor->addressModeU);
+    createInfo.addressModeV = VulkanSamplerAddressMode(descriptor->addressModeV);
+    createInfo.addressModeW = VulkanSamplerAddressMode(descriptor->addressModeW);
+    createInfo.mipLodBias = 0.0f;
+    if (descriptor->compare != wgpu::CompareFunction::Undefined) {
+        createInfo.compareOp = ToVulkanCompareOp(descriptor->compare);
+        createInfo.compareEnable = VK_TRUE;
+    } else {
+        // Still set the compareOp so it's not garbage.
+        createInfo.compareOp = VK_COMPARE_OP_NEVER;
+        createInfo.compareEnable = VK_FALSE;
+    }
+    createInfo.minLod = descriptor->lodMinClamp;
+    createInfo.maxLod = descriptor->lodMaxClamp;
+    createInfo.unnormalizedCoordinates = VK_FALSE;
+
+    Device* device = ToBackend(GetDevice());
+    uint16_t maxAnisotropy = GetMaxAnisotropy();
+    if (device->GetDeviceInfo().features.samplerAnisotropy == VK_TRUE && maxAnisotropy > 1) {
+        createInfo.anisotropyEnable = VK_TRUE;
+        // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSamplerCreateInfo.html
+        createInfo.maxAnisotropy =
+            std::min(static_cast<float>(maxAnisotropy),
+                     device->GetDeviceInfo().properties.limits.maxSamplerAnisotropy);
+    } else {
+        createInfo.anisotropyEnable = VK_FALSE;
+        createInfo.maxAnisotropy = 1;
     }
 
-    MaybeError Sampler::Initialize(const SamplerDescriptor* descriptor) {
-        VkSamplerCreateInfo createInfo = {};
-        createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.magFilter = VulkanSamplerFilter(descriptor->magFilter);
-        createInfo.minFilter = VulkanSamplerFilter(descriptor->minFilter);
-        createInfo.mipmapMode = VulkanMipMapMode(descriptor->mipmapFilter);
-        createInfo.addressModeU = VulkanSamplerAddressMode(descriptor->addressModeU);
-        createInfo.addressModeV = VulkanSamplerAddressMode(descriptor->addressModeV);
-        createInfo.addressModeW = VulkanSamplerAddressMode(descriptor->addressModeW);
-        createInfo.mipLodBias = 0.0f;
-        if (descriptor->compare != wgpu::CompareFunction::Undefined) {
-            createInfo.compareOp = ToVulkanCompareOp(descriptor->compare);
-            createInfo.compareEnable = VK_TRUE;
-        } else {
-            // Still set the compareOp so it's not garbage.
-            createInfo.compareOp = VK_COMPARE_OP_NEVER;
-            createInfo.compareEnable = VK_FALSE;
-        }
-        createInfo.minLod = descriptor->lodMinClamp;
-        createInfo.maxLod = descriptor->lodMaxClamp;
-        createInfo.unnormalizedCoordinates = VK_FALSE;
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+        "CreateSampler"));
 
-        Device* device = ToBackend(GetDevice());
-        uint16_t maxAnisotropy = GetMaxAnisotropy();
-        if (device->GetDeviceInfo().features.samplerAnisotropy == VK_TRUE && maxAnisotropy > 1) {
-            createInfo.anisotropyEnable = VK_TRUE;
-            // https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VkSamplerCreateInfo.html
-            createInfo.maxAnisotropy =
-                std::min(static_cast<float>(maxAnisotropy),
-                         device->GetDeviceInfo().properties.limits.maxSamplerAnisotropy);
-        } else {
-            createInfo.anisotropyEnable = VK_FALSE;
-            createInfo.maxAnisotropy = 1;
-        }
+    SetLabelImpl();
 
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.CreateSampler(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
-            "CreateSampler"));
+    return {};
+}
 
-        SetLabelImpl();
+Sampler::~Sampler() = default;
 
-        return {};
+void Sampler::DestroyImpl() {
+    SamplerBase::DestroyImpl();
+    if (mHandle != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
     }
+}
 
-    Sampler::~Sampler() = default;
+VkSampler Sampler::GetHandle() const {
+    return mHandle;
+}
 
-    void Sampler::DestroyImpl() {
-        SamplerBase::DestroyImpl();
-        if (mHandle != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            mHandle = VK_NULL_HANDLE;
-        }
-    }
-
-    VkSampler Sampler::GetHandle() const {
-        return mHandle;
-    }
-
-    void Sampler::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Sampler", GetLabel());
-    }
+void Sampler::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_Sampler", GetLabel());
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/SamplerVk.h b/src/dawn/native/vulkan/SamplerVk.h
index 078e02d..038e1bc9 100644
--- a/src/dawn/native/vulkan/SamplerVk.h
+++ b/src/dawn/native/vulkan/SamplerVk.h
@@ -22,26 +22,25 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class Sampler final : public SamplerBase {
-      public:
-        static ResultOrError<Ref<Sampler>> Create(Device* device,
-                                                  const SamplerDescriptor* descriptor);
+class Sampler final : public SamplerBase {
+  public:
+    static ResultOrError<Ref<Sampler>> Create(Device* device, const SamplerDescriptor* descriptor);
 
-        VkSampler GetHandle() const;
+    VkSampler GetHandle() const;
 
-      private:
-        ~Sampler() override;
-        void DestroyImpl() override;
-        using SamplerBase::SamplerBase;
-        MaybeError Initialize(const SamplerDescriptor* descriptor);
+  private:
+    ~Sampler() override;
+    void DestroyImpl() override;
+    using SamplerBase::SamplerBase;
+    MaybeError Initialize(const SamplerDescriptor* descriptor);
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-        VkSampler mHandle = VK_NULL_HANDLE;
-    };
+    VkSampler mHandle = VK_NULL_HANDLE;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/ShaderModuleVk.cpp b/src/dawn/native/vulkan/ShaderModuleVk.cpp
index cb69cb0..6b82a6c 100644
--- a/src/dawn/native/vulkan/ShaderModuleVk.cpp
+++ b/src/dawn/native/vulkan/ShaderModuleVk.cpp
@@ -32,227 +32,223 @@
 
 namespace dawn::native::vulkan {
 
-    ShaderModule::ConcurrentTransformedShaderModuleCache::ConcurrentTransformedShaderModuleCache(
-        Device* device)
-        : mDevice(device) {
-    }
+ShaderModule::ConcurrentTransformedShaderModuleCache::ConcurrentTransformedShaderModuleCache(
+    Device* device)
+    : mDevice(device) {}
 
-    ShaderModule::ConcurrentTransformedShaderModuleCache::
-        ~ConcurrentTransformedShaderModuleCache() {
-        std::lock_guard<std::mutex> lock(mMutex);
-        for (const auto& [_, moduleAndSpirv] : mTransformedShaderModuleCache) {
-            mDevice->GetFencedDeleter()->DeleteWhenUnused(moduleAndSpirv.first);
-        }
+ShaderModule::ConcurrentTransformedShaderModuleCache::~ConcurrentTransformedShaderModuleCache() {
+    std::lock_guard<std::mutex> lock(mMutex);
+    for (const auto& [_, moduleAndSpirv] : mTransformedShaderModuleCache) {
+        mDevice->GetFencedDeleter()->DeleteWhenUnused(moduleAndSpirv.first);
     }
+}
 
-    std::optional<ShaderModule::ModuleAndSpirv>
-    ShaderModule::ConcurrentTransformedShaderModuleCache::Find(
-        const PipelineLayoutEntryPointPair& key) {
-        std::lock_guard<std::mutex> lock(mMutex);
-        auto iter = mTransformedShaderModuleCache.find(key);
-        if (iter != mTransformedShaderModuleCache.end()) {
-            return std::make_pair(iter->second.first, iter->second.second.get());
-        }
-        return {};
-    }
-
-    ShaderModule::ModuleAndSpirv ShaderModule::ConcurrentTransformedShaderModuleCache::AddOrGet(
-        const PipelineLayoutEntryPointPair& key,
-        VkShaderModule module,
-        std::vector<uint32_t>&& spirv) {
-        ASSERT(module != VK_NULL_HANDLE);
-        std::lock_guard<std::mutex> lock(mMutex);
-        auto iter = mTransformedShaderModuleCache.find(key);
-        if (iter == mTransformedShaderModuleCache.end()) {
-            mTransformedShaderModuleCache.emplace(
-                key, std::make_pair(module, std::unique_ptr<Spirv>(new Spirv(spirv))));
-        } else {
-            mDevice->GetFencedDeleter()->DeleteWhenUnused(module);
-        }
-        // Now the key should exist in the map, so find it again and return it.
-        iter = mTransformedShaderModuleCache.find(key);
+std::optional<ShaderModule::ModuleAndSpirv>
+ShaderModule::ConcurrentTransformedShaderModuleCache::Find(
+    const PipelineLayoutEntryPointPair& key) {
+    std::lock_guard<std::mutex> lock(mMutex);
+    auto iter = mTransformedShaderModuleCache.find(key);
+    if (iter != mTransformedShaderModuleCache.end()) {
         return std::make_pair(iter->second.first, iter->second.second.get());
     }
+    return {};
+}
 
-    // static
-    ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
-                                                          const ShaderModuleDescriptor* descriptor,
-                                                          ShaderModuleParseResult* parseResult) {
-        Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
-        DAWN_TRY(module->Initialize(parseResult));
-        return module;
+ShaderModule::ModuleAndSpirv ShaderModule::ConcurrentTransformedShaderModuleCache::AddOrGet(
+    const PipelineLayoutEntryPointPair& key,
+    VkShaderModule module,
+    std::vector<uint32_t>&& spirv) {
+    ASSERT(module != VK_NULL_HANDLE);
+    std::lock_guard<std::mutex> lock(mMutex);
+    auto iter = mTransformedShaderModuleCache.find(key);
+    if (iter == mTransformedShaderModuleCache.end()) {
+        mTransformedShaderModuleCache.emplace(
+            key, std::make_pair(module, std::unique_ptr<Spirv>(new Spirv(spirv))));
+    } else {
+        mDevice->GetFencedDeleter()->DeleteWhenUnused(module);
     }
+    // Now the key should exist in the map, so find it again and return it.
+    iter = mTransformedShaderModuleCache.find(key);
+    return std::make_pair(iter->second.first, iter->second.second.get());
+}
 
-    ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
-        : ShaderModuleBase(device, descriptor),
-          mTransformedShaderModuleCache(
-              std::make_unique<ConcurrentTransformedShaderModuleCache>(device)) {
-    }
+// static
+ResultOrError<Ref<ShaderModule>> ShaderModule::Create(Device* device,
+                                                      const ShaderModuleDescriptor* descriptor,
+                                                      ShaderModuleParseResult* parseResult) {
+    Ref<ShaderModule> module = AcquireRef(new ShaderModule(device, descriptor));
+    DAWN_TRY(module->Initialize(parseResult));
+    return module;
+}
 
-    MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
-        if (GetDevice()->IsRobustnessEnabled()) {
-            ScopedTintICEHandler scopedICEHandler(GetDevice());
+ShaderModule::ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor)
+    : ShaderModuleBase(device, descriptor),
+      mTransformedShaderModuleCache(
+          std::make_unique<ConcurrentTransformedShaderModuleCache>(device)) {}
 
-            tint::transform::Robustness robustness;
-            tint::transform::DataMap transformInputs;
-
-            tint::Program program;
-            DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
-                                                   transformInputs, nullptr, nullptr));
-            // Rather than use a new ParseResult object, we just reuse the original parseResult
-            parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
-        }
-
-        return InitializeBase(parseResult);
-    }
-
-    void ShaderModule::DestroyImpl() {
-        ShaderModuleBase::DestroyImpl();
-        // Remove reference to internal cache to trigger cleanup.
-        mTransformedShaderModuleCache = nullptr;
-    }
-
-    ShaderModule::~ShaderModule() = default;
-
-    ResultOrError<ShaderModule::ModuleAndSpirv> ShaderModule::GetHandleAndSpirv(
-        const char* entryPointName,
-        PipelineLayout* layout) {
-        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleVk::GetHandleAndSpirv");
-
-        // If the shader was destroyed, we should never call this function.
-        ASSERT(IsAlive());
-
+MaybeError ShaderModule::Initialize(ShaderModuleParseResult* parseResult) {
+    if (GetDevice()->IsRobustnessEnabled()) {
         ScopedTintICEHandler scopedICEHandler(GetDevice());
 
-        // Check to see if we have the handle and spirv cached already.
-        auto cacheKey = std::make_pair(layout, entryPointName);
-        auto handleAndSpirv = mTransformedShaderModuleCache->Find(cacheKey);
-        if (handleAndSpirv.has_value()) {
-            return std::move(*handleAndSpirv);
-        }
-
-        // Creation of module and spirv is deferred to this point when using tint generator
-
-        // Remap BindingNumber to BindingIndex in WGSL shader
-        using BindingRemapper = tint::transform::BindingRemapper;
-        using BindingPoint = tint::transform::BindingPoint;
-        BindingRemapper::BindingPoints bindingPoints;
-        BindingRemapper::AccessControls accessControls;
-
-        const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
-
-        for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
-            const auto& groupBindingInfo = moduleBindingInfo[group];
-            for (const auto& it : groupBindingInfo) {
-                BindingNumber binding = it.first;
-                BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
-                BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
-                                             static_cast<uint32_t>(binding)};
-
-                BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
-                                             static_cast<uint32_t>(bindingIndex)};
-                if (srcBindingPoint != dstBindingPoint) {
-                    bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
-                }
-            }
-        }
-
-        tint::transform::Manager transformManager;
-        transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
-        // Many Vulkan drivers can't handle multi-entrypoint shader modules.
-        transformManager.append(std::make_unique<tint::transform::SingleEntryPoint>());
-
+        tint::transform::Robustness robustness;
         tint::transform::DataMap transformInputs;
-        transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
-                                                         std::move(accessControls),
-                                                         /* mayCollide */ false);
-        transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
-
-        // Transform external textures into the binding locations specified in the bgl
-        // TODO(dawn:1082): Replace this block with ShaderModuleBase::AddExternalTextureTransform.
-        tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
-        for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
-            BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
-
-            ExternalTextureBindingExpansionMap expansions =
-                bgl->GetExternalTextureBindingExpansionMap();
-
-            std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
-                expansions.begin();
-
-            while (it != expansions.end()) {
-                newBindingsMap[{static_cast<uint32_t>(i),
-                                static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane0))}] = {
-                    {static_cast<uint32_t>(i),
-                     static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane1))},
-                    {static_cast<uint32_t>(i),
-                     static_cast<uint32_t>(bgl->GetBindingIndex(it->second.params))}};
-                it++;
-            }
-        }
-
-        if (!newBindingsMap.empty()) {
-            transformManager.Add<tint::transform::MultiplanarExternalTexture>();
-            transformInputs.Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
-                newBindingsMap);
-        }
 
         tint::Program program;
-        {
-            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
-            DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(),
-                                                   transformInputs, nullptr, nullptr));
+        DAWN_TRY_ASSIGN(program, RunTransforms(&robustness, parseResult->tintProgram.get(),
+                                               transformInputs, nullptr, nullptr));
+        // Rather than use a new ParseResult object, we just reuse the original parseResult
+        parseResult->tintProgram = std::make_unique<tint::Program>(std::move(program));
+    }
+
+    return InitializeBase(parseResult);
+}
+
+void ShaderModule::DestroyImpl() {
+    ShaderModuleBase::DestroyImpl();
+    // Remove reference to internal cache to trigger cleanup.
+    mTransformedShaderModuleCache = nullptr;
+}
+
+ShaderModule::~ShaderModule() = default;
+
+ResultOrError<ShaderModule::ModuleAndSpirv> ShaderModule::GetHandleAndSpirv(
+    const char* entryPointName,
+    PipelineLayout* layout) {
+    TRACE_EVENT0(GetDevice()->GetPlatform(), General, "ShaderModuleVk::GetHandleAndSpirv");
+
+    // If the shader was destroyed, we should never call this function.
+    ASSERT(IsAlive());
+
+    ScopedTintICEHandler scopedICEHandler(GetDevice());
+
+    // Check to see if we have the handle and spirv cached already.
+    auto cacheKey = std::make_pair(layout, entryPointName);
+    auto handleAndSpirv = mTransformedShaderModuleCache->Find(cacheKey);
+    if (handleAndSpirv.has_value()) {
+        return std::move(*handleAndSpirv);
+    }
+
+    // Creation of module and spirv is deferred to this point when using tint generator
+
+    // Remap BindingNumber to BindingIndex in WGSL shader
+    using BindingRemapper = tint::transform::BindingRemapper;
+    using BindingPoint = tint::transform::BindingPoint;
+    BindingRemapper::BindingPoints bindingPoints;
+    BindingRemapper::AccessControls accessControls;
+
+    const BindingInfoArray& moduleBindingInfo = GetEntryPoint(entryPointName).bindings;
+
+    for (BindGroupIndex group : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        const BindGroupLayout* bgl = ToBackend(layout->GetBindGroupLayout(group));
+        const auto& groupBindingInfo = moduleBindingInfo[group];
+        for (const auto& it : groupBindingInfo) {
+            BindingNumber binding = it.first;
+            BindingIndex bindingIndex = bgl->GetBindingIndex(binding);
+            BindingPoint srcBindingPoint{static_cast<uint32_t>(group),
+                                         static_cast<uint32_t>(binding)};
+
+            BindingPoint dstBindingPoint{static_cast<uint32_t>(group),
+                                         static_cast<uint32_t>(bindingIndex)};
+            if (srcBindingPoint != dstBindingPoint) {
+                bindingPoints.emplace(srcBindingPoint, dstBindingPoint);
+            }
         }
+    }
+
+    tint::transform::Manager transformManager;
+    transformManager.append(std::make_unique<tint::transform::BindingRemapper>());
+    // Many Vulkan drivers can't handle multi-entrypoint shader modules.
+    transformManager.append(std::make_unique<tint::transform::SingleEntryPoint>());
+
+    tint::transform::DataMap transformInputs;
+    transformInputs.Add<BindingRemapper::Remappings>(std::move(bindingPoints),
+                                                     std::move(accessControls),
+                                                     /* mayCollide */ false);
+    transformInputs.Add<tint::transform::SingleEntryPoint::Config>(entryPointName);
+
+    // Transform external textures into the binding locations specified in the bgl
+    // TODO(dawn:1082): Replace this block with ShaderModuleBase::AddExternalTextureTransform.
+    tint::transform::MultiplanarExternalTexture::BindingsMap newBindingsMap;
+    for (BindGroupIndex i : IterateBitSet(layout->GetBindGroupLayoutsMask())) {
+        BindGroupLayoutBase* bgl = layout->GetBindGroupLayout(i);
+
+        ExternalTextureBindingExpansionMap expansions =
+            bgl->GetExternalTextureBindingExpansionMap();
+
+        std::map<BindingNumber, dawn_native::ExternalTextureBindingExpansion>::iterator it =
+            expansions.begin();
+
+        while (it != expansions.end()) {
+            newBindingsMap[{static_cast<uint32_t>(i),
+                            static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane0))}] = {
+                {static_cast<uint32_t>(i),
+                 static_cast<uint32_t>(bgl->GetBindingIndex(it->second.plane1))},
+                {static_cast<uint32_t>(i),
+                 static_cast<uint32_t>(bgl->GetBindingIndex(it->second.params))}};
+            it++;
+        }
+    }
+
+    if (!newBindingsMap.empty()) {
+        transformManager.Add<tint::transform::MultiplanarExternalTexture>();
+        transformInputs.Add<tint::transform::MultiplanarExternalTexture::NewBindingPoints>(
+            newBindingsMap);
+    }
+
+    tint::Program program;
+    {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "RunTransforms");
+        DAWN_TRY_ASSIGN(program, RunTransforms(&transformManager, GetTintProgram(), transformInputs,
+                                               nullptr, nullptr));
+    }
 
 #if TINT_BUILD_SPV_WRITER
-        tint::writer::spirv::Options options;
-        options.emit_vertex_point_size = true;
-        options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
-        options.use_zero_initialize_workgroup_memory_extension =
-            GetDevice()->IsToggleEnabled(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension);
+    tint::writer::spirv::Options options;
+    options.emit_vertex_point_size = true;
+    options.disable_workgroup_init = GetDevice()->IsToggleEnabled(Toggle::DisableWorkgroupInit);
+    options.use_zero_initialize_workgroup_memory_extension =
+        GetDevice()->IsToggleEnabled(Toggle::VulkanUseZeroInitializeWorkgroupMemoryExtension);
 
-        Spirv spirv;
-        {
-            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::spirv::Generate()");
-            auto result = tint::writer::spirv::Generate(&program, options);
-            DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
-                            result.error);
+    Spirv spirv;
+    {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "tint::writer::spirv::Generate()");
+        auto result = tint::writer::spirv::Generate(&program, options);
+        DAWN_INVALID_IF(!result.success, "An error occured while generating SPIR-V: %s.",
+                        result.error);
 
-            spirv = std::move(result.spirv);
-        }
-
-        DAWN_TRY(
-            ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
-
-        VkShaderModuleCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.codeSize = spirv.size() * sizeof(uint32_t);
-        createInfo.pCode = spirv.data();
-
-        Device* device = ToBackend(GetDevice());
-
-        VkShaderModule newHandle = VK_NULL_HANDLE;
-        {
-            TRACE_EVENT0(GetDevice()->GetPlatform(), General, "vkCreateShaderModule");
-            DAWN_TRY(CheckVkSuccess(device->fn.CreateShaderModule(
-                                        device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
-                                    "CreateShaderModule"));
-        }
-        ModuleAndSpirv moduleAndSpirv;
-        if (newHandle != VK_NULL_HANDLE) {
-            moduleAndSpirv =
-                mTransformedShaderModuleCache->AddOrGet(cacheKey, newHandle, std::move(spirv));
-        }
-
-        SetDebugName(ToBackend(GetDevice()), moduleAndSpirv.first, "Dawn_ShaderModule", GetLabel());
-
-        return std::move(moduleAndSpirv);
-#else
-        return DAWN_INTERNAL_ERROR("TINT_BUILD_SPV_WRITER is not defined.");
-#endif
+        spirv = std::move(result.spirv);
     }
 
+    DAWN_TRY(ValidateSpirv(GetDevice(), spirv, GetDevice()->IsToggleEnabled(Toggle::DumpShaders)));
+
+    VkShaderModuleCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.codeSize = spirv.size() * sizeof(uint32_t);
+    createInfo.pCode = spirv.data();
+
+    Device* device = ToBackend(GetDevice());
+
+    VkShaderModule newHandle = VK_NULL_HANDLE;
+    {
+        TRACE_EVENT0(GetDevice()->GetPlatform(), General, "vkCreateShaderModule");
+        DAWN_TRY(CheckVkSuccess(
+            device->fn.CreateShaderModule(device->GetVkDevice(), &createInfo, nullptr, &*newHandle),
+            "CreateShaderModule"));
+    }
+    ModuleAndSpirv moduleAndSpirv;
+    if (newHandle != VK_NULL_HANDLE) {
+        moduleAndSpirv =
+            mTransformedShaderModuleCache->AddOrGet(cacheKey, newHandle, std::move(spirv));
+    }
+
+    SetDebugName(ToBackend(GetDevice()), moduleAndSpirv.first, "Dawn_ShaderModule", GetLabel());
+
+    return std::move(moduleAndSpirv);
+#else
+    return DAWN_INTERNAL_ERROR("TINT_BUILD_SPV_WRITER is not defined.");
+#endif
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/ShaderModuleVk.h b/src/dawn/native/vulkan/ShaderModuleVk.h
index 63e0106..b7bdd14 100644
--- a/src/dawn/native/vulkan/ShaderModuleVk.h
+++ b/src/dawn/native/vulkan/ShaderModuleVk.h
@@ -30,50 +30,49 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
-    class PipelineLayout;
+class Device;
+class PipelineLayout;
 
-    class ShaderModule final : public ShaderModuleBase {
+class ShaderModule final : public ShaderModuleBase {
+  public:
+    using Spirv = std::vector<uint32_t>;
+    using ModuleAndSpirv = std::pair<VkShaderModule, const Spirv*>;
+
+    static ResultOrError<Ref<ShaderModule>> Create(Device* device,
+                                                   const ShaderModuleDescriptor* descriptor,
+                                                   ShaderModuleParseResult* parseResult);
+
+    ResultOrError<ModuleAndSpirv> GetHandleAndSpirv(const char* entryPointName,
+                                                    PipelineLayout* layout);
+
+  private:
+    ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
+    ~ShaderModule() override;
+    MaybeError Initialize(ShaderModuleParseResult* parseResult);
+    void DestroyImpl() override;
+
+    // New handles created by GetHandleAndSpirv at pipeline creation time.
+    class ConcurrentTransformedShaderModuleCache {
       public:
-        using Spirv = std::vector<uint32_t>;
-        using ModuleAndSpirv = std::pair<VkShaderModule, const Spirv*>;
+        explicit ConcurrentTransformedShaderModuleCache(Device* device);
+        ~ConcurrentTransformedShaderModuleCache();
 
-        static ResultOrError<Ref<ShaderModule>> Create(Device* device,
-                                                       const ShaderModuleDescriptor* descriptor,
-                                                       ShaderModuleParseResult* parseResult);
-
-        ResultOrError<ModuleAndSpirv> GetHandleAndSpirv(const char* entryPointName,
-                                                        PipelineLayout* layout);
+        std::optional<ModuleAndSpirv> Find(const PipelineLayoutEntryPointPair& key);
+        ModuleAndSpirv AddOrGet(const PipelineLayoutEntryPointPair& key,
+                                VkShaderModule module,
+                                std::vector<uint32_t>&& spirv);
 
       private:
-        ShaderModule(Device* device, const ShaderModuleDescriptor* descriptor);
-        ~ShaderModule() override;
-        MaybeError Initialize(ShaderModuleParseResult* parseResult);
-        void DestroyImpl() override;
+        using Entry = std::pair<VkShaderModule, std::unique_ptr<Spirv>>;
 
-        // New handles created by GetHandleAndSpirv at pipeline creation time.
-        class ConcurrentTransformedShaderModuleCache {
-          public:
-            explicit ConcurrentTransformedShaderModuleCache(Device* device);
-            ~ConcurrentTransformedShaderModuleCache();
-
-            std::optional<ModuleAndSpirv> Find(const PipelineLayoutEntryPointPair& key);
-            ModuleAndSpirv AddOrGet(const PipelineLayoutEntryPointPair& key,
-                                    VkShaderModule module,
-                                    std::vector<uint32_t>&& spirv);
-
-          private:
-            using Entry = std::pair<VkShaderModule, std::unique_ptr<Spirv>>;
-
-            Device* mDevice;
-            std::mutex mMutex;
-            std::unordered_map<PipelineLayoutEntryPointPair,
-                               Entry,
-                               PipelineLayoutEntryPointPairHashFunc>
+        Device* mDevice;
+        std::mutex mMutex;
+        std::
+            unordered_map<PipelineLayoutEntryPointPair, Entry, PipelineLayoutEntryPointPairHashFunc>
                 mTransformedShaderModuleCache;
-        };
-        std::unique_ptr<ConcurrentTransformedShaderModuleCache> mTransformedShaderModuleCache;
     };
+    std::unique_ptr<ConcurrentTransformedShaderModuleCache> mTransformedShaderModuleCache;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/StagingBufferVk.cpp b/src/dawn/native/vulkan/StagingBufferVk.cpp
index 97b0c61..f5b35bd 100644
--- a/src/dawn/native/vulkan/StagingBufferVk.cpp
+++ b/src/dawn/native/vulkan/StagingBufferVk.cpp
@@ -22,55 +22,54 @@
 
 namespace dawn::native::vulkan {
 
-    StagingBuffer::StagingBuffer(size_t size, Device* device)
-        : StagingBufferBase(size), mDevice(device) {
+StagingBuffer::StagingBuffer(size_t size, Device* device)
+    : StagingBufferBase(size), mDevice(device) {}
+
+MaybeError StagingBuffer::Initialize() {
+    VkBufferCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.size = GetSize();
+    createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+    createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    createInfo.queueFamilyIndexCount = 0;
+    createInfo.pQueueFamilyIndices = 0;
+
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
+        "vkCreateBuffer"));
+
+    VkMemoryRequirements requirements;
+    mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
+
+    DAWN_TRY_ASSIGN(mAllocation, mDevice->GetResourceMemoryAllocator()->Allocate(
+                                     requirements, MemoryKind::LinearMappable));
+
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
+                                     ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
+                                     mAllocation.GetOffset()),
+        "vkBindBufferMemory"));
+
+    mMappedPointer = mAllocation.GetMappedPointer();
+    if (mMappedPointer == nullptr) {
+        return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
     }
 
-    MaybeError StagingBuffer::Initialize() {
-        VkBufferCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.size = GetSize();
-        createInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-        createInfo.queueFamilyIndexCount = 0;
-        createInfo.pQueueFamilyIndices = 0;
+    SetDebugName(mDevice, mBuffer, "Dawn_StagingBuffer");
 
-        DAWN_TRY(CheckVkSuccess(
-            mDevice->fn.CreateBuffer(mDevice->GetVkDevice(), &createInfo, nullptr, &*mBuffer),
-            "vkCreateBuffer"));
+    return {};
+}
 
-        VkMemoryRequirements requirements;
-        mDevice->fn.GetBufferMemoryRequirements(mDevice->GetVkDevice(), mBuffer, &requirements);
+StagingBuffer::~StagingBuffer() {
+    mMappedPointer = nullptr;
+    mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
+    mDevice->GetResourceMemoryAllocator()->Deallocate(&mAllocation);
+}
 
-        DAWN_TRY_ASSIGN(mAllocation, mDevice->GetResourceMemoryAllocator()->Allocate(
-                                         requirements, MemoryKind::LinearMappable));
-
-        DAWN_TRY(CheckVkSuccess(
-            mDevice->fn.BindBufferMemory(mDevice->GetVkDevice(), mBuffer,
-                                         ToBackend(mAllocation.GetResourceHeap())->GetMemory(),
-                                         mAllocation.GetOffset()),
-            "vkBindBufferMemory"));
-
-        mMappedPointer = mAllocation.GetMappedPointer();
-        if (mMappedPointer == nullptr) {
-            return DAWN_INTERNAL_ERROR("Unable to map staging buffer.");
-        }
-
-        SetDebugName(mDevice, mBuffer, "Dawn_StagingBuffer");
-
-        return {};
-    }
-
-    StagingBuffer::~StagingBuffer() {
-        mMappedPointer = nullptr;
-        mDevice->GetFencedDeleter()->DeleteWhenUnused(mBuffer);
-        mDevice->GetResourceMemoryAllocator()->Deallocate(&mAllocation);
-    }
-
-    VkBuffer StagingBuffer::GetBufferHandle() const {
-        return mBuffer;
-    }
+VkBuffer StagingBuffer::GetBufferHandle() const {
+    return mBuffer;
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/StagingBufferVk.h b/src/dawn/native/vulkan/StagingBufferVk.h
index e69634c..dbd48ed 100644
--- a/src/dawn/native/vulkan/StagingBufferVk.h
+++ b/src/dawn/native/vulkan/StagingBufferVk.h
@@ -21,22 +21,22 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    class StagingBuffer : public StagingBufferBase {
-      public:
-        StagingBuffer(size_t size, Device* device);
-        ~StagingBuffer() override;
+class StagingBuffer : public StagingBufferBase {
+  public:
+    StagingBuffer(size_t size, Device* device);
+    ~StagingBuffer() override;
 
-        VkBuffer GetBufferHandle() const;
+    VkBuffer GetBufferHandle() const;
 
-        MaybeError Initialize() override;
+    MaybeError Initialize() override;
 
-      private:
-        Device* mDevice;
-        VkBuffer mBuffer;
-        ResourceMemoryAllocation mAllocation;
-    };
+  private:
+    Device* mDevice;
+    VkBuffer mBuffer;
+    ResourceMemoryAllocation mAllocation;
+};
 }  // namespace dawn::native::vulkan
 
 #endif  // SRC_DAWN_NATIVE_VULKAN_STAGINGBUFFERVK_H_
diff --git a/src/dawn/native/vulkan/SwapChainVk.cpp b/src/dawn/native/vulkan/SwapChainVk.cpp
index 03daa6d..63de95e 100644
--- a/src/dawn/native/vulkan/SwapChainVk.cpp
+++ b/src/dawn/native/vulkan/SwapChainVk.cpp
@@ -29,676 +29,671 @@
 #include "dawn/native/vulkan/VulkanError.h"
 
 #if defined(DAWN_USE_X11)
-#    include "dawn/native/XlibXcbFunctions.h"
+#include "dawn/native/XlibXcbFunctions.h"
 #endif  // defined(DAWN_USE_X11)
 
 namespace dawn::native::vulkan {
 
-    // OldSwapChain
+// OldSwapChain
 
-    // static
-    Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
-        return AcquireRef(new OldSwapChain(device, descriptor));
+// static
+Ref<OldSwapChain> OldSwapChain::Create(Device* device, const SwapChainDescriptor* descriptor) {
+    return AcquireRef(new OldSwapChain(device, descriptor));
+}
+
+OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
+    : OldSwapChainBase(device, descriptor) {
+    const auto& im = GetImplementation();
+    DawnWSIContextVulkan wsiContext = {};
+    im.Init(im.userData, &wsiContext);
+
+    ASSERT(im.textureUsage != WGPUTextureUsage_None);
+    mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
+}
+
+OldSwapChain::~OldSwapChain() {}
+
+TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
+    const auto& im = GetImplementation();
+    DawnSwapChainNextTexture next = {};
+    DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+
+    if (error) {
+        GetDevice()->HandleError(InternalErrorType::Internal, error);
+        return nullptr;
     }
 
-    OldSwapChain::OldSwapChain(Device* device, const SwapChainDescriptor* descriptor)
-        : OldSwapChainBase(device, descriptor) {
-        const auto& im = GetImplementation();
-        DawnWSIContextVulkan wsiContext = {};
-        im.Init(im.userData, &wsiContext);
+    ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
+    VkImage nativeTexture = VkImage::CreateFromHandle(image);
+    return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture).Detach();
+}
 
-        ASSERT(im.textureUsage != WGPUTextureUsage_None);
-        mTextureUsage = static_cast<wgpu::TextureUsage>(im.textureUsage);
-    }
+MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
+    Device* device = ToBackend(GetDevice());
 
-    OldSwapChain::~OldSwapChain() {
-    }
+    // Perform the necessary pipeline barriers for the texture to be used with the usage
+    // requested by the implementation.
+    CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+    ToBackend(view->GetTexture())
+        ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
 
-    TextureBase* OldSwapChain::GetNextTextureImpl(const TextureDescriptor* descriptor) {
-        const auto& im = GetImplementation();
-        DawnSwapChainNextTexture next = {};
-        DawnSwapChainError error = im.GetNextTexture(im.userData, &next);
+    DAWN_TRY(device->SubmitPendingCommands());
 
-        if (error) {
-            GetDevice()->HandleError(InternalErrorType::Internal, error);
-            return nullptr;
-        }
+    return {};
+}
 
-        ::VkImage image = NativeNonDispatachableHandleFromU64<::VkImage>(next.texture.u64);
-        VkImage nativeTexture = VkImage::CreateFromHandle(image);
-        return Texture::CreateForSwapChain(ToBackend(GetDevice()), descriptor, nativeTexture)
-            .Detach();
-    }
+// SwapChain
 
-    MaybeError OldSwapChain::OnBeforePresent(TextureViewBase* view) {
-        Device* device = ToBackend(GetDevice());
+namespace {
 
-        // Perform the necessary pipeline barriers for the texture to be used with the usage
-        // requested by the implementation.
-        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-        ToBackend(view->GetTexture())
-            ->TransitionUsageNow(recordingContext, mTextureUsage, view->GetSubresourceRange());
+ResultOrError<VkSurfaceKHR> CreateVulkanSurface(Adapter* adapter, Surface* surface) {
+    const VulkanGlobalInfo& info = adapter->GetVulkanInstance()->GetGlobalInfo();
+    const VulkanFunctions& fn = adapter->GetVulkanInstance()->GetFunctions();
+    VkInstance instance = adapter->GetVulkanInstance()->GetVkInstance();
 
-        DAWN_TRY(device->SubmitPendingCommands());
+    // May not be used in the platform-specific switches below.
+    DAWN_UNUSED(info);
+    DAWN_UNUSED(fn);
+    DAWN_UNUSED(instance);
 
-        return {};
-    }
-
-    // SwapChain
-
-    namespace {
-
-        ResultOrError<VkSurfaceKHR> CreateVulkanSurface(Adapter* adapter, Surface* surface) {
-            const VulkanGlobalInfo& info = adapter->GetVulkanInstance()->GetGlobalInfo();
-            const VulkanFunctions& fn = adapter->GetVulkanInstance()->GetFunctions();
-            VkInstance instance = adapter->GetVulkanInstance()->GetVkInstance();
-
-            // May not be used in the platform-specific switches below.
-            DAWN_UNUSED(info);
-            DAWN_UNUSED(fn);
-            DAWN_UNUSED(instance);
-
-            switch (surface->GetType()) {
+    switch (surface->GetType()) {
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-                case Surface::Type::MetalLayer:
-                    if (info.HasExt(InstanceExt::MetalSurface)) {
-                        VkMetalSurfaceCreateInfoEXT createInfo;
-                        createInfo.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
-                        createInfo.pNext = nullptr;
-                        createInfo.flags = 0;
-                        createInfo.pLayer = surface->GetMetalLayer();
+        case Surface::Type::MetalLayer:
+            if (info.HasExt(InstanceExt::MetalSurface)) {
+                VkMetalSurfaceCreateInfoEXT createInfo;
+                createInfo.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
+                createInfo.pNext = nullptr;
+                createInfo.flags = 0;
+                createInfo.pLayer = surface->GetMetalLayer();
 
-                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
-                        DAWN_TRY(CheckVkSuccess(
-                            fn.CreateMetalSurfaceEXT(instance, &createInfo, nullptr, &*vkSurface),
-                            "CreateMetalSurface"));
-                        return vkSurface;
-                    }
-                    break;
+                VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                DAWN_TRY(CheckVkSuccess(
+                    fn.CreateMetalSurfaceEXT(instance, &createInfo, nullptr, &*vkSurface),
+                    "CreateMetalSurface"));
+                return vkSurface;
+            }
+            break;
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-                case Surface::Type::WindowsHWND:
-                    if (info.HasExt(InstanceExt::Win32Surface)) {
-                        VkWin32SurfaceCreateInfoKHR createInfo;
-                        createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
-                        createInfo.pNext = nullptr;
-                        createInfo.flags = 0;
-                        createInfo.hinstance = static_cast<HINSTANCE>(surface->GetHInstance());
-                        createInfo.hwnd = static_cast<HWND>(surface->GetHWND());
+        case Surface::Type::WindowsHWND:
+            if (info.HasExt(InstanceExt::Win32Surface)) {
+                VkWin32SurfaceCreateInfoKHR createInfo;
+                createInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
+                createInfo.pNext = nullptr;
+                createInfo.flags = 0;
+                createInfo.hinstance = static_cast<HINSTANCE>(surface->GetHInstance());
+                createInfo.hwnd = static_cast<HWND>(surface->GetHWND());
 
-                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
-                        DAWN_TRY(CheckVkSuccess(
-                            fn.CreateWin32SurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
-                            "CreateWin32Surface"));
-                        return vkSurface;
-                    }
-                    break;
+                VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                DAWN_TRY(CheckVkSuccess(
+                    fn.CreateWin32SurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                    "CreateWin32Surface"));
+                return vkSurface;
+            }
+            break;
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 #if defined(DAWN_PLATFORM_ANDROID)
-                case Surface::Type::AndroidWindow: {
-                    if (info.HasExt(InstanceExt::AndroidSurface)) {
-                        ASSERT(surface->GetAndroidNativeWindow() != nullptr);
+        case Surface::Type::AndroidWindow: {
+            if (info.HasExt(InstanceExt::AndroidSurface)) {
+                ASSERT(surface->GetAndroidNativeWindow() != nullptr);
 
-                        VkAndroidSurfaceCreateInfoKHR createInfo;
-                        createInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
-                        createInfo.pNext = nullptr;
-                        createInfo.flags = 0;
-                        createInfo.window =
-                            static_cast<struct ANativeWindow*>(surface->GetAndroidNativeWindow());
+                VkAndroidSurfaceCreateInfoKHR createInfo;
+                createInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR;
+                createInfo.pNext = nullptr;
+                createInfo.flags = 0;
+                createInfo.window =
+                    static_cast<struct ANativeWindow*>(surface->GetAndroidNativeWindow());
 
-                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
-                        DAWN_TRY(CheckVkSuccess(
-                            fn.CreateAndroidSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
-                            "CreateAndroidSurfaceKHR"));
-                        return vkSurface;
-                    }
+                VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                DAWN_TRY(CheckVkSuccess(
+                    fn.CreateAndroidSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                    "CreateAndroidSurfaceKHR"));
+                return vkSurface;
+            }
 
-                    break;
-                }
+            break;
+        }
 
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
 #if defined(DAWN_USE_X11)
-                case Surface::Type::XlibWindow: {
-                    if (info.HasExt(InstanceExt::XlibSurface)) {
-                        VkXlibSurfaceCreateInfoKHR createInfo;
-                        createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
-                        createInfo.pNext = nullptr;
-                        createInfo.flags = 0;
-                        createInfo.dpy = static_cast<Display*>(surface->GetXDisplay());
-                        createInfo.window = surface->GetXWindow();
+        case Surface::Type::XlibWindow: {
+            if (info.HasExt(InstanceExt::XlibSurface)) {
+                VkXlibSurfaceCreateInfoKHR createInfo;
+                createInfo.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
+                createInfo.pNext = nullptr;
+                createInfo.flags = 0;
+                createInfo.dpy = static_cast<Display*>(surface->GetXDisplay());
+                createInfo.window = surface->GetXWindow();
 
-                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
-                        DAWN_TRY(CheckVkSuccess(
-                            fn.CreateXlibSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
-                            "CreateXlibSurface"));
-                        return vkSurface;
-                    }
+                VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                DAWN_TRY(CheckVkSuccess(
+                    fn.CreateXlibSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                    "CreateXlibSurface"));
+                return vkSurface;
+            }
 
-                    // Fall back to using XCB surfaces if the Xlib extension isn't available.
-                    // See https://xcb.freedesktop.org/MixingCalls/ for more information about
-                    // interoperability between Xlib and XCB
-                    const XlibXcbFunctions* xlibXcb =
-                        adapter->GetInstance()->GetOrCreateXlibXcbFunctions();
-                    ASSERT(xlibXcb != nullptr);
+            // Fall back to using XCB surfaces if the Xlib extension isn't available.
+            // See https://xcb.freedesktop.org/MixingCalls/ for more information about
+            // interoperability between Xlib and XCB
+            const XlibXcbFunctions* xlibXcb = adapter->GetInstance()->GetOrCreateXlibXcbFunctions();
+            ASSERT(xlibXcb != nullptr);
 
-                    if (info.HasExt(InstanceExt::XcbSurface) && xlibXcb->IsLoaded()) {
-                        VkXcbSurfaceCreateInfoKHR createInfo;
-                        createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
-                        createInfo.pNext = nullptr;
-                        createInfo.flags = 0;
-                        // The XCB connection lives as long as the X11 display.
-                        createInfo.connection = xlibXcb->xGetXCBConnection(
-                            static_cast<Display*>(surface->GetXDisplay()));
-                        createInfo.window = surface->GetXWindow();
+            if (info.HasExt(InstanceExt::XcbSurface) && xlibXcb->IsLoaded()) {
+                VkXcbSurfaceCreateInfoKHR createInfo;
+                createInfo.sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR;
+                createInfo.pNext = nullptr;
+                createInfo.flags = 0;
+                // The XCB connection lives as long as the X11 display.
+                createInfo.connection =
+                    xlibXcb->xGetXCBConnection(static_cast<Display*>(surface->GetXDisplay()));
+                createInfo.window = surface->GetXWindow();
 
-                        VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
-                        DAWN_TRY(CheckVkSuccess(
-                            fn.CreateXcbSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
-                            "CreateXcbSurfaceKHR"));
-                        return vkSurface;
-                    }
-                    break;
-                }
+                VkSurfaceKHR vkSurface = VK_NULL_HANDLE;
+                DAWN_TRY(CheckVkSuccess(
+                    fn.CreateXcbSurfaceKHR(instance, &createInfo, nullptr, &*vkSurface),
+                    "CreateXcbSurfaceKHR"));
+                return vkSurface;
+            }
+            break;
+        }
 #endif  // defined(DAWN_USE_X11)
 
-                default:
-                    break;
-            }
-
-            return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s) for Vulkan.",
-                                                surface->GetType());
-        }
-
-        VkPresentModeKHR ToVulkanPresentMode(wgpu::PresentMode mode) {
-            switch (mode) {
-                case wgpu::PresentMode::Fifo:
-                    return VK_PRESENT_MODE_FIFO_KHR;
-                case wgpu::PresentMode::Immediate:
-                    return VK_PRESENT_MODE_IMMEDIATE_KHR;
-                case wgpu::PresentMode::Mailbox:
-                    return VK_PRESENT_MODE_MAILBOX_KHR;
-            }
-            UNREACHABLE();
-        }
-
-        uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
-            switch (mode) {
-                case VK_PRESENT_MODE_FIFO_KHR:
-                case VK_PRESENT_MODE_IMMEDIATE_KHR:
-                    return 2;
-                case VK_PRESENT_MODE_MAILBOX_KHR:
-                    return 3;
-                default:
-                    break;
-            }
-            UNREACHABLE();
-        }
-
-    }  // anonymous namespace
-
-    // static
-    ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor) {
-        Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
-        DAWN_TRY(swapchain->Initialize(previousSwapChain));
-        return swapchain;
+        default:
+            break;
     }
 
-    SwapChain::~SwapChain() = default;
+    return DAWN_FORMAT_VALIDATION_ERROR("Unsupported surface type (%s) for Vulkan.",
+                                        surface->GetType());
+}
 
-    void SwapChain::DestroyImpl() {
-        SwapChainBase::DestroyImpl();
-        DetachFromSurface();
+VkPresentModeKHR ToVulkanPresentMode(wgpu::PresentMode mode) {
+    switch (mode) {
+        case wgpu::PresentMode::Fifo:
+            return VK_PRESENT_MODE_FIFO_KHR;
+        case wgpu::PresentMode::Immediate:
+            return VK_PRESENT_MODE_IMMEDIATE_KHR;
+        case wgpu::PresentMode::Mailbox:
+            return VK_PRESENT_MODE_MAILBOX_KHR;
+    }
+    UNREACHABLE();
+}
+
+uint32_t MinImageCountForPresentMode(VkPresentModeKHR mode) {
+    switch (mode) {
+        case VK_PRESENT_MODE_FIFO_KHR:
+        case VK_PRESENT_MODE_IMMEDIATE_KHR:
+            return 2;
+        case VK_PRESENT_MODE_MAILBOX_KHR:
+            return 3;
+        default:
+            break;
+    }
+    UNREACHABLE();
+}
+
+}  // anonymous namespace
+
+// static
+ResultOrError<Ref<SwapChain>> SwapChain::Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor) {
+    Ref<SwapChain> swapchain = AcquireRef(new SwapChain(device, surface, descriptor));
+    DAWN_TRY(swapchain->Initialize(previousSwapChain));
+    return swapchain;
+}
+
+SwapChain::~SwapChain() = default;
+
+void SwapChain::DestroyImpl() {
+    SwapChainBase::DestroyImpl();
+    DetachFromSurface();
+}
+
+// Note that when we need to re-create the swapchain because it is out of date,
+// previousSwapChain can be set to `this`.
+MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
+    Device* device = ToBackend(GetDevice());
+    Adapter* adapter = ToBackend(GetDevice()->GetAdapter());
+
+    VkSwapchainKHR previousVkSwapChain = VK_NULL_HANDLE;
+
+    if (previousSwapChain != nullptr) {
+        // TODO(crbug.com/dawn/269): The first time a surface is used with a Device, check
+        // it is supported with vkGetPhysicalDeviceSurfaceSupportKHR.
+
+        // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
+        // multiple backends one after the other. It probably needs to block until the backend
+        // and GPU are completely finished with the previous swapchain.
+        DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Vulkan,
+                        "Vulkan SwapChain cannot switch backend types from %s to %s.",
+                        previousSwapChain->GetBackendType(), wgpu::BackendType::Vulkan);
+
+        // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
+        SwapChain* previousVulkanSwapChain = static_cast<SwapChain*>(previousSwapChain);
+
+        // TODO(crbug.com/dawn/269): Figure out switching a single surface between multiple
+        // Vulkan devices on different VkInstances. Probably needs to block too!
+        VkInstance previousInstance = ToBackend(previousSwapChain->GetDevice())->GetVkInstance();
+        DAWN_INVALID_IF(previousInstance != ToBackend(GetDevice())->GetVkInstance(),
+                        "Vulkan SwapChain cannot switch between Vulkan instances.");
+
+        // The previous swapchain is a dawn::native::vulkan::SwapChain so we can reuse its
+        // VkSurfaceKHR provided since they are on the same instance.
+        std::swap(previousVulkanSwapChain->mVkSurface, mVkSurface);
+
+        // The previous swapchain was on the same Vulkan instance so we can use Vulkan's
+        // "oldSwapchain" mechanism to ensure a seamless transition. We track the previous
+        // swapchain for release immediately so it is not leaked in case of an error. (Vulkan
+        // allows destroying it immediately after the call to vkCreateSwapchainKHR but tracking
+        // using the fenced deleter makes the code simpler).
+        std::swap(previousVulkanSwapChain->mSwapChain, previousVkSwapChain);
+        ToBackend(previousSwapChain->GetDevice())
+            ->GetFencedDeleter()
+            ->DeleteWhenUnused(previousVkSwapChain);
     }
 
-    // Note that when we need to re-create the swapchain because it is out of date,
-    // previousSwapChain can be set to `this`.
-    MaybeError SwapChain::Initialize(NewSwapChainBase* previousSwapChain) {
-        Device* device = ToBackend(GetDevice());
-        Adapter* adapter = ToBackend(GetDevice()->GetAdapter());
-
-        VkSwapchainKHR previousVkSwapChain = VK_NULL_HANDLE;
-
-        if (previousSwapChain != nullptr) {
-            // TODO(crbug.com/dawn/269): The first time a surface is used with a Device, check
-            // it is supported with vkGetPhysicalDeviceSurfaceSupportKHR.
-
-            // TODO(crbug.com/dawn/269): figure out what should happen when surfaces are used by
-            // multiple backends one after the other. It probably needs to block until the backend
-            // and GPU are completely finished with the previous swapchain.
-            DAWN_INVALID_IF(previousSwapChain->GetBackendType() != wgpu::BackendType::Vulkan,
-                            "Vulkan SwapChain cannot switch backend types from %s to %s.",
-                            previousSwapChain->GetBackendType(), wgpu::BackendType::Vulkan);
-
-            // TODO(crbug.com/dawn/269): use ToBackend once OldSwapChainBase is removed.
-            SwapChain* previousVulkanSwapChain = static_cast<SwapChain*>(previousSwapChain);
-
-            // TODO(crbug.com/dawn/269): Figure out switching a single surface between multiple
-            // Vulkan devices on different VkInstances. Probably needs to block too!
-            VkInstance previousInstance =
-                ToBackend(previousSwapChain->GetDevice())->GetVkInstance();
-            DAWN_INVALID_IF(previousInstance != ToBackend(GetDevice())->GetVkInstance(),
-                            "Vulkan SwapChain cannot switch between Vulkan instances.");
-
-            // The previous swapchain is a dawn::native::vulkan::SwapChain so we can reuse its
-            // VkSurfaceKHR provided since they are on the same instance.
-            std::swap(previousVulkanSwapChain->mVkSurface, mVkSurface);
-
-            // The previous swapchain was on the same Vulkan instance so we can use Vulkan's
-            // "oldSwapchain" mechanism to ensure a seamless transition. We track the previous
-            // swapchain for release immediately so it is not leaked in case of an error. (Vulkan
-            // allows destroying it immediately after the call to vkCreateSwapChainKHR but tracking
-            // using the fenced deleter makes the code simpler).
-            std::swap(previousVulkanSwapChain->mSwapChain, previousVkSwapChain);
-            ToBackend(previousSwapChain->GetDevice())
-                ->GetFencedDeleter()
-                ->DeleteWhenUnused(previousVkSwapChain);
-        }
-
-        if (mVkSurface == VK_NULL_HANDLE) {
-            DAWN_TRY_ASSIGN(mVkSurface, CreateVulkanSurface(adapter, GetSurface()));
-        }
-
-        VulkanSurfaceInfo surfaceInfo;
-        DAWN_TRY_ASSIGN(surfaceInfo, GatherSurfaceInfo(*adapter, mVkSurface));
-
-        DAWN_TRY_ASSIGN(mConfig, ChooseConfig(surfaceInfo));
-
-        // TODO(dawn:269): Choose config instead of hardcoding
-        VkSwapchainCreateInfoKHR createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.surface = mVkSurface;
-        createInfo.minImageCount = mConfig.targetImageCount;
-        createInfo.imageFormat = mConfig.format;
-        createInfo.imageColorSpace = mConfig.colorSpace;
-        createInfo.imageExtent = mConfig.extent;
-        createInfo.imageArrayLayers = 1;
-        createInfo.imageUsage = mConfig.usage;
-        createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
-        createInfo.queueFamilyIndexCount = 0;
-        createInfo.pQueueFamilyIndices = nullptr;
-        createInfo.preTransform = mConfig.transform;
-        createInfo.compositeAlpha = mConfig.alphaMode;
-        createInfo.presentMode = mConfig.presentMode;
-        createInfo.clipped = false;
-        createInfo.oldSwapchain = previousVkSwapChain;
-
-        DAWN_TRY(CheckVkSuccess(device->fn.CreateSwapchainKHR(device->GetVkDevice(), &createInfo,
-                                                              nullptr, &*mSwapChain),
-                                "CreateSwapChain"));
-
-        // Gather the swapchain's images. Implementations are allowed to return more images than the
-        // number we asked for.
-        uint32_t count = 0;
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count, nullptr),
-            "GetSwapChainImages1"));
-
-        mSwapChainImages.resize(count);
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count,
-                                             AsVkArray(mSwapChainImages.data())),
-            "GetSwapChainImages2"));
-
-        return {};
+    if (mVkSurface == VK_NULL_HANDLE) {
+        DAWN_TRY_ASSIGN(mVkSurface, CreateVulkanSurface(adapter, GetSurface()));
     }
 
-    ResultOrError<SwapChain::Config> SwapChain::ChooseConfig(
-        const VulkanSurfaceInfo& surfaceInfo) const {
-        Config config;
+    VulkanSurfaceInfo surfaceInfo;
+    DAWN_TRY_ASSIGN(surfaceInfo, GatherSurfaceInfo(*adapter, mVkSurface));
 
-        // Choose the present mode. The only guaranteed one is FIFO so it has to be the fallback for
-        // all other present modes. IMMEDIATE has tearing which is generally undesirable so it can't
-        // be the fallback for MAILBOX. So the fallback order is always IMMEDIATE -> MAILBOX ->
-        // FIFO.
-        {
-            auto HasPresentMode = [](const std::vector<VkPresentModeKHR>& modes,
-                                     VkPresentModeKHR target) -> bool {
-                return std::find(modes.begin(), modes.end(), target) != modes.end();
-            };
+    DAWN_TRY_ASSIGN(mConfig, ChooseConfig(surfaceInfo));
 
-            VkPresentModeKHR targetMode = ToVulkanPresentMode(GetPresentMode());
-            const std::array<VkPresentModeKHR, 3> kPresentModeFallbacks = {
-                VK_PRESENT_MODE_IMMEDIATE_KHR,
-                VK_PRESENT_MODE_MAILBOX_KHR,
-                VK_PRESENT_MODE_FIFO_KHR,
-            };
+    // TODO(dawn:269): Choose config instead of hardcoding
+    VkSwapchainCreateInfoKHR createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.surface = mVkSurface;
+    createInfo.minImageCount = mConfig.targetImageCount;
+    createInfo.imageFormat = mConfig.format;
+    createInfo.imageColorSpace = mConfig.colorSpace;
+    createInfo.imageExtent = mConfig.extent;
+    createInfo.imageArrayLayers = 1;
+    createInfo.imageUsage = mConfig.usage;
+    createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    createInfo.queueFamilyIndexCount = 0;
+    createInfo.pQueueFamilyIndices = nullptr;
+    createInfo.preTransform = mConfig.transform;
+    createInfo.compositeAlpha = mConfig.alphaMode;
+    createInfo.presentMode = mConfig.presentMode;
+    createInfo.clipped = false;
+    createInfo.oldSwapchain = previousVkSwapChain;
 
-            // Go to the target mode.
-            size_t modeIndex = 0;
-            while (kPresentModeFallbacks[modeIndex] != targetMode) {
-                modeIndex++;
-            }
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.CreateSwapchainKHR(device->GetVkDevice(), &createInfo, nullptr, &*mSwapChain),
+        "CreateSwapChain"));
 
-            // Find the first available fallback.
-            while (!HasPresentMode(surfaceInfo.presentModes, kPresentModeFallbacks[modeIndex])) {
-                modeIndex++;
-            }
+    // Gather the swapchain's images. Implementations are allowed to return more images than the
+    // number we asked for.
+    uint32_t count = 0;
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count, nullptr),
+        "GetSwapChainImages1"));
 
-            ASSERT(modeIndex < kPresentModeFallbacks.size());
-            config.presentMode = kPresentModeFallbacks[modeIndex];
-        }
+    mSwapChainImages.resize(count);
+    DAWN_TRY(
+        CheckVkSuccess(device->fn.GetSwapchainImagesKHR(device->GetVkDevice(), mSwapChain, &count,
+                                                        AsVkArray(mSwapChainImages.data())),
+                       "GetSwapChainImages2"));
 
-        // Choose the target width or do a blit.
-        if (GetWidth() < surfaceInfo.capabilities.minImageExtent.width ||
-            GetWidth() > surfaceInfo.capabilities.maxImageExtent.width ||
-            GetHeight() < surfaceInfo.capabilities.minImageExtent.height ||
-            GetHeight() > surfaceInfo.capabilities.maxImageExtent.height) {
-            config.needsBlit = true;
-        } else {
-            config.extent.width = GetWidth();
-            config.extent.height = GetHeight();
-        }
+    return {};
+}
 
-        // Choose the target usage or do a blit.
-        VkImageUsageFlags targetUsages =
-            VulkanImageUsage(GetUsage(), GetDevice()->GetValidInternalFormat(GetFormat()));
-        VkImageUsageFlags supportedUsages = surfaceInfo.capabilities.supportedUsageFlags;
-        if (!IsSubset(targetUsages, supportedUsages)) {
-            config.needsBlit = true;
-        } else {
-            config.usage = targetUsages;
-            config.wgpuUsage = GetUsage();
-        }
+ResultOrError<SwapChain::Config> SwapChain::ChooseConfig(
+    const VulkanSurfaceInfo& surfaceInfo) const {
+    Config config;
 
-        // Only support BGRA8Unorm (and RGBA8Unorm on android) with SRGB color space for now.
-        config.wgpuFormat = GetFormat();
-        config.format = VulkanImageFormat(ToBackend(GetDevice()), config.wgpuFormat);
-        config.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
-
-        bool formatIsSupported = false;
-        for (const VkSurfaceFormatKHR& format : surfaceInfo.formats) {
-            if (format.format == config.format && format.colorSpace == config.colorSpace) {
-                formatIsSupported = true;
-                break;
-            }
-        }
-        if (!formatIsSupported) {
-            return DAWN_INTERNAL_ERROR(absl::StrFormat(
-                "Vulkan SwapChain must support %s with sRGB colorspace.", config.wgpuFormat));
-        }
-
-        // Only the identity transform with opaque alpha is supported for now.
-        DAWN_INVALID_IF((surfaceInfo.capabilities.supportedTransforms &
-                         VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) == 0,
-                        "Vulkan SwapChain must support the identity transform.");
-
-        config.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
-
-        config.alphaMode = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
-#if !defined(DAWN_PLATFORM_ANDROID)
-        DAWN_INVALID_IF((surfaceInfo.capabilities.supportedCompositeAlpha &
-                         VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) == 0,
-                        "Vulkan SwapChain must support opaque alpha.");
-#else
-        // TODO(dawn:286): investigate composite alpha for WebGPU native
-        VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
-            VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
-            VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
-            VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
-            VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
+    // Choose the present mode. The only guaranteed one is FIFO so it has to be the fallback for
+    // all other present modes. IMMEDIATE has tearing which is generally undesirable so it can't
+    // be the fallback for MAILBOX. So the fallback order is always IMMEDIATE -> MAILBOX ->
+    // FIFO.
+    {
+        auto HasPresentMode = [](const std::vector<VkPresentModeKHR>& modes,
+                                 VkPresentModeKHR target) -> bool {
+            return std::find(modes.begin(), modes.end(), target) != modes.end();
         };
-        for (uint32_t i = 0; i < 4; i++) {
-            if (surfaceInfo.capabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
-                config.alphaMode = compositeAlphaFlags[i];
-                break;
-            }
+
+        VkPresentModeKHR targetMode = ToVulkanPresentMode(GetPresentMode());
+        const std::array<VkPresentModeKHR, 3> kPresentModeFallbacks = {
+            VK_PRESENT_MODE_IMMEDIATE_KHR,
+            VK_PRESENT_MODE_MAILBOX_KHR,
+            VK_PRESENT_MODE_FIFO_KHR,
+        };
+
+        // Go to the target mode.
+        size_t modeIndex = 0;
+        while (kPresentModeFallbacks[modeIndex] != targetMode) {
+            modeIndex++;
         }
+
+        // Find the first available fallback.
+        while (!HasPresentMode(surfaceInfo.presentModes, kPresentModeFallbacks[modeIndex])) {
+            modeIndex++;
+        }
+
+        ASSERT(modeIndex < kPresentModeFallbacks.size());
+        config.presentMode = kPresentModeFallbacks[modeIndex];
+    }
+
+    // Choose the target width or do a blit.
+    if (GetWidth() < surfaceInfo.capabilities.minImageExtent.width ||
+        GetWidth() > surfaceInfo.capabilities.maxImageExtent.width ||
+        GetHeight() < surfaceInfo.capabilities.minImageExtent.height ||
+        GetHeight() > surfaceInfo.capabilities.maxImageExtent.height) {
+        config.needsBlit = true;
+    } else {
+        config.extent.width = GetWidth();
+        config.extent.height = GetHeight();
+    }
+
+    // Choose the target usage or do a blit.
+    VkImageUsageFlags targetUsages =
+        VulkanImageUsage(GetUsage(), GetDevice()->GetValidInternalFormat(GetFormat()));
+    VkImageUsageFlags supportedUsages = surfaceInfo.capabilities.supportedUsageFlags;
+    if (!IsSubset(targetUsages, supportedUsages)) {
+        config.needsBlit = true;
+    } else {
+        config.usage = targetUsages;
+        config.wgpuUsage = GetUsage();
+    }
+
+    // Only support BGRA8Unorm (and RGBA8Unorm on android) with SRGB color space for now.
+    config.wgpuFormat = GetFormat();
+    config.format = VulkanImageFormat(ToBackend(GetDevice()), config.wgpuFormat);
+    config.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
+
+    bool formatIsSupported = false;
+    for (const VkSurfaceFormatKHR& format : surfaceInfo.formats) {
+        if (format.format == config.format && format.colorSpace == config.colorSpace) {
+            formatIsSupported = true;
+            break;
+        }
+    }
+    if (!formatIsSupported) {
+        return DAWN_INTERNAL_ERROR(absl::StrFormat(
+            "Vulkan SwapChain must support %s with sRGB colorspace.", config.wgpuFormat));
+    }
+
+    // Only the identity transform with opaque alpha is supported for now.
+    DAWN_INVALID_IF(
+        (surfaceInfo.capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) == 0,
+        "Vulkan SwapChain must support the identity transform.");
+
+    config.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
+
+    config.alphaMode = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+#if !defined(DAWN_PLATFORM_ANDROID)
+    DAWN_INVALID_IF(
+        (surfaceInfo.capabilities.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR) == 0,
+        "Vulkan SwapChain must support opaque alpha.");
+#else
+    // TODO(dawn:286): investigate composite alpha for WebGPU native
+    VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
+        VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+        VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
+        VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
+        VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
+    };
+    for (uint32_t i = 0; i < 4; i++) {
+        if (surfaceInfo.capabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
+            config.alphaMode = compositeAlphaFlags[i];
+            break;
+        }
+    }
 #endif  // #if !defined(DAWN_PLATFORM_ANDROID)
 
-        // Choose the number of images for the swapchain= and clamp it to the min and max from the
-        // surface capabilities. maxImageCount = 0 means there is no limit.
-        ASSERT(surfaceInfo.capabilities.maxImageCount == 0 ||
-               surfaceInfo.capabilities.minImageCount <= surfaceInfo.capabilities.maxImageCount);
-        uint32_t targetCount = MinImageCountForPresentMode(config.presentMode);
+    // Choose the number of images for the swapchain and clamp it to the min and max from the
+    // surface capabilities. maxImageCount = 0 means there is no limit.
+    ASSERT(surfaceInfo.capabilities.maxImageCount == 0 ||
+           surfaceInfo.capabilities.minImageCount <= surfaceInfo.capabilities.maxImageCount);
+    uint32_t targetCount = MinImageCountForPresentMode(config.presentMode);
 
-        targetCount = std::max(targetCount, surfaceInfo.capabilities.minImageCount);
-        if (surfaceInfo.capabilities.maxImageCount != 0) {
-            targetCount = std::min(targetCount, surfaceInfo.capabilities.maxImageCount);
-        }
-
-        config.targetImageCount = targetCount;
-
-        // Choose a valid config for the swapchain texture that will receive the blit.
-        if (config.needsBlit) {
-            // Vulkan has provisions to have surfaces that adapt to the swapchain size. If that's
-            // the case it is very likely that the target extent works, but clamp it just in case.
-            // Using the target extent for the blit is better when possible so that texels don't
-            // get stretched. This case is exposed by having the special "-1" value in both
-            // dimensions of the extent.
-            constexpr uint32_t kSpecialValue = 0xFFFF'FFFF;
-            if (surfaceInfo.capabilities.currentExtent.width == kSpecialValue &&
-                surfaceInfo.capabilities.currentExtent.height == kSpecialValue) {
-                // extent = clamp(targetExtent, minExtent, maxExtent)
-                config.extent.width = GetWidth();
-                config.extent.width =
-                    std::min(config.extent.width, surfaceInfo.capabilities.maxImageExtent.width);
-                config.extent.width =
-                    std::max(config.extent.width, surfaceInfo.capabilities.minImageExtent.width);
-
-                config.extent.height = GetHeight();
-                config.extent.height =
-                    std::min(config.extent.height, surfaceInfo.capabilities.maxImageExtent.height);
-                config.extent.height =
-                    std::max(config.extent.height, surfaceInfo.capabilities.minImageExtent.height);
-            } else {
-                // If it is not an adaptable swapchain, just use the current extent for the blit
-                // texture.
-                config.extent = surfaceInfo.capabilities.currentExtent;
-            }
-
-            // TODO(crbug.com/dawn/269): If the swapchain image doesn't support TRANSFER_DST
-            // then we'll need to have a second fallback that uses a blit shader :(
-            if ((supportedUsages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
-                return DAWN_INTERNAL_ERROR(
-                    "SwapChain cannot fallback to a blit because of a missing "
-                    "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
-            }
-            config.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-            config.wgpuUsage = wgpu::TextureUsage::CopyDst;
-        }
-
-        return config;
+    targetCount = std::max(targetCount, surfaceInfo.capabilities.minImageCount);
+    if (surfaceInfo.capabilities.maxImageCount != 0) {
+        targetCount = std::min(targetCount, surfaceInfo.capabilities.maxImageCount);
     }
 
-    MaybeError SwapChain::PresentImpl() {
-        Device* device = ToBackend(GetDevice());
+    config.targetImageCount = targetCount;
 
-        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+    // Choose a valid config for the swapchain texture that will receive the blit.
+    if (config.needsBlit) {
+        // Vulkan has provisions to have surfaces that adapt to the swapchain size. If that's
+        // the case it is very likely that the target extent works, but clamp it just in case.
+        // Using the target extent for the blit is better when possible so that texels don't
+        // get stretched. This case is exposed by having the special "-1" value in both
+        // dimensions of the extent.
+        constexpr uint32_t kSpecialValue = 0xFFFF'FFFF;
+        if (surfaceInfo.capabilities.currentExtent.width == kSpecialValue &&
+            surfaceInfo.capabilities.currentExtent.height == kSpecialValue) {
+            // extent = clamp(targetExtent, minExtent, maxExtent)
+            config.extent.width = GetWidth();
+            config.extent.width =
+                std::min(config.extent.width, surfaceInfo.capabilities.maxImageExtent.width);
+            config.extent.width =
+                std::max(config.extent.width, surfaceInfo.capabilities.minImageExtent.width);
 
-        if (mConfig.needsBlit) {
-            // TODO(dawn:269): ditto same as present below: eagerly transition the blit texture to
-            // CopySrc.
-            mBlitTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
-                                             mBlitTexture->GetAllSubresources());
-            mTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
-                                         mTexture->GetAllSubresources());
-
-            VkImageBlit region;
-            region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
-            region.srcSubresource.mipLevel = 0;
-            region.srcSubresource.baseArrayLayer = 0;
-            region.srcSubresource.layerCount = 1;
-            region.srcOffsets[0] = {0, 0, 0};
-            region.srcOffsets[1] = {static_cast<int32_t>(mBlitTexture->GetWidth()),
-                                    static_cast<int32_t>(mBlitTexture->GetHeight()), 1};
-
-            region.dstSubresource = region.srcSubresource;
-            region.dstOffsets[0] = {0, 0, 0};
-            region.dstOffsets[1] = {static_cast<int32_t>(mTexture->GetWidth()),
-                                    static_cast<int32_t>(mTexture->GetHeight()), 1};
-
-            device->fn.CmdBlitImage(recordingContext->commandBuffer, mBlitTexture->GetHandle(),
-                                    mBlitTexture->GetCurrentLayoutForSwapChain(),
-                                    mTexture->GetHandle(), mTexture->GetCurrentLayoutForSwapChain(),
-                                    1, &region, VK_FILTER_LINEAR);
-
-            // TODO(crbug.com/dawn/269): Find a way to reuse the blit texture between frames
-            // instead of creating a new one every time. This will involve "un-destroying" the
-            // texture or making the blit texture "external".
-            mBlitTexture->APIDestroy();
-            mBlitTexture = nullptr;
+            config.extent.height = GetHeight();
+            config.extent.height =
+                std::min(config.extent.height, surfaceInfo.capabilities.maxImageExtent.height);
+            config.extent.height =
+                std::max(config.extent.height, surfaceInfo.capabilities.minImageExtent.height);
+        } else {
+            // If it is not an adaptable swapchain, just use the current extent for the blit
+            // texture.
+            config.extent = surfaceInfo.capabilities.currentExtent;
         }
 
-        // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
-        // presentable texture to present at the end of submits that use them and ideally even
-        // folding that in the free layout transition at the end of render passes.
-        mTexture->TransitionUsageNow(recordingContext, kPresentTextureUsage,
+        // TODO(crbug.com/dawn/269): If the swapchain image doesn't support TRANSFER_DST
+        // then we'll need to have a second fallback that uses a blit shader :(
+        if ((supportedUsages & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
+            return DAWN_INTERNAL_ERROR(
+                "SwapChain cannot fallback to a blit because of a missing "
+                "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
+        }
+        config.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+        config.wgpuUsage = wgpu::TextureUsage::CopyDst;
+    }
+
+    return config;
+}
+
+MaybeError SwapChain::PresentImpl() {
+    Device* device = ToBackend(GetDevice());
+
+    CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+
+    if (mConfig.needsBlit) {
+        // TODO(dawn:269): ditto same as present below: eagerly transition the blit texture to
+        // CopySrc.
+        mBlitTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopySrc,
+                                         mBlitTexture->GetAllSubresources());
+        mTexture->TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst,
                                      mTexture->GetAllSubresources());
 
-        DAWN_TRY(device->SubmitPendingCommands());
+        VkImageBlit region;
+        region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+        region.srcSubresource.mipLevel = 0;
+        region.srcSubresource.baseArrayLayer = 0;
+        region.srcSubresource.layerCount = 1;
+        region.srcOffsets[0] = {0, 0, 0};
+        region.srcOffsets[1] = {static_cast<int32_t>(mBlitTexture->GetWidth()),
+                                static_cast<int32_t>(mBlitTexture->GetHeight()), 1};
 
-        // Assuming that the present queue is the same as the graphics queue, the proper
-        // synchronization has already been done on the queue so we don't need to wait on any
-        // semaphores.
-        // TODO(crbug.com/dawn/269): Support the present queue not being the main queue.
-        VkPresentInfoKHR presentInfo;
-        presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
-        presentInfo.pNext = nullptr;
-        presentInfo.waitSemaphoreCount = 0;
-        presentInfo.pWaitSemaphores = nullptr;
-        presentInfo.swapchainCount = 1;
-        presentInfo.pSwapchains = &*mSwapChain;
-        presentInfo.pImageIndices = &mLastImageIndex;
-        presentInfo.pResults = nullptr;
+        region.dstSubresource = region.srcSubresource;
+        region.dstOffsets[0] = {0, 0, 0};
+        region.dstOffsets[1] = {static_cast<int32_t>(mTexture->GetWidth()),
+                                static_cast<int32_t>(mTexture->GetHeight()), 1};
 
-        // Free the texture before present so error handling doesn't skip that step.
-        mTexture->APIDestroy();
-        mTexture = nullptr;
+        device->fn.CmdBlitImage(recordingContext->commandBuffer, mBlitTexture->GetHandle(),
+                                mBlitTexture->GetCurrentLayoutForSwapChain(), mTexture->GetHandle(),
+                                mTexture->GetCurrentLayoutForSwapChain(), 1, &region,
+                                VK_FILTER_LINEAR);
 
-        VkResult result =
-            VkResult::WrapUnsafe(device->fn.QueuePresentKHR(device->GetQueue(), &presentInfo));
-
-        switch (result) {
-            case VK_SUCCESS:
-            // VK_SUBOPTIMAL_KHR means "a swapchain no longer matches the surface properties
-            // exactly, but can still be used to present to the surface successfully", so we
-            // can also treat it as a "success" error code of vkQueuePresentKHR().
-            case VK_SUBOPTIMAL_KHR:
-                return {};
-
-            // This present cannot be recovered. Re-initialize the VkSwapchain so that future
-            // presents work..
-            case VK_ERROR_OUT_OF_DATE_KHR:
-                return Initialize(this);
-
-            // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
-            case VK_ERROR_SURFACE_LOST_KHR:
-            default:
-                return CheckVkSuccess(::VkResult(result), "QueuePresent");
-        }
+        // TODO(crbug.com/dawn/269): Find a way to reuse the blit texture between frames
+        // instead of creating a new one every time. This will involve "un-destroying" the
+        // texture or making the blit texture "external".
+        mBlitTexture->APIDestroy();
+        mBlitTexture = nullptr;
     }
 
-    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
-        return GetCurrentTextureViewInternal();
+    // TODO(crbug.com/dawn/269): Remove the need for this by eagerly transitioning the
+    // presentable texture to present at the end of submits that use them and ideally even
+    // folding that in the free layout transition at the end of render passes.
+    mTexture->TransitionUsageNow(recordingContext, kPresentTextureUsage,
+                                 mTexture->GetAllSubresources());
+
+    DAWN_TRY(device->SubmitPendingCommands());
+
+    // Assuming that the present queue is the same as the graphics queue, the proper
+    // synchronization has already been done on the queue so we don't need to wait on any
+    // semaphores.
+    // TODO(crbug.com/dawn/269): Support the present queue not being the main queue.
+    VkPresentInfoKHR presentInfo;
+    presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+    presentInfo.pNext = nullptr;
+    presentInfo.waitSemaphoreCount = 0;
+    presentInfo.pWaitSemaphores = nullptr;
+    presentInfo.swapchainCount = 1;
+    presentInfo.pSwapchains = &*mSwapChain;
+    presentInfo.pImageIndices = &mLastImageIndex;
+    presentInfo.pResults = nullptr;
+
+    // Free the texture before present so error handling doesn't skip that step.
+    mTexture->APIDestroy();
+    mTexture = nullptr;
+
+    VkResult result =
+        VkResult::WrapUnsafe(device->fn.QueuePresentKHR(device->GetQueue(), &presentInfo));
+
+    switch (result) {
+        case VK_SUCCESS:
+        // VK_SUBOPTIMAL_KHR means "a swapchain no longer matches the surface properties
+        // exactly, but can still be used to present to the surface successfully", so we
+        // can also treat it as a "success" error code of vkQueuePresentKHR().
+        case VK_SUBOPTIMAL_KHR:
+            return {};
+
+        // This present cannot be recovered. Re-initialize the VkSwapchain so that future
+        // presents work.
+        case VK_ERROR_OUT_OF_DATE_KHR:
+            return Initialize(this);
+
+        // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+        case VK_ERROR_SURFACE_LOST_KHR:
+        default:
+            return CheckVkSuccess(::VkResult(result), "QueuePresent");
+    }
+}
+
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewImpl() {
+    return GetCurrentTextureViewInternal();
+}
+
+ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewInternal(bool isReentrant) {
+    Device* device = ToBackend(GetDevice());
+
+    // Transiently create a semaphore that will be signaled when the presentation engine is done
+    // with the swapchain image. Further operations on the image will wait for this semaphore.
+    VkSemaphoreCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+
+    VkSemaphore semaphore = VK_NULL_HANDLE;
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.CreateSemaphore(device->GetVkDevice(), &createInfo, nullptr, &*semaphore),
+        "CreateSemaphore"));
+
+    VkResult result = VkResult::WrapUnsafe(device->fn.AcquireNextImageKHR(
+        device->GetVkDevice(), mSwapChain, std::numeric_limits<uint64_t>::max(), semaphore,
+        VkFence{}, &mLastImageIndex));
+
+    if (result == VK_SUCCESS) {
+        // TODO(crbug.com/dawn/269) put the semaphore on the texture so it is waited on when
+        // used instead of directly on the recording context?
+        device->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
+    } else {
+        // The semaphore wasn't actually used (? this is unclear in the spec). Delete it when
+        // we get a chance.
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(semaphore);
     }
 
-    ResultOrError<Ref<TextureViewBase>> SwapChain::GetCurrentTextureViewInternal(bool isReentrant) {
-        Device* device = ToBackend(GetDevice());
+    switch (result) {
+        // TODO(crbug.com/dawn/269): Introduce a mechanism to notify the application that
+        // the swapchain is in a suboptimal state?
+        case VK_SUBOPTIMAL_KHR:
+        case VK_SUCCESS:
+            break;
 
-        // Transiently create a semaphore that will be signaled when the presentation engine is done
-        // with the swapchain image. Further operations on the image will wait for this semaphore.
-        VkSemaphoreCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-
-        VkSemaphore semaphore = VK_NULL_HANDLE;
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.CreateSemaphore(device->GetVkDevice(), &createInfo, nullptr, &*semaphore),
-            "CreateSemaphore"));
-
-        VkResult result = VkResult::WrapUnsafe(device->fn.AcquireNextImageKHR(
-            device->GetVkDevice(), mSwapChain, std::numeric_limits<uint64_t>::max(), semaphore,
-            VkFence{}, &mLastImageIndex));
-
-        if (result == VK_SUCCESS) {
-            // TODO(crbug.com/dawn/269) put the semaphore on the texture so it is waited on when
-            // used instead of directly on the recording context?
-            device->GetPendingRecordingContext()->waitSemaphores.push_back(semaphore);
-        } else {
-            // The semaphore wasn't actually used (? this is unclear in the spec). Delete it when
-            // we get a chance.
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(semaphore);
-        }
-
-        switch (result) {
-            // TODO(crbug.com/dawn/269): Introduce a mechanism to notify the application that
-            // the swapchain is in a suboptimal state?
-            case VK_SUBOPTIMAL_KHR:
-            case VK_SUCCESS:
-                break;
-
-            case VK_ERROR_OUT_OF_DATE_KHR: {
-                // Prevent infinite recursive calls to GetCurrentTextureViewInternal when the
-                // swapchains always return that they are out of date.
-                if (isReentrant) {
-                    // TODO(crbug.com/dawn/269): Allow losing the surface instead?
-                    return DAWN_INTERNAL_ERROR(
-                        "Wasn't able to recuperate the surface after a VK_ERROR_OUT_OF_DATE_KHR");
-                }
-
-                // Re-initialize the VkSwapchain and try getting the texture again.
-                DAWN_TRY(Initialize(this));
-                return GetCurrentTextureViewInternal(true);
+        case VK_ERROR_OUT_OF_DATE_KHR: {
+            // Prevent infinite recursive calls to GetCurrentTextureViewInternal when the
+            // swapchains always return that they are out of date.
+            if (isReentrant) {
+                // TODO(crbug.com/dawn/269): Allow losing the surface instead?
+                return DAWN_INTERNAL_ERROR(
+                    "Wasn't able to recuperate the surface after a VK_ERROR_OUT_OF_DATE_KHR");
             }
 
-            // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
-            case VK_ERROR_SURFACE_LOST_KHR:
-            default:
-                DAWN_TRY(CheckVkSuccess(::VkResult(result), "AcquireNextImage"));
+            // Re-initialize the VkSwapchain and try getting the texture again.
+            DAWN_TRY(Initialize(this));
+            return GetCurrentTextureViewInternal(true);
         }
 
-        TextureDescriptor textureDesc;
-        textureDesc.size.width = mConfig.extent.width;
-        textureDesc.size.height = mConfig.extent.height;
-        textureDesc.format = mConfig.wgpuFormat;
-        textureDesc.usage = mConfig.wgpuUsage;
-
-        VkImage currentImage = mSwapChainImages[mLastImageIndex];
-        mTexture = Texture::CreateForSwapChain(device, &textureDesc, currentImage);
-
-        // In the happy path we can use the swapchain image directly.
-        if (!mConfig.needsBlit) {
-            return mTexture->CreateView();
-        }
-
-        // The blit texture always perfectly matches what the user requested for the swapchain.
-        // We need to add the Vulkan TRANSFER_SRC flag for the vkCmdBlitImage call.
-        TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
-        DAWN_TRY_ASSIGN(mBlitTexture,
-                        Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
-        return mBlitTexture->CreateView();
+        // TODO(crbug.com/dawn/269): Allow losing the surface at Dawn's API level?
+        case VK_ERROR_SURFACE_LOST_KHR:
+        default:
+            DAWN_TRY(CheckVkSuccess(::VkResult(result), "AcquireNextImage"));
     }
 
-    void SwapChain::DetachFromSurfaceImpl() {
-        if (mTexture != nullptr) {
-            mTexture->APIDestroy();
-            mTexture = nullptr;
-        }
+    TextureDescriptor textureDesc;
+    textureDesc.size.width = mConfig.extent.width;
+    textureDesc.size.height = mConfig.extent.height;
+    textureDesc.format = mConfig.wgpuFormat;
+    textureDesc.usage = mConfig.wgpuUsage;
 
-        if (mBlitTexture != nullptr) {
-            mBlitTexture->APIDestroy();
-            mBlitTexture = nullptr;
-        }
+    VkImage currentImage = mSwapChainImages[mLastImageIndex];
+    mTexture = Texture::CreateForSwapChain(device, &textureDesc, currentImage);
 
-        // The swapchain images are destroyed with the swapchain.
-        if (mSwapChain != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
-            mSwapChain = VK_NULL_HANDLE;
-        }
-
-        if (mVkSurface != VK_NULL_HANDLE) {
-            ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mVkSurface);
-            mVkSurface = VK_NULL_HANDLE;
-        }
+    // In the happy path we can use the swapchain image directly.
+    if (!mConfig.needsBlit) {
+        return mTexture->CreateView();
     }
 
+    // The blit texture always perfectly matches what the user requested for the swapchain.
+    // We need to add the Vulkan TRANSFER_SRC flag for the vkCmdBlitImage call.
+    TextureDescriptor desc = GetSwapChainBaseTextureDescriptor(this);
+    DAWN_TRY_ASSIGN(mBlitTexture, Texture::Create(device, &desc, VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
+    return mBlitTexture->CreateView();
+}
+
+void SwapChain::DetachFromSurfaceImpl() {
+    if (mTexture != nullptr) {
+        mTexture->APIDestroy();
+        mTexture = nullptr;
+    }
+
+    if (mBlitTexture != nullptr) {
+        mBlitTexture->APIDestroy();
+        mBlitTexture = nullptr;
+    }
+
+    // The swapchain images are destroyed with the swapchain.
+    if (mSwapChain != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mSwapChain);
+        mSwapChain = VK_NULL_HANDLE;
+    }
+
+    if (mVkSurface != VK_NULL_HANDLE) {
+        ToBackend(GetDevice())->GetFencedDeleter()->DeleteWhenUnused(mVkSurface);
+        mVkSurface = VK_NULL_HANDLE;
+    }
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/SwapChainVk.h b/src/dawn/native/vulkan/SwapChainVk.h
index 1f1f9ca..7163de7 100644
--- a/src/dawn/native/vulkan/SwapChainVk.h
+++ b/src/dawn/native/vulkan/SwapChainVk.h
@@ -23,75 +23,75 @@
 
 namespace dawn::native::vulkan {
 
-    class Device;
-    class Texture;
-    struct VulkanSurfaceInfo;
+class Device;
+class Texture;
+struct VulkanSurfaceInfo;
 
-    class OldSwapChain : public OldSwapChainBase {
-      public:
-        static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
+class OldSwapChain : public OldSwapChainBase {
+  public:
+    static Ref<OldSwapChain> Create(Device* device, const SwapChainDescriptor* descriptor);
 
-      protected:
-        OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
-        ~OldSwapChain() override;
+  protected:
+    OldSwapChain(Device* device, const SwapChainDescriptor* descriptor);
+    ~OldSwapChain() override;
 
-        TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
-        MaybeError OnBeforePresent(TextureViewBase* texture) override;
+    TextureBase* GetNextTextureImpl(const TextureDescriptor* descriptor) override;
+    MaybeError OnBeforePresent(TextureViewBase* texture) override;
 
-      private:
-        wgpu::TextureUsage mTextureUsage;
+  private:
+    wgpu::TextureUsage mTextureUsage;
+};
+
+class SwapChain : public NewSwapChainBase {
+  public:
+    static ResultOrError<Ref<SwapChain>> Create(Device* device,
+                                                Surface* surface,
+                                                NewSwapChainBase* previousSwapChain,
+                                                const SwapChainDescriptor* descriptor);
+    ~SwapChain() override;
+
+  private:
+    using NewSwapChainBase::NewSwapChainBase;
+    MaybeError Initialize(NewSwapChainBase* previousSwapChain);
+    void DestroyImpl() override;
+
+    struct Config {
+        // Information that's passed to vulkan swapchain creation.
+        VkPresentModeKHR presentMode;
+        VkExtent2D extent;
+        VkImageUsageFlags usage;
+        VkFormat format;
+        VkColorSpaceKHR colorSpace;
+        uint32_t targetImageCount;
+        VkSurfaceTransformFlagBitsKHR transform;
+        VkCompositeAlphaFlagBitsKHR alphaMode;
+
+        // Redundant information but as WebGPU enums to create the wgpu::Texture that
+        // encapsulates the native swapchain texture.
+        wgpu::TextureUsage wgpuUsage;
+        wgpu::TextureFormat wgpuFormat;
+
+        // Information about the blit workarounds we need to do (if any)
+        bool needsBlit = false;
     };
+    ResultOrError<Config> ChooseConfig(const VulkanSurfaceInfo& surfaceInfo) const;
+    ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewInternal(bool isReentrant = false);
 
-    class SwapChain : public NewSwapChainBase {
-      public:
-        static ResultOrError<Ref<SwapChain>> Create(Device* device,
-                                                    Surface* surface,
-                                                    NewSwapChainBase* previousSwapChain,
-                                                    const SwapChainDescriptor* descriptor);
-        ~SwapChain() override;
+    // NewSwapChainBase implementation
+    MaybeError PresentImpl() override;
+    ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
+    void DetachFromSurfaceImpl() override;
 
-      private:
-        using NewSwapChainBase::NewSwapChainBase;
-        MaybeError Initialize(NewSwapChainBase* previousSwapChain);
-        void DestroyImpl() override;
+    Config mConfig;
 
-        struct Config {
-            // Information that's passed to vulkan swapchain creation.
-            VkPresentModeKHR presentMode;
-            VkExtent2D extent;
-            VkImageUsageFlags usage;
-            VkFormat format;
-            VkColorSpaceKHR colorSpace;
-            uint32_t targetImageCount;
-            VkSurfaceTransformFlagBitsKHR transform;
-            VkCompositeAlphaFlagBitsKHR alphaMode;
+    VkSurfaceKHR mVkSurface = VK_NULL_HANDLE;
+    VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
+    std::vector<VkImage> mSwapChainImages;
+    uint32_t mLastImageIndex = 0;
 
-            // Redundant information but as WebGPU enums to create the wgpu::Texture that
-            // encapsulates the native swapchain texture.
-            wgpu::TextureUsage wgpuUsage;
-            wgpu::TextureFormat wgpuFormat;
-
-            // Information about the blit workarounds we need to do (if any)
-            bool needsBlit = false;
-        };
-        ResultOrError<Config> ChooseConfig(const VulkanSurfaceInfo& surfaceInfo) const;
-        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewInternal(bool isReentrant = false);
-
-        // NewSwapChainBase implementation
-        MaybeError PresentImpl() override;
-        ResultOrError<Ref<TextureViewBase>> GetCurrentTextureViewImpl() override;
-        void DetachFromSurfaceImpl() override;
-
-        Config mConfig;
-
-        VkSurfaceKHR mVkSurface = VK_NULL_HANDLE;
-        VkSwapchainKHR mSwapChain = VK_NULL_HANDLE;
-        std::vector<VkImage> mSwapChainImages;
-        uint32_t mLastImageIndex = 0;
-
-        Ref<Texture> mBlitTexture;
-        Ref<Texture> mTexture;
-    };
+    Ref<Texture> mBlitTexture;
+    Ref<Texture> mTexture;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/TextureVk.cpp b/src/dawn/native/vulkan/TextureVk.cpp
index 378a2c5..050050a 100644
--- a/src/dawn/native/vulkan/TextureVk.cpp
+++ b/src/dawn/native/vulkan/TextureVk.cpp
@@ -35,1397 +35,1380 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
-        // Converts an Dawn texture dimension to a Vulkan image view type.
-        // Contrary to image types, image view types include arrayness and cubemapness
-        VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
-            switch (dimension) {
-                case wgpu::TextureViewDimension::e1D:
-                    return VK_IMAGE_VIEW_TYPE_1D;
-                case wgpu::TextureViewDimension::e2D:
-                    return VK_IMAGE_VIEW_TYPE_2D;
-                case wgpu::TextureViewDimension::e2DArray:
-                    return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
-                case wgpu::TextureViewDimension::Cube:
-                    return VK_IMAGE_VIEW_TYPE_CUBE;
-                case wgpu::TextureViewDimension::CubeArray:
-                    return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
-                case wgpu::TextureViewDimension::e3D:
-                    return VK_IMAGE_VIEW_TYPE_3D;
+namespace {
+// Converts a Dawn texture dimension to a Vulkan image view type.
+// Contrary to image types, image view types include arrayness and cubemapness
+VkImageViewType VulkanImageViewType(wgpu::TextureViewDimension dimension) {
+    switch (dimension) {
+        case wgpu::TextureViewDimension::e1D:
+            return VK_IMAGE_VIEW_TYPE_1D;
+        case wgpu::TextureViewDimension::e2D:
+            return VK_IMAGE_VIEW_TYPE_2D;
+        case wgpu::TextureViewDimension::e2DArray:
+            return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+        case wgpu::TextureViewDimension::Cube:
+            return VK_IMAGE_VIEW_TYPE_CUBE;
+        case wgpu::TextureViewDimension::CubeArray:
+            return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+        case wgpu::TextureViewDimension::e3D:
+            return VK_IMAGE_VIEW_TYPE_3D;
 
-                case wgpu::TextureViewDimension::Undefined:
-                    break;
-            }
-            UNREACHABLE();
+        case wgpu::TextureViewDimension::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+// Computes which vulkan access type could be required for the given Dawn usage.
+// TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
+// the previous usage is readonly because an execution dependency is sufficient.
+VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
+    VkAccessFlags flags = 0;
+
+    if (usage & wgpu::TextureUsage::CopySrc) {
+        flags |= VK_ACCESS_TRANSFER_READ_BIT;
+    }
+    if (usage & wgpu::TextureUsage::CopyDst) {
+        flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
+    }
+    if (usage & wgpu::TextureUsage::TextureBinding) {
+        flags |= VK_ACCESS_SHADER_READ_BIT;
+    }
+    if (usage & wgpu::TextureUsage::StorageBinding) {
+        flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
+    }
+    if (usage & wgpu::TextureUsage::RenderAttachment) {
+        if (format.HasDepthOrStencil()) {
+            flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+                     VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+        } else {
+            flags |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
         }
-
-        // Computes which vulkan access type could be required for the given Dawn usage.
-        // TODO(crbug.com/dawn/269): We shouldn't need any access usages for srcAccessMask when
-        // the previous usage is readonly because an execution dependency is sufficient.
-        VkAccessFlags VulkanAccessFlags(wgpu::TextureUsage usage, const Format& format) {
-            VkAccessFlags flags = 0;
-
-            if (usage & wgpu::TextureUsage::CopySrc) {
-                flags |= VK_ACCESS_TRANSFER_READ_BIT;
-            }
-            if (usage & wgpu::TextureUsage::CopyDst) {
-                flags |= VK_ACCESS_TRANSFER_WRITE_BIT;
-            }
-            if (usage & wgpu::TextureUsage::TextureBinding) {
-                flags |= VK_ACCESS_SHADER_READ_BIT;
-            }
-            if (usage & wgpu::TextureUsage::StorageBinding) {
-                flags |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
-            }
-            if (usage & wgpu::TextureUsage::RenderAttachment) {
-                if (format.HasDepthOrStencil()) {
-                    flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
-                             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-                } else {
-                    flags |=
-                        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-                }
-            }
-            if (usage & kReadOnlyRenderAttachment) {
-                flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
-            }
-            if (usage & kPresentTextureUsage) {
-                // The present usage is only used internally by the swapchain and is never used in
-                // combination with other usages.
-                ASSERT(usage == kPresentTextureUsage);
-                // The Vulkan spec has the following note:
-                //
-                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
-                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
-                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
-                //   automatic visibility operations). To achieve this, the dstAccessMask member of
-                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
-                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
-                //
-                // So on the transition to Present we don't need an access flag. The other
-                // direction doesn't matter because swapchain textures always start a new frame
-                // as uninitialized.
-                flags |= 0;
-            }
-
-            return flags;
-        }
-
-        // Computes which Vulkan pipeline stage can access a texture in the given Dawn usage
-        VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
-            VkPipelineStageFlags flags = 0;
-
-            if (usage == wgpu::TextureUsage::None) {
-                // This only happens when a texture is initially created (and for srcAccessMask) in
-                // which case there is no need to wait on anything to stop accessing this texture.
-                return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
-            }
-            if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
-                flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
-            }
-            if (usage & wgpu::TextureUsage::TextureBinding) {
-                // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
-                // introducing FS -> VS dependencies that would prevent parallelization on tiler
-                // GPUs
-                flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
-                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
-                         VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-            }
-            if (usage & wgpu::TextureUsage::StorageBinding) {
-                flags |=
-                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
-            }
-            if (usage & (wgpu::TextureUsage::RenderAttachment | kReadOnlyRenderAttachment)) {
-                if (format.HasDepthOrStencil()) {
-                    flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
-                             VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
-                } else {
-                    flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
-                }
-            }
-            if (usage & kPresentTextureUsage) {
-                // The present usage is only used internally by the swapchain and is never used in
-                // combination with other usages.
-                ASSERT(usage == kPresentTextureUsage);
-                // The Vulkan spec has the following note:
-                //
-                //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
-                //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
-                //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
-                //   automatic visibility operations). To achieve this, the dstAccessMask member of
-                //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
-                //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
-                //
-                // So on the transition to Present we use the "bottom of pipe" stage. The other
-                // direction doesn't matter because swapchain textures always start a new frame
-                // as uninitialized.
-                flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
-            }
-
-            // A zero value isn't a valid pipeline stage mask
-            ASSERT(flags != 0);
-            return flags;
-        }
-
-        VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
-                                                wgpu::TextureUsage lastUsage,
-                                                wgpu::TextureUsage usage,
-                                                const SubresourceRange& range) {
-            VkImageMemoryBarrier barrier;
-            barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
-            barrier.pNext = nullptr;
-            barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
-            barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
-            barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
-            barrier.newLayout = VulkanImageLayout(texture, usage);
-            barrier.image = texture->GetHandle();
-            barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
-            barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
-            barrier.subresourceRange.levelCount = range.levelCount;
-            barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
-            barrier.subresourceRange.layerCount = range.layerCount;
-
-            barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-            barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-            return barrier;
-        }
-
-        void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
-            const Extent3D& size = texture.GetSize();
-
-            info->mipLevels = texture.GetNumMipLevels();
-            info->samples = VulkanSampleCount(texture.GetSampleCount());
-
-            // Fill in the image type, and paper over differences in how the array layer count is
-            // specified between WebGPU and Vulkan.
-            switch (texture.GetDimension()) {
-                case wgpu::TextureDimension::e1D:
-                    info->imageType = VK_IMAGE_TYPE_1D;
-                    info->extent = {size.width, 1, 1};
-                    info->arrayLayers = 1;
-                    break;
-
-                case wgpu::TextureDimension::e2D:
-                    info->imageType = VK_IMAGE_TYPE_2D;
-                    info->extent = {size.width, size.height, 1};
-                    info->arrayLayers = size.depthOrArrayLayers;
-                    break;
-
-                case wgpu::TextureDimension::e3D:
-                    info->imageType = VK_IMAGE_TYPE_3D;
-                    info->extent = {size.width, size.height, size.depthOrArrayLayers};
-                    info->arrayLayers = 1;
-                    break;
-            }
-        }
-
-    }  // namespace
-
-    // Converts Dawn texture format to Vulkan formats.
-    VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
-        switch (format) {
-            case wgpu::TextureFormat::R8Unorm:
-                return VK_FORMAT_R8_UNORM;
-            case wgpu::TextureFormat::R8Snorm:
-                return VK_FORMAT_R8_SNORM;
-            case wgpu::TextureFormat::R8Uint:
-                return VK_FORMAT_R8_UINT;
-            case wgpu::TextureFormat::R8Sint:
-                return VK_FORMAT_R8_SINT;
-
-            case wgpu::TextureFormat::R16Uint:
-                return VK_FORMAT_R16_UINT;
-            case wgpu::TextureFormat::R16Sint:
-                return VK_FORMAT_R16_SINT;
-            case wgpu::TextureFormat::R16Float:
-                return VK_FORMAT_R16_SFLOAT;
-            case wgpu::TextureFormat::RG8Unorm:
-                return VK_FORMAT_R8G8_UNORM;
-            case wgpu::TextureFormat::RG8Snorm:
-                return VK_FORMAT_R8G8_SNORM;
-            case wgpu::TextureFormat::RG8Uint:
-                return VK_FORMAT_R8G8_UINT;
-            case wgpu::TextureFormat::RG8Sint:
-                return VK_FORMAT_R8G8_SINT;
-
-            case wgpu::TextureFormat::R32Uint:
-                return VK_FORMAT_R32_UINT;
-            case wgpu::TextureFormat::R32Sint:
-                return VK_FORMAT_R32_SINT;
-            case wgpu::TextureFormat::R32Float:
-                return VK_FORMAT_R32_SFLOAT;
-            case wgpu::TextureFormat::RG16Uint:
-                return VK_FORMAT_R16G16_UINT;
-            case wgpu::TextureFormat::RG16Sint:
-                return VK_FORMAT_R16G16_SINT;
-            case wgpu::TextureFormat::RG16Float:
-                return VK_FORMAT_R16G16_SFLOAT;
-            case wgpu::TextureFormat::RGBA8Unorm:
-                return VK_FORMAT_R8G8B8A8_UNORM;
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-                return VK_FORMAT_R8G8B8A8_SRGB;
-            case wgpu::TextureFormat::RGBA8Snorm:
-                return VK_FORMAT_R8G8B8A8_SNORM;
-            case wgpu::TextureFormat::RGBA8Uint:
-                return VK_FORMAT_R8G8B8A8_UINT;
-            case wgpu::TextureFormat::RGBA8Sint:
-                return VK_FORMAT_R8G8B8A8_SINT;
-            case wgpu::TextureFormat::BGRA8Unorm:
-                return VK_FORMAT_B8G8R8A8_UNORM;
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-                return VK_FORMAT_B8G8R8A8_SRGB;
-            case wgpu::TextureFormat::RGB10A2Unorm:
-                return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
-            case wgpu::TextureFormat::RG11B10Ufloat:
-                return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-                return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
-
-            case wgpu::TextureFormat::RG32Uint:
-                return VK_FORMAT_R32G32_UINT;
-            case wgpu::TextureFormat::RG32Sint:
-                return VK_FORMAT_R32G32_SINT;
-            case wgpu::TextureFormat::RG32Float:
-                return VK_FORMAT_R32G32_SFLOAT;
-            case wgpu::TextureFormat::RGBA16Uint:
-                return VK_FORMAT_R16G16B16A16_UINT;
-            case wgpu::TextureFormat::RGBA16Sint:
-                return VK_FORMAT_R16G16B16A16_SINT;
-            case wgpu::TextureFormat::RGBA16Float:
-                return VK_FORMAT_R16G16B16A16_SFLOAT;
-
-            case wgpu::TextureFormat::RGBA32Uint:
-                return VK_FORMAT_R32G32B32A32_UINT;
-            case wgpu::TextureFormat::RGBA32Sint:
-                return VK_FORMAT_R32G32B32A32_SINT;
-            case wgpu::TextureFormat::RGBA32Float:
-                return VK_FORMAT_R32G32B32A32_SFLOAT;
-
-            case wgpu::TextureFormat::Depth16Unorm:
-                return VK_FORMAT_D16_UNORM;
-            case wgpu::TextureFormat::Depth32Float:
-                return VK_FORMAT_D32_SFLOAT;
-            case wgpu::TextureFormat::Depth24Plus:
-                return VK_FORMAT_D32_SFLOAT;
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-                // Depth24PlusStencil8 maps to either of these two formats because only requires
-                // that one of the two be present. The VulkanUseD32S8 toggle combines the wish of
-                // the environment, default to using D32S8, and availability information so we know
-                // that the format is available.
-                if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
-                    return VK_FORMAT_D32_SFLOAT_S8_UINT;
-                } else {
-                    return VK_FORMAT_D24_UNORM_S8_UINT;
-                }
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-                return VK_FORMAT_D24_UNORM_S8_UINT;
-            case wgpu::TextureFormat::Depth32FloatStencil8:
-                return VK_FORMAT_D32_SFLOAT_S8_UINT;
-            case wgpu::TextureFormat::Stencil8:
-                // Try to use the stencil8 format if possible, otherwise use whatever format we can
-                // use that contains a stencil8 component.
-                if (device->IsToggleEnabled(Toggle::VulkanUseS8)) {
-                    return VK_FORMAT_S8_UINT;
-                } else {
-                    return VulkanImageFormat(device, wgpu::TextureFormat::Depth24PlusStencil8);
-                }
-
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-                return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-                return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-                return VK_FORMAT_BC2_UNORM_BLOCK;
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-                return VK_FORMAT_BC2_SRGB_BLOCK;
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-                return VK_FORMAT_BC3_UNORM_BLOCK;
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-                return VK_FORMAT_BC3_SRGB_BLOCK;
-            case wgpu::TextureFormat::BC4RSnorm:
-                return VK_FORMAT_BC4_SNORM_BLOCK;
-            case wgpu::TextureFormat::BC4RUnorm:
-                return VK_FORMAT_BC4_UNORM_BLOCK;
-            case wgpu::TextureFormat::BC5RGSnorm:
-                return VK_FORMAT_BC5_SNORM_BLOCK;
-            case wgpu::TextureFormat::BC5RGUnorm:
-                return VK_FORMAT_BC5_UNORM_BLOCK;
-            case wgpu::TextureFormat::BC6HRGBFloat:
-                return VK_FORMAT_BC6H_SFLOAT_BLOCK;
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-                return VK_FORMAT_BC6H_UFLOAT_BLOCK;
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-                return VK_FORMAT_BC7_UNORM_BLOCK;
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-                return VK_FORMAT_BC7_SRGB_BLOCK;
-
-            case wgpu::TextureFormat::ETC2RGB8Unorm:
-                return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
-            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-                return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
-            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-                return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
-            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-                return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
-            case wgpu::TextureFormat::ETC2RGBA8Unorm:
-                return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
-            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-                return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
-            case wgpu::TextureFormat::EACR11Unorm:
-                return VK_FORMAT_EAC_R11_UNORM_BLOCK;
-            case wgpu::TextureFormat::EACR11Snorm:
-                return VK_FORMAT_EAC_R11_SNORM_BLOCK;
-            case wgpu::TextureFormat::EACRG11Unorm:
-                return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
-            case wgpu::TextureFormat::EACRG11Snorm:
-                return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
-
-            case wgpu::TextureFormat::ASTC4x4Unorm:
-                return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-                return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC5x4Unorm:
-                return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-                return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC5x5Unorm:
-                return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-                return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC6x5Unorm:
-                return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-                return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC6x6Unorm:
-                return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-                return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC8x5Unorm:
-                return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-                return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC8x6Unorm:
-                return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-                return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC8x8Unorm:
-                return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-                return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC10x5Unorm:
-                return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-                return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC10x6Unorm:
-                return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-                return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC10x8Unorm:
-                return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-                return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC10x10Unorm:
-                return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-                return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC12x10Unorm:
-                return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-                return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
-            case wgpu::TextureFormat::ASTC12x12Unorm:
-                return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
-            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-                return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
-
-            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
-                return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
-
-            case wgpu::TextureFormat::Undefined:
-                break;
-        }
-        UNREACHABLE();
+    }
+    if (usage & kReadOnlyRenderAttachment) {
+        flags |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+    }
+    if (usage & kPresentTextureUsage) {
+        // The present usage is only used internally by the swapchain and is never used in
+        // combination with other usages.
+        ASSERT(usage == kPresentTextureUsage);
+        // The Vulkan spec has the following note:
+        //
+        //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
+        //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
+        //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
+        //   automatic visibility operations). To achieve this, the dstAccessMask member of
+        //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
+        //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
+        //
+        // So on the transition to Present we don't need an access flag. The other
+        // direction doesn't matter because swapchain textures always start a new frame
+        // as uninitialized.
+        flags |= 0;
     }
 
-    // Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
-    // between color and depth attachment usages.
-    VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
-        VkImageUsageFlags flags = 0;
+    return flags;
+}
 
-        if (usage & wgpu::TextureUsage::CopySrc) {
-            flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+// Computes which Vulkan pipeline stage can access a texture in the given Dawn usage
+VkPipelineStageFlags VulkanPipelineStage(wgpu::TextureUsage usage, const Format& format) {
+    VkPipelineStageFlags flags = 0;
+
+    if (usage == wgpu::TextureUsage::None) {
+        // This only happens when a texture is initially created (and for srcAccessMask) in
+        // which case there is no need to wait on anything to stop accessing this texture.
+        return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+    }
+    if (usage & (wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) {
+        flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
+    }
+    if (usage & wgpu::TextureUsage::TextureBinding) {
+        // TODO(crbug.com/dawn/851): Only transition to the usage we care about to avoid
+        // introducing FS -> VS dependencies that would prevent parallelization on tiler
+        // GPUs
+        flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+                 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    }
+    if (usage & wgpu::TextureUsage::StorageBinding) {
+        flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
+    }
+    if (usage & (wgpu::TextureUsage::RenderAttachment | kReadOnlyRenderAttachment)) {
+        if (format.HasDepthOrStencil()) {
+            flags |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
+                     VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+        } else {
+            flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
         }
-        if (usage & wgpu::TextureUsage::CopyDst) {
-            flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-        }
-        if (usage & wgpu::TextureUsage::TextureBinding) {
-            flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
-            // If the sampled texture is a depth/stencil texture, its image layout will be set
-            // to DEPTH_STENCIL_READ_ONLY_OPTIMAL in order to support readonly depth/stencil
-            // attachment. That layout requires DEPTH_STENCIL_ATTACHMENT_BIT image usage.
-            if (format.HasDepthOrStencil() && format.isRenderable) {
-                flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
-            }
-        }
-        if (usage & wgpu::TextureUsage::StorageBinding) {
-            flags |= VK_IMAGE_USAGE_STORAGE_BIT;
-        }
-        if (usage & wgpu::TextureUsage::RenderAttachment) {
-            if (format.HasDepthOrStencil()) {
-                flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+    }
+    if (usage & kPresentTextureUsage) {
+        // The present usage is only used internally by the swapchain and is never used in
+        // combination with other usages.
+        ASSERT(usage == kPresentTextureUsage);
+        // The Vulkan spec has the following note:
+        //
+        //   When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or
+        //   VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent
+        //   processing, or perform any visibility operations (as vkQueuePresentKHR performs
+        //   automatic visibility operations). To achieve this, the dstAccessMask member of
+        //   the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter
+        //   should be set to VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.
+        //
+        // So on the transition to Present we use the "bottom of pipe" stage. The other
+        // direction doesn't matter because swapchain textures always start a new frame
+        // as uninitialized.
+        flags |= VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+    }
+
+    // A zero value isn't a valid pipeline stage mask
+    ASSERT(flags != 0);
+    return flags;
+}
+
+VkImageMemoryBarrier BuildMemoryBarrier(const Texture* texture,
+                                        wgpu::TextureUsage lastUsage,
+                                        wgpu::TextureUsage usage,
+                                        const SubresourceRange& range) {
+    VkImageMemoryBarrier barrier;
+    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+    barrier.pNext = nullptr;
+    barrier.srcAccessMask = VulkanAccessFlags(lastUsage, texture->GetFormat());
+    barrier.dstAccessMask = VulkanAccessFlags(usage, texture->GetFormat());
+    barrier.oldLayout = VulkanImageLayout(texture, lastUsage);
+    barrier.newLayout = VulkanImageLayout(texture, usage);
+    barrier.image = texture->GetHandle();
+    barrier.subresourceRange.aspectMask = VulkanAspectMask(range.aspects);
+    barrier.subresourceRange.baseMipLevel = range.baseMipLevel;
+    barrier.subresourceRange.levelCount = range.levelCount;
+    barrier.subresourceRange.baseArrayLayer = range.baseArrayLayer;
+    barrier.subresourceRange.layerCount = range.layerCount;
+
+    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+    return barrier;
+}
+
+void FillVulkanCreateInfoSizesAndType(const Texture& texture, VkImageCreateInfo* info) {
+    const Extent3D& size = texture.GetSize();
+
+    info->mipLevels = texture.GetNumMipLevels();
+    info->samples = VulkanSampleCount(texture.GetSampleCount());
+
+    // Fill in the image type, and paper over differences in how the array layer count is
+    // specified between WebGPU and Vulkan.
+    switch (texture.GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            info->imageType = VK_IMAGE_TYPE_1D;
+            info->extent = {size.width, 1, 1};
+            info->arrayLayers = 1;
+            break;
+
+        case wgpu::TextureDimension::e2D:
+            info->imageType = VK_IMAGE_TYPE_2D;
+            info->extent = {size.width, size.height, 1};
+            info->arrayLayers = size.depthOrArrayLayers;
+            break;
+
+        case wgpu::TextureDimension::e3D:
+            info->imageType = VK_IMAGE_TYPE_3D;
+            info->extent = {size.width, size.height, size.depthOrArrayLayers};
+            info->arrayLayers = 1;
+            break;
+    }
+}
+
+}  // namespace
+
+// Converts Dawn texture format to Vulkan formats.
+VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::R8Unorm:
+            return VK_FORMAT_R8_UNORM;
+        case wgpu::TextureFormat::R8Snorm:
+            return VK_FORMAT_R8_SNORM;
+        case wgpu::TextureFormat::R8Uint:
+            return VK_FORMAT_R8_UINT;
+        case wgpu::TextureFormat::R8Sint:
+            return VK_FORMAT_R8_SINT;
+
+        case wgpu::TextureFormat::R16Uint:
+            return VK_FORMAT_R16_UINT;
+        case wgpu::TextureFormat::R16Sint:
+            return VK_FORMAT_R16_SINT;
+        case wgpu::TextureFormat::R16Float:
+            return VK_FORMAT_R16_SFLOAT;
+        case wgpu::TextureFormat::RG8Unorm:
+            return VK_FORMAT_R8G8_UNORM;
+        case wgpu::TextureFormat::RG8Snorm:
+            return VK_FORMAT_R8G8_SNORM;
+        case wgpu::TextureFormat::RG8Uint:
+            return VK_FORMAT_R8G8_UINT;
+        case wgpu::TextureFormat::RG8Sint:
+            return VK_FORMAT_R8G8_SINT;
+
+        case wgpu::TextureFormat::R32Uint:
+            return VK_FORMAT_R32_UINT;
+        case wgpu::TextureFormat::R32Sint:
+            return VK_FORMAT_R32_SINT;
+        case wgpu::TextureFormat::R32Float:
+            return VK_FORMAT_R32_SFLOAT;
+        case wgpu::TextureFormat::RG16Uint:
+            return VK_FORMAT_R16G16_UINT;
+        case wgpu::TextureFormat::RG16Sint:
+            return VK_FORMAT_R16G16_SINT;
+        case wgpu::TextureFormat::RG16Float:
+            return VK_FORMAT_R16G16_SFLOAT;
+        case wgpu::TextureFormat::RGBA8Unorm:
+            return VK_FORMAT_R8G8B8A8_UNORM;
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+            return VK_FORMAT_R8G8B8A8_SRGB;
+        case wgpu::TextureFormat::RGBA8Snorm:
+            return VK_FORMAT_R8G8B8A8_SNORM;
+        case wgpu::TextureFormat::RGBA8Uint:
+            return VK_FORMAT_R8G8B8A8_UINT;
+        case wgpu::TextureFormat::RGBA8Sint:
+            return VK_FORMAT_R8G8B8A8_SINT;
+        case wgpu::TextureFormat::BGRA8Unorm:
+            return VK_FORMAT_B8G8R8A8_UNORM;
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+            return VK_FORMAT_B8G8R8A8_SRGB;
+        case wgpu::TextureFormat::RGB10A2Unorm:
+            return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
+        case wgpu::TextureFormat::RG11B10Ufloat:
+            return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+            return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
+
+        case wgpu::TextureFormat::RG32Uint:
+            return VK_FORMAT_R32G32_UINT;
+        case wgpu::TextureFormat::RG32Sint:
+            return VK_FORMAT_R32G32_SINT;
+        case wgpu::TextureFormat::RG32Float:
+            return VK_FORMAT_R32G32_SFLOAT;
+        case wgpu::TextureFormat::RGBA16Uint:
+            return VK_FORMAT_R16G16B16A16_UINT;
+        case wgpu::TextureFormat::RGBA16Sint:
+            return VK_FORMAT_R16G16B16A16_SINT;
+        case wgpu::TextureFormat::RGBA16Float:
+            return VK_FORMAT_R16G16B16A16_SFLOAT;
+
+        case wgpu::TextureFormat::RGBA32Uint:
+            return VK_FORMAT_R32G32B32A32_UINT;
+        case wgpu::TextureFormat::RGBA32Sint:
+            return VK_FORMAT_R32G32B32A32_SINT;
+        case wgpu::TextureFormat::RGBA32Float:
+            return VK_FORMAT_R32G32B32A32_SFLOAT;
+
+        case wgpu::TextureFormat::Depth16Unorm:
+            return VK_FORMAT_D16_UNORM;
+        case wgpu::TextureFormat::Depth32Float:
+            return VK_FORMAT_D32_SFLOAT;
+        case wgpu::TextureFormat::Depth24Plus:
+            return VK_FORMAT_D32_SFLOAT;
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+            // Depth24PlusStencil8 maps to either of these two formats because WebGPU only
+            // requires that one of the two be present. The VulkanUseD32S8 toggle combines the
+            // environment's preference (defaulting to D32S8) with availability information, so
+            // we know the chosen format is available.
+            if (device->IsToggleEnabled(Toggle::VulkanUseD32S8)) {
+                return VK_FORMAT_D32_SFLOAT_S8_UINT;
             } else {
-                flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+                return VK_FORMAT_D24_UNORM_S8_UINT;
             }
-        }
-        if (usage & kReadOnlyRenderAttachment) {
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+            return VK_FORMAT_D24_UNORM_S8_UINT;
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+            return VK_FORMAT_D32_SFLOAT_S8_UINT;
+        case wgpu::TextureFormat::Stencil8:
+            // Try to use the stencil8 format if possible, otherwise use whatever format we can
+            // use that contains a stencil8 component.
+            if (device->IsToggleEnabled(Toggle::VulkanUseS8)) {
+                return VK_FORMAT_S8_UINT;
+            } else {
+                return VulkanImageFormat(device, wgpu::TextureFormat::Depth24PlusStencil8);
+            }
+
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+            return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+            return VK_FORMAT_BC1_RGBA_SRGB_BLOCK;
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+            return VK_FORMAT_BC2_UNORM_BLOCK;
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+            return VK_FORMAT_BC2_SRGB_BLOCK;
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+            return VK_FORMAT_BC3_UNORM_BLOCK;
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+            return VK_FORMAT_BC3_SRGB_BLOCK;
+        case wgpu::TextureFormat::BC4RSnorm:
+            return VK_FORMAT_BC4_SNORM_BLOCK;
+        case wgpu::TextureFormat::BC4RUnorm:
+            return VK_FORMAT_BC4_UNORM_BLOCK;
+        case wgpu::TextureFormat::BC5RGSnorm:
+            return VK_FORMAT_BC5_SNORM_BLOCK;
+        case wgpu::TextureFormat::BC5RGUnorm:
+            return VK_FORMAT_BC5_UNORM_BLOCK;
+        case wgpu::TextureFormat::BC6HRGBFloat:
+            return VK_FORMAT_BC6H_SFLOAT_BLOCK;
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+            return VK_FORMAT_BC6H_UFLOAT_BLOCK;
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+            return VK_FORMAT_BC7_UNORM_BLOCK;
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            return VK_FORMAT_BC7_SRGB_BLOCK;
+
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+            return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+            return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK;
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+            return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+            return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK;
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+            return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+            return VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK;
+        case wgpu::TextureFormat::EACR11Unorm:
+            return VK_FORMAT_EAC_R11_UNORM_BLOCK;
+        case wgpu::TextureFormat::EACR11Snorm:
+            return VK_FORMAT_EAC_R11_SNORM_BLOCK;
+        case wgpu::TextureFormat::EACRG11Unorm:
+            return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
+        case wgpu::TextureFormat::EACRG11Snorm:
+            return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
+
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+            return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+            return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+            return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+            return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+            return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+            return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+            return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+            return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+            return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+            return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+            return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+            return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+            return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+            return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+            return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+            return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+            return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+            return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
+
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+            return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
+
+        case wgpu::TextureFormat::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+// Converts the Dawn usage flags to Vulkan usage flags. Also needs the format to choose
+// between color and depth attachment usages.
+VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format) {
+    VkImageUsageFlags flags = 0;
+
+    if (usage & wgpu::TextureUsage::CopySrc) {
+        flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+    }
+    if (usage & wgpu::TextureUsage::CopyDst) {
+        flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+    }
+    if (usage & wgpu::TextureUsage::TextureBinding) {
+        flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
+        // If the sampled texture is a depth/stencil texture, its image layout will be set
+        // to DEPTH_STENCIL_READ_ONLY_OPTIMAL in order to support readonly depth/stencil
+        // attachment. That layout requires DEPTH_STENCIL_ATTACHMENT_BIT image usage.
+        if (format.HasDepthOrStencil() && format.isRenderable) {
             flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
         }
-
-        return flags;
+    }
+    if (usage & wgpu::TextureUsage::StorageBinding) {
+        flags |= VK_IMAGE_USAGE_STORAGE_BIT;
+    }
+    if (usage & wgpu::TextureUsage::RenderAttachment) {
+        if (format.HasDepthOrStencil()) {
+            flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+        } else {
+            flags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+        }
+    }
+    if (usage & kReadOnlyRenderAttachment) {
+        flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
     }
 
-    // Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
-    // layout must match the layout given to various Vulkan operations as well as the layout given
-    // to descriptor set writes.
-    VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
-        if (usage == wgpu::TextureUsage::None) {
-            return VK_IMAGE_LAYOUT_UNDEFINED;
-        }
+    return flags;
+}
 
-        if (!wgpu::HasZeroOrOneBits(usage)) {
-            // Sampled | kReadOnlyRenderAttachment is the only possible multi-bit usage, if more
-            // appear we might need additional special-casing.
-            ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment));
+// Chooses which Vulkan image layout should be used for the given Dawn usage. Note that this
+// layout must match the layout given to various Vulkan operations as well as the layout given
+// to descriptor set writes.
+VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage) {
+    if (usage == wgpu::TextureUsage::None) {
+        return VK_IMAGE_LAYOUT_UNDEFINED;
+    }
 
-            // WebGPU requires both aspects to be readonly if the attachment's format does have
-            // both depth and stencil aspects. Vulkan 1.0 supports readonly for both aspects too
-            // via DEPTH_STENCIL_READ_ONLY image layout. Vulkan 1.1 and above can support separate
-            // readonly for a single aspect via DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL and
-            // DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layouts. But Vulkan 1.0 cannot support
-            // it, and WebGPU doesn't need that currently.
-            return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
-        }
+    if (!wgpu::HasZeroOrOneBits(usage)) {
+        // Sampled | kReadOnlyRenderAttachment is the only possible multi-bit usage, if more
+        // appear we might need additional special-casing.
+        ASSERT(usage == (wgpu::TextureUsage::TextureBinding | kReadOnlyRenderAttachment));
 
-        // Usage has a single bit so we can switch on its value directly.
-        switch (usage) {
-            case wgpu::TextureUsage::CopyDst:
-                return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+        // WebGPU requires both aspects to be readonly if the attachment's format does have
+        // both depth and stencil aspects. Vulkan 1.0 supports readonly for both aspects too
+        // via DEPTH_STENCIL_READ_ONLY image layout. Vulkan 1.1 and above can support separate
+        // readonly for a single aspect via DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL and
+        // DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layouts. But Vulkan 1.0 cannot support
+        // it, and WebGPU doesn't need that currently.
+        return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
+    }
 
-                // The layout returned here is the one that will be used at bindgroup creation time.
-                // The bindgrpup's layout must match the runtime layout of the image when it is
-                // used via the bindgroup, but we don't know exactly what it will be yet. So we
-                // have to prepare for the pessimistic case.
-            case wgpu::TextureUsage::TextureBinding:
-                // Only VK_IMAGE_LAYOUT_GENERAL can do sampling and storage access of texture at the
-                // same time.
-                if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
-                    return VK_IMAGE_LAYOUT_GENERAL;
-                }
-                // The sampled image can be used as a readonly depth/stencil attachment at the same
-                // time if it is a depth/stencil renderable format, so the image layout need to be
-                // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL.
-                if (texture->GetFormat().HasDepthOrStencil() && texture->GetFormat().isRenderable) {
-                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
-                }
-                return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+    // Usage has a single bit so we can switch on its value directly.
+    switch (usage) {
+        case wgpu::TextureUsage::CopyDst:
+            return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
 
+                // Vulkan texture copy functions require the image to be in _one_ known layout.
-                // Depending on whether parts of the texture have been transitioned to only CopySrc
-                // or a combination with something else, the texture could be in a combination of
-                // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
-                // GENERAL.
-                // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
-                // once and can instead track subresources so we should lift this limitation.
-            case wgpu::TextureUsage::CopySrc:
-                // Read-only and write-only storage textures must use general layout because load
-                // and store operations on storage images can only be done on the images in
-                // VK_IMAGE_LAYOUT_GENERAL layout.
-            case wgpu::TextureUsage::StorageBinding:
+            // The layout returned here is the one that will be used at bindgroup creation time.
+            // The bindgroup's layout must match the runtime layout of the image when it is
+            // used via the bindgroup, but we don't know exactly what it will be yet. So we
+            // have to prepare for the pessimistic case.
+        case wgpu::TextureUsage::TextureBinding:
+            // Only VK_IMAGE_LAYOUT_GENERAL can do sampling and storage access of a texture at
+            // the same time.
+            if (texture->GetInternalUsage() & wgpu::TextureUsage::StorageBinding) {
                 return VK_IMAGE_LAYOUT_GENERAL;
-
-            case wgpu::TextureUsage::RenderAttachment:
-                if (texture->GetFormat().HasDepthOrStencil()) {
-                    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
-                } else {
-                    return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-                }
-
-            case kReadOnlyRenderAttachment:
+            }
+            // The sampled image can be used as a readonly depth/stencil attachment at the same
+            // time if it is a depth/stencil renderable format, so the image layout needs to be
+            // VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL.
+            if (texture->GetFormat().HasDepthOrStencil() && texture->GetFormat().isRenderable) {
                 return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
-
-            case kPresentTextureUsage:
-                return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-
-            case wgpu::TextureUsage::None:
-                break;
-        }
-        UNREACHABLE();
-    }
-
-    VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
-        switch (sampleCount) {
-            case 1:
-                return VK_SAMPLE_COUNT_1_BIT;
-            case 4:
-                return VK_SAMPLE_COUNT_4_BIT;
-        }
-        UNREACHABLE();
-    }
-
-    MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*,
-                                               const TextureDescriptor* descriptor) {
-        DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
-                        "Texture dimension (%s) is not %s.", descriptor->dimension,
-                        wgpu::TextureDimension::e2D);
-
-        DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
-                        descriptor->mipLevelCount);
-
-        DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1,
-                        "Array layer count (%u) is not 1.", descriptor->size.depthOrArrayLayers);
-
-        DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
-                        descriptor->sampleCount);
-
-        return {};
-    }
-
-    bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
-                                const VkImageCreateInfo& imageCreateInfo) {
-        ASSERT(device);
-
-        VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
-        VkImageFormatProperties properties;
-        if (device->fn.GetPhysicalDeviceImageFormatProperties(
-                physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
-                imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
-                &properties) != VK_SUCCESS) {
-            UNREACHABLE();
-        }
-
-        return properties.sampleCounts & imageCreateInfo.samples;
-    }
-
-    // static
-    ResultOrError<Ref<Texture>> Texture::Create(Device* device,
-                                                const TextureDescriptor* descriptor,
-                                                VkImageUsageFlags extraUsages) {
-        Ref<Texture> texture =
-            AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
-        DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
-        return std::move(texture);
-    }
-
-    // static
-    ResultOrError<Texture*> Texture::CreateFromExternal(
-        Device* device,
-        const ExternalImageDescriptorVk* descriptor,
-        const TextureDescriptor* textureDescriptor,
-        external_memory::Service* externalMemoryService) {
-        Ref<Texture> texture =
-            AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
-        DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
-        return texture.Detach();
-    }
-
-    // static
-    Ref<Texture> Texture::CreateForSwapChain(Device* device,
-                                             const TextureDescriptor* descriptor,
-                                             VkImage nativeImage) {
-        Ref<Texture> texture =
-            AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
-        texture->InitializeForSwapChain(nativeImage);
-        return texture;
-    }
-
-    Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
-        : TextureBase(device, descriptor, state),
-          // A usage of none will make sure the texture is transitioned before its first use as
-          // required by the Vulkan spec.
-          mSubresourceLastUsages(std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
-              (ShouldCombineDepthStencilBarriers() ? Aspect::CombinedDepthStencil
-                                                   : GetFormat().aspects),
-              GetArrayLayers(),
-              GetNumMipLevels(),
-              wgpu::TextureUsage::None)) {
-    }
-
-    MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
-        Device* device = ToBackend(GetDevice());
-
-        // Create the Vulkan image "container". We don't need to check that the format supports the
-        // combination of sample, usage etc. because validation should have been done in the Dawn
-        // frontend already based on the minimum supported formats in the Vulkan spec
-        VkImageCreateInfo createInfo = {};
-        FillVulkanCreateInfoSizesAndType(*this, &createInfo);
-
-        PNextChainBuilder createInfoChain(&createInfo);
-
-        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
-        createInfo.format = VulkanImageFormat(device, GetFormat().format);
-        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-        createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
-        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
-        VkImageFormatListCreateInfo imageFormatListInfo = {};
-        std::vector<VkFormat> viewFormats;
-        if (GetViewFormats().any()) {
-            createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
-            if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
-                createInfoChain.Add(&imageFormatListInfo,
-                                    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
-                viewFormats.push_back(VulkanImageFormat(device, GetFormat().format));
-                for (FormatIndex i : IterateBitSet(GetViewFormats())) {
-                    const Format& viewFormat = device->GetValidInternalFormat(i);
-                    viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
-                }
-
-                imageFormatListInfo.viewFormatCount = viewFormats.size();
-                imageFormatListInfo.pViewFormats = viewFormats.data();
             }
-        }
+            return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
 
-        ASSERT(IsSampleCountSupported(device, createInfo));
+            // Vulkan texture copy functions require the image to be in _one_ known layout.
+            // Depending on whether parts of the texture have been transitioned to only CopySrc
+            // or a combination with something else, the texture could be in a combination of
+            // GENERAL and TRANSFER_SRC_OPTIMAL. This would be a problem, so we make CopySrc use
+            // GENERAL.
+            // TODO(crbug.com/dawn/851): We no longer need to transition resources all at
+            // once and can instead track subresources so we should lift this limitation.
+        case wgpu::TextureUsage::CopySrc:
+            // Read-only and write-only storage textures must use general layout because load
+            // and store operations on storage images can only be done on the images in
+            // VK_IMAGE_LAYOUT_GENERAL layout.
+        case wgpu::TextureUsage::StorageBinding:
+            return VK_IMAGE_LAYOUT_GENERAL;
 
-        if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
-            createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
-        }
-
-        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
-        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
-        // also required for the implementation of robust resource initialization.
-        createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
-            "CreateImage"));
-
-        // Create the image memory and associate it with the container
-        VkMemoryRequirements requirements;
-        device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
-
-        DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
-                                               requirements, MemoryKind::Opaque));
-
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
-                                       ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
-                                       mMemoryAllocation.GetOffset()),
-            "BindImageMemory"));
-
-        if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
-            DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
-                                  GetAllSubresources(), TextureBase::ClearValue::NonZero));
-        }
-
-        SetLabelImpl();
-
-        return {};
-    }
-
-    // Internally managed, but imported from external handle
-    MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
-                                               external_memory::Service* externalMemoryService) {
-        Device* device = ToBackend(GetDevice());
-        VkFormat format = VulkanImageFormat(device, GetFormat().format);
-        VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
-        DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage,
-                                                                    &mSupportsDisjointVkImage),
-                        "Creating an image from external memory is not supported.");
-        // mSubresourceLastUsages was initialized with Plane0/Plane1 in the constructor for
-        // multiplanar formats, so we need to correct it to Color here.
-        if (ShouldCombineMultiPlaneBarriers()) {
-            mSubresourceLastUsages = std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
-                ComputeAspectsForSubresourceStorage(), GetArrayLayers(), GetNumMipLevels(),
-                wgpu::TextureUsage::None);
-        }
-
-        mExternalState = ExternalState::PendingAcquire;
-
-        mPendingAcquireOldLayout = descriptor->releasedOldLayout;
-        mPendingAcquireNewLayout = descriptor->releasedNewLayout;
-
-        VkImageCreateInfo baseCreateInfo = {};
-        FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
-
-        PNextChainBuilder createInfoChain(&baseCreateInfo);
-
-        baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
-        baseCreateInfo.format = format;
-        baseCreateInfo.usage = usage;
-        baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-        baseCreateInfo.queueFamilyIndexCount = 0;
-        baseCreateInfo.pQueueFamilyIndices = nullptr;
-
-        // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally beause the Vulkan images
-        // that are used in vkCmdClearColorImage() must have been created with this flag, which is
-        // also required for the implementation of robust resource initialization.
-        baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
-
-        VkImageFormatListCreateInfo imageFormatListInfo = {};
-        std::vector<VkFormat> viewFormats;
-        if (GetViewFormats().any()) {
-            baseCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
-            if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
-                createInfoChain.Add(&imageFormatListInfo,
-                                    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
-                for (FormatIndex i : IterateBitSet(GetViewFormats())) {
-                    const Format& viewFormat = device->GetValidInternalFormat(i);
-                    viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
-                }
-
-                imageFormatListInfo.viewFormatCount = viewFormats.size();
-                imageFormatListInfo.pViewFormats = viewFormats.data();
-            }
-        }
-
-        DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
-
-        SetLabelHelper("Dawn_ExternalTexture");
-
-        return {};
-    }
-
-    void Texture::InitializeForSwapChain(VkImage nativeImage) {
-        mHandle = nativeImage;
-        SetLabelHelper("Dawn_SwapChainTexture");
-    }
-
-    MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
-                                           VkSemaphore signalSemaphore,
-                                           VkDeviceMemory externalMemoryAllocation,
-                                           std::vector<VkSemaphore> waitSemaphores) {
-        Device* device = ToBackend(GetDevice());
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
-            "BindImageMemory (external)"));
-
-        // Don't clear imported texture if already initialized
-        if (descriptor->isInitialized) {
-            SetIsSubresourceContentInitialized(true, GetAllSubresources());
-        }
-
-        // Success, acquire all the external objects.
-        mExternalAllocation = externalMemoryAllocation;
-        mSignalSemaphore = signalSemaphore;
-        mWaitRequirements = std::move(waitSemaphores);
-        return {};
-    }
-
-    MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
-                                              VkSemaphore* signalSemaphore,
-                                              VkImageLayout* releasedOldLayout,
-                                              VkImageLayout* releasedNewLayout) {
-        Device* device = ToBackend(GetDevice());
-
-        DAWN_INVALID_IF(mExternalState == ExternalState::Released,
-                        "Can't export a signal semaphore from signaled texture %s.", this);
-
-        DAWN_INVALID_IF(
-            mExternalAllocation == VK_NULL_HANDLE,
-            "Can't export a signal semaphore from destroyed or non-external texture %s.", this);
-
-        ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
-
-        // Release the texture
-        mExternalState = ExternalState::Released;
-
-        Aspect aspects = ComputeAspectsForSubresourceStorage();
-        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
-        wgpu::TextureUsage usage = mSubresourceLastUsages->Get(aspects, 0, 0);
-
-        VkImageMemoryBarrier barrier;
-        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
-        barrier.pNext = nullptr;
-        barrier.image = GetHandle();
-        barrier.subresourceRange.aspectMask = VulkanAspectMask(aspects);
-        barrier.subresourceRange.baseMipLevel = 0;
-        barrier.subresourceRange.levelCount = 1;
-        barrier.subresourceRange.baseArrayLayer = 0;
-        barrier.subresourceRange.layerCount = 1;
-
-        barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
-        barrier.dstAccessMask = 0;  // The barrier must be paired with another barrier that will
-                                    // specify the dst access mask on the importing queue.
-
-        barrier.oldLayout = VulkanImageLayout(this, usage);
-        if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
-            // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
-            // special value to indicate no layout transition should be done.
-            barrier.newLayout = barrier.oldLayout;
-        } else {
-            barrier.newLayout = desiredLayout;
-        }
-
-        barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
-        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
-
-        VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
-        VkPipelineStageFlags dstStages =
-            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;  // We don't know when the importing queue will need
-                                                // the texture, so pass
-                                                // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
-                                                // the barrier happens-before any usage in the
-                                                // importing queue.
-
-        CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
-        device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
-                                      nullptr, 0, nullptr, 1, &barrier);
-
-        // Queue submit to signal we are done with the texture
-        recordingContext->signalSemaphores.push_back(mSignalSemaphore);
-        DAWN_TRY(device->SubmitPendingCommands());
-
-        // Write out the layouts and signal semaphore
-        *releasedOldLayout = barrier.oldLayout;
-        *releasedNewLayout = barrier.newLayout;
-        *signalSemaphore = mSignalSemaphore;
-
-        mSignalSemaphore = VK_NULL_HANDLE;
-
-        // Destroy the texture so it can't be used again
-        Destroy();
-        return {};
-    }
-
-    Texture::~Texture() {
-    }
-
-    void Texture::SetLabelHelper(const char* prefix) {
-        SetDebugName(ToBackend(GetDevice()), mHandle, prefix, GetLabel());
-    }
-
-    void Texture::SetLabelImpl() {
-        SetLabelHelper("Dawn_InternalTexture");
-    }
-
-    void Texture::DestroyImpl() {
-        if (GetTextureState() == TextureState::OwnedInternal) {
-            Device* device = ToBackend(GetDevice());
-
-            // For textures created from a VkImage, the allocation if kInvalid so the Device knows
-            // to skip the deallocation of the (absence of) VkDeviceMemory.
-            device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
-
-            if (mHandle != VK_NULL_HANDLE) {
-                device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            }
-
-            if (mExternalAllocation != VK_NULL_HANDLE) {
-                device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
-            }
-
-            mHandle = VK_NULL_HANDLE;
-            mExternalAllocation = VK_NULL_HANDLE;
-            // If a signal semaphore exists it should be requested before we delete the texture
-            ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
-        }
-        // For Vulkan, we currently run the base destruction code after the internal changes because
-        // of the dependency on the texture state which the base code overwrites too early.
-        TextureBase::DestroyImpl();
-    }
-
-    VkImage Texture::GetHandle() const {
-        return mHandle;
-    }
-
-    void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
-                                                  std::vector<VkImageMemoryBarrier>* barriers,
-                                                  size_t transitionBarrierStart) {
-        ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
-
-        // transitionBarrierStart specify the index where barriers for current transition start in
-        // the vector. barriers->size() - transitionBarrierStart is the number of barriers that we
-        // have already added into the vector during current transition.
-        ASSERT(barriers->size() - transitionBarrierStart <= 1);
-
-        if (mExternalState == ExternalState::PendingAcquire) {
-            if (barriers->size() == transitionBarrierStart) {
-                barriers->push_back(
-                    BuildMemoryBarrier(this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
-                                       SubresourceRange::SingleMipAndLayer(
-                                           0, 0, ComputeAspectsForSubresourceStorage())));
-            }
-
-            VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
-            // Transfer texture from external queue to graphics queue
-            barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
-            barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
-
-            // srcAccessMask means nothing when importing. Queue transfers require a barrier on
-            // both the importing and exporting queues. The exporting queue should have specified
-            // this.
-            barrier->srcAccessMask = 0;
-
-            // This should be the first barrier after import.
-            ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);
-
-            // Save the desired layout. We may need to transition through an intermediate
-            // |mPendingAcquireLayout| first.
-            VkImageLayout desiredLayout = barrier->newLayout;
-
-            bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());
-
-            // We don't care about the pending old layout if the texture is uninitialized. The
-            // driver is free to discard it. Also it is invalid to transition to layout UNDEFINED or
-            // PREINITIALIZED. If the embedder provided no new layout, or we don't care about the
-            // previous contents, we can skip the layout transition.
-            // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-VkImageMemoryBarrier-newLayout-01198
-            if (!isInitialized || mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_UNDEFINED ||
-                mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
-                barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-                barrier->newLayout = desiredLayout;
+        case wgpu::TextureUsage::RenderAttachment:
+            if (texture->GetFormat().HasDepthOrStencil()) {
+                return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
             } else {
-                barrier->oldLayout = mPendingAcquireOldLayout;
-                barrier->newLayout = mPendingAcquireNewLayout;
+                return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
             }
 
-            // If these are unequal, we need an another barrier to transition the layout.
-            if (barrier->newLayout != desiredLayout) {
-                VkImageMemoryBarrier layoutBarrier;
-                layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
-                layoutBarrier.pNext = nullptr;
-                layoutBarrier.image = GetHandle();
-                layoutBarrier.subresourceRange = barrier->subresourceRange;
+        case kReadOnlyRenderAttachment:
+            return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
 
-                // Transition from the acquired new layout to the desired layout.
-                layoutBarrier.oldLayout = barrier->newLayout;
-                layoutBarrier.newLayout = desiredLayout;
+        case kPresentTextureUsage:
+            return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
 
-                // We already transitioned these.
-                layoutBarrier.srcAccessMask = 0;
-                layoutBarrier.dstAccessMask = 0;
-                layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-                layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+        case wgpu::TextureUsage::None:
+            break;
+    }
+    UNREACHABLE();
+}
 
-                barriers->push_back(layoutBarrier);
+VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount) {
+    switch (sampleCount) {
+        case 1:
+            return VK_SAMPLE_COUNT_1_BIT;
+        case 4:
+            return VK_SAMPLE_COUNT_4_BIT;
+    }
+    UNREACHABLE();
+}
+
+MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase*, const TextureDescriptor* descriptor) {
+    DAWN_INVALID_IF(descriptor->dimension != wgpu::TextureDimension::e2D,
+                    "Texture dimension (%s) is not %s.", descriptor->dimension,
+                    wgpu::TextureDimension::e2D);
+
+    DAWN_INVALID_IF(descriptor->mipLevelCount != 1, "Mip level count (%u) is not 1.",
+                    descriptor->mipLevelCount);
+
+    DAWN_INVALID_IF(descriptor->size.depthOrArrayLayers != 1, "Array layer count (%u) is not 1.",
+                    descriptor->size.depthOrArrayLayers);
+
+    DAWN_INVALID_IF(descriptor->sampleCount != 1, "Sample count (%u) is not 1.",
+                    descriptor->sampleCount);
+
+    return {};
+}
+
+bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+                            const VkImageCreateInfo& imageCreateInfo) {
+    ASSERT(device);
+
+    VkPhysicalDevice physicalDevice = ToBackend(device->GetAdapter())->GetPhysicalDevice();
+    VkImageFormatProperties properties;
+    if (device->fn.GetPhysicalDeviceImageFormatProperties(
+            physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType,
+            imageCreateInfo.tiling, imageCreateInfo.usage, imageCreateInfo.flags,
+            &properties) != VK_SUCCESS) {
+        UNREACHABLE();
+    }
+
+    return properties.sampleCounts & imageCreateInfo.samples;
+}
+
+// static
+ResultOrError<Ref<Texture>> Texture::Create(Device* device,
+                                            const TextureDescriptor* descriptor,
+                                            VkImageUsageFlags extraUsages) {
+    Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedInternal));
+    DAWN_TRY(texture->InitializeAsInternalTexture(extraUsages));
+    return std::move(texture);
+}
+
+// static
+ResultOrError<Texture*> Texture::CreateFromExternal(
+    Device* device,
+    const ExternalImageDescriptorVk* descriptor,
+    const TextureDescriptor* textureDescriptor,
+    external_memory::Service* externalMemoryService) {
+    Ref<Texture> texture =
+        AcquireRef(new Texture(device, textureDescriptor, TextureState::OwnedInternal));
+    DAWN_TRY(texture->InitializeFromExternal(descriptor, externalMemoryService));
+    return texture.Detach();
+}
+
+// static
+Ref<Texture> Texture::CreateForSwapChain(Device* device,
+                                         const TextureDescriptor* descriptor,
+                                         VkImage nativeImage) {
+    Ref<Texture> texture = AcquireRef(new Texture(device, descriptor, TextureState::OwnedExternal));
+    texture->InitializeForSwapChain(nativeImage);
+    return texture;
+}
+
+Texture::Texture(Device* device, const TextureDescriptor* descriptor, TextureState state)
+    : TextureBase(device, descriptor, state),
+      // A usage of none will make sure the texture is transitioned before its first use as
+      // required by the Vulkan spec.
+      mSubresourceLastUsages(std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
+          (ShouldCombineDepthStencilBarriers() ? Aspect::CombinedDepthStencil
+                                               : GetFormat().aspects),
+          GetArrayLayers(),
+          GetNumMipLevels(),
+          wgpu::TextureUsage::None)) {}
+
+MaybeError Texture::InitializeAsInternalTexture(VkImageUsageFlags extraUsages) {
+    Device* device = ToBackend(GetDevice());
+
+    // Create the Vulkan image "container". We don't need to check that the format supports the
+    // combination of sample, usage etc. because validation should have been done in the Dawn
+    // frontend already based on the minimum supported formats in the Vulkan spec
+    VkImageCreateInfo createInfo = {};
+    FillVulkanCreateInfoSizesAndType(*this, &createInfo);
+
+    PNextChainBuilder createInfoChain(&createInfo);
+
+    createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+    createInfo.format = VulkanImageFormat(device, GetFormat().format);
+    createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+    createInfo.usage = VulkanImageUsage(GetInternalUsage(), GetFormat()) | extraUsages;
+    createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+    VkImageFormatListCreateInfo imageFormatListInfo = {};
+    std::vector<VkFormat> viewFormats;
+    if (GetViewFormats().any()) {
+        createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+        if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
+            createInfoChain.Add(&imageFormatListInfo,
+                                VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+            viewFormats.push_back(VulkanImageFormat(device, GetFormat().format));
+            for (FormatIndex i : IterateBitSet(GetViewFormats())) {
+                const Format& viewFormat = device->GetValidInternalFormat(i);
+                viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
             }
 
-            mExternalState = ExternalState::Acquired;
-        }
-
-        mLastExternalState = mExternalState;
-
-        recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
-                                                mWaitRequirements.begin(), mWaitRequirements.end());
-        mWaitRequirements.clear();
-    }
-
-    bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
-        // Reuse the texture directly and avoid encoding barriers when it isn't needed.
-        bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
-        if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
-            return true;
-        }
-        return false;
-    }
-
-    // Base Vulkan doesn't support transitioning depth and stencil separately. We work around
-    // this limitation by combining the usages in the two planes of `textureUsages` into a
-    // single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
-    // for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
-    bool Texture::ShouldCombineDepthStencilBarriers() const {
-        // If the Stencil8 format is being emulated then memory barriers also need to include
-        // the depth aspect. (See: crbug.com/dawn/1331)
-        if (GetFormat().format == wgpu::TextureFormat::Stencil8 &&
-            !GetDevice()->IsToggleEnabled(Toggle::VulkanUseS8)) {
-            return true;
-        }
-        return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
-    }
-
-    // The Vulkan spec requires:
-    // "If image has a single-plane color format or is not disjoint, then the aspectMask member of
-    // subresourceRange must be VK_IMAGE_ASPECT_COLOR_BIT.".
-    // For multi-planar formats, we currently only support import them in non-disjoint way.
-    bool Texture::ShouldCombineMultiPlaneBarriers() const {
-        // TODO(chromium:1258986): Figure out how to support disjoint vkImage.
-        ASSERT(!mSupportsDisjointVkImage);
-        return GetFormat().aspects == (Aspect::Plane0 | Aspect::Plane1);
-    }
-
-    Aspect Texture::ComputeAspectsForSubresourceStorage() const {
-        if (ShouldCombineDepthStencilBarriers()) {
-            return Aspect::CombinedDepthStencil;
-        }
-        // Force to use Aspect::Color for Aspect::Plane0/1.
-        if (ShouldCombineMultiPlaneBarriers()) {
-            return Aspect::Color;
-        }
-        return GetFormat().aspects;
-    }
-
-    void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
-                                         const TextureSubresourceUsage& textureUsages,
-                                         std::vector<VkImageMemoryBarrier>* imageBarriers,
-                                         VkPipelineStageFlags* srcStages,
-                                         VkPipelineStageFlags* dstStages) {
-        if (ShouldCombineBarriers()) {
-            Aspect combinedAspect = ComputeAspectsForSubresourceStorage();
-            SubresourceStorage<wgpu::TextureUsage> combinedUsages(combinedAspect, GetArrayLayers(),
-                                                                  GetNumMipLevels());
-            textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
-                SubresourceRange updateRange = range;
-                updateRange.aspects = combinedAspect;
-
-                combinedUsages.Update(
-                    updateRange, [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
-                        *combinedUsage |= usage;
-                    });
-            });
-
-            TransitionUsageForPassImpl(recordingContext, combinedUsages, imageBarriers, srcStages,
-                                       dstStages);
-        } else {
-            TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
-                                       dstStages);
+            imageFormatListInfo.viewFormatCount = viewFormats.size();
+            imageFormatListInfo.pViewFormats = viewFormats.data();
         }
     }
 
-    void Texture::TransitionUsageForPassImpl(
-        CommandRecordingContext* recordingContext,
-        const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
-        std::vector<VkImageMemoryBarrier>* imageBarriers,
-        VkPipelineStageFlags* srcStages,
-        VkPipelineStageFlags* dstStages) {
-        size_t transitionBarrierStart = imageBarriers->size();
-        const Format& format = GetFormat();
+    ASSERT(IsSampleCountSupported(device, createInfo));
 
-        wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
-        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
-
-        mSubresourceLastUsages->Merge(
-            subresourceUsages, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage,
-                                   const wgpu::TextureUsage& newUsage) {
-                if (newUsage == wgpu::TextureUsage::None ||
-                    CanReuseWithoutBarrier(*lastUsage, newUsage)) {
-                    return;
-                }
-
-                imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));
-
-                allLastUsages |= *lastUsage;
-                allUsages |= newUsage;
-
-                *lastUsage = newUsage;
-            });
-
-        if (mExternalState != ExternalState::InternalOnly) {
-            TweakTransitionForExternalUsage(recordingContext, imageBarriers,
-                                            transitionBarrierStart);
-        }
-
-        *srcStages |= VulkanPipelineStage(allLastUsages, format);
-        *dstStages |= VulkanPipelineStage(allUsages, format);
+    if (GetArrayLayers() >= 6 && GetWidth() == GetHeight()) {
+        createInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
     }
 
-    void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
-                                     wgpu::TextureUsage usage,
-                                     const SubresourceRange& range) {
-        std::vector<VkImageMemoryBarrier> barriers;
+    // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally because the Vulkan images
+    // that are used in vkCmdClearColorImage() must have been created with this flag, which is
+    // also required for the implementation of robust resource initialization.
+    createInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
 
-        VkPipelineStageFlags srcStages = 0;
-        VkPipelineStageFlags dstStages = 0;
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.CreateImage(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+        "CreateImage"));
 
-        TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
+    // Create the image memory and associate it with the container
+    VkMemoryRequirements requirements;
+    device->fn.GetImageMemoryRequirements(device->GetVkDevice(), mHandle, &requirements);
 
-        if (mExternalState != ExternalState::InternalOnly) {
-            TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
-        }
+    DAWN_TRY_ASSIGN(mMemoryAllocation, device->GetResourceMemoryAllocator()->Allocate(
+                                           requirements, MemoryKind::Opaque));
 
-        if (!barriers.empty()) {
-            ASSERT(srcStages != 0 && dstStages != 0);
-            ToBackend(GetDevice())
-                ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
-                                        nullptr, 0, nullptr, barriers.size(), barriers.data());
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.BindImageMemory(device->GetVkDevice(), mHandle,
+                                   ToBackend(mMemoryAllocation.GetResourceHeap())->GetMemory(),
+                                   mMemoryAllocation.GetOffset()),
+        "BindImageMemory"));
+
+    if (device->IsToggleEnabled(Toggle::NonzeroClearResourcesOnCreationForTesting)) {
+        DAWN_TRY(ClearTexture(ToBackend(GetDevice())->GetPendingRecordingContext(),
+                              GetAllSubresources(), TextureBase::ClearValue::NonZero));
+    }
+
+    SetLabelImpl();
+
+    return {};
+}
+
+// Internally managed, but imported from external handle
+MaybeError Texture::InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
+                                           external_memory::Service* externalMemoryService) {
+    Device* device = ToBackend(GetDevice());
+    VkFormat format = VulkanImageFormat(device, GetFormat().format);
+    VkImageUsageFlags usage = VulkanImageUsage(GetInternalUsage(), GetFormat());
+    DAWN_INVALID_IF(!externalMemoryService->SupportsCreateImage(descriptor, format, usage,
+                                                                &mSupportsDisjointVkImage),
+                    "Creating an image from external memory is not supported.");
+    // mSubresourceLastUsages was initialized with Plane0/Plane1 in the constructor for
+    // multiplanar formats, so we need to correct it to Color here.
+    if (ShouldCombineMultiPlaneBarriers()) {
+        mSubresourceLastUsages = std::make_unique<SubresourceStorage<wgpu::TextureUsage>>(
+            ComputeAspectsForSubresourceStorage(), GetArrayLayers(), GetNumMipLevels(),
+            wgpu::TextureUsage::None);
+    }
+
+    mExternalState = ExternalState::PendingAcquire;
+
+    mPendingAcquireOldLayout = descriptor->releasedOldLayout;
+    mPendingAcquireNewLayout = descriptor->releasedNewLayout;
+
+    VkImageCreateInfo baseCreateInfo = {};
+    FillVulkanCreateInfoSizesAndType(*this, &baseCreateInfo);
+
+    PNextChainBuilder createInfoChain(&baseCreateInfo);
+
+    baseCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+    baseCreateInfo.format = format;
+    baseCreateInfo.usage = usage;
+    baseCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    baseCreateInfo.queueFamilyIndexCount = 0;
+    baseCreateInfo.pQueueFamilyIndices = nullptr;
+
+    // We always set VK_IMAGE_USAGE_TRANSFER_DST_BIT unconditionally because the Vulkan images
+    // that are used in vkCmdClearColorImage() must have been created with this flag, which is
+    // also required for the implementation of robust resource initialization.
+    baseCreateInfo.usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+    VkImageFormatListCreateInfo imageFormatListInfo = {};
+    std::vector<VkFormat> viewFormats;
+    if (GetViewFormats().any()) {
+        baseCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+        if (device->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)) {
+            createInfoChain.Add(&imageFormatListInfo,
+                                VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
+            for (FormatIndex i : IterateBitSet(GetViewFormats())) {
+                const Format& viewFormat = device->GetValidInternalFormat(i);
+                viewFormats.push_back(VulkanImageFormat(device, viewFormat.format));
+            }
+
+            imageFormatListInfo.viewFormatCount = viewFormats.size();
+            imageFormatListInfo.pViewFormats = viewFormats.data();
         }
     }
 
-    void Texture::TransitionUsageAndGetResourceBarrier(
-        wgpu::TextureUsage usage,
-        const SubresourceRange& range,
-        std::vector<VkImageMemoryBarrier>* imageBarriers,
-        VkPipelineStageFlags* srcStages,
-        VkPipelineStageFlags* dstStages) {
-        if (ShouldCombineBarriers()) {
-            SubresourceRange updatedRange = range;
-            updatedRange.aspects = ComputeAspectsForSubresourceStorage();
-            TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
-                                                     dstStages);
-        } else {
-            TransitionUsageAndGetResourceBarrierImpl(usage, range, imageBarriers, srcStages,
-                                                     dstStages);
-        }
+    DAWN_TRY_ASSIGN(mHandle, externalMemoryService->CreateImage(descriptor, baseCreateInfo));
+
+    SetLabelHelper("Dawn_ExternalTexture");
+
+    return {};
+}
+
+void Texture::InitializeForSwapChain(VkImage nativeImage) {
+    mHandle = nativeImage;
+    SetLabelHelper("Dawn_SwapChainTexture");
+}
+
+MaybeError Texture::BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+                                       VkSemaphore signalSemaphore,
+                                       VkDeviceMemory externalMemoryAllocation,
+                                       std::vector<VkSemaphore> waitSemaphores) {
+    Device* device = ToBackend(GetDevice());
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.BindImageMemory(device->GetVkDevice(), mHandle, externalMemoryAllocation, 0),
+        "BindImageMemory (external)"));
+
+    // Don't clear imported texture if already initialized
+    if (descriptor->isInitialized) {
+        SetIsSubresourceContentInitialized(true, GetAllSubresources());
     }
 
-    void Texture::TransitionUsageAndGetResourceBarrierImpl(
-        wgpu::TextureUsage usage,
-        const SubresourceRange& range,
-        std::vector<VkImageMemoryBarrier>* imageBarriers,
-        VkPipelineStageFlags* srcStages,
-        VkPipelineStageFlags* dstStages) {
-        ASSERT(imageBarriers != nullptr);
-        const Format& format = GetFormat();
+    // Success, acquire all the external objects.
+    mExternalAllocation = externalMemoryAllocation;
+    mSignalSemaphore = signalSemaphore;
+    mWaitRequirements = std::move(waitSemaphores);
+    return {};
+}
 
-        wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
-        mSubresourceLastUsages->Update(
-            range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
-                if (CanReuseWithoutBarrier(*lastUsage, usage)) {
-                    return;
-                }
+MaybeError Texture::ExportExternalTexture(VkImageLayout desiredLayout,
+                                          VkSemaphore* signalSemaphore,
+                                          VkImageLayout* releasedOldLayout,
+                                          VkImageLayout* releasedNewLayout) {
+    Device* device = ToBackend(GetDevice());
 
-                imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
+    DAWN_INVALID_IF(mExternalState == ExternalState::Released,
+                    "Can't export a signal semaphore from signaled texture %s.", this);
 
-                allLastUsages |= *lastUsage;
-                *lastUsage = usage;
-            });
+    DAWN_INVALID_IF(mExternalAllocation == VK_NULL_HANDLE,
+                    "Can't export a signal semaphore from destroyed or non-external texture %s.",
+                    this);
 
-        *srcStages |= VulkanPipelineStage(allLastUsages, format);
-        *dstStages |= VulkanPipelineStage(usage, format);
+    ASSERT(mSignalSemaphore != VK_NULL_HANDLE);
+
+    // Release the texture
+    mExternalState = ExternalState::Released;
+
+    Aspect aspects = ComputeAspectsForSubresourceStorage();
+    ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+    wgpu::TextureUsage usage = mSubresourceLastUsages->Get(aspects, 0, 0);
+
+    VkImageMemoryBarrier barrier;
+    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+    barrier.pNext = nullptr;
+    barrier.image = GetHandle();
+    barrier.subresourceRange.aspectMask = VulkanAspectMask(aspects);
+    barrier.subresourceRange.baseMipLevel = 0;
+    barrier.subresourceRange.levelCount = 1;
+    barrier.subresourceRange.baseArrayLayer = 0;
+    barrier.subresourceRange.layerCount = 1;
+
+    barrier.srcAccessMask = VulkanAccessFlags(usage, GetFormat());
+    barrier.dstAccessMask = 0;  // The barrier must be paired with another barrier that will
+                                // specify the dst access mask on the importing queue.
+
+    barrier.oldLayout = VulkanImageLayout(this, usage);
+    if (desiredLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
+        // VK_IMAGE_LAYOUT_UNDEFINED is invalid here. We use it as a
+        // special value to indicate no layout transition should be done.
+        barrier.newLayout = barrier.oldLayout;
+    } else {
+        barrier.newLayout = desiredLayout;
     }
 
-    MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
-                                     const SubresourceRange& range,
-                                     TextureBase::ClearValue clearValue) {
+    barrier.srcQueueFamilyIndex = device->GetGraphicsQueueFamily();
+    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+
+    VkPipelineStageFlags srcStages = VulkanPipelineStage(usage, GetFormat());
+    VkPipelineStageFlags dstStages =
+        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;  // We don't know when the importing queue will need
+                                            // the texture, so pass
+                                            // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT to ensure
+                                            // the barrier happens-before any usage in the
+                                            // importing queue.
+
+    CommandRecordingContext* recordingContext = device->GetPendingRecordingContext();
+    device->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                  nullptr, 0, nullptr, 1, &barrier);
+
+    // Queue submit to signal we are done with the texture
+    recordingContext->signalSemaphores.push_back(mSignalSemaphore);
+    DAWN_TRY(device->SubmitPendingCommands());
+
+    // Write out the layouts and signal semaphore
+    *releasedOldLayout = barrier.oldLayout;
+    *releasedNewLayout = barrier.newLayout;
+    *signalSemaphore = mSignalSemaphore;
+
+    mSignalSemaphore = VK_NULL_HANDLE;
+
+    // Destroy the texture so it can't be used again
+    Destroy();
+    return {};
+}
+
+Texture::~Texture() {}
+
+void Texture::SetLabelHelper(const char* prefix) {
+    SetDebugName(ToBackend(GetDevice()), mHandle, prefix, GetLabel());
+}
+
+void Texture::SetLabelImpl() {
+    SetLabelHelper("Dawn_InternalTexture");
+}
+
+void Texture::DestroyImpl() {
+    if (GetTextureState() == TextureState::OwnedInternal) {
         Device* device = ToBackend(GetDevice());
 
-        const bool isZero = clearValue == TextureBase::ClearValue::Zero;
-        uint32_t uClearColor = isZero ? 0 : 1;
-        int32_t sClearColor = isZero ? 0 : 1;
-        float fClearColor = isZero ? 0.f : 1.f;
-
-        TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
-
-        VkImageSubresourceRange imageRange = {};
-        imageRange.levelCount = 1;
-        imageRange.layerCount = 1;
-
-        if (GetFormat().isCompressed) {
-            if (range.aspects == Aspect::None) {
-                return {};
-            }
-            // need to clear the texture with a copy from buffer
-            ASSERT(range.aspects == Aspect::Color);
-            const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
-
-            Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
-
-            uint32_t bytesPerRow =
-                Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
-                      device->GetOptimalBytesPerRowAlignment());
-            uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
-                                  largestMipSize.depthOrArrayLayers;
-            DynamicUploader* uploader = device->GetDynamicUploader();
-            UploadHandle uploadHandle;
-            DAWN_TRY_ASSIGN(uploadHandle,
-                            uploader->Allocate(bufferSize, device->GetPendingCommandSerial(),
-                                               blockInfo.byteSize));
-            memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
-
-            std::vector<VkBufferImageCopy> regions;
-            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
-                 ++level) {
-                Extent3D copySize = GetMipLevelPhysicalSize(level);
-                imageRange.baseMipLevel = level;
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
-                    if (clearValue == TextureBase::ClearValue::Zero &&
-                        IsSubresourceContentInitialized(
-                            SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
-                        // Skip lazy clears if already initialized.
-                        continue;
-                    }
-
-                    TextureDataLayout dataLayout;
-                    dataLayout.offset = uploadHandle.startOffset;
-                    dataLayout.rowsPerImage = copySize.height / blockInfo.height;
-                    dataLayout.bytesPerRow = bytesPerRow;
-                    TextureCopy textureCopy;
-                    textureCopy.aspect = range.aspects;
-                    textureCopy.mipLevel = level;
-                    textureCopy.origin = {0, 0, layer};
-                    textureCopy.texture = this;
-
-                    regions.push_back(
-                        ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
-                }
-            }
-            device->fn.CmdCopyBufferToImage(
-                recordingContext->commandBuffer,
-                ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(), GetHandle(),
-                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions.size(), regions.data());
-        } else {
-            for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
-                 ++level) {
-                imageRange.baseMipLevel = level;
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; ++layer) {
-                    Aspect aspects = Aspect::None;
-                    for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                        if (clearValue == TextureBase::ClearValue::Zero &&
-                            IsSubresourceContentInitialized(
-                                SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
-                            // Skip lazy clears if already initialized.
-                            continue;
-                        }
-                        aspects |= aspect;
-                    }
-
-                    if (aspects == Aspect::None) {
-                        continue;
-                    }
-
-                    imageRange.aspectMask = VulkanAspectMask(aspects);
-                    imageRange.baseArrayLayer = layer;
-
-                    if (aspects &
-                        (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
-                        VkClearDepthStencilValue clearDepthStencilValue[1];
-                        clearDepthStencilValue[0].depth = fClearColor;
-                        clearDepthStencilValue[0].stencil = uClearColor;
-                        device->fn.CmdClearDepthStencilImage(
-                            recordingContext->commandBuffer, GetHandle(),
-                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearDepthStencilValue, 1,
-                            &imageRange);
-                    } else {
-                        ASSERT(aspects == Aspect::Color);
-                        VkClearColorValue clearColorValue;
-                        switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
-                            case wgpu::TextureComponentType::Float:
-                                clearColorValue.float32[0] = fClearColor;
-                                clearColorValue.float32[1] = fClearColor;
-                                clearColorValue.float32[2] = fClearColor;
-                                clearColorValue.float32[3] = fClearColor;
-                                break;
-                            case wgpu::TextureComponentType::Sint:
-                                clearColorValue.int32[0] = sClearColor;
-                                clearColorValue.int32[1] = sClearColor;
-                                clearColorValue.int32[2] = sClearColor;
-                                clearColorValue.int32[3] = sClearColor;
-                                break;
-                            case wgpu::TextureComponentType::Uint:
-                                clearColorValue.uint32[0] = uClearColor;
-                                clearColorValue.uint32[1] = uClearColor;
-                                clearColorValue.uint32[2] = uClearColor;
-                                clearColorValue.uint32[3] = uClearColor;
-                                break;
-                            case wgpu::TextureComponentType::DepthComparison:
-                                UNREACHABLE();
-                        }
-                        device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
-                                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                                                      &clearColorValue, 1, &imageRange);
-                    }
-                }
-            }
-        }
-
-        if (clearValue == TextureBase::ClearValue::Zero) {
-            SetIsSubresourceContentInitialized(true, range);
-            device->IncrementLazyClearCountForTesting();
-        }
-        return {};
-    }
-
-    void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
-                                                      const SubresourceRange& range) {
-        if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
-            return;
-        }
-        if (!IsSubresourceContentInitialized(range)) {
-            // If subresource has not been initialized, clear it to black as it could contain dirty
-            // bits from recycled memory
-            GetDevice()->ConsumedError(
-                ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
-        }
-    }
-
-    VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
-        ASSERT(GetFormat().aspects == Aspect::Color);
-        return VulkanImageLayout(this, mSubresourceLastUsages->Get(Aspect::Color, 0, 0));
-    }
-
-    // static
-    ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
-                                                        const TextureViewDescriptor* descriptor) {
-        Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
-        DAWN_TRY(view->Initialize(descriptor));
-        return view;
-    }
-
-    MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
-        if ((GetTexture()->GetInternalUsage() &
-             ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
-            // If the texture view has no other usage than CopySrc and CopyDst, then it can't
-            // actually be used as a render pass attachment or sampled/storage texture. The Vulkan
-            // validation errors warn if you create such a vkImageView, so return early.
-            return {};
-        }
-
-        // Texture could be destroyed by the time we make a view.
-        if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
-            return {};
-        }
-
-        Device* device = ToBackend(GetTexture()->GetDevice());
-
-        VkImageViewCreateInfo createInfo;
-        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
-        createInfo.pNext = nullptr;
-        createInfo.flags = 0;
-        createInfo.image = ToBackend(GetTexture())->GetHandle();
-        createInfo.viewType = VulkanImageViewType(descriptor->dimension);
-
-        const Format& textureFormat = GetTexture()->GetFormat();
-        if (textureFormat.HasStencil() &&
-            (textureFormat.HasDepth() || !device->IsToggleEnabled(Toggle::VulkanUseS8))) {
-            // Unlike multi-planar formats, depth-stencil formats have multiple aspects but are not
-            // created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.
-            // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VkImageViewCreateInfo.html#VUID-VkImageViewCreateInfo-image-01762
-            // Without, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, the view format must match the texture
-            // format.
-            createInfo.format = VulkanImageFormat(device, textureFormat.format);
-        } else {
-            createInfo.format = VulkanImageFormat(device, descriptor->format);
-        }
-
-        createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
-                                                   VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
-
-        const SubresourceRange& subresources = GetSubresourceRange();
-        createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
-        createInfo.subresourceRange.levelCount = subresources.levelCount;
-        createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
-        createInfo.subresourceRange.layerCount = subresources.layerCount;
-        createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
-
-        DAWN_TRY(CheckVkSuccess(
-            device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
-            "CreateImageView"));
-
-        SetLabelImpl();
-
-        return {};
-    }
-
-    TextureView::~TextureView() {
-    }
-
-    void TextureView::DestroyImpl() {
-        Device* device = ToBackend(GetTexture()->GetDevice());
+        // For textures created from a VkImage, the allocation is kInvalid so the Device knows
+        // to skip the deallocation of the (absence of) VkDeviceMemory.
+        device->GetResourceMemoryAllocator()->Deallocate(&mMemoryAllocation);
 
         if (mHandle != VK_NULL_HANDLE) {
             device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            mHandle = VK_NULL_HANDLE;
+        }
+
+        if (mExternalAllocation != VK_NULL_HANDLE) {
+            device->GetFencedDeleter()->DeleteWhenUnused(mExternalAllocation);
+        }
+
+        mHandle = VK_NULL_HANDLE;
+        mExternalAllocation = VK_NULL_HANDLE;
+        // If a signal semaphore exists it should be requested before we delete the texture
+        ASSERT(mSignalSemaphore == VK_NULL_HANDLE);
+    }
+    // For Vulkan, we currently run the base destruction code after the internal changes because
+    // of the dependency on the texture state which the base code overwrites too early.
+    TextureBase::DestroyImpl();
+}
+
+VkImage Texture::GetHandle() const {
+    return mHandle;
+}
+
+void Texture::TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+                                              std::vector<VkImageMemoryBarrier>* barriers,
+                                              size_t transitionBarrierStart) {
+    ASSERT(GetNumMipLevels() == 1 && GetArrayLayers() == 1);
+
+    // transitionBarrierStart specifies the index where barriers for current transition start in
+    // the vector. barriers->size() - transitionBarrierStart is the number of barriers that we
+    // have already added into the vector during current transition.
+    ASSERT(barriers->size() - transitionBarrierStart <= 1);
+
+    if (mExternalState == ExternalState::PendingAcquire) {
+        if (barriers->size() == transitionBarrierStart) {
+            barriers->push_back(BuildMemoryBarrier(
+                this, wgpu::TextureUsage::None, wgpu::TextureUsage::None,
+                SubresourceRange::SingleMipAndLayer(0, 0, ComputeAspectsForSubresourceStorage())));
+        }
+
+        VkImageMemoryBarrier* barrier = &(*barriers)[transitionBarrierStart];
+        // Transfer texture from external queue to graphics queue
+        barrier->srcQueueFamilyIndex = VK_QUEUE_FAMILY_EXTERNAL_KHR;
+        barrier->dstQueueFamilyIndex = ToBackend(GetDevice())->GetGraphicsQueueFamily();
+
+        // srcAccessMask means nothing when importing. Queue transfers require a barrier on
+        // both the importing and exporting queues. The exporting queue should have specified
+        // this.
+        barrier->srcAccessMask = 0;
+
+        // This should be the first barrier after import.
+        ASSERT(barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED);
+
+        // Save the desired layout. We may need to transition through an intermediate
+        // |mPendingAcquireNewLayout| first.
+        VkImageLayout desiredLayout = barrier->newLayout;
+
+        bool isInitialized = IsSubresourceContentInitialized(GetAllSubresources());
+
+        // We don't care about the pending old layout if the texture is uninitialized. The
+        // driver is free to discard it. Also it is invalid to transition to layout UNDEFINED or
+        // PREINITIALIZED. If the embedder provided no new layout, or we don't care about the
+        // previous contents, we can skip the layout transition.
+        // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/html/vkspec.html#VUID-VkImageMemoryBarrier-newLayout-01198
+        if (!isInitialized || mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_UNDEFINED ||
+            mPendingAcquireNewLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
+            barrier->oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+            barrier->newLayout = desiredLayout;
+        } else {
+            barrier->oldLayout = mPendingAcquireOldLayout;
+            barrier->newLayout = mPendingAcquireNewLayout;
+        }
+
+        // If these are unequal, we need another barrier to transition the layout.
+        if (barrier->newLayout != desiredLayout) {
+            VkImageMemoryBarrier layoutBarrier;
+            layoutBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+            layoutBarrier.pNext = nullptr;
+            layoutBarrier.image = GetHandle();
+            layoutBarrier.subresourceRange = barrier->subresourceRange;
+
+            // Transition from the acquired new layout to the desired layout.
+            layoutBarrier.oldLayout = barrier->newLayout;
+            layoutBarrier.newLayout = desiredLayout;
+
+            // We already transitioned these.
+            layoutBarrier.srcAccessMask = 0;
+            layoutBarrier.dstAccessMask = 0;
+            layoutBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+            layoutBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+
+            barriers->push_back(layoutBarrier);
+        }
+
+        mExternalState = ExternalState::Acquired;
+    }
+
+    mLastExternalState = mExternalState;
+
+    recordingContext->waitSemaphores.insert(recordingContext->waitSemaphores.end(),
+                                            mWaitRequirements.begin(), mWaitRequirements.end());
+    mWaitRequirements.clear();
+}
+
+bool Texture::CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage) {
+    // Reuse the texture directly and avoid encoding barriers when it isn't needed.
+    bool lastReadOnly = IsSubset(lastUsage, kReadOnlyTextureUsages);
+    if (lastReadOnly && lastUsage == usage && mLastExternalState == mExternalState) {
+        return true;
+    }
+    return false;
+}
+
+// Base Vulkan doesn't support transitioning depth and stencil separately. We work around
+// this limitation by combining the usages in the two planes of `textureUsages` into a
+// single plane in a new SubresourceStorage<TextureUsage>. The barriers will be produced
+// for DEPTH | STENCIL since the SubresourceRange uses Aspect::CombinedDepthStencil.
+bool Texture::ShouldCombineDepthStencilBarriers() const {
+    // If the Stencil8 format is being emulated then memory barriers also need to include
+    // the depth aspect. (See: crbug.com/dawn/1331)
+    if (GetFormat().format == wgpu::TextureFormat::Stencil8 &&
+        !GetDevice()->IsToggleEnabled(Toggle::VulkanUseS8)) {
+        return true;
+    }
+    return GetFormat().aspects == (Aspect::Depth | Aspect::Stencil);
+}
+
+// The Vulkan spec requires:
+// "If image has a single-plane color format or is not disjoint, then the aspectMask member of
+// subresourceRange must be VK_IMAGE_ASPECT_COLOR_BIT.".
+// For multi-planar formats, we currently only support importing them in a non-disjoint way.
+bool Texture::ShouldCombineMultiPlaneBarriers() const {
+    // TODO(chromium:1258986): Figure out how to support disjoint vkImage.
+    ASSERT(!mSupportsDisjointVkImage);
+    return GetFormat().aspects == (Aspect::Plane0 | Aspect::Plane1);
+}
+
+Aspect Texture::ComputeAspectsForSubresourceStorage() const {
+    if (ShouldCombineDepthStencilBarriers()) {
+        return Aspect::CombinedDepthStencil;
+    }
+    // Force to use Aspect::Color for Aspect::Plane0/1.
+    if (ShouldCombineMultiPlaneBarriers()) {
+        return Aspect::Color;
+    }
+    return GetFormat().aspects;
+}
+
+void Texture::TransitionUsageForPass(CommandRecordingContext* recordingContext,
+                                     const TextureSubresourceUsage& textureUsages,
+                                     std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                     VkPipelineStageFlags* srcStages,
+                                     VkPipelineStageFlags* dstStages) {
+    if (ShouldCombineBarriers()) {
+        Aspect combinedAspect = ComputeAspectsForSubresourceStorage();
+        SubresourceStorage<wgpu::TextureUsage> combinedUsages(combinedAspect, GetArrayLayers(),
+                                                              GetNumMipLevels());
+        textureUsages.Iterate([&](const SubresourceRange& range, wgpu::TextureUsage usage) {
+            SubresourceRange updateRange = range;
+            updateRange.aspects = combinedAspect;
+
+            combinedUsages.Update(updateRange,
+                                  [&](const SubresourceRange&, wgpu::TextureUsage* combinedUsage) {
+                                      *combinedUsage |= usage;
+                                  });
+        });
+
+        TransitionUsageForPassImpl(recordingContext, combinedUsages, imageBarriers, srcStages,
+                                   dstStages);
+    } else {
+        TransitionUsageForPassImpl(recordingContext, textureUsages, imageBarriers, srcStages,
+                                   dstStages);
+    }
+}
+
+void Texture::TransitionUsageForPassImpl(
+    CommandRecordingContext* recordingContext,
+    const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
+    std::vector<VkImageMemoryBarrier>* imageBarriers,
+    VkPipelineStageFlags* srcStages,
+    VkPipelineStageFlags* dstStages) {
+    size_t transitionBarrierStart = imageBarriers->size();
+    const Format& format = GetFormat();
+
+    wgpu::TextureUsage allUsages = wgpu::TextureUsage::None;
+    wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+
+    mSubresourceLastUsages->Merge(subresourceUsages, [&](const SubresourceRange& range,
+                                                         wgpu::TextureUsage* lastUsage,
+                                                         const wgpu::TextureUsage& newUsage) {
+        if (newUsage == wgpu::TextureUsage::None || CanReuseWithoutBarrier(*lastUsage, newUsage)) {
+            return;
+        }
+
+        imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, newUsage, range));
+
+        allLastUsages |= *lastUsage;
+        allUsages |= newUsage;
+
+        *lastUsage = newUsage;
+    });
+
+    if (mExternalState != ExternalState::InternalOnly) {
+        TweakTransitionForExternalUsage(recordingContext, imageBarriers, transitionBarrierStart);
+    }
+
+    *srcStages |= VulkanPipelineStage(allLastUsages, format);
+    *dstStages |= VulkanPipelineStage(allUsages, format);
+}
+
+void Texture::TransitionUsageNow(CommandRecordingContext* recordingContext,
+                                 wgpu::TextureUsage usage,
+                                 const SubresourceRange& range) {
+    std::vector<VkImageMemoryBarrier> barriers;
+
+    VkPipelineStageFlags srcStages = 0;
+    VkPipelineStageFlags dstStages = 0;
+
+    TransitionUsageAndGetResourceBarrier(usage, range, &barriers, &srcStages, &dstStages);
+
+    if (mExternalState != ExternalState::InternalOnly) {
+        TweakTransitionForExternalUsage(recordingContext, &barriers, 0);
+    }
+
+    if (!barriers.empty()) {
+        ASSERT(srcStages != 0 && dstStages != 0);
+        ToBackend(GetDevice())
+            ->fn.CmdPipelineBarrier(recordingContext->commandBuffer, srcStages, dstStages, 0, 0,
+                                    nullptr, 0, nullptr, barriers.size(), barriers.data());
+    }
+}
+
+void Texture::TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+                                                   const SubresourceRange& range,
+                                                   std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                                   VkPipelineStageFlags* srcStages,
+                                                   VkPipelineStageFlags* dstStages) {
+    if (ShouldCombineBarriers()) {
+        SubresourceRange updatedRange = range;
+        updatedRange.aspects = ComputeAspectsForSubresourceStorage();
+        TransitionUsageAndGetResourceBarrierImpl(usage, updatedRange, imageBarriers, srcStages,
+                                                 dstStages);
+    } else {
+        TransitionUsageAndGetResourceBarrierImpl(usage, range, imageBarriers, srcStages, dstStages);
+    }
+}
+
+void Texture::TransitionUsageAndGetResourceBarrierImpl(
+    wgpu::TextureUsage usage,
+    const SubresourceRange& range,
+    std::vector<VkImageMemoryBarrier>* imageBarriers,
+    VkPipelineStageFlags* srcStages,
+    VkPipelineStageFlags* dstStages) {
+    ASSERT(imageBarriers != nullptr);
+    const Format& format = GetFormat();
+
+    wgpu::TextureUsage allLastUsages = wgpu::TextureUsage::None;
+    mSubresourceLastUsages->Update(
+        range, [&](const SubresourceRange& range, wgpu::TextureUsage* lastUsage) {
+            if (CanReuseWithoutBarrier(*lastUsage, usage)) {
+                return;
+            }
+
+            imageBarriers->push_back(BuildMemoryBarrier(this, *lastUsage, usage, range));
+
+            allLastUsages |= *lastUsage;
+            *lastUsage = usage;
+        });
+
+    *srcStages |= VulkanPipelineStage(allLastUsages, format);
+    *dstStages |= VulkanPipelineStage(usage, format);
+}
+
+MaybeError Texture::ClearTexture(CommandRecordingContext* recordingContext,
+                                 const SubresourceRange& range,
+                                 TextureBase::ClearValue clearValue) {
+    Device* device = ToBackend(GetDevice());
+
+    const bool isZero = clearValue == TextureBase::ClearValue::Zero;
+    uint32_t uClearColor = isZero ? 0 : 1;
+    int32_t sClearColor = isZero ? 0 : 1;
+    float fClearColor = isZero ? 0.f : 1.f;
+
+    TransitionUsageNow(recordingContext, wgpu::TextureUsage::CopyDst, range);
+
+    VkImageSubresourceRange imageRange = {};
+    imageRange.levelCount = 1;
+    imageRange.layerCount = 1;
+
+    if (GetFormat().isCompressed) {
+        if (range.aspects == Aspect::None) {
+            return {};
+        }
+        // need to clear the texture with a copy from buffer
+        ASSERT(range.aspects == Aspect::Color);
+        const TexelBlockInfo& blockInfo = GetFormat().GetAspectInfo(range.aspects).block;
+
+        Extent3D largestMipSize = GetMipLevelPhysicalSize(range.baseMipLevel);
+
+        uint32_t bytesPerRow = Align((largestMipSize.width / blockInfo.width) * blockInfo.byteSize,
+                                     device->GetOptimalBytesPerRowAlignment());
+        uint64_t bufferSize = bytesPerRow * (largestMipSize.height / blockInfo.height) *
+                              largestMipSize.depthOrArrayLayers;
+        DynamicUploader* uploader = device->GetDynamicUploader();
+        UploadHandle uploadHandle;
+        DAWN_TRY_ASSIGN(
+            uploadHandle,
+            uploader->Allocate(bufferSize, device->GetPendingCommandSerial(), blockInfo.byteSize));
+        memset(uploadHandle.mappedBuffer, uClearColor, bufferSize);
+
+        std::vector<VkBufferImageCopy> regions;
+        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+             ++level) {
+            Extent3D copySize = GetMipLevelPhysicalSize(level);
+            imageRange.baseMipLevel = level;
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                if (clearValue == TextureBase::ClearValue::Zero &&
+                    IsSubresourceContentInitialized(
+                        SubresourceRange::SingleMipAndLayer(level, layer, range.aspects))) {
+                    // Skip lazy clears if already initialized.
+                    continue;
+                }
+
+                TextureDataLayout dataLayout;
+                dataLayout.offset = uploadHandle.startOffset;
+                dataLayout.rowsPerImage = copySize.height / blockInfo.height;
+                dataLayout.bytesPerRow = bytesPerRow;
+                TextureCopy textureCopy;
+                textureCopy.aspect = range.aspects;
+                textureCopy.mipLevel = level;
+                textureCopy.origin = {0, 0, layer};
+                textureCopy.texture = this;
+
+                regions.push_back(ComputeBufferImageCopyRegion(dataLayout, textureCopy, copySize));
+            }
+        }
+        device->fn.CmdCopyBufferToImage(recordingContext->commandBuffer,
+                                        ToBackend(uploadHandle.stagingBuffer)->GetBufferHandle(),
+                                        GetHandle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                        regions.size(), regions.data());
+    } else {
+        for (uint32_t level = range.baseMipLevel; level < range.baseMipLevel + range.levelCount;
+             ++level) {
+            imageRange.baseMipLevel = level;
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; ++layer) {
+                Aspect aspects = Aspect::None;
+                for (Aspect aspect : IterateEnumMask(range.aspects)) {
+                    if (clearValue == TextureBase::ClearValue::Zero &&
+                        IsSubresourceContentInitialized(
+                            SubresourceRange::SingleMipAndLayer(level, layer, aspect))) {
+                        // Skip lazy clears if already initialized.
+                        continue;
+                    }
+                    aspects |= aspect;
+                }
+
+                if (aspects == Aspect::None) {
+                    continue;
+                }
+
+                imageRange.aspectMask = VulkanAspectMask(aspects);
+                imageRange.baseArrayLayer = layer;
+
+                if (aspects & (Aspect::Depth | Aspect::Stencil | Aspect::CombinedDepthStencil)) {
+                    VkClearDepthStencilValue clearDepthStencilValue[1];
+                    clearDepthStencilValue[0].depth = fClearColor;
+                    clearDepthStencilValue[0].stencil = uClearColor;
+                    device->fn.CmdClearDepthStencilImage(recordingContext->commandBuffer,
+                                                         GetHandle(),
+                                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                                         clearDepthStencilValue, 1, &imageRange);
+                } else {
+                    ASSERT(aspects == Aspect::Color);
+                    VkClearColorValue clearColorValue;
+                    switch (GetFormat().GetAspectInfo(Aspect::Color).baseType) {
+                        case wgpu::TextureComponentType::Float:
+                            clearColorValue.float32[0] = fClearColor;
+                            clearColorValue.float32[1] = fClearColor;
+                            clearColorValue.float32[2] = fClearColor;
+                            clearColorValue.float32[3] = fClearColor;
+                            break;
+                        case wgpu::TextureComponentType::Sint:
+                            clearColorValue.int32[0] = sClearColor;
+                            clearColorValue.int32[1] = sClearColor;
+                            clearColorValue.int32[2] = sClearColor;
+                            clearColorValue.int32[3] = sClearColor;
+                            break;
+                        case wgpu::TextureComponentType::Uint:
+                            clearColorValue.uint32[0] = uClearColor;
+                            clearColorValue.uint32[1] = uClearColor;
+                            clearColorValue.uint32[2] = uClearColor;
+                            clearColorValue.uint32[3] = uClearColor;
+                            break;
+                        case wgpu::TextureComponentType::DepthComparison:
+                            UNREACHABLE();
+                    }
+                    device->fn.CmdClearColorImage(recordingContext->commandBuffer, GetHandle(),
+                                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                                  &clearColorValue, 1, &imageRange);
+                }
+            }
         }
     }
 
-    VkImageView TextureView::GetHandle() const {
-        return mHandle;
+    if (clearValue == TextureBase::ClearValue::Zero) {
+        SetIsSubresourceContentInitialized(true, range);
+        device->IncrementLazyClearCountForTesting();
+    }
+    return {};
+}
+
+void Texture::EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                                  const SubresourceRange& range) {
+    if (!GetDevice()->IsToggleEnabled(Toggle::LazyClearResourceOnFirstUse)) {
+        return;
+    }
+    if (!IsSubresourceContentInitialized(range)) {
+        // If subresource has not been initialized, clear it to black as it could contain dirty
+        // bits from recycled memory
+        GetDevice()->ConsumedError(
+            ClearTexture(recordingContext, range, TextureBase::ClearValue::Zero));
+    }
+}
+
+VkImageLayout Texture::GetCurrentLayoutForSwapChain() const {
+    ASSERT(GetFormat().aspects == Aspect::Color);
+    return VulkanImageLayout(this, mSubresourceLastUsages->Get(Aspect::Color, 0, 0));
+}
+
+// static
+ResultOrError<Ref<TextureView>> TextureView::Create(TextureBase* texture,
+                                                    const TextureViewDescriptor* descriptor) {
+    Ref<TextureView> view = AcquireRef(new TextureView(texture, descriptor));
+    DAWN_TRY(view->Initialize(descriptor));
+    return view;
+}
+
+MaybeError TextureView::Initialize(const TextureViewDescriptor* descriptor) {
+    if ((GetTexture()->GetInternalUsage() &
+         ~(wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst)) == 0) {
+        // If the texture view has no other usage than CopySrc and CopyDst, then it can't
+        // actually be used as a render pass attachment or sampled/storage texture. The Vulkan
+        // validation errors warn if you create such a vkImageView, so return early.
+        return {};
     }
 
-    void TextureView::SetLabelImpl() {
-        SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_TextureView", GetLabel());
+    // Texture could be destroyed by the time we make a view.
+    if (GetTexture()->GetTextureState() == Texture::TextureState::Destroyed) {
+        return {};
     }
 
+    Device* device = ToBackend(GetTexture()->GetDevice());
+
+    VkImageViewCreateInfo createInfo;
+    createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+    createInfo.pNext = nullptr;
+    createInfo.flags = 0;
+    createInfo.image = ToBackend(GetTexture())->GetHandle();
+    createInfo.viewType = VulkanImageViewType(descriptor->dimension);
+
+    const Format& textureFormat = GetTexture()->GetFormat();
+    if (textureFormat.HasStencil() &&
+        (textureFormat.HasDepth() || !device->IsToggleEnabled(Toggle::VulkanUseS8))) {
+        // Unlike multi-planar formats, depth-stencil formats have multiple aspects but are not
+        // created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.
+        // https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/VkImageViewCreateInfo.html#VUID-VkImageViewCreateInfo-image-01762
+        // Without VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, the view format must match the texture
+        // format.
+        createInfo.format = VulkanImageFormat(device, textureFormat.format);
+    } else {
+        createInfo.format = VulkanImageFormat(device, descriptor->format);
+    }
+
+    createInfo.components = VkComponentMapping{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
+                                               VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
+
+    const SubresourceRange& subresources = GetSubresourceRange();
+    createInfo.subresourceRange.baseMipLevel = subresources.baseMipLevel;
+    createInfo.subresourceRange.levelCount = subresources.levelCount;
+    createInfo.subresourceRange.baseArrayLayer = subresources.baseArrayLayer;
+    createInfo.subresourceRange.layerCount = subresources.layerCount;
+    createInfo.subresourceRange.aspectMask = VulkanAspectMask(subresources.aspects);
+
+    DAWN_TRY(CheckVkSuccess(
+        device->fn.CreateImageView(device->GetVkDevice(), &createInfo, nullptr, &*mHandle),
+        "CreateImageView"));
+
+    SetLabelImpl();
+
+    return {};
+}
+
+TextureView::~TextureView() {}
+
+void TextureView::DestroyImpl() {
+    Device* device = ToBackend(GetTexture()->GetDevice());
+
+    if (mHandle != VK_NULL_HANDLE) {
+        device->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        mHandle = VK_NULL_HANDLE;
+    }
+}
+
+VkImageView TextureView::GetHandle() const {
+    return mHandle;
+}
+
+void TextureView::SetLabelImpl() {
+    SetDebugName(ToBackend(GetDevice()), mHandle, "Dawn_TextureView", GetLabel());
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/TextureVk.h b/src/dawn/native/vulkan/TextureVk.h
index ae95729..4be6477 100644
--- a/src/dawn/native/vulkan/TextureVk.h
+++ b/src/dawn/native/vulkan/TextureVk.h
@@ -27,172 +27,170 @@
 
 namespace dawn::native::vulkan {
 
-    struct CommandRecordingContext;
-    class Device;
-    class Texture;
+struct CommandRecordingContext;
+class Device;
+class Texture;
 
-    VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
-    VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
-    VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage);
-    VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
+VkFormat VulkanImageFormat(const Device* device, wgpu::TextureFormat format);
+VkImageUsageFlags VulkanImageUsage(wgpu::TextureUsage usage, const Format& format);
+VkImageLayout VulkanImageLayout(const Texture* texture, wgpu::TextureUsage usage);
+VkSampleCountFlagBits VulkanSampleCount(uint32_t sampleCount);
 
-    MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
-                                               const TextureDescriptor* descriptor);
+MaybeError ValidateVulkanImageCanBeWrapped(const DeviceBase* device,
+                                           const TextureDescriptor* descriptor);
 
-    bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
-                                const VkImageCreateInfo& imageCreateInfo);
+bool IsSampleCountSupported(const dawn::native::vulkan::Device* device,
+                            const VkImageCreateInfo& imageCreateInfo);
 
-    class Texture final : public TextureBase {
-      public:
-        // Used to create a regular texture from a descriptor.
-        static ResultOrError<Ref<Texture>> Create(Device* device,
-                                                  const TextureDescriptor* descriptor,
-                                                  VkImageUsageFlags extraUsages = 0);
+class Texture final : public TextureBase {
+  public:
+    // Used to create a regular texture from a descriptor.
+    static ResultOrError<Ref<Texture>> Create(Device* device,
+                                              const TextureDescriptor* descriptor,
+                                              VkImageUsageFlags extraUsages = 0);
 
-        // Creates a texture and initializes it with a VkImage that references an external memory
-        // object. Before the texture can be used, the VkDeviceMemory associated with the external
-        // image must be bound via Texture::BindExternalMemory.
-        static ResultOrError<Texture*> CreateFromExternal(
-            Device* device,
-            const ExternalImageDescriptorVk* descriptor,
-            const TextureDescriptor* textureDescriptor,
-            external_memory::Service* externalMemoryService);
+    // Creates a texture and initializes it with a VkImage that references an external memory
+    // object. Before the texture can be used, the VkDeviceMemory associated with the external
+    // image must be bound via Texture::BindExternalMemory.
+    static ResultOrError<Texture*> CreateFromExternal(
+        Device* device,
+        const ExternalImageDescriptorVk* descriptor,
+        const TextureDescriptor* textureDescriptor,
+        external_memory::Service* externalMemoryService);
 
-        // Creates a texture that wraps a swapchain-allocated VkImage.
-        static Ref<Texture> CreateForSwapChain(Device* device,
-                                               const TextureDescriptor* descriptor,
-                                               VkImage nativeImage);
+    // Creates a texture that wraps a swapchain-allocated VkImage.
+    static Ref<Texture> CreateForSwapChain(Device* device,
+                                           const TextureDescriptor* descriptor,
+                                           VkImage nativeImage);
 
-        VkImage GetHandle() const;
+    VkImage GetHandle() const;
 
-        // Transitions the texture to be used as `usage`, recording any necessary barrier in
-        // `commands`.
-        // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
-        void TransitionUsageNow(CommandRecordingContext* recordingContext,
-                                wgpu::TextureUsage usage,
-                                const SubresourceRange& range);
-        void TransitionUsageForPass(CommandRecordingContext* recordingContext,
-                                    const TextureSubresourceUsage& textureUsages,
+    // Transitions the texture to be used as `usage`, recording any necessary barrier in
+    // `commands`.
+    // TODO(crbug.com/dawn/851): coalesce barriers and do them early when possible.
+    void TransitionUsageNow(CommandRecordingContext* recordingContext,
+                            wgpu::TextureUsage usage,
+                            const SubresourceRange& range);
+    void TransitionUsageForPass(CommandRecordingContext* recordingContext,
+                                const TextureSubresourceUsage& textureUsages,
+                                std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                VkPipelineStageFlags* srcStages,
+                                VkPipelineStageFlags* dstStages);
+
+    void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
+                                             const SubresourceRange& range);
+
+    VkImageLayout GetCurrentLayoutForSwapChain() const;
+
+    // Binds externally allocated memory to the VkImage and on success, takes ownership of
+    // semaphores.
+    MaybeError BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
+                                  VkSemaphore signalSemaphore,
+                                  VkDeviceMemory externalMemoryAllocation,
+                                  std::vector<VkSemaphore> waitSemaphores);
+
+    MaybeError ExportExternalTexture(VkImageLayout desiredLayout,
+                                     VkSemaphore* signalSemaphore,
+                                     VkImageLayout* releasedOldLayout,
+                                     VkImageLayout* releasedNewLayout);
+
+    void SetLabelHelper(const char* prefix);
+
+    // Dawn API
+    void SetLabelImpl() override;
+
+  private:
+    ~Texture() override;
+    Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
+
+    MaybeError InitializeAsInternalTexture(VkImageUsageFlags extraUsages);
+    MaybeError InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
+                                      external_memory::Service* externalMemoryService);
+    void InitializeForSwapChain(VkImage nativeImage);
+
+    void DestroyImpl() override;
+    MaybeError ClearTexture(CommandRecordingContext* recordingContext,
+                            const SubresourceRange& range,
+                            TextureBase::ClearValue);
+
+    // Implementation details of the barrier computations for the texture.
+    void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+                                              const SubresourceRange& range,
+                                              std::vector<VkImageMemoryBarrier>* imageBarriers,
+                                              VkPipelineStageFlags* srcStages,
+                                              VkPipelineStageFlags* dstStages);
+    void TransitionUsageForPassImpl(CommandRecordingContext* recordingContext,
+                                    const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
                                     std::vector<VkImageMemoryBarrier>* imageBarriers,
                                     VkPipelineStageFlags* srcStages,
                                     VkPipelineStageFlags* dstStages);
-
-        void EnsureSubresourceContentInitialized(CommandRecordingContext* recordingContext,
-                                                 const SubresourceRange& range);
-
-        VkImageLayout GetCurrentLayoutForSwapChain() const;
-
-        // Binds externally allocated memory to the VkImage and on success, takes ownership of
-        // semaphores.
-        MaybeError BindExternalMemory(const ExternalImageDescriptorVk* descriptor,
-                                      VkSemaphore signalSemaphore,
-                                      VkDeviceMemory externalMemoryAllocation,
-                                      std::vector<VkSemaphore> waitSemaphores);
-
-        MaybeError ExportExternalTexture(VkImageLayout desiredLayout,
-                                         VkSemaphore* signalSemaphore,
-                                         VkImageLayout* releasedOldLayout,
-                                         VkImageLayout* releasedNewLayout);
-
-        void SetLabelHelper(const char* prefix);
-
-        // Dawn API
-        void SetLabelImpl() override;
-
-      private:
-        ~Texture() override;
-        Texture(Device* device, const TextureDescriptor* descriptor, TextureState state);
-
-        MaybeError InitializeAsInternalTexture(VkImageUsageFlags extraUsages);
-        MaybeError InitializeFromExternal(const ExternalImageDescriptorVk* descriptor,
-                                          external_memory::Service* externalMemoryService);
-        void InitializeForSwapChain(VkImage nativeImage);
-
-        void DestroyImpl() override;
-        MaybeError ClearTexture(CommandRecordingContext* recordingContext,
-                                const SubresourceRange& range,
-                                TextureBase::ClearValue);
-
-        // Implementation details of the barrier computations for the texture.
-        void TransitionUsageAndGetResourceBarrier(wgpu::TextureUsage usage,
+    void TransitionUsageAndGetResourceBarrierImpl(wgpu::TextureUsage usage,
                                                   const SubresourceRange& range,
                                                   std::vector<VkImageMemoryBarrier>* imageBarriers,
                                                   VkPipelineStageFlags* srcStages,
                                                   VkPipelineStageFlags* dstStages);
-        void TransitionUsageForPassImpl(
-            CommandRecordingContext* recordingContext,
-            const SubresourceStorage<wgpu::TextureUsage>& subresourceUsages,
-            std::vector<VkImageMemoryBarrier>* imageBarriers,
-            VkPipelineStageFlags* srcStages,
-            VkPipelineStageFlags* dstStages);
-        void TransitionUsageAndGetResourceBarrierImpl(
-            wgpu::TextureUsage usage,
-            const SubresourceRange& range,
-            std::vector<VkImageMemoryBarrier>* imageBarriers,
-            VkPipelineStageFlags* srcStages,
-            VkPipelineStageFlags* dstStages);
-        void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
-                                             std::vector<VkImageMemoryBarrier>* barriers,
-                                             size_t transitionBarrierStart);
-        bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
+    void TweakTransitionForExternalUsage(CommandRecordingContext* recordingContext,
+                                         std::vector<VkImageMemoryBarrier>* barriers,
+                                         size_t transitionBarrierStart);
+    bool CanReuseWithoutBarrier(wgpu::TextureUsage lastUsage, wgpu::TextureUsage usage);
 
-        // In base Vulkan, Depth and stencil can only be transitioned together. This function
-        // indicates whether we should combine depth and stencil barriers to accommodate this
-        // limitation.
-        bool ShouldCombineDepthStencilBarriers() const;
+    // In base Vulkan, Depth and stencil can only be transitioned together. This function
+    // indicates whether we should combine depth and stencil barriers to accommodate this
+    // limitation.
+    bool ShouldCombineDepthStencilBarriers() const;
 
-        // This indicates whether the VK_IMAGE_ASPECT_COLOR_BIT instead of
-        // VK_IMAGE_ASPECT_PLANE_n_BIT must be used.
-        bool ShouldCombineMultiPlaneBarriers() const;
+    // This indicates whether the VK_IMAGE_ASPECT_COLOR_BIT instead of
+    // VK_IMAGE_ASPECT_PLANE_n_BIT must be used.
+    bool ShouldCombineMultiPlaneBarriers() const;
 
-        bool ShouldCombineBarriers() const {
-            return ShouldCombineDepthStencilBarriers() || ShouldCombineMultiPlaneBarriers();
-        }
+    bool ShouldCombineBarriers() const {
+        return ShouldCombineDepthStencilBarriers() || ShouldCombineMultiPlaneBarriers();
+    }
 
-        // Compute the Aspects of the SubresourceStoage for this texture depending on whether we're
-        // doing the workaround for combined depth and stencil barriers, or combining multi-plane
-        // barriers.
-        Aspect ComputeAspectsForSubresourceStorage() const;
+    // Compute the Aspects of the SubresourceStorage for this texture depending on whether we're
+    // doing the workaround for combined depth and stencil barriers, or combining multi-plane
+    // barriers.
+    Aspect ComputeAspectsForSubresourceStorage() const;
 
-        VkImage mHandle = VK_NULL_HANDLE;
-        ResourceMemoryAllocation mMemoryAllocation;
-        VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
+    VkImage mHandle = VK_NULL_HANDLE;
+    ResourceMemoryAllocation mMemoryAllocation;
+    VkDeviceMemory mExternalAllocation = VK_NULL_HANDLE;
 
-        enum class ExternalState { InternalOnly, PendingAcquire, Acquired, Released };
-        ExternalState mExternalState = ExternalState::InternalOnly;
-        ExternalState mLastExternalState = ExternalState::InternalOnly;
+    enum class ExternalState { InternalOnly, PendingAcquire, Acquired, Released };
+    ExternalState mExternalState = ExternalState::InternalOnly;
+    ExternalState mLastExternalState = ExternalState::InternalOnly;
 
-        VkImageLayout mPendingAcquireOldLayout;
-        VkImageLayout mPendingAcquireNewLayout;
+    VkImageLayout mPendingAcquireOldLayout;
+    VkImageLayout mPendingAcquireNewLayout;
 
-        VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
-        std::vector<VkSemaphore> mWaitRequirements;
+    VkSemaphore mSignalSemaphore = VK_NULL_HANDLE;
+    std::vector<VkSemaphore> mWaitRequirements;
 
-        // Note that in early Vulkan versions it is not possible to transition depth and stencil
-        // separately so textures with Depth|Stencil aspects will have a single Depth aspect in the
-        // storage.
-        std::unique_ptr<SubresourceStorage<wgpu::TextureUsage>> mSubresourceLastUsages;
+    // Note that in early Vulkan versions it is not possible to transition depth and stencil
+    // separately so textures with Depth|Stencil aspects will have a single Depth aspect in the
+    // storage.
+    std::unique_ptr<SubresourceStorage<wgpu::TextureUsage>> mSubresourceLastUsages;
 
-        bool mSupportsDisjointVkImage = false;
-    };
+    bool mSupportsDisjointVkImage = false;
+};
 
-    class TextureView final : public TextureViewBase {
-      public:
-        static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
-                                                      const TextureViewDescriptor* descriptor);
-        VkImageView GetHandle() const;
+class TextureView final : public TextureViewBase {
+  public:
+    static ResultOrError<Ref<TextureView>> Create(TextureBase* texture,
+                                                  const TextureViewDescriptor* descriptor);
+    VkImageView GetHandle() const;
 
-      private:
-        ~TextureView() override;
-        void DestroyImpl() override;
-        using TextureViewBase::TextureViewBase;
-        MaybeError Initialize(const TextureViewDescriptor* descriptor);
+  private:
+    ~TextureView() override;
+    void DestroyImpl() override;
+    using TextureViewBase::TextureViewBase;
+    MaybeError Initialize(const TextureViewDescriptor* descriptor);
 
-        // Dawn API
-        void SetLabelImpl() override;
+    // Dawn API
+    void SetLabelImpl() override;
 
-        VkImageView mHandle = VK_NULL_HANDLE;
-    };
+    VkImageView mHandle = VK_NULL_HANDLE;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/UtilsVulkan.cpp b/src/dawn/native/vulkan/UtilsVulkan.cpp
index 0d0e86f..a64e659 100644
--- a/src/dawn/native/vulkan/UtilsVulkan.cpp
+++ b/src/dawn/native/vulkan/UtilsVulkan.cpp
@@ -32,261 +32,260 @@
         return objectType;                                \
     }
 
-    VK_OBJECT_TYPE_GETTER(VkBuffer, VK_OBJECT_TYPE_BUFFER)
-    VK_OBJECT_TYPE_GETTER(VkDescriptorSetLayout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
-    VK_OBJECT_TYPE_GETTER(VkDescriptorSet, VK_OBJECT_TYPE_DESCRIPTOR_SET)
-    VK_OBJECT_TYPE_GETTER(VkPipeline, VK_OBJECT_TYPE_PIPELINE)
-    VK_OBJECT_TYPE_GETTER(VkPipelineLayout, VK_OBJECT_TYPE_PIPELINE_LAYOUT)
-    VK_OBJECT_TYPE_GETTER(VkQueryPool, VK_OBJECT_TYPE_QUERY_POOL)
-    VK_OBJECT_TYPE_GETTER(VkSampler, VK_OBJECT_TYPE_SAMPLER)
-    VK_OBJECT_TYPE_GETTER(VkShaderModule, VK_OBJECT_TYPE_SHADER_MODULE)
-    VK_OBJECT_TYPE_GETTER(VkImage, VK_OBJECT_TYPE_IMAGE)
-    VK_OBJECT_TYPE_GETTER(VkImageView, VK_OBJECT_TYPE_IMAGE_VIEW)
+VK_OBJECT_TYPE_GETTER(VkBuffer, VK_OBJECT_TYPE_BUFFER)
+VK_OBJECT_TYPE_GETTER(VkDescriptorSetLayout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
+VK_OBJECT_TYPE_GETTER(VkDescriptorSet, VK_OBJECT_TYPE_DESCRIPTOR_SET)
+VK_OBJECT_TYPE_GETTER(VkPipeline, VK_OBJECT_TYPE_PIPELINE)
+VK_OBJECT_TYPE_GETTER(VkPipelineLayout, VK_OBJECT_TYPE_PIPELINE_LAYOUT)
+VK_OBJECT_TYPE_GETTER(VkQueryPool, VK_OBJECT_TYPE_QUERY_POOL)
+VK_OBJECT_TYPE_GETTER(VkSampler, VK_OBJECT_TYPE_SAMPLER)
+VK_OBJECT_TYPE_GETTER(VkShaderModule, VK_OBJECT_TYPE_SHADER_MODULE)
+VK_OBJECT_TYPE_GETTER(VkImage, VK_OBJECT_TYPE_IMAGE)
+VK_OBJECT_TYPE_GETTER(VkImageView, VK_OBJECT_TYPE_IMAGE_VIEW)
 
 #undef VK_OBJECT_TYPE_GETTER
 
-    VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
-        switch (op) {
-            case wgpu::CompareFunction::Never:
-                return VK_COMPARE_OP_NEVER;
-            case wgpu::CompareFunction::Less:
-                return VK_COMPARE_OP_LESS;
-            case wgpu::CompareFunction::LessEqual:
-                return VK_COMPARE_OP_LESS_OR_EQUAL;
-            case wgpu::CompareFunction::Greater:
-                return VK_COMPARE_OP_GREATER;
-            case wgpu::CompareFunction::GreaterEqual:
-                return VK_COMPARE_OP_GREATER_OR_EQUAL;
-            case wgpu::CompareFunction::Equal:
-                return VK_COMPARE_OP_EQUAL;
-            case wgpu::CompareFunction::NotEqual:
-                return VK_COMPARE_OP_NOT_EQUAL;
-            case wgpu::CompareFunction::Always:
-                return VK_COMPARE_OP_ALWAYS;
+VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op) {
+    switch (op) {
+        case wgpu::CompareFunction::Never:
+            return VK_COMPARE_OP_NEVER;
+        case wgpu::CompareFunction::Less:
+            return VK_COMPARE_OP_LESS;
+        case wgpu::CompareFunction::LessEqual:
+            return VK_COMPARE_OP_LESS_OR_EQUAL;
+        case wgpu::CompareFunction::Greater:
+            return VK_COMPARE_OP_GREATER;
+        case wgpu::CompareFunction::GreaterEqual:
+            return VK_COMPARE_OP_GREATER_OR_EQUAL;
+        case wgpu::CompareFunction::Equal:
+            return VK_COMPARE_OP_EQUAL;
+        case wgpu::CompareFunction::NotEqual:
+            return VK_COMPARE_OP_NOT_EQUAL;
+        case wgpu::CompareFunction::Always:
+            return VK_COMPARE_OP_ALWAYS;
 
-            case wgpu::CompareFunction::Undefined:
+        case wgpu::CompareFunction::Undefined:
+            break;
+    }
+    UNREACHABLE();
+}
+
+// Convert Dawn texture aspects to Vulkan texture aspect flags
+VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
+    VkImageAspectFlags flags = 0;
+    for (Aspect aspect : IterateEnumMask(aspects)) {
+        switch (aspect) {
+            case Aspect::Color:
+                flags |= VK_IMAGE_ASPECT_COLOR_BIT;
                 break;
-        }
-        UNREACHABLE();
-    }
-
-    // Convert Dawn texture aspects to  Vulkan texture aspect flags
-    VkImageAspectFlags VulkanAspectMask(const Aspect& aspects) {
-        VkImageAspectFlags flags = 0;
-        for (Aspect aspect : IterateEnumMask(aspects)) {
-            switch (aspect) {
-                case Aspect::Color:
-                    flags |= VK_IMAGE_ASPECT_COLOR_BIT;
-                    break;
-                case Aspect::Depth:
-                    flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
-                    break;
-                case Aspect::Stencil:
-                    flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
-                    break;
-
-                case Aspect::CombinedDepthStencil:
-                    flags |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
-                    break;
-
-                case Aspect::Plane0:
-                    flags |= VK_IMAGE_ASPECT_PLANE_0_BIT;
-                    break;
-                case Aspect::Plane1:
-                    flags |= VK_IMAGE_ASPECT_PLANE_1_BIT;
-                    break;
-
-                case Aspect::None:
-                    UNREACHABLE();
-            }
-        }
-        return flags;
-    }
-
-    // Vulkan SPEC requires the source/destination region specified by each element of
-    // pRegions must be a region that is contained within srcImage/dstImage. Here the size of
-    // the image refers to the virtual size, while Dawn validates texture copy extent with the
-    // physical size, so we need to re-calculate the texture copy extent to ensure it should fit
-    // in the virtual size of the subresource.
-    Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
-        Extent3D validTextureCopyExtent = copySize;
-        const TextureBase* texture = textureCopy.texture.Get();
-        Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
-        ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
-        ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
-        if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
-            ASSERT(texture->GetFormat().isCompressed);
-            validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
-        }
-        if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
-            ASSERT(texture->GetFormat().isCompressed);
-            validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
-        }
-
-        return validTextureCopyExtent;
-    }
-
-    VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
-                                                   const TextureCopy& textureCopy,
-                                                   const Extent3D& copySize) {
-        TextureDataLayout passDataLayout;
-        passDataLayout.offset = bufferCopy.offset;
-        passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
-        passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
-        return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
-    }
-
-    VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
-                                                   const TextureCopy& textureCopy,
-                                                   const Extent3D& copySize) {
-        const Texture* texture = ToBackend(textureCopy.texture.Get());
-
-        VkBufferImageCopy region;
-
-        region.bufferOffset = dataLayout.offset;
-        // In Vulkan the row length is in texels while it is in bytes for Dawn
-        const TexelBlockInfo& blockInfo =
-            texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
-        ASSERT(dataLayout.bytesPerRow % blockInfo.byteSize == 0);
-        region.bufferRowLength = dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width;
-        region.bufferImageHeight = dataLayout.rowsPerImage * blockInfo.height;
-
-        region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
-        region.imageSubresource.mipLevel = textureCopy.mipLevel;
-
-        switch (textureCopy.texture->GetDimension()) {
-            case wgpu::TextureDimension::e1D:
-                ASSERT(textureCopy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
-                region.imageOffset.x = textureCopy.origin.x;
-                region.imageOffset.y = 0;
-                region.imageOffset.z = 0;
-                region.imageSubresource.baseArrayLayer = 0;
-                region.imageSubresource.layerCount = 1;
-
-                ASSERT(!textureCopy.texture->GetFormat().isCompressed);
-                region.imageExtent.width = copySize.width;
-                region.imageExtent.height = 1;
-                region.imageExtent.depth = 1;
+            case Aspect::Depth:
+                flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
+                break;
+            case Aspect::Stencil:
+                flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
                 break;
 
-            case wgpu::TextureDimension::e2D: {
-                region.imageOffset.x = textureCopy.origin.x;
-                region.imageOffset.y = textureCopy.origin.y;
-                region.imageOffset.z = 0;
-                region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
-                region.imageSubresource.layerCount = copySize.depthOrArrayLayers;
-
-                Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
-                region.imageExtent.width = imageExtent.width;
-                region.imageExtent.height = imageExtent.height;
-                region.imageExtent.depth = 1;
+            case Aspect::CombinedDepthStencil:
+                flags |= VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
                 break;
-            }
 
-            case wgpu::TextureDimension::e3D: {
-                region.imageOffset.x = textureCopy.origin.x;
-                region.imageOffset.y = textureCopy.origin.y;
-                region.imageOffset.z = textureCopy.origin.z;
-                region.imageSubresource.baseArrayLayer = 0;
-                region.imageSubresource.layerCount = 1;
-
-                ASSERT(!textureCopy.texture->GetFormat().isCompressed);
-                region.imageExtent.width = copySize.width;
-                region.imageExtent.height = copySize.height;
-                region.imageExtent.depth = copySize.depthOrArrayLayers;
+            case Aspect::Plane0:
+                flags |= VK_IMAGE_ASPECT_PLANE_0_BIT;
                 break;
-            }
+            case Aspect::Plane1:
+                flags |= VK_IMAGE_ASPECT_PLANE_1_BIT;
+                break;
+
+            case Aspect::None:
+                UNREACHABLE();
         }
+    }
+    return flags;
+}
 
-        return region;
+// Vulkan SPEC requires the source/destination region specified by each element of
+// pRegions must be a region that is contained within srcImage/dstImage. Here the size of
+// the image refers to the virtual size, while Dawn validates texture copy extent with the
+// physical size, so we need to re-calculate the texture copy extent to ensure it should fit
+// in the virtual size of the subresource.
+Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize) {
+    Extent3D validTextureCopyExtent = copySize;
+    const TextureBase* texture = textureCopy.texture.Get();
+    Extent3D virtualSizeAtLevel = texture->GetMipLevelVirtualSize(textureCopy.mipLevel);
+    ASSERT(textureCopy.origin.x <= virtualSizeAtLevel.width);
+    ASSERT(textureCopy.origin.y <= virtualSizeAtLevel.height);
+    if (copySize.width > virtualSizeAtLevel.width - textureCopy.origin.x) {
+        ASSERT(texture->GetFormat().isCompressed);
+        validTextureCopyExtent.width = virtualSizeAtLevel.width - textureCopy.origin.x;
+    }
+    if (copySize.height > virtualSizeAtLevel.height - textureCopy.origin.y) {
+        ASSERT(texture->GetFormat().isCompressed);
+        validTextureCopyExtent.height = virtualSizeAtLevel.height - textureCopy.origin.y;
     }
 
-    void SetDebugNameInternal(Device* device,
-                              VkObjectType objectType,
-                              uint64_t objectHandle,
-                              const char* prefix,
-                              std::string label) {
-        if (!objectHandle) {
+    return validTextureCopyExtent;
+}
+
+VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+                                               const TextureCopy& textureCopy,
+                                               const Extent3D& copySize) {
+    TextureDataLayout passDataLayout;
+    passDataLayout.offset = bufferCopy.offset;
+    passDataLayout.rowsPerImage = bufferCopy.rowsPerImage;
+    passDataLayout.bytesPerRow = bufferCopy.bytesPerRow;
+    return ComputeBufferImageCopyRegion(passDataLayout, textureCopy, copySize);
+}
+
+VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+                                               const TextureCopy& textureCopy,
+                                               const Extent3D& copySize) {
+    const Texture* texture = ToBackend(textureCopy.texture.Get());
+
+    VkBufferImageCopy region;
+
+    region.bufferOffset = dataLayout.offset;
+    // In Vulkan the row length is in texels while it is in bytes for Dawn
+    const TexelBlockInfo& blockInfo = texture->GetFormat().GetAspectInfo(textureCopy.aspect).block;
+    ASSERT(dataLayout.bytesPerRow % blockInfo.byteSize == 0);
+    region.bufferRowLength = dataLayout.bytesPerRow / blockInfo.byteSize * blockInfo.width;
+    region.bufferImageHeight = dataLayout.rowsPerImage * blockInfo.height;
+
+    region.imageSubresource.aspectMask = VulkanAspectMask(textureCopy.aspect);
+    region.imageSubresource.mipLevel = textureCopy.mipLevel;
+
+    switch (textureCopy.texture->GetDimension()) {
+        case wgpu::TextureDimension::e1D:
+            ASSERT(textureCopy.origin.z == 0 && copySize.depthOrArrayLayers == 1);
+            region.imageOffset.x = textureCopy.origin.x;
+            region.imageOffset.y = 0;
+            region.imageOffset.z = 0;
+            region.imageSubresource.baseArrayLayer = 0;
+            region.imageSubresource.layerCount = 1;
+
+            ASSERT(!textureCopy.texture->GetFormat().isCompressed);
+            region.imageExtent.width = copySize.width;
+            region.imageExtent.height = 1;
+            region.imageExtent.depth = 1;
+            break;
+
+        case wgpu::TextureDimension::e2D: {
+            region.imageOffset.x = textureCopy.origin.x;
+            region.imageOffset.y = textureCopy.origin.y;
+            region.imageOffset.z = 0;
+            region.imageSubresource.baseArrayLayer = textureCopy.origin.z;
+            region.imageSubresource.layerCount = copySize.depthOrArrayLayers;
+
+            Extent3D imageExtent = ComputeTextureCopyExtent(textureCopy, copySize);
+            region.imageExtent.width = imageExtent.width;
+            region.imageExtent.height = imageExtent.height;
+            region.imageExtent.depth = 1;
+            break;
+        }
+
+        case wgpu::TextureDimension::e3D: {
+            region.imageOffset.x = textureCopy.origin.x;
+            region.imageOffset.y = textureCopy.origin.y;
+            region.imageOffset.z = textureCopy.origin.z;
+            region.imageSubresource.baseArrayLayer = 0;
+            region.imageSubresource.layerCount = 1;
+
+            ASSERT(!textureCopy.texture->GetFormat().isCompressed);
+            region.imageExtent.width = copySize.width;
+            region.imageExtent.height = copySize.height;
+            region.imageExtent.depth = copySize.depthOrArrayLayers;
+            break;
+        }
+    }
+
+    return region;
+}
+
+void SetDebugNameInternal(Device* device,
+                          VkObjectType objectType,
+                          uint64_t objectHandle,
+                          const char* prefix,
+                          std::string label) {
+    if (!objectHandle) {
+        return;
+    }
+
+    if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
+        VkDebugUtilsObjectNameInfoEXT objectNameInfo;
+        objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
+        objectNameInfo.pNext = nullptr;
+        objectNameInfo.objectType = objectType;
+        objectNameInfo.objectHandle = objectHandle;
+
+        if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
+            objectNameInfo.pObjectName = prefix;
+            device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
             return;
         }
 
-        if (device->GetGlobalInfo().HasExt(InstanceExt::DebugUtils)) {
-            VkDebugUtilsObjectNameInfoEXT objectNameInfo;
-            objectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
-            objectNameInfo.pNext = nullptr;
-            objectNameInfo.objectType = objectType;
-            objectNameInfo.objectHandle = objectHandle;
+        std::string objectName = prefix;
+        objectName += "_";
+        objectName += label;
+        objectNameInfo.pObjectName = objectName.c_str();
+        device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
+    }
+}
 
-            if (label.empty() || !device->IsToggleEnabled(Toggle::UseUserDefinedLabelsInBackend)) {
-                objectNameInfo.pObjectName = prefix;
-                device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
-                return;
-            }
+VkSpecializationInfo* GetVkSpecializationInfo(
+    const ProgrammableStage& programmableStage,
+    VkSpecializationInfo* specializationInfo,
+    std::vector<OverridableConstantScalar>* specializationDataEntries,
+    std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
+    ASSERT(specializationInfo);
+    ASSERT(specializationDataEntries);
+    ASSERT(specializationMapEntries);
 
-            std::string objectName = prefix;
-            objectName += "_";
-            objectName += label;
-            objectNameInfo.pObjectName = objectName.c_str();
-            device->fn.SetDebugUtilsObjectNameEXT(device->GetVkDevice(), &objectNameInfo);
-        }
+    if (programmableStage.constants.size() == 0) {
+        return nullptr;
     }
 
-    VkSpecializationInfo* GetVkSpecializationInfo(
-        const ProgrammableStage& programmableStage,
-        VkSpecializationInfo* specializationInfo,
-        std::vector<OverridableConstantScalar>* specializationDataEntries,
-        std::vector<VkSpecializationMapEntry>* specializationMapEntries) {
-        ASSERT(specializationInfo);
-        ASSERT(specializationDataEntries);
-        ASSERT(specializationMapEntries);
+    const EntryPointMetadata& entryPointMetaData =
+        programmableStage.module->GetEntryPoint(programmableStage.entryPoint);
 
-        if (programmableStage.constants.size() == 0) {
-            return nullptr;
+    for (const auto& pipelineConstant : programmableStage.constants) {
+        const std::string& identifier = pipelineConstant.first;
+        double value = pipelineConstant.second;
+
+        // This is already validated so `identifier` must exist
+        const auto& moduleConstant = entryPointMetaData.overridableConstants.at(identifier);
+
+        specializationMapEntries->push_back(
+            VkSpecializationMapEntry{moduleConstant.id,
+                                     static_cast<uint32_t>(specializationDataEntries->size() *
+                                                           sizeof(OverridableConstantScalar)),
+                                     sizeof(OverridableConstantScalar)});
+
+        OverridableConstantScalar entry{};
+        switch (moduleConstant.type) {
+            case EntryPointMetadata::OverridableConstant::Type::Boolean:
+                entry.b = static_cast<int32_t>(value);
+                break;
+            case EntryPointMetadata::OverridableConstant::Type::Float32:
+                entry.f32 = static_cast<float>(value);
+                break;
+            case EntryPointMetadata::OverridableConstant::Type::Int32:
+                entry.i32 = static_cast<int32_t>(value);
+                break;
+            case EntryPointMetadata::OverridableConstant::Type::Uint32:
+                entry.u32 = static_cast<uint32_t>(value);
+                break;
+            default:
+                UNREACHABLE();
         }
-
-        const EntryPointMetadata& entryPointMetaData =
-            programmableStage.module->GetEntryPoint(programmableStage.entryPoint);
-
-        for (const auto& pipelineConstant : programmableStage.constants) {
-            const std::string& identifier = pipelineConstant.first;
-            double value = pipelineConstant.second;
-
-            // This is already validated so `identifier` must exist
-            const auto& moduleConstant = entryPointMetaData.overridableConstants.at(identifier);
-
-            specializationMapEntries->push_back(
-                VkSpecializationMapEntry{moduleConstant.id,
-                                         static_cast<uint32_t>(specializationDataEntries->size() *
-                                                               sizeof(OverridableConstantScalar)),
-                                         sizeof(OverridableConstantScalar)});
-
-            OverridableConstantScalar entry{};
-            switch (moduleConstant.type) {
-                case EntryPointMetadata::OverridableConstant::Type::Boolean:
-                    entry.b = static_cast<int32_t>(value);
-                    break;
-                case EntryPointMetadata::OverridableConstant::Type::Float32:
-                    entry.f32 = static_cast<float>(value);
-                    break;
-                case EntryPointMetadata::OverridableConstant::Type::Int32:
-                    entry.i32 = static_cast<int32_t>(value);
-                    break;
-                case EntryPointMetadata::OverridableConstant::Type::Uint32:
-                    entry.u32 = static_cast<uint32_t>(value);
-                    break;
-                default:
-                    UNREACHABLE();
-            }
-            specializationDataEntries->push_back(entry);
-        }
-
-        specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
-        specializationInfo->pMapEntries = specializationMapEntries->data();
-        specializationInfo->dataSize =
-            specializationDataEntries->size() * sizeof(OverridableConstantScalar);
-        specializationInfo->pData = specializationDataEntries->data();
-
-        return specializationInfo;
+        specializationDataEntries->push_back(entry);
     }
 
+    specializationInfo->mapEntryCount = static_cast<uint32_t>(specializationMapEntries->size());
+    specializationInfo->pMapEntries = specializationMapEntries->data();
+    specializationInfo->dataSize =
+        specializationDataEntries->size() * sizeof(OverridableConstantScalar);
+    specializationInfo->pData = specializationDataEntries->data();
+
+    return specializationInfo;
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/UtilsVulkan.h b/src/dawn/native/vulkan/UtilsVulkan.h
index 3377f756..572b2a7 100644
--- a/src/dawn/native/vulkan/UtilsVulkan.h
+++ b/src/dawn/native/vulkan/UtilsVulkan.h
@@ -23,133 +23,132 @@
 #include "dawn/native/dawn_platform.h"
 
 namespace dawn::native {
-    struct ProgrammableStage;
-    union OverridableConstantScalar;
+struct ProgrammableStage;
+union OverridableConstantScalar;
 }  // namespace dawn::native
 
 namespace dawn::native::vulkan {
 
-    class Device;
+class Device;
 
-    // A Helper type used to build a pNext chain of extension structs.
-    // Usage is:
-    //   1) Create instance, passing the address of the first struct in the chain. This requires
-    //      pNext to be nullptr. If you already have a chain you need to pass a pointer to the tail
-    //      of it.
+// A Helper type used to build a pNext chain of extension structs.
+// Usage is:
+//   1) Create instance, passing the address of the first struct in the chain. This requires
+//      pNext to be nullptr. If you already have a chain you need to pass a pointer to the tail
+//      of it.
+//
+//   2) Call Add(&vk_struct) every time a new struct needs to be appended to the chain.
+//
+//   3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to initialize the struct
+//      with a given VkStructureType value while appending it to the chain.
+//
+// Examples:
+//     VkPhysicalFeatures2 features2 = {
+//       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+//       .pNext = nullptr,
+//     };
+//
+//     PNextChainBuilder featuresChain(&features2);
+//
+//     featuresChain.Add(&featuresExtensions.subgroupSizeControl,
+//                       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+//
+struct PNextChainBuilder {
+    // Constructor takes the address of a Vulkan structure instance, and
+    // walks its pNext chain to record the current location of its tail.
     //
-    //   2) Call Add(&vk_struct) every time a new struct needs to be appended to the chain.
-    //
-    //   3) Alternatively, call Add(&vk_struct, VK_STRUCTURE_TYPE_XXX) to initialize the struct
-    //      with a given VkStructureType value while appending it to the chain.
-    //
-    // Examples:
-    //     VkPhysicalFeatures2 features2 = {
-    //       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
-    //       .pNext = nullptr,
-    //     };
-    //
-    //     PNextChainBuilder featuresChain(&features2);
-    //
-    //     featuresChain.Add(&featuresExtensions.subgroupSizeControl,
-    //                       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
-    //
-    struct PNextChainBuilder {
-        // Constructor takes the address of a Vulkan structure instance, and
-        // walks its pNext chain to record the current location of its tail.
-        //
-        // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
-        // which is why the VkBaseOutStructure* casts below are necessary.
-        template <typename VK_STRUCT_TYPE>
-        explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
-            : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
-            while (mCurrent->pNext != nullptr) {
-                mCurrent = mCurrent->pNext;
-            }
-        }
-
-        // Add one item to the chain. |vk_struct| must be a Vulkan structure
-        // that is already initialized.
-        template <typename VK_STRUCT_TYPE>
-        void Add(VK_STRUCT_TYPE* vkStruct) {
-            // Checks to ensure proper type safety.
-            static_assert(
-                offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
-                    offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
-                "Argument type is not a proper Vulkan structure type");
-            vkStruct->pNext = nullptr;
-
-            mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
+    // NOTE: Some VK_STRUCT_TYPEs define their pNext field as a const void*
+    // which is why the VkBaseOutStructure* casts below are necessary.
+    template <typename VK_STRUCT_TYPE>
+    explicit PNextChainBuilder(VK_STRUCT_TYPE* head)
+        : mCurrent(reinterpret_cast<VkBaseOutStructure*>(head)) {
+        while (mCurrent->pNext != nullptr) {
             mCurrent = mCurrent->pNext;
         }
-
-        // A variant of Add() above that also initializes the |sType| field in |vk_struct|.
-        template <typename VK_STRUCT_TYPE>
-        void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
-            vkStruct->sType = sType;
-            Add(vkStruct);
-        }
-
-      private:
-        VkBaseOutStructure* mCurrent;
-    };
-
-    VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
-
-    VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
-
-    Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
-
-    VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
-                                                   const TextureCopy& textureCopy,
-                                                   const Extent3D& copySize);
-    VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
-                                                   const TextureCopy& textureCopy,
-                                                   const Extent3D& copySize);
-
-    // Gets the associated VkObjectType for any non-dispatchable handle
-    template <class HandleType>
-    VkObjectType GetVkObjectType(HandleType handle);
-
-    void SetDebugNameInternal(Device* device,
-                              VkObjectType objectType,
-                              uint64_t objectHandle,
-                              const char* prefix,
-                              std::string label);
-
-    // The majority of Vulkan handles are "non-dispatchable". Dawn wraps these by overriding
-    // VK_DEFINE_NON_DISPATCHABLE_HANDLE to add some capabilities like making null comparisons
-    // easier. In those cases we can make setting the debug name a bit easier by getting the
-    // object type automatically and handling the indirection to the native handle.
-    template <typename Tag, typename HandleType>
-    void SetDebugName(Device* device,
-                      detail::VkHandle<Tag, HandleType> objectHandle,
-                      const char* prefix,
-                      std::string label = "") {
-        SetDebugNameInternal(device, GetVkObjectType(objectHandle),
-                             reinterpret_cast<uint64_t>(objectHandle.GetHandle()), prefix, label);
     }
 
-    // Handles like VkQueue and VKDevice require a special path because they are dispatchable, so
-    // they require an explicit VkObjectType and cast to a uint64_t directly rather than by getting
-    // the non-dispatchable wrapper's underlying handle.
-    template <typename HandleType>
-    void SetDebugName(Device* device,
-                      VkObjectType objectType,
-                      HandleType objectHandle,
-                      const char* prefix,
-                      std::string label = "") {
-        SetDebugNameInternal(device, objectType, reinterpret_cast<uint64_t>(objectHandle), prefix,
-                             label);
+    // Add one item to the chain. |vk_struct| must be a Vulkan structure
+    // that is already initialized.
+    template <typename VK_STRUCT_TYPE>
+    void Add(VK_STRUCT_TYPE* vkStruct) {
+        // Checks to ensure proper type safety.
+        static_assert(offsetof(VK_STRUCT_TYPE, sType) == offsetof(VkBaseOutStructure, sType) &&
+                          offsetof(VK_STRUCT_TYPE, pNext) == offsetof(VkBaseOutStructure, pNext),
+                      "Argument type is not a proper Vulkan structure type");
+        vkStruct->pNext = nullptr;
+
+        mCurrent->pNext = reinterpret_cast<VkBaseOutStructure*>(vkStruct);
+        mCurrent = mCurrent->pNext;
     }
 
-    // Returns nullptr or &specializationInfo
-    // specializationInfo, specializationDataEntries, specializationMapEntries needs to
-    // be alive at least until VkSpecializationInfo is passed into Vulkan Create*Pipelines
-    VkSpecializationInfo* GetVkSpecializationInfo(
-        const ProgrammableStage& programmableStage,
-        VkSpecializationInfo* specializationInfo,
-        std::vector<OverridableConstantScalar>* specializationDataEntries,
-        std::vector<VkSpecializationMapEntry>* specializationMapEntries);
+    // A variant of Add() above that also initializes the |sType| field in |vk_struct|.
+    template <typename VK_STRUCT_TYPE>
+    void Add(VK_STRUCT_TYPE* vkStruct, VkStructureType sType) {
+        vkStruct->sType = sType;
+        Add(vkStruct);
+    }
+
+  private:
+    VkBaseOutStructure* mCurrent;
+};
+
+VkCompareOp ToVulkanCompareOp(wgpu::CompareFunction op);
+
+VkImageAspectFlags VulkanAspectMask(const Aspect& aspects);
+
+Extent3D ComputeTextureCopyExtent(const TextureCopy& textureCopy, const Extent3D& copySize);
+
+VkBufferImageCopy ComputeBufferImageCopyRegion(const BufferCopy& bufferCopy,
+                                               const TextureCopy& textureCopy,
+                                               const Extent3D& copySize);
+VkBufferImageCopy ComputeBufferImageCopyRegion(const TextureDataLayout& dataLayout,
+                                               const TextureCopy& textureCopy,
+                                               const Extent3D& copySize);
+
+// Gets the associated VkObjectType for any non-dispatchable handle
+template <class HandleType>
+VkObjectType GetVkObjectType(HandleType handle);
+
+void SetDebugNameInternal(Device* device,
+                          VkObjectType objectType,
+                          uint64_t objectHandle,
+                          const char* prefix,
+                          std::string label);
+
+// The majority of Vulkan handles are "non-dispatchable". Dawn wraps these by overriding
+// VK_DEFINE_NON_DISPATCHABLE_HANDLE to add some capabilities like making null comparisons
+// easier. In those cases we can make setting the debug name a bit easier by getting the
+// object type automatically and handling the indirection to the native handle.
+template <typename Tag, typename HandleType>
+void SetDebugName(Device* device,
+                  detail::VkHandle<Tag, HandleType> objectHandle,
+                  const char* prefix,
+                  std::string label = "") {
+    SetDebugNameInternal(device, GetVkObjectType(objectHandle),
+                         reinterpret_cast<uint64_t>(objectHandle.GetHandle()), prefix, label);
+}
+
+// Handles like VkQueue and VKDevice require a special path because they are dispatchable, so
+// they require an explicit VkObjectType and cast to a uint64_t directly rather than by getting
+// the non-dispatchable wrapper's underlying handle.
+template <typename HandleType>
+void SetDebugName(Device* device,
+                  VkObjectType objectType,
+                  HandleType objectHandle,
+                  const char* prefix,
+                  std::string label = "") {
+    SetDebugNameInternal(device, objectType, reinterpret_cast<uint64_t>(objectHandle), prefix,
+                         label);
+}
+
+// Returns nullptr or &specializationInfo
+// specializationInfo, specializationDataEntries, specializationMapEntries needs to
+// be alive at least until VkSpecializationInfo is passed into Vulkan Create*Pipelines
+VkSpecializationInfo* GetVkSpecializationInfo(
+    const ProgrammableStage& programmableStage,
+    VkSpecializationInfo* specializationInfo,
+    std::vector<OverridableConstantScalar>* specializationDataEntries,
+    std::vector<VkSpecializationMapEntry>* specializationMapEntries);
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/VulkanBackend.cpp b/src/dawn/native/vulkan/VulkanBackend.cpp
index e8f630a..1b18e45 100644
--- a/src/dawn/native/vulkan/VulkanBackend.cpp
+++ b/src/dawn/native/vulkan/VulkanBackend.cpp
@@ -28,102 +28,95 @@
 
 namespace dawn::native::vulkan {
 
-    VkInstance GetInstance(WGPUDevice device) {
-        Device* backendDevice = ToBackend(FromAPI(device));
-        return backendDevice->GetVkInstance();
-    }
+VkInstance GetInstance(WGPUDevice device) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+    return backendDevice->GetVkInstance();
+}
 
-    DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device,
-                                                              const char* pName) {
-        Device* backendDevice = ToBackend(FromAPI(device));
-        return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
-    }
+DAWN_NATIVE_EXPORT PFN_vkVoidFunction GetInstanceProcAddr(WGPUDevice device, const char* pName) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+    return (*backendDevice->fn.GetInstanceProcAddr)(backendDevice->GetVkInstance(), pName);
+}
 
-    // Explicitly export this function because it uses the "native" type for surfaces while the
-    // header as seen in this file uses the wrapped type.
-    DAWN_NATIVE_EXPORT DawnSwapChainImplementation
-    CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
-        Device* backendDevice = ToBackend(FromAPI(device));
-        VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
+// Explicitly export this function because it uses the "native" type for surfaces while the
+// header as seen in this file uses the wrapped type.
+DAWN_NATIVE_EXPORT DawnSwapChainImplementation
+CreateNativeSwapChainImpl(WGPUDevice device, ::VkSurfaceKHR surfaceNative) {
+    Device* backendDevice = ToBackend(FromAPI(device));
+    VkSurfaceKHR surface = VkSurfaceKHR::CreateFromHandle(surfaceNative);
 
-        DawnSwapChainImplementation impl;
-        impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
-        impl.textureUsage = WGPUTextureUsage_Present;
+    DawnSwapChainImplementation impl;
+    impl = CreateSwapChainImplementation(new NativeSwapChainImpl(backendDevice, surface));
+    impl.textureUsage = WGPUTextureUsage_Present;
 
-        return impl;
-    }
+    return impl;
+}
 
-    WGPUTextureFormat GetNativeSwapChainPreferredFormat(
-        const DawnSwapChainImplementation* swapChain) {
-        NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
-        return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
-    }
+WGPUTextureFormat GetNativeSwapChainPreferredFormat(const DawnSwapChainImplementation* swapChain) {
+    NativeSwapChainImpl* impl = reinterpret_cast<NativeSwapChainImpl*>(swapChain->userData);
+    return static_cast<WGPUTextureFormat>(impl->GetPreferredFormat());
+}
 
-    AdapterDiscoveryOptions::AdapterDiscoveryOptions()
-        : AdapterDiscoveryOptionsBase(WGPUBackendType_Vulkan) {
-    }
+AdapterDiscoveryOptions::AdapterDiscoveryOptions()
+    : AdapterDiscoveryOptionsBase(WGPUBackendType_Vulkan) {}
 
 #if defined(DAWN_PLATFORM_LINUX)
-    ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
-        : ExternalImageDescriptorFD(ExternalImageType::OpaqueFD) {
-    }
+ExternalImageDescriptorOpaqueFD::ExternalImageDescriptorOpaqueFD()
+    : ExternalImageDescriptorFD(ExternalImageType::OpaqueFD) {}
 
-    ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
-        : ExternalImageDescriptorFD(ExternalImageType::DmaBuf) {
-    }
+ExternalImageDescriptorDmaBuf::ExternalImageDescriptorDmaBuf()
+    : ExternalImageDescriptorFD(ExternalImageType::DmaBuf) {}
 
-    ExternalImageExportInfoOpaqueFD::ExternalImageExportInfoOpaqueFD()
-        : ExternalImageExportInfoFD(ExternalImageType::OpaqueFD) {
-    }
+ExternalImageExportInfoOpaqueFD::ExternalImageExportInfoOpaqueFD()
+    : ExternalImageExportInfoFD(ExternalImageType::OpaqueFD) {}
 
-    ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
-        : ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {
-    }
+ExternalImageExportInfoDmaBuf::ExternalImageExportInfoDmaBuf()
+    : ExternalImageExportInfoFD(ExternalImageType::DmaBuf) {}
 #endif  // DAWN_PLATFORM_LINUX
 
-    WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
+WGPUTexture WrapVulkanImage(WGPUDevice device, const ExternalImageDescriptorVk* descriptor) {
 #if defined(DAWN_PLATFORM_LINUX)
-        switch (descriptor->GetType()) {
-            case ExternalImageType::OpaqueFD:
-            case ExternalImageType::DmaBuf: {
-                Device* backendDevice = ToBackend(FromAPI(device));
-                const ExternalImageDescriptorFD* fdDescriptor =
-                    static_cast<const ExternalImageDescriptorFD*>(descriptor);
+    switch (descriptor->GetType()) {
+        case ExternalImageType::OpaqueFD:
+        case ExternalImageType::DmaBuf: {
+            Device* backendDevice = ToBackend(FromAPI(device));
+            const ExternalImageDescriptorFD* fdDescriptor =
+                static_cast<const ExternalImageDescriptorFD*>(descriptor);
 
-                return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
-                    fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
-            }
-            default:
-                return nullptr;
+            return ToAPI(backendDevice->CreateTextureWrappingVulkanImage(
+                fdDescriptor, fdDescriptor->memoryFD, fdDescriptor->waitFDs));
         }
-#else
-        return nullptr;
-#endif  // DAWN_PLATFORM_LINUX
+        default:
+            return nullptr;
     }
-
-    bool ExportVulkanImage(WGPUTexture texture,
-                           VkImageLayout desiredLayout,
-                           ExternalImageExportInfoVk* info) {
-        if (texture == nullptr) {
-            return false;
-        }
-#if defined(DAWN_PLATFORM_LINUX)
-        switch (info->GetType()) {
-            case ExternalImageType::OpaqueFD:
-            case ExternalImageType::DmaBuf: {
-                Texture* backendTexture = ToBackend(FromAPI(texture));
-                Device* device = ToBackend(backendTexture->GetDevice());
-                ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
-
-                return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
-                                                              &fdInfo->semaphoreHandles);
-            }
-            default:
-                return false;
-        }
 #else
+    return nullptr;
+#endif  // DAWN_PLATFORM_LINUX
+}
+
+bool ExportVulkanImage(WGPUTexture texture,
+                       VkImageLayout desiredLayout,
+                       ExternalImageExportInfoVk* info) {
+    if (texture == nullptr) {
         return false;
-#endif  // DAWN_PLATFORM_LINUX
     }
+#if defined(DAWN_PLATFORM_LINUX)
+    switch (info->GetType()) {
+        case ExternalImageType::OpaqueFD:
+        case ExternalImageType::DmaBuf: {
+            Texture* backendTexture = ToBackend(FromAPI(texture));
+            Device* device = ToBackend(backendTexture->GetDevice());
+            ExternalImageExportInfoFD* fdInfo = static_cast<ExternalImageExportInfoFD*>(info);
+
+            return device->SignalAndExportExternalTexture(backendTexture, desiredLayout, fdInfo,
+                                                          &fdInfo->semaphoreHandles);
+        }
+        default:
+            return false;
+    }
+#else
+    return false;
+#endif  // DAWN_PLATFORM_LINUX
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanError.cpp b/src/dawn/native/vulkan/VulkanError.cpp
index 49416b9..3b7b37c 100644
--- a/src/dawn/native/vulkan/VulkanError.cpp
+++ b/src/dawn/native/vulkan/VulkanError.cpp
@@ -18,92 +18,92 @@
 
 namespace dawn::native::vulkan {
 
-    const char* VkResultAsString(::VkResult result) {
-        // Convert to a int32_t to silence and MSVC warning that the fake errors don't appear in
-        // the original VkResult enum.
-        int32_t code = static_cast<int32_t>(result);
+const char* VkResultAsString(::VkResult result) {
+    // Convert to a int32_t to silence and MSVC warning that the fake errors don't appear in
+    // the original VkResult enum.
+    int32_t code = static_cast<int32_t>(result);
 
-        switch (code) {
-            case VK_SUCCESS:
-                return "VK_SUCCESS";
-            case VK_NOT_READY:
-                return "VK_NOT_READY";
-            case VK_TIMEOUT:
-                return "VK_TIMEOUT";
-            case VK_EVENT_SET:
-                return "VK_EVENT_SET";
-            case VK_EVENT_RESET:
-                return "VK_EVENT_RESET";
-            case VK_INCOMPLETE:
-                return "VK_INCOMPLETE";
-            case VK_ERROR_OUT_OF_HOST_MEMORY:
-                return "VK_ERROR_OUT_OF_HOST_MEMORY";
-            case VK_ERROR_OUT_OF_DEVICE_MEMORY:
-                return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
-            case VK_ERROR_INITIALIZATION_FAILED:
-                return "VK_ERROR_INITIALIZATION_FAILED";
-            case VK_ERROR_DEVICE_LOST:
-                return "VK_ERROR_DEVICE_LOST";
-            case VK_ERROR_MEMORY_MAP_FAILED:
-                return "VK_ERROR_MEMORY_MAP_FAILED";
-            case VK_ERROR_LAYER_NOT_PRESENT:
-                return "VK_ERROR_LAYER_NOT_PRESENT";
-            case VK_ERROR_EXTENSION_NOT_PRESENT:
-                return "VK_ERROR_EXTENSION_NOT_PRESENT";
-            case VK_ERROR_FEATURE_NOT_PRESENT:
-                return "VK_ERROR_FEATURE_NOT_PRESENT";
-            case VK_ERROR_INCOMPATIBLE_DRIVER:
-                return "VK_ERROR_INCOMPATIBLE_DRIVER";
-            case VK_ERROR_TOO_MANY_OBJECTS:
-                return "VK_ERROR_TOO_MANY_OBJECTS";
-            case VK_ERROR_FORMAT_NOT_SUPPORTED:
-                return "VK_ERROR_FORMAT_NOT_SUPPORTED";
-            case VK_ERROR_FRAGMENTED_POOL:
-                return "VK_ERROR_FRAGMENTED_POOL";
+    switch (code) {
+        case VK_SUCCESS:
+            return "VK_SUCCESS";
+        case VK_NOT_READY:
+            return "VK_NOT_READY";
+        case VK_TIMEOUT:
+            return "VK_TIMEOUT";
+        case VK_EVENT_SET:
+            return "VK_EVENT_SET";
+        case VK_EVENT_RESET:
+            return "VK_EVENT_RESET";
+        case VK_INCOMPLETE:
+            return "VK_INCOMPLETE";
+        case VK_ERROR_OUT_OF_HOST_MEMORY:
+            return "VK_ERROR_OUT_OF_HOST_MEMORY";
+        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
+            return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
+        case VK_ERROR_INITIALIZATION_FAILED:
+            return "VK_ERROR_INITIALIZATION_FAILED";
+        case VK_ERROR_DEVICE_LOST:
+            return "VK_ERROR_DEVICE_LOST";
+        case VK_ERROR_MEMORY_MAP_FAILED:
+            return "VK_ERROR_MEMORY_MAP_FAILED";
+        case VK_ERROR_LAYER_NOT_PRESENT:
+            return "VK_ERROR_LAYER_NOT_PRESENT";
+        case VK_ERROR_EXTENSION_NOT_PRESENT:
+            return "VK_ERROR_EXTENSION_NOT_PRESENT";
+        case VK_ERROR_FEATURE_NOT_PRESENT:
+            return "VK_ERROR_FEATURE_NOT_PRESENT";
+        case VK_ERROR_INCOMPATIBLE_DRIVER:
+            return "VK_ERROR_INCOMPATIBLE_DRIVER";
+        case VK_ERROR_TOO_MANY_OBJECTS:
+            return "VK_ERROR_TOO_MANY_OBJECTS";
+        case VK_ERROR_FORMAT_NOT_SUPPORTED:
+            return "VK_ERROR_FORMAT_NOT_SUPPORTED";
+        case VK_ERROR_FRAGMENTED_POOL:
+            return "VK_ERROR_FRAGMENTED_POOL";
 
-            case VK_ERROR_SURFACE_LOST_KHR:
-                return "VK_ERROR_SURFACE_LOST_KHR";
-            case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
-                return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
+        case VK_ERROR_SURFACE_LOST_KHR:
+            return "VK_ERROR_SURFACE_LOST_KHR";
+        case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
+            return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
 
-            case VK_FAKE_DEVICE_OOM_FOR_TESTING:
-                return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
-            case VK_FAKE_ERROR_FOR_TESTING:
-                return "VK_FAKE_ERROR_FOR_TESTING";
-            default:
-                return "<Unknown VkResult>";
-        }
+        case VK_FAKE_DEVICE_OOM_FOR_TESTING:
+            return "VK_FAKE_DEVICE_OOM_FOR_TESTING";
+        case VK_FAKE_ERROR_FOR_TESTING:
+            return "VK_FAKE_ERROR_FOR_TESTING";
+        default:
+            return "<Unknown VkResult>";
+    }
+}
+
+MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
+    if (DAWN_LIKELY(result == VK_SUCCESS)) {
+        return {};
     }
 
-    MaybeError CheckVkSuccessImpl(VkResult result, const char* context) {
-        if (DAWN_LIKELY(result == VK_SUCCESS)) {
-            return {};
-        }
+    std::string message = std::string(context) + " failed with " + VkResultAsString(result);
 
-        std::string message = std::string(context) + " failed with " + VkResultAsString(result);
+    if (result == VK_ERROR_DEVICE_LOST) {
+        return DAWN_DEVICE_LOST_ERROR(message);
+    } else {
+        return DAWN_INTERNAL_ERROR(message);
+    }
+}
 
-        if (result == VK_ERROR_DEVICE_LOST) {
-            return DAWN_DEVICE_LOST_ERROR(message);
-        } else {
-            return DAWN_INTERNAL_ERROR(message);
-        }
+MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
+    if (DAWN_LIKELY(result == VK_SUCCESS)) {
+        return {};
     }
 
-    MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context) {
-        if (DAWN_LIKELY(result == VK_SUCCESS)) {
-            return {};
-        }
+    std::string message = std::string(context) + " failed with " + VkResultAsString(result);
 
-        std::string message = std::string(context) + " failed with " + VkResultAsString(result);
-
-        if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
-            result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
-            return DAWN_OUT_OF_MEMORY_ERROR(message);
-        } else if (result == VK_ERROR_DEVICE_LOST) {
-            return DAWN_DEVICE_LOST_ERROR(message);
-        } else {
-            return DAWN_INTERNAL_ERROR(message);
-        }
+    if (result == VK_ERROR_OUT_OF_DEVICE_MEMORY || result == VK_ERROR_OUT_OF_HOST_MEMORY ||
+        result == VK_FAKE_DEVICE_OOM_FOR_TESTING) {
+        return DAWN_OUT_OF_MEMORY_ERROR(message);
+    } else if (result == VK_ERROR_DEVICE_LOST) {
+        return DAWN_DEVICE_LOST_ERROR(message);
+    } else {
+        return DAWN_INTERNAL_ERROR(message);
     }
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanError.h b/src/dawn/native/vulkan/VulkanError.h
index 90d1242..0651fee 100644
--- a/src/dawn/native/vulkan/VulkanError.h
+++ b/src/dawn/native/vulkan/VulkanError.h
@@ -23,11 +23,11 @@
 
 namespace dawn::native::vulkan {
 
-    // Returns a string version of the result.
-    const char* VkResultAsString(::VkResult result);
+// Returns a string version of the result.
+const char* VkResultAsString(::VkResult result);
 
-    MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
-    MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
+MaybeError CheckVkSuccessImpl(VkResult result, const char* context);
+MaybeError CheckVkOOMThenSuccessImpl(VkResult result, const char* context);
 
 // Returns a success only if result if VK_SUCCESS, an error with the context and stringified
 // result value instead. Can be used like this:
diff --git a/src/dawn/native/vulkan/VulkanExtensions.cpp b/src/dawn/native/vulkan/VulkanExtensions.cpp
index 5262313..6f6ab1d 100644
--- a/src/dawn/native/vulkan/VulkanExtensions.cpp
+++ b/src/dawn/native/vulkan/VulkanExtensions.cpp
@@ -22,315 +22,315 @@
 
 namespace dawn::native::vulkan {
 
-    static constexpr uint32_t VulkanVersion_1_1 = VK_MAKE_VERSION(1, 1, 0);
-    static constexpr uint32_t VulkanVersion_1_2 = VK_MAKE_VERSION(1, 2, 0);
-    static constexpr uint32_t VulkanVersion_1_3 = VK_MAKE_VERSION(1, 3, 0);
-    static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
+static constexpr uint32_t VulkanVersion_1_1 = VK_MAKE_VERSION(1, 1, 0);
+static constexpr uint32_t VulkanVersion_1_2 = VK_MAKE_VERSION(1, 2, 0);
+static constexpr uint32_t VulkanVersion_1_3 = VK_MAKE_VERSION(1, 3, 0);
+static constexpr uint32_t NeverPromoted = std::numeric_limits<uint32_t>::max();
 
-    // A static array for InstanceExtInfo that can be indexed with InstanceExts.
-    // GetInstanceExtInfo checks that "index" matches the index used to access this array so an
-    // assert will fire if it isn't in the correct order.
-    static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
-    static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
-        //
-        {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
-         VulkanVersion_1_1},
-        {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
-         VulkanVersion_1_1},
-        {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
-         VulkanVersion_1_1},
+// A static array for InstanceExtInfo that can be indexed with InstanceExts.
+// GetInstanceExtInfo checks that "index" matches the index used to access this array so an
+// assert will fire if it isn't in the correct order.
+static constexpr size_t kInstanceExtCount = static_cast<size_t>(InstanceExt::EnumCount);
+static constexpr std::array<InstanceExtInfo, kInstanceExtCount> sInstanceExtInfos{{
+    //
+    {InstanceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+     VulkanVersion_1_1},
+    {InstanceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+     VulkanVersion_1_1},
+    {InstanceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+     VulkanVersion_1_1},
 
-        {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
-        {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
-        {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
-        {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
-        {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
-        {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
-        {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
-        {InstanceExt::AndroidSurface, "VK_KHR_android_surface", NeverPromoted},
+    {InstanceExt::Surface, "VK_KHR_surface", NeverPromoted},
+    {InstanceExt::FuchsiaImagePipeSurface, "VK_FUCHSIA_imagepipe_surface", NeverPromoted},
+    {InstanceExt::MetalSurface, "VK_EXT_metal_surface", NeverPromoted},
+    {InstanceExt::WaylandSurface, "VK_KHR_wayland_surface", NeverPromoted},
+    {InstanceExt::Win32Surface, "VK_KHR_win32_surface", NeverPromoted},
+    {InstanceExt::XcbSurface, "VK_KHR_xcb_surface", NeverPromoted},
+    {InstanceExt::XlibSurface, "VK_KHR_xlib_surface", NeverPromoted},
+    {InstanceExt::AndroidSurface, "VK_KHR_android_surface", NeverPromoted},
 
-        {InstanceExt::DebugUtils, "VK_EXT_debug_utils", NeverPromoted},
-        {InstanceExt::ValidationFeatures, "VK_EXT_validation_features", NeverPromoted},
-        //
-    }};
+    {InstanceExt::DebugUtils, "VK_EXT_debug_utils", NeverPromoted},
+    {InstanceExt::ValidationFeatures, "VK_EXT_validation_features", NeverPromoted},
+    //
+}};
 
-    const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
-        uint32_t index = static_cast<uint32_t>(ext);
-        ASSERT(index < sInstanceExtInfos.size());
-        ASSERT(sInstanceExtInfos[index].index == ext);
-        return sInstanceExtInfos[index];
+const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext) {
+    uint32_t index = static_cast<uint32_t>(ext);
+    ASSERT(index < sInstanceExtInfos.size());
+    ASSERT(sInstanceExtInfos[index].index == ext);
+    return sInstanceExtInfos[index];
+}
+
+std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
+    std::unordered_map<std::string, InstanceExt> result;
+    for (const InstanceExtInfo& info : sInstanceExtInfos) {
+        result[info.name] = info.index;
     }
+    return result;
+}
 
-    std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap() {
-        std::unordered_map<std::string, InstanceExt> result;
-        for (const InstanceExtInfo& info : sInstanceExtInfos) {
-            result[info.name] = info.index;
-        }
-        return result;
-    }
+InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
+    // We need to check that all transitive dependencies of extensions are advertised.
+    // To do that in a single pass and no data structures, the extensions are topologically
+    // sorted in the definition of InstanceExt.
+    // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
+    // dependency check will first assert all its dependents have been visited.
+    InstanceExtSet visitedSet;
+    InstanceExtSet trimmedSet;
 
-    InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts) {
-        // We need to check that all transitive dependencies of extensions are advertised.
-        // To do that in a single pass and no data structures, the extensions are topologically
-        // sorted in the definition of InstanceExt.
-        // To ensure the order is correct, we mark visited extensions in `visitedSet` and each
-        // dependency check will first assert all its dependents have been visited.
-        InstanceExtSet visitedSet;
-        InstanceExtSet trimmedSet;
+    auto HasDep = [&](InstanceExt ext) -> bool {
+        ASSERT(visitedSet[ext]);
+        return trimmedSet[ext];
+    };
 
-        auto HasDep = [&](InstanceExt ext) -> bool {
-            ASSERT(visitedSet[ext]);
-            return trimmedSet[ext];
-        };
+    for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
+        InstanceExt ext = static_cast<InstanceExt>(i);
 
-        for (uint32_t i = 0; i < sInstanceExtInfos.size(); i++) {
-            InstanceExt ext = static_cast<InstanceExt>(i);
+        bool hasDependencies = false;
+        switch (ext) {
+            case InstanceExt::GetPhysicalDeviceProperties2:
+            case InstanceExt::Surface:
+            case InstanceExt::DebugUtils:
+            case InstanceExt::ValidationFeatures:
+                hasDependencies = true;
+                break;
 
-            bool hasDependencies = false;
-            switch (ext) {
-                case InstanceExt::GetPhysicalDeviceProperties2:
-                case InstanceExt::Surface:
-                case InstanceExt::DebugUtils:
-                case InstanceExt::ValidationFeatures:
-                    hasDependencies = true;
-                    break;
+            case InstanceExt::ExternalMemoryCapabilities:
+            case InstanceExt::ExternalSemaphoreCapabilities:
+                hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
+                break;
 
-                case InstanceExt::ExternalMemoryCapabilities:
-                case InstanceExt::ExternalSemaphoreCapabilities:
-                    hasDependencies = HasDep(InstanceExt::GetPhysicalDeviceProperties2);
-                    break;
+            case InstanceExt::AndroidSurface:
+            case InstanceExt::FuchsiaImagePipeSurface:
+            case InstanceExt::MetalSurface:
+            case InstanceExt::WaylandSurface:
+            case InstanceExt::Win32Surface:
+            case InstanceExt::XcbSurface:
+            case InstanceExt::XlibSurface:
+                hasDependencies = HasDep(InstanceExt::Surface);
+                break;
 
-                case InstanceExt::AndroidSurface:
-                case InstanceExt::FuchsiaImagePipeSurface:
-                case InstanceExt::MetalSurface:
-                case InstanceExt::WaylandSurface:
-                case InstanceExt::Win32Surface:
-                case InstanceExt::XcbSurface:
-                case InstanceExt::XlibSurface:
-                    hasDependencies = HasDep(InstanceExt::Surface);
-                    break;
-
-                case InstanceExt::EnumCount:
-                    UNREACHABLE();
-            }
-
-            trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
-            visitedSet.set(ext, true);
+            case InstanceExt::EnumCount:
+                UNREACHABLE();
         }
 
-        return trimmedSet;
+        trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+        visitedSet.set(ext, true);
     }
 
-    void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
-        for (const InstanceExtInfo& info : sInstanceExtInfos) {
-            if (info.versionPromoted <= version) {
-                extensions->set(info.index, true);
-            }
+    return trimmedSet;
+}
+
+void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version) {
+    for (const InstanceExtInfo& info : sInstanceExtInfos) {
+        if (info.versionPromoted <= version) {
+            extensions->set(info.index, true);
         }
     }
+}
 
-    static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
-    static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
-        //
-        {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
-        {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
-        {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
-         VulkanVersion_1_1},
-        {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
-         VulkanVersion_1_1},
-        {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
-        {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
-         VulkanVersion_1_1},
-        {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
-         VulkanVersion_1_1},
-        {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
-        {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
-        {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
-        {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
+static constexpr size_t kDeviceExtCount = static_cast<size_t>(DeviceExt::EnumCount);
+static constexpr std::array<DeviceExtInfo, kDeviceExtCount> sDeviceExtInfos{{
+    //
+    {DeviceExt::BindMemory2, "VK_KHR_bind_memory2", VulkanVersion_1_1},
+    {DeviceExt::Maintenance1, "VK_KHR_maintenance1", VulkanVersion_1_1},
+    {DeviceExt::StorageBufferStorageClass, "VK_KHR_storage_buffer_storage_class",
+     VulkanVersion_1_1},
+    {DeviceExt::GetPhysicalDeviceProperties2, "VK_KHR_get_physical_device_properties2",
+     VulkanVersion_1_1},
+    {DeviceExt::GetMemoryRequirements2, "VK_KHR_get_memory_requirements2", VulkanVersion_1_1},
+    {DeviceExt::ExternalMemoryCapabilities, "VK_KHR_external_memory_capabilities",
+     VulkanVersion_1_1},
+    {DeviceExt::ExternalSemaphoreCapabilities, "VK_KHR_external_semaphore_capabilities",
+     VulkanVersion_1_1},
+    {DeviceExt::ExternalMemory, "VK_KHR_external_memory", VulkanVersion_1_1},
+    {DeviceExt::ExternalSemaphore, "VK_KHR_external_semaphore", VulkanVersion_1_1},
+    {DeviceExt::_16BitStorage, "VK_KHR_16bit_storage", VulkanVersion_1_1},
+    {DeviceExt::SamplerYCbCrConversion, "VK_KHR_sampler_ycbcr_conversion", VulkanVersion_1_1},
 
-        {DeviceExt::DriverProperties, "VK_KHR_driver_properties", VulkanVersion_1_2},
-        {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
-        {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
+    {DeviceExt::DriverProperties, "VK_KHR_driver_properties", VulkanVersion_1_2},
+    {DeviceExt::ImageFormatList, "VK_KHR_image_format_list", VulkanVersion_1_2},
+    {DeviceExt::ShaderFloat16Int8, "VK_KHR_shader_float16_int8", VulkanVersion_1_2},
 
-        {DeviceExt::ZeroInitializeWorkgroupMemory, "VK_KHR_zero_initialize_workgroup_memory",
-         VulkanVersion_1_3},
+    {DeviceExt::ZeroInitializeWorkgroupMemory, "VK_KHR_zero_initialize_workgroup_memory",
+     VulkanVersion_1_3},
 
-        {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
-        {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
-        {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
-        {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
-        {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
+    {DeviceExt::ExternalMemoryFD, "VK_KHR_external_memory_fd", NeverPromoted},
+    {DeviceExt::ExternalMemoryDmaBuf, "VK_EXT_external_memory_dma_buf", NeverPromoted},
+    {DeviceExt::ExternalMemoryZirconHandle, "VK_FUCHSIA_external_memory", NeverPromoted},
+    {DeviceExt::ExternalSemaphoreFD, "VK_KHR_external_semaphore_fd", NeverPromoted},
+    {DeviceExt::ExternalSemaphoreZirconHandle, "VK_FUCHSIA_external_semaphore", NeverPromoted},
 
-        {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
-        {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
-        {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
-        //
-    }};
+    {DeviceExt::ImageDrmFormatModifier, "VK_EXT_image_drm_format_modifier", NeverPromoted},
+    {DeviceExt::Swapchain, "VK_KHR_swapchain", NeverPromoted},
+    {DeviceExt::SubgroupSizeControl, "VK_EXT_subgroup_size_control", NeverPromoted},
+    //
+}};
 
-    const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
-        uint32_t index = static_cast<uint32_t>(ext);
-        ASSERT(index < sDeviceExtInfos.size());
-        ASSERT(sDeviceExtInfos[index].index == ext);
-        return sDeviceExtInfos[index];
+const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext) {
+    uint32_t index = static_cast<uint32_t>(ext);
+    ASSERT(index < sDeviceExtInfos.size());
+    ASSERT(sDeviceExtInfos[index].index == ext);
+    return sDeviceExtInfos[index];
+}
+
+std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
+    std::unordered_map<std::string, DeviceExt> result;
+    for (const DeviceExtInfo& info : sDeviceExtInfos) {
+        result[info.name] = info.index;
     }
+    return result;
+}
 
-    std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap() {
-        std::unordered_map<std::string, DeviceExt> result;
-        for (const DeviceExtInfo& info : sDeviceExtInfos) {
-            result[info.name] = info.index;
-        }
-        return result;
-    }
+DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+                                const InstanceExtSet& instanceExts,
+                                uint32_t icdVersion) {
+    // This is very similar to EnsureDependencies for instanceExtSet. See comment there for
+    // an explanation of what happens.
+    DeviceExtSet visitedSet;
+    DeviceExtSet trimmedSet;
 
-    DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
-                                    const InstanceExtSet& instanceExts,
-                                    uint32_t icdVersion) {
-        // This is very similar to EnsureDependencies for instanceExtSet. See comment there for
-        // an explanation of what happens.
-        DeviceExtSet visitedSet;
-        DeviceExtSet trimmedSet;
+    auto HasDep = [&](DeviceExt ext) -> bool {
+        ASSERT(visitedSet[ext]);
+        return trimmedSet[ext];
+    };
 
-        auto HasDep = [&](DeviceExt ext) -> bool {
-            ASSERT(visitedSet[ext]);
-            return trimmedSet[ext];
-        };
+    for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
+        DeviceExt ext = static_cast<DeviceExt>(i);
 
-        for (uint32_t i = 0; i < sDeviceExtInfos.size(); i++) {
-            DeviceExt ext = static_cast<DeviceExt>(i);
+        bool hasDependencies = false;
+        switch (ext) {
+            // Happy extensions don't need anybody else!
+            case DeviceExt::BindMemory2:
+            case DeviceExt::GetMemoryRequirements2:
+            case DeviceExt::Maintenance1:
+            case DeviceExt::ImageFormatList:
+            case DeviceExt::StorageBufferStorageClass:
+                hasDependencies = true;
+                break;
 
-            bool hasDependencies = false;
-            switch (ext) {
-                // Happy extensions don't need anybody else!
-                case DeviceExt::BindMemory2:
-                case DeviceExt::GetMemoryRequirements2:
-                case DeviceExt::Maintenance1:
-                case DeviceExt::ImageFormatList:
-                case DeviceExt::StorageBufferStorageClass:
-                    hasDependencies = true;
-                    break;
+            // Physical device extensions technically don't require the instance to support
+            // them but VulkanFunctions only loads the function pointers if the instance
+            // advertises the extension. So if we didn't have this check, we'd risk a calling
+            // a nullptr.
+            case DeviceExt::GetPhysicalDeviceProperties2:
+                hasDependencies = instanceExts[InstanceExt::GetPhysicalDeviceProperties2];
+                break;
+            case DeviceExt::ExternalMemoryCapabilities:
+                hasDependencies = instanceExts[InstanceExt::ExternalMemoryCapabilities] &&
+                                  HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                break;
+            case DeviceExt::ExternalSemaphoreCapabilities:
+                hasDependencies = instanceExts[InstanceExt::ExternalSemaphoreCapabilities] &&
+                                  HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                break;
 
-                // Physical device extensions technically don't require the instance to support
-                // them but VulkanFunctions only loads the function pointers if the instance
-                // advertises the extension. So if we didn't have this check, we'd risk a calling
-                // a nullptr.
-                case DeviceExt::GetPhysicalDeviceProperties2:
-                    hasDependencies = instanceExts[InstanceExt::GetPhysicalDeviceProperties2];
-                    break;
-                case DeviceExt::ExternalMemoryCapabilities:
-                    hasDependencies = instanceExts[InstanceExt::ExternalMemoryCapabilities] &&
-                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2);
-                    break;
-                case DeviceExt::ExternalSemaphoreCapabilities:
-                    hasDependencies = instanceExts[InstanceExt::ExternalSemaphoreCapabilities] &&
-                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2);
-                    break;
+            case DeviceExt::ImageDrmFormatModifier:
+                hasDependencies = HasDep(DeviceExt::BindMemory2) &&
+                                  HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+                                  HasDep(DeviceExt::ImageFormatList) &&
+                                  HasDep(DeviceExt::SamplerYCbCrConversion);
+                break;
 
-                case DeviceExt::ImageDrmFormatModifier:
-                    hasDependencies = HasDep(DeviceExt::BindMemory2) &&
-                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
-                                      HasDep(DeviceExt::ImageFormatList) &&
-                                      HasDep(DeviceExt::SamplerYCbCrConversion);
-                    break;
+            case DeviceExt::Swapchain:
+                hasDependencies = instanceExts[InstanceExt::Surface];
+                break;
 
-                case DeviceExt::Swapchain:
-                    hasDependencies = instanceExts[InstanceExt::Surface];
-                    break;
+            case DeviceExt::SamplerYCbCrConversion:
+                hasDependencies = HasDep(DeviceExt::Maintenance1) &&
+                                  HasDep(DeviceExt::BindMemory2) &&
+                                  HasDep(DeviceExt::GetMemoryRequirements2) &&
+                                  HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                break;
 
-                case DeviceExt::SamplerYCbCrConversion:
-                    hasDependencies = HasDep(DeviceExt::Maintenance1) &&
-                                      HasDep(DeviceExt::BindMemory2) &&
-                                      HasDep(DeviceExt::GetMemoryRequirements2) &&
-                                      HasDep(DeviceExt::GetPhysicalDeviceProperties2);
-                    break;
+            case DeviceExt::DriverProperties:
+            case DeviceExt::ShaderFloat16Int8:
+                hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                break;
 
-                case DeviceExt::DriverProperties:
-                case DeviceExt::ShaderFloat16Int8:
-                    hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
-                    break;
+            case DeviceExt::ExternalMemory:
+                hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
+                break;
 
-                case DeviceExt::ExternalMemory:
-                    hasDependencies = HasDep(DeviceExt::ExternalMemoryCapabilities);
-                    break;
+            case DeviceExt::ExternalSemaphore:
+                hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
+                break;
 
-                case DeviceExt::ExternalSemaphore:
-                    hasDependencies = HasDep(DeviceExt::ExternalSemaphoreCapabilities);
-                    break;
+            case DeviceExt::ExternalMemoryFD:
+            case DeviceExt::ExternalMemoryZirconHandle:
+                hasDependencies = HasDep(DeviceExt::ExternalMemory);
+                break;
 
-                case DeviceExt::ExternalMemoryFD:
-                case DeviceExt::ExternalMemoryZirconHandle:
-                    hasDependencies = HasDep(DeviceExt::ExternalMemory);
-                    break;
+            case DeviceExt::ExternalMemoryDmaBuf:
+                hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
+                break;
 
-                case DeviceExt::ExternalMemoryDmaBuf:
-                    hasDependencies = HasDep(DeviceExt::ExternalMemoryFD);
-                    break;
+            case DeviceExt::ExternalSemaphoreFD:
+            case DeviceExt::ExternalSemaphoreZirconHandle:
+                hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
+                break;
 
-                case DeviceExt::ExternalSemaphoreFD:
-                case DeviceExt::ExternalSemaphoreZirconHandle:
-                    hasDependencies = HasDep(DeviceExt::ExternalSemaphore);
-                    break;
+            case DeviceExt::_16BitStorage:
+                hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
+                                  HasDep(DeviceExt::StorageBufferStorageClass);
+                break;
 
-                case DeviceExt::_16BitStorage:
-                    hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2) &&
-                                      HasDep(DeviceExt::StorageBufferStorageClass);
-                    break;
+            case DeviceExt::SubgroupSizeControl:
+                // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
+                // don't need to check for it as it also requires Vulkan 1.1 in which
+                // VK_KHR_get_physical_device_properties2 was promoted.
+                hasDependencies = icdVersion >= VulkanVersion_1_1;
+                break;
 
-                case DeviceExt::SubgroupSizeControl:
-                    // Using the extension requires DeviceExt::GetPhysicalDeviceProperties2, but we
-                    // don't need to check for it as it also requires Vulkan 1.1 in which
-                    // VK_KHR_get_physical_device_properties2 was promoted.
-                    hasDependencies = icdVersion >= VulkanVersion_1_1;
-                    break;
+            case DeviceExt::ZeroInitializeWorkgroupMemory:
+                hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
+                break;
 
-                case DeviceExt::ZeroInitializeWorkgroupMemory:
-                    hasDependencies = HasDep(DeviceExt::GetPhysicalDeviceProperties2);
-                    break;
-
-                case DeviceExt::EnumCount:
-                    UNREACHABLE();
-            }
-
-            trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
-            visitedSet.set(ext, true);
+            case DeviceExt::EnumCount:
+                UNREACHABLE();
         }
 
-        return trimmedSet;
+        trimmedSet.set(ext, hasDependencies && advertisedExts[ext]);
+        visitedSet.set(ext, true);
     }
 
-    void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
-        for (const DeviceExtInfo& info : sDeviceExtInfos) {
-            if (info.versionPromoted <= version) {
-                extensions->set(info.index, true);
-            }
+    return trimmedSet;
+}
+
+void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version) {
+    for (const DeviceExtInfo& info : sDeviceExtInfos) {
+        if (info.versionPromoted <= version) {
+            extensions->set(info.index, true);
         }
     }
+}
 
-    // A static array for VulkanLayerInfo that can be indexed with VulkanLayers.
-    // GetVulkanLayerInfo checks that "index" matches the index used to access this array so an
-    // assert will fire if it isn't in the correct order.
-    static constexpr size_t kVulkanLayerCount = static_cast<size_t>(VulkanLayer::EnumCount);
-    static constexpr std::array<VulkanLayerInfo, kVulkanLayerCount> sVulkanLayerInfos{{
-        //
-        {VulkanLayer::Validation, "VK_LAYER_KHRONOS_validation"},
-        {VulkanLayer::LunargVkTrace, "VK_LAYER_LUNARG_vktrace"},
-        {VulkanLayer::RenderDocCapture, "VK_LAYER_RENDERDOC_Capture"},
-        {VulkanLayer::FuchsiaImagePipeSwapchain, "VK_LAYER_FUCHSIA_imagepipe_swapchain"},
-        //
-    }};
+// A static array for VulkanLayerInfo that can be indexed with VulkanLayers.
+// GetVulkanLayerInfo checks that "index" matches the index used to access this array so an
+// assert will fire if it isn't in the correct order.
+static constexpr size_t kVulkanLayerCount = static_cast<size_t>(VulkanLayer::EnumCount);
+static constexpr std::array<VulkanLayerInfo, kVulkanLayerCount> sVulkanLayerInfos{{
+    //
+    {VulkanLayer::Validation, "VK_LAYER_KHRONOS_validation"},
+    {VulkanLayer::LunargVkTrace, "VK_LAYER_LUNARG_vktrace"},
+    {VulkanLayer::RenderDocCapture, "VK_LAYER_RENDERDOC_Capture"},
+    {VulkanLayer::FuchsiaImagePipeSwapchain, "VK_LAYER_FUCHSIA_imagepipe_swapchain"},
+    //
+}};
 
-    const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer) {
-        uint32_t index = static_cast<uint32_t>(layer);
-        ASSERT(index < sVulkanLayerInfos.size());
-        ASSERT(sVulkanLayerInfos[index].layer == layer);
-        return sVulkanLayerInfos[index];
+const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer) {
+    uint32_t index = static_cast<uint32_t>(layer);
+    ASSERT(index < sVulkanLayerInfos.size());
+    ASSERT(sVulkanLayerInfos[index].layer == layer);
+    return sVulkanLayerInfos[index];
+}
+
+std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap() {
+    std::unordered_map<std::string, VulkanLayer> result;
+    for (const VulkanLayerInfo& info : sVulkanLayerInfos) {
+        result[info.name] = info.layer;
     }
-
-    std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap() {
-        std::unordered_map<std::string, VulkanLayer> result;
-        for (const VulkanLayerInfo& info : sVulkanLayerInfos) {
-            result[info.name] = info.layer;
-        }
-        return result;
-    }
+    return result;
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanExtensions.h b/src/dawn/native/vulkan/VulkanExtensions.h
index 117c722..78189a1 100644
--- a/src/dawn/native/vulkan/VulkanExtensions.h
+++ b/src/dawn/native/vulkan/VulkanExtensions.h
@@ -22,145 +22,145 @@
 
 namespace dawn::native::vulkan {
 
-    // The list of known instance extensions. They must be in dependency order (this is checked
-    // inside EnsureDependencies)
-    enum class InstanceExt {
-        // Promoted to 1.1
-        GetPhysicalDeviceProperties2,
-        ExternalMemoryCapabilities,
-        ExternalSemaphoreCapabilities,
+// The list of known instance extensions. They must be in dependency order (this is checked
+// inside EnsureDependencies)
+enum class InstanceExt {
+    // Promoted to 1.1
+    GetPhysicalDeviceProperties2,
+    ExternalMemoryCapabilities,
+    ExternalSemaphoreCapabilities,
 
-        // Surface extensions
-        Surface,
-        FuchsiaImagePipeSurface,
-        MetalSurface,
-        WaylandSurface,
-        Win32Surface,
-        XcbSurface,
-        XlibSurface,
-        AndroidSurface,
+    // Surface extensions
+    Surface,
+    FuchsiaImagePipeSurface,
+    MetalSurface,
+    WaylandSurface,
+    Win32Surface,
+    XcbSurface,
+    XlibSurface,
+    AndroidSurface,
 
-        // Others
-        DebugUtils,
-        ValidationFeatures,
+    // Others
+    DebugUtils,
+    ValidationFeatures,
 
-        EnumCount,
-    };
+    EnumCount,
+};
 
-    // A bitset that is indexed with InstanceExt.
-    using InstanceExtSet = ityp::bitset<InstanceExt, static_cast<uint32_t>(InstanceExt::EnumCount)>;
+// A bitset that is indexed with InstanceExt.
+using InstanceExtSet = ityp::bitset<InstanceExt, static_cast<uint32_t>(InstanceExt::EnumCount)>;
 
-    // Information about a known instance extension.
-    struct InstanceExtInfo {
-        InstanceExt index;
-        const char* name;
-        // The version in which this extension was promoted as built with VK_MAKE_VERSION,
-        // or NeverPromoted if it was never promoted.
-        uint32_t versionPromoted;
-    };
+// Information about a known instance extension.
+struct InstanceExtInfo {
+    InstanceExt index;
+    const char* name;
+    // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+    // or NeverPromoted if it was never promoted.
+    uint32_t versionPromoted;
+};
 
-    // Returns the information about a known InstanceExt
-    const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
-    // Returns a map that maps a Vulkan extension name to its InstanceExt.
-    std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
+// Returns the information about a known InstanceExt
+const InstanceExtInfo& GetInstanceExtInfo(InstanceExt ext);
+// Returns a map that maps a Vulkan extension name to its InstanceExt.
+std::unordered_map<std::string, InstanceExt> CreateInstanceExtNameMap();
 
-    // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
-    void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
-    // From a set of extensions advertised as supported by the instance (or promoted), remove all
-    // extensions that don't have all their transitive dependencies in advertisedExts.
-    InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
+// Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+void MarkPromotedExtensions(InstanceExtSet* extensions, uint32_t version);
+// From a set of extensions advertised as supported by the instance (or promoted), remove all
+// extensions that don't have all their transitive dependencies in advertisedExts.
+InstanceExtSet EnsureDependencies(const InstanceExtSet& advertisedExts);
 
-    // The list of known device extensions. They must be in dependency order (this is checked
-    // inside EnsureDependencies)
-    enum class DeviceExt {
-        // Promoted to 1.1
-        BindMemory2,
-        Maintenance1,
-        StorageBufferStorageClass,
-        GetPhysicalDeviceProperties2,
-        GetMemoryRequirements2,
-        ExternalMemoryCapabilities,
-        ExternalSemaphoreCapabilities,
-        ExternalMemory,
-        ExternalSemaphore,
-        _16BitStorage,
-        SamplerYCbCrConversion,
+// The list of known device extensions. They must be in dependency order (this is checked
+// inside EnsureDependencies)
+enum class DeviceExt {
+    // Promoted to 1.1
+    BindMemory2,
+    Maintenance1,
+    StorageBufferStorageClass,
+    GetPhysicalDeviceProperties2,
+    GetMemoryRequirements2,
+    ExternalMemoryCapabilities,
+    ExternalSemaphoreCapabilities,
+    ExternalMemory,
+    ExternalSemaphore,
+    _16BitStorage,
+    SamplerYCbCrConversion,
 
-        // Promoted to 1.2
-        DriverProperties,
-        ImageFormatList,
-        ShaderFloat16Int8,
+    // Promoted to 1.2
+    DriverProperties,
+    ImageFormatList,
+    ShaderFloat16Int8,
 
-        // Promoted to 1.3
-        ZeroInitializeWorkgroupMemory,
+    // Promoted to 1.3
+    ZeroInitializeWorkgroupMemory,
 
-        // External* extensions
-        ExternalMemoryFD,
-        ExternalMemoryDmaBuf,
-        ExternalMemoryZirconHandle,
-        ExternalSemaphoreFD,
-        ExternalSemaphoreZirconHandle,
+    // External* extensions
+    ExternalMemoryFD,
+    ExternalMemoryDmaBuf,
+    ExternalMemoryZirconHandle,
+    ExternalSemaphoreFD,
+    ExternalSemaphoreZirconHandle,
 
-        // Others
-        ImageDrmFormatModifier,
-        Swapchain,
-        SubgroupSizeControl,
+    // Others
+    ImageDrmFormatModifier,
+    Swapchain,
+    SubgroupSizeControl,
 
-        EnumCount,
-    };
+    EnumCount,
+};
 
-    // A bitset that is indexed with DeviceExt.
-    using DeviceExtSet = ityp::bitset<DeviceExt, static_cast<uint32_t>(DeviceExt::EnumCount)>;
+// A bitset that is indexed with DeviceExt.
+using DeviceExtSet = ityp::bitset<DeviceExt, static_cast<uint32_t>(DeviceExt::EnumCount)>;
 
-    // Information about a known device extension.
-    struct DeviceExtInfo {
-        DeviceExt index;
-        const char* name;
-        // The version in which this extension was promoted as built with VK_MAKE_VERSION,
-        // or NeverPromoted if it was never promoted.
-        uint32_t versionPromoted;
-    };
+// Information about a known device extension.
+struct DeviceExtInfo {
+    DeviceExt index;
+    const char* name;
+    // The version in which this extension was promoted as built with VK_MAKE_VERSION,
+    // or NeverPromoted if it was never promoted.
+    uint32_t versionPromoted;
+};
 
-    // Returns the information about a known DeviceExt
-    const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
-    // Returns a map that maps a Vulkan extension name to its DeviceExt.
-    std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
+// Returns the information about a known DeviceExt
+const DeviceExtInfo& GetDeviceExtInfo(DeviceExt ext);
+// Returns a map that maps a Vulkan extension name to its DeviceExt.
+std::unordered_map<std::string, DeviceExt> CreateDeviceExtNameMap();
 
-    // Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
-    void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
-    // From a set of extensions advertised as supported by the device (or promoted), remove all
-    // extensions that don't have all their transitive dependencies in advertisedExts or in
-    // instanceExts.
-    DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
-                                    const InstanceExtSet& instanceExts,
-                                    uint32_t icdVersion);
+// Sets entries in `extensions` to true if that entry was promoted in Vulkan version `version`
+void MarkPromotedExtensions(DeviceExtSet* extensions, uint32_t version);
+// From a set of extensions advertised as supported by the device (or promoted), remove all
+// extensions that don't have all their transitive dependencies in advertisedExts or in
+// instanceExts.
+DeviceExtSet EnsureDependencies(const DeviceExtSet& advertisedExts,
+                                const InstanceExtSet& instanceExts,
+                                uint32_t icdVersion);
 
-    // The list of all known Vulkan layers.
-    enum class VulkanLayer {
-        Validation,
-        LunargVkTrace,
-        RenderDocCapture,
+// The list of all known Vulkan layers.
+enum class VulkanLayer {
+    Validation,
+    LunargVkTrace,
+    RenderDocCapture,
 
-        // Fuchsia implements the swapchain through a layer (VK_LAYER_FUCHSIA_image_pipe_swapchain),
-        // which adds an instance extensions (VK_FUCHSIA_image_surface) to all ICDs.
-        FuchsiaImagePipeSwapchain,
+    // Fuchsia implements the swapchain through a layer (VK_LAYER_FUCHSIA_image_pipe_swapchain),
+    // which adds an instance extensions (VK_FUCHSIA_image_surface) to all ICDs.
+    FuchsiaImagePipeSwapchain,
 
-        EnumCount,
-    };
+    EnumCount,
+};
 
-    // A bitset that is indexed with VulkanLayer.
-    using VulkanLayerSet = ityp::bitset<VulkanLayer, static_cast<uint32_t>(VulkanLayer::EnumCount)>;
+// A bitset that is indexed with VulkanLayer.
+using VulkanLayerSet = ityp::bitset<VulkanLayer, static_cast<uint32_t>(VulkanLayer::EnumCount)>;
 
-    // Information about a known layer
-    struct VulkanLayerInfo {
-        VulkanLayer layer;
-        const char* name;
-    };
+// Information about a known layer
+struct VulkanLayerInfo {
+    VulkanLayer layer;
+    const char* name;
+};
 
-    // Returns the information about a known VulkanLayer
-    const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer);
-    // Returns a map that maps a Vulkan layer name to its VulkanLayer.
-    std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap();
+// Returns the information about a known VulkanLayer
+const VulkanLayerInfo& GetVulkanLayerInfo(VulkanLayer layer);
+// Returns a map that maps a Vulkan layer name to its VulkanLayer.
+std::unordered_map<std::string, VulkanLayer> CreateVulkanLayerNameMap();
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/VulkanFunctions.cpp b/src/dawn/native/vulkan/VulkanFunctions.cpp
index 0bdfcb9..630ff13 100644
--- a/src/dawn/native/vulkan/VulkanFunctions.cpp
+++ b/src/dawn/native/vulkan/VulkanFunctions.cpp
@@ -29,22 +29,22 @@
         }                                                                                  \
     } while (0)
 
-    MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
-        if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
-            return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
-        }
-
-        GET_GLOBAL_PROC(CreateInstance);
-        GET_GLOBAL_PROC(EnumerateInstanceExtensionProperties);
-        GET_GLOBAL_PROC(EnumerateInstanceLayerProperties);
-
-        // Is not available in Vulkan 1.0, so allow nullptr
-        EnumerateInstanceVersion = reinterpret_cast<decltype(EnumerateInstanceVersion)>(
-            GetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
-
-        return {};
+MaybeError VulkanFunctions::LoadGlobalProcs(const DynamicLib& vulkanLib) {
+    if (!vulkanLib.GetProc(&GetInstanceProcAddr, "vkGetInstanceProcAddr")) {
+        return DAWN_INTERNAL_ERROR("Couldn't get vkGetInstanceProcAddr");
     }
 
+    GET_GLOBAL_PROC(CreateInstance);
+    GET_GLOBAL_PROC(EnumerateInstanceExtensionProperties);
+    GET_GLOBAL_PROC(EnumerateInstanceLayerProperties);
+
+    // Is not available in Vulkan 1.0, so allow nullptr
+    EnumerateInstanceVersion = reinterpret_cast<decltype(EnumerateInstanceVersion)>(
+        GetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
+
+    return {};
+}
+
 #define GET_INSTANCE_PROC_BASE(name, procName)                                                  \
     do {                                                                                        \
         name = reinterpret_cast<decltype(name)>(GetInstanceProcAddr(instance, "vk" #procName)); \
@@ -56,117 +56,117 @@
 #define GET_INSTANCE_PROC(name) GET_INSTANCE_PROC_BASE(name, name)
 #define GET_INSTANCE_PROC_VENDOR(name, vendor) GET_INSTANCE_PROC_BASE(name, name##vendor)
 
-    MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
-                                                  const VulkanGlobalInfo& globalInfo) {
-        // Load this proc first so that we can destroy the instance even if some other
-        // GET_INSTANCE_PROC fails
-        GET_INSTANCE_PROC(DestroyInstance);
+MaybeError VulkanFunctions::LoadInstanceProcs(VkInstance instance,
+                                              const VulkanGlobalInfo& globalInfo) {
+    // Load this proc first so that we can destroy the instance even if some other
+    // GET_INSTANCE_PROC fails
+    GET_INSTANCE_PROC(DestroyInstance);
 
-        GET_INSTANCE_PROC(CreateDevice);
-        GET_INSTANCE_PROC(DestroyDevice);
-        GET_INSTANCE_PROC(EnumerateDeviceExtensionProperties);
-        GET_INSTANCE_PROC(EnumerateDeviceLayerProperties);
-        GET_INSTANCE_PROC(EnumeratePhysicalDevices);
-        GET_INSTANCE_PROC(GetDeviceProcAddr);
-        GET_INSTANCE_PROC(GetPhysicalDeviceFeatures);
-        GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties);
-        GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties);
-        GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties);
-        GET_INSTANCE_PROC(GetPhysicalDeviceProperties);
-        GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
-        GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
+    GET_INSTANCE_PROC(CreateDevice);
+    GET_INSTANCE_PROC(DestroyDevice);
+    GET_INSTANCE_PROC(EnumerateDeviceExtensionProperties);
+    GET_INSTANCE_PROC(EnumerateDeviceLayerProperties);
+    GET_INSTANCE_PROC(EnumeratePhysicalDevices);
+    GET_INSTANCE_PROC(GetDeviceProcAddr);
+    GET_INSTANCE_PROC(GetPhysicalDeviceFeatures);
+    GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties);
+    GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties);
+    GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties);
+    GET_INSTANCE_PROC(GetPhysicalDeviceProperties);
+    GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties);
+    GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties);
 
-        if (globalInfo.HasExt(InstanceExt::DebugUtils)) {
-            GET_INSTANCE_PROC(CmdBeginDebugUtilsLabelEXT);
-            GET_INSTANCE_PROC(CmdEndDebugUtilsLabelEXT);
-            GET_INSTANCE_PROC(CmdInsertDebugUtilsLabelEXT);
-            GET_INSTANCE_PROC(CreateDebugUtilsMessengerEXT);
-            GET_INSTANCE_PROC(DestroyDebugUtilsMessengerEXT);
-            GET_INSTANCE_PROC(QueueBeginDebugUtilsLabelEXT);
-            GET_INSTANCE_PROC(QueueEndDebugUtilsLabelEXT);
-            GET_INSTANCE_PROC(QueueInsertDebugUtilsLabelEXT);
-            GET_INSTANCE_PROC(SetDebugUtilsObjectNameEXT);
-            GET_INSTANCE_PROC(SetDebugUtilsObjectTagEXT);
-            GET_INSTANCE_PROC(SubmitDebugUtilsMessageEXT);
-        }
+    if (globalInfo.HasExt(InstanceExt::DebugUtils)) {
+        GET_INSTANCE_PROC(CmdBeginDebugUtilsLabelEXT);
+        GET_INSTANCE_PROC(CmdEndDebugUtilsLabelEXT);
+        GET_INSTANCE_PROC(CmdInsertDebugUtilsLabelEXT);
+        GET_INSTANCE_PROC(CreateDebugUtilsMessengerEXT);
+        GET_INSTANCE_PROC(DestroyDebugUtilsMessengerEXT);
+        GET_INSTANCE_PROC(QueueBeginDebugUtilsLabelEXT);
+        GET_INSTANCE_PROC(QueueEndDebugUtilsLabelEXT);
+        GET_INSTANCE_PROC(QueueInsertDebugUtilsLabelEXT);
+        GET_INSTANCE_PROC(SetDebugUtilsObjectNameEXT);
+        GET_INSTANCE_PROC(SetDebugUtilsObjectTagEXT);
+        GET_INSTANCE_PROC(SubmitDebugUtilsMessageEXT);
+    }
 
-        // Vulkan 1.1 is not required to report promoted extensions from 1.0 and is not required to
-        // support the vendor entrypoint in GetProcAddress.
-        if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
-            GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
-        } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
-        }
+    // Vulkan 1.1 is not required to report promoted extensions from 1.0 and is not required to
+    // support the vendor entrypoint in GetProcAddress.
+    if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+        GET_INSTANCE_PROC(GetPhysicalDeviceExternalBufferProperties);
+    } else if (globalInfo.HasExt(InstanceExt::ExternalMemoryCapabilities)) {
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalBufferProperties, KHR);
+    }
 
-        if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
-            GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
-        } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
-        }
+    if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+        GET_INSTANCE_PROC(GetPhysicalDeviceExternalSemaphoreProperties);
+    } else if (globalInfo.HasExt(InstanceExt::ExternalSemaphoreCapabilities)) {
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceExternalSemaphoreProperties, KHR);
+    }
 
-        if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
-            GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
-            GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
-            GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
-            GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
-            GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
-            GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
-            GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
-        } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
-            GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
-        }
+    if (globalInfo.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+        GET_INSTANCE_PROC(GetPhysicalDeviceFeatures2);
+        GET_INSTANCE_PROC(GetPhysicalDeviceProperties2);
+        GET_INSTANCE_PROC(GetPhysicalDeviceFormatProperties2);
+        GET_INSTANCE_PROC(GetPhysicalDeviceImageFormatProperties2);
+        GET_INSTANCE_PROC(GetPhysicalDeviceQueueFamilyProperties2);
+        GET_INSTANCE_PROC(GetPhysicalDeviceMemoryProperties2);
+        GET_INSTANCE_PROC(GetPhysicalDeviceSparseImageFormatProperties2);
+    } else if (globalInfo.HasExt(InstanceExt::GetPhysicalDeviceProperties2)) {
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFeatures2, KHR);
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceProperties2, KHR);
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceFormatProperties2, KHR);
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceImageFormatProperties2, KHR);
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceQueueFamilyProperties2, KHR);
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceMemoryProperties2, KHR);
+        GET_INSTANCE_PROC_VENDOR(GetPhysicalDeviceSparseImageFormatProperties2, KHR);
+    }
 
-        if (globalInfo.HasExt(InstanceExt::Surface)) {
-            GET_INSTANCE_PROC(DestroySurfaceKHR);
-            GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
-            GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
-            GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
-            GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
-        }
+    if (globalInfo.HasExt(InstanceExt::Surface)) {
+        GET_INSTANCE_PROC(DestroySurfaceKHR);
+        GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceSupportKHR);
+        GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
+        GET_INSTANCE_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
+        GET_INSTANCE_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
+    }
 
 #if defined(VK_USE_PLATFORM_FUCHSIA)
-        if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
-            GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
-        }
+    if (globalInfo.HasExt(InstanceExt::FuchsiaImagePipeSurface)) {
+        GET_INSTANCE_PROC(CreateImagePipeSurfaceFUCHSIA);
+    }
 #endif  // defined(VK_USE_PLATFORM_FUCHSIA)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-        if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
-            GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
-        }
+    if (globalInfo.HasExt(InstanceExt::MetalSurface)) {
+        GET_INSTANCE_PROC(CreateMetalSurfaceEXT);
+    }
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-        if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
-            GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
-            GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
-        }
+    if (globalInfo.HasExt(InstanceExt::Win32Surface)) {
+        GET_INSTANCE_PROC(CreateWin32SurfaceKHR);
+        GET_INSTANCE_PROC(GetPhysicalDeviceWin32PresentationSupportKHR);
+    }
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 #if defined(DAWN_PLATFORM_ANDROID)
-        if (globalInfo.HasExt(InstanceExt::AndroidSurface)) {
-            GET_INSTANCE_PROC(CreateAndroidSurfaceKHR);
-        }
+    if (globalInfo.HasExt(InstanceExt::AndroidSurface)) {
+        GET_INSTANCE_PROC(CreateAndroidSurfaceKHR);
+    }
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
 #if defined(DAWN_USE_X11)
-        if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
-            GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
-            GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
-        }
-        if (globalInfo.HasExt(InstanceExt::XcbSurface)) {
-            GET_INSTANCE_PROC(CreateXcbSurfaceKHR);
-            GET_INSTANCE_PROC(GetPhysicalDeviceXcbPresentationSupportKHR);
-        }
-#endif  // defined(DAWN_USE_X11)
-        return {};
+    if (globalInfo.HasExt(InstanceExt::XlibSurface)) {
+        GET_INSTANCE_PROC(CreateXlibSurfaceKHR);
+        GET_INSTANCE_PROC(GetPhysicalDeviceXlibPresentationSupportKHR);
     }
+    if (globalInfo.HasExt(InstanceExt::XcbSurface)) {
+        GET_INSTANCE_PROC(CreateXcbSurfaceKHR);
+        GET_INSTANCE_PROC(GetPhysicalDeviceXcbPresentationSupportKHR);
+    }
+#endif  // defined(DAWN_USE_X11)
+    return {};
+}
 
 #define GET_DEVICE_PROC(name)                                                           \
     do {                                                                                \
@@ -176,165 +176,164 @@
         }                                                                               \
     } while (0)
 
-    MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device,
-                                                const VulkanDeviceInfo& deviceInfo) {
-        GET_DEVICE_PROC(AllocateCommandBuffers);
-        GET_DEVICE_PROC(AllocateDescriptorSets);
-        GET_DEVICE_PROC(AllocateMemory);
-        GET_DEVICE_PROC(BeginCommandBuffer);
-        GET_DEVICE_PROC(BindBufferMemory);
-        GET_DEVICE_PROC(BindImageMemory);
-        GET_DEVICE_PROC(CmdBeginQuery);
-        GET_DEVICE_PROC(CmdBeginRenderPass);
-        GET_DEVICE_PROC(CmdBindDescriptorSets);
-        GET_DEVICE_PROC(CmdBindIndexBuffer);
-        GET_DEVICE_PROC(CmdBindPipeline);
-        GET_DEVICE_PROC(CmdBindVertexBuffers);
-        GET_DEVICE_PROC(CmdBlitImage);
-        GET_DEVICE_PROC(CmdClearAttachments);
-        GET_DEVICE_PROC(CmdClearColorImage);
-        GET_DEVICE_PROC(CmdClearDepthStencilImage);
-        GET_DEVICE_PROC(CmdCopyBuffer);
-        GET_DEVICE_PROC(CmdCopyBufferToImage);
-        GET_DEVICE_PROC(CmdCopyImage);
-        GET_DEVICE_PROC(CmdCopyImageToBuffer);
-        GET_DEVICE_PROC(CmdCopyQueryPoolResults);
-        GET_DEVICE_PROC(CmdDispatch);
-        GET_DEVICE_PROC(CmdDispatchIndirect);
-        GET_DEVICE_PROC(CmdDraw);
-        GET_DEVICE_PROC(CmdDrawIndexed);
-        GET_DEVICE_PROC(CmdDrawIndexedIndirect);
-        GET_DEVICE_PROC(CmdDrawIndirect);
-        GET_DEVICE_PROC(CmdEndQuery);
-        GET_DEVICE_PROC(CmdEndRenderPass);
-        GET_DEVICE_PROC(CmdExecuteCommands);
-        GET_DEVICE_PROC(CmdFillBuffer);
-        GET_DEVICE_PROC(CmdNextSubpass);
-        GET_DEVICE_PROC(CmdPipelineBarrier);
-        GET_DEVICE_PROC(CmdPushConstants);
-        GET_DEVICE_PROC(CmdResetEvent);
-        GET_DEVICE_PROC(CmdResetQueryPool);
-        GET_DEVICE_PROC(CmdResolveImage);
-        GET_DEVICE_PROC(CmdSetBlendConstants);
-        GET_DEVICE_PROC(CmdSetDepthBias);
-        GET_DEVICE_PROC(CmdSetDepthBounds);
-        GET_DEVICE_PROC(CmdSetEvent);
-        GET_DEVICE_PROC(CmdSetLineWidth);
-        GET_DEVICE_PROC(CmdSetScissor);
-        GET_DEVICE_PROC(CmdSetStencilCompareMask);
-        GET_DEVICE_PROC(CmdSetStencilReference);
-        GET_DEVICE_PROC(CmdSetStencilWriteMask);
-        GET_DEVICE_PROC(CmdSetViewport);
-        GET_DEVICE_PROC(CmdUpdateBuffer);
-        GET_DEVICE_PROC(CmdWaitEvents);
-        GET_DEVICE_PROC(CmdWriteTimestamp);
-        GET_DEVICE_PROC(CreateBuffer);
-        GET_DEVICE_PROC(CreateBufferView);
-        GET_DEVICE_PROC(CreateCommandPool);
-        GET_DEVICE_PROC(CreateComputePipelines);
-        GET_DEVICE_PROC(CreateDescriptorPool);
-        GET_DEVICE_PROC(CreateDescriptorSetLayout);
-        GET_DEVICE_PROC(CreateEvent);
-        GET_DEVICE_PROC(CreateFence);
-        GET_DEVICE_PROC(CreateFramebuffer);
-        GET_DEVICE_PROC(CreateGraphicsPipelines);
-        GET_DEVICE_PROC(CreateImage);
-        GET_DEVICE_PROC(CreateImageView);
-        GET_DEVICE_PROC(CreatePipelineCache);
-        GET_DEVICE_PROC(CreatePipelineLayout);
-        GET_DEVICE_PROC(CreateQueryPool);
-        GET_DEVICE_PROC(CreateRenderPass);
-        GET_DEVICE_PROC(CreateSampler);
-        GET_DEVICE_PROC(CreateSemaphore);
-        GET_DEVICE_PROC(CreateShaderModule);
-        GET_DEVICE_PROC(DestroyBuffer);
-        GET_DEVICE_PROC(DestroyBufferView);
-        GET_DEVICE_PROC(DestroyCommandPool);
-        GET_DEVICE_PROC(DestroyDescriptorPool);
-        GET_DEVICE_PROC(DestroyDescriptorSetLayout);
-        GET_DEVICE_PROC(DestroyEvent);
-        GET_DEVICE_PROC(DestroyFence);
-        GET_DEVICE_PROC(DestroyFramebuffer);
-        GET_DEVICE_PROC(DestroyImage);
-        GET_DEVICE_PROC(DestroyImageView);
-        GET_DEVICE_PROC(DestroyPipeline);
-        GET_DEVICE_PROC(DestroyPipelineCache);
-        GET_DEVICE_PROC(DestroyPipelineLayout);
-        GET_DEVICE_PROC(DestroyQueryPool);
-        GET_DEVICE_PROC(DestroyRenderPass);
-        GET_DEVICE_PROC(DestroySampler);
-        GET_DEVICE_PROC(DestroySemaphore);
-        GET_DEVICE_PROC(DestroyShaderModule);
-        GET_DEVICE_PROC(DeviceWaitIdle);
-        GET_DEVICE_PROC(EndCommandBuffer);
-        GET_DEVICE_PROC(FlushMappedMemoryRanges);
-        GET_DEVICE_PROC(FreeCommandBuffers);
-        GET_DEVICE_PROC(FreeDescriptorSets);
-        GET_DEVICE_PROC(FreeMemory);
-        GET_DEVICE_PROC(GetBufferMemoryRequirements);
-        GET_DEVICE_PROC(GetDeviceMemoryCommitment);
-        GET_DEVICE_PROC(GetDeviceQueue);
-        GET_DEVICE_PROC(GetEventStatus);
-        GET_DEVICE_PROC(GetFenceStatus);
-        GET_DEVICE_PROC(GetImageMemoryRequirements);
-        GET_DEVICE_PROC(GetImageSparseMemoryRequirements);
-        GET_DEVICE_PROC(GetImageSubresourceLayout);
-        GET_DEVICE_PROC(GetPipelineCacheData);
-        GET_DEVICE_PROC(GetQueryPoolResults);
-        GET_DEVICE_PROC(GetRenderAreaGranularity);
-        GET_DEVICE_PROC(InvalidateMappedMemoryRanges);
-        GET_DEVICE_PROC(MapMemory);
-        GET_DEVICE_PROC(MergePipelineCaches);
-        GET_DEVICE_PROC(QueueBindSparse);
-        GET_DEVICE_PROC(QueueSubmit);
-        GET_DEVICE_PROC(QueueWaitIdle);
-        GET_DEVICE_PROC(ResetCommandBuffer);
-        GET_DEVICE_PROC(ResetCommandPool);
-        GET_DEVICE_PROC(ResetDescriptorPool);
-        GET_DEVICE_PROC(ResetEvent);
-        GET_DEVICE_PROC(ResetFences);
-        GET_DEVICE_PROC(SetEvent);
-        GET_DEVICE_PROC(UnmapMemory);
-        GET_DEVICE_PROC(UpdateDescriptorSets);
-        GET_DEVICE_PROC(WaitForFences);
+MaybeError VulkanFunctions::LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo) {
+    GET_DEVICE_PROC(AllocateCommandBuffers);
+    GET_DEVICE_PROC(AllocateDescriptorSets);
+    GET_DEVICE_PROC(AllocateMemory);
+    GET_DEVICE_PROC(BeginCommandBuffer);
+    GET_DEVICE_PROC(BindBufferMemory);
+    GET_DEVICE_PROC(BindImageMemory);
+    GET_DEVICE_PROC(CmdBeginQuery);
+    GET_DEVICE_PROC(CmdBeginRenderPass);
+    GET_DEVICE_PROC(CmdBindDescriptorSets);
+    GET_DEVICE_PROC(CmdBindIndexBuffer);
+    GET_DEVICE_PROC(CmdBindPipeline);
+    GET_DEVICE_PROC(CmdBindVertexBuffers);
+    GET_DEVICE_PROC(CmdBlitImage);
+    GET_DEVICE_PROC(CmdClearAttachments);
+    GET_DEVICE_PROC(CmdClearColorImage);
+    GET_DEVICE_PROC(CmdClearDepthStencilImage);
+    GET_DEVICE_PROC(CmdCopyBuffer);
+    GET_DEVICE_PROC(CmdCopyBufferToImage);
+    GET_DEVICE_PROC(CmdCopyImage);
+    GET_DEVICE_PROC(CmdCopyImageToBuffer);
+    GET_DEVICE_PROC(CmdCopyQueryPoolResults);
+    GET_DEVICE_PROC(CmdDispatch);
+    GET_DEVICE_PROC(CmdDispatchIndirect);
+    GET_DEVICE_PROC(CmdDraw);
+    GET_DEVICE_PROC(CmdDrawIndexed);
+    GET_DEVICE_PROC(CmdDrawIndexedIndirect);
+    GET_DEVICE_PROC(CmdDrawIndirect);
+    GET_DEVICE_PROC(CmdEndQuery);
+    GET_DEVICE_PROC(CmdEndRenderPass);
+    GET_DEVICE_PROC(CmdExecuteCommands);
+    GET_DEVICE_PROC(CmdFillBuffer);
+    GET_DEVICE_PROC(CmdNextSubpass);
+    GET_DEVICE_PROC(CmdPipelineBarrier);
+    GET_DEVICE_PROC(CmdPushConstants);
+    GET_DEVICE_PROC(CmdResetEvent);
+    GET_DEVICE_PROC(CmdResetQueryPool);
+    GET_DEVICE_PROC(CmdResolveImage);
+    GET_DEVICE_PROC(CmdSetBlendConstants);
+    GET_DEVICE_PROC(CmdSetDepthBias);
+    GET_DEVICE_PROC(CmdSetDepthBounds);
+    GET_DEVICE_PROC(CmdSetEvent);
+    GET_DEVICE_PROC(CmdSetLineWidth);
+    GET_DEVICE_PROC(CmdSetScissor);
+    GET_DEVICE_PROC(CmdSetStencilCompareMask);
+    GET_DEVICE_PROC(CmdSetStencilReference);
+    GET_DEVICE_PROC(CmdSetStencilWriteMask);
+    GET_DEVICE_PROC(CmdSetViewport);
+    GET_DEVICE_PROC(CmdUpdateBuffer);
+    GET_DEVICE_PROC(CmdWaitEvents);
+    GET_DEVICE_PROC(CmdWriteTimestamp);
+    GET_DEVICE_PROC(CreateBuffer);
+    GET_DEVICE_PROC(CreateBufferView);
+    GET_DEVICE_PROC(CreateCommandPool);
+    GET_DEVICE_PROC(CreateComputePipelines);
+    GET_DEVICE_PROC(CreateDescriptorPool);
+    GET_DEVICE_PROC(CreateDescriptorSetLayout);
+    GET_DEVICE_PROC(CreateEvent);
+    GET_DEVICE_PROC(CreateFence);
+    GET_DEVICE_PROC(CreateFramebuffer);
+    GET_DEVICE_PROC(CreateGraphicsPipelines);
+    GET_DEVICE_PROC(CreateImage);
+    GET_DEVICE_PROC(CreateImageView);
+    GET_DEVICE_PROC(CreatePipelineCache);
+    GET_DEVICE_PROC(CreatePipelineLayout);
+    GET_DEVICE_PROC(CreateQueryPool);
+    GET_DEVICE_PROC(CreateRenderPass);
+    GET_DEVICE_PROC(CreateSampler);
+    GET_DEVICE_PROC(CreateSemaphore);
+    GET_DEVICE_PROC(CreateShaderModule);
+    GET_DEVICE_PROC(DestroyBuffer);
+    GET_DEVICE_PROC(DestroyBufferView);
+    GET_DEVICE_PROC(DestroyCommandPool);
+    GET_DEVICE_PROC(DestroyDescriptorPool);
+    GET_DEVICE_PROC(DestroyDescriptorSetLayout);
+    GET_DEVICE_PROC(DestroyEvent);
+    GET_DEVICE_PROC(DestroyFence);
+    GET_DEVICE_PROC(DestroyFramebuffer);
+    GET_DEVICE_PROC(DestroyImage);
+    GET_DEVICE_PROC(DestroyImageView);
+    GET_DEVICE_PROC(DestroyPipeline);
+    GET_DEVICE_PROC(DestroyPipelineCache);
+    GET_DEVICE_PROC(DestroyPipelineLayout);
+    GET_DEVICE_PROC(DestroyQueryPool);
+    GET_DEVICE_PROC(DestroyRenderPass);
+    GET_DEVICE_PROC(DestroySampler);
+    GET_DEVICE_PROC(DestroySemaphore);
+    GET_DEVICE_PROC(DestroyShaderModule);
+    GET_DEVICE_PROC(DeviceWaitIdle);
+    GET_DEVICE_PROC(EndCommandBuffer);
+    GET_DEVICE_PROC(FlushMappedMemoryRanges);
+    GET_DEVICE_PROC(FreeCommandBuffers);
+    GET_DEVICE_PROC(FreeDescriptorSets);
+    GET_DEVICE_PROC(FreeMemory);
+    GET_DEVICE_PROC(GetBufferMemoryRequirements);
+    GET_DEVICE_PROC(GetDeviceMemoryCommitment);
+    GET_DEVICE_PROC(GetDeviceQueue);
+    GET_DEVICE_PROC(GetEventStatus);
+    GET_DEVICE_PROC(GetFenceStatus);
+    GET_DEVICE_PROC(GetImageMemoryRequirements);
+    GET_DEVICE_PROC(GetImageSparseMemoryRequirements);
+    GET_DEVICE_PROC(GetImageSubresourceLayout);
+    GET_DEVICE_PROC(GetPipelineCacheData);
+    GET_DEVICE_PROC(GetQueryPoolResults);
+    GET_DEVICE_PROC(GetRenderAreaGranularity);
+    GET_DEVICE_PROC(InvalidateMappedMemoryRanges);
+    GET_DEVICE_PROC(MapMemory);
+    GET_DEVICE_PROC(MergePipelineCaches);
+    GET_DEVICE_PROC(QueueBindSparse);
+    GET_DEVICE_PROC(QueueSubmit);
+    GET_DEVICE_PROC(QueueWaitIdle);
+    GET_DEVICE_PROC(ResetCommandBuffer);
+    GET_DEVICE_PROC(ResetCommandPool);
+    GET_DEVICE_PROC(ResetDescriptorPool);
+    GET_DEVICE_PROC(ResetEvent);
+    GET_DEVICE_PROC(ResetFences);
+    GET_DEVICE_PROC(SetEvent);
+    GET_DEVICE_PROC(UnmapMemory);
+    GET_DEVICE_PROC(UpdateDescriptorSets);
+    GET_DEVICE_PROC(WaitForFences);
 
-        if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
-            GET_DEVICE_PROC(GetMemoryFdKHR);
-            GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
-        }
+    if (deviceInfo.HasExt(DeviceExt::ExternalMemoryFD)) {
+        GET_DEVICE_PROC(GetMemoryFdKHR);
+        GET_DEVICE_PROC(GetMemoryFdPropertiesKHR);
+    }
 
-        if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
-            GET_DEVICE_PROC(ImportSemaphoreFdKHR);
-            GET_DEVICE_PROC(GetSemaphoreFdKHR);
-        }
+    if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+        GET_DEVICE_PROC(ImportSemaphoreFdKHR);
+        GET_DEVICE_PROC(GetSemaphoreFdKHR);
+    }
 
-        if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
-            GET_DEVICE_PROC(CreateSwapchainKHR);
-            GET_DEVICE_PROC(DestroySwapchainKHR);
-            GET_DEVICE_PROC(GetSwapchainImagesKHR);
-            GET_DEVICE_PROC(AcquireNextImageKHR);
-            GET_DEVICE_PROC(QueuePresentKHR);
-        }
+    if (deviceInfo.HasExt(DeviceExt::Swapchain)) {
+        GET_DEVICE_PROC(CreateSwapchainKHR);
+        GET_DEVICE_PROC(DestroySwapchainKHR);
+        GET_DEVICE_PROC(GetSwapchainImagesKHR);
+        GET_DEVICE_PROC(AcquireNextImageKHR);
+        GET_DEVICE_PROC(QueuePresentKHR);
+    }
 
-        if (deviceInfo.HasExt(DeviceExt::GetMemoryRequirements2)) {
-            GET_DEVICE_PROC(GetBufferMemoryRequirements2);
-            GET_DEVICE_PROC(GetImageMemoryRequirements2);
-            GET_DEVICE_PROC(GetImageSparseMemoryRequirements2);
-        }
+    if (deviceInfo.HasExt(DeviceExt::GetMemoryRequirements2)) {
+        GET_DEVICE_PROC(GetBufferMemoryRequirements2);
+        GET_DEVICE_PROC(GetImageMemoryRequirements2);
+        GET_DEVICE_PROC(GetImageSparseMemoryRequirements2);
+    }
 
 #if VK_USE_PLATFORM_FUCHSIA
-        if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
-            GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
-            GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
-        }
+    if (deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle)) {
+        GET_DEVICE_PROC(GetMemoryZirconHandleFUCHSIA);
+        GET_DEVICE_PROC(GetMemoryZirconHandlePropertiesFUCHSIA);
+    }
 
-        if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
-            GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
-            GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
-        }
+    if (deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+        GET_DEVICE_PROC(ImportSemaphoreZirconHandleFUCHSIA);
+        GET_DEVICE_PROC(GetSemaphoreZirconHandleFUCHSIA);
+    }
 #endif
 
-        return {};
-    }
+    return {};
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanFunctions.h b/src/dawn/native/vulkan/VulkanFunctions.h
index 447c98d..ea6cd2e 100644
--- a/src/dawn/native/vulkan/VulkanFunctions.h
+++ b/src/dawn/native/vulkan/VulkanFunctions.h
@@ -23,307 +23,295 @@
 
 namespace dawn::native::vulkan {
 
-    struct VulkanGlobalInfo;
-    struct VulkanDeviceInfo;
+struct VulkanGlobalInfo;
+struct VulkanDeviceInfo;
 
-    // Stores the Vulkan entry points. Also loads them from the dynamic library
-    // and the vkGet*ProcAddress entry points.
-    struct VulkanFunctions {
-        MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
-        MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
-        MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
+// Stores the Vulkan entry points. Also loads them from the dynamic library
+// and the vkGet*ProcAddress entry points.
+struct VulkanFunctions {
+    MaybeError LoadGlobalProcs(const DynamicLib& vulkanLib);
+    MaybeError LoadInstanceProcs(VkInstance instance, const VulkanGlobalInfo& globalInfo);
+    MaybeError LoadDeviceProcs(VkDevice device, const VulkanDeviceInfo& deviceInfo);
 
-        // ---------- Global procs
+    // ---------- Global procs
 
-        // Initial proc from which we can get all the others
-        PFN_vkGetInstanceProcAddr GetInstanceProcAddr = nullptr;
+    // Initial proc from which we can get all the others
+    PFN_vkGetInstanceProcAddr GetInstanceProcAddr = nullptr;
 
-        PFN_vkCreateInstance CreateInstance = nullptr;
-        PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties = nullptr;
-        PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties = nullptr;
-        // DestroyInstance isn't technically a global proc but we want to be able to use it
-        // before querying the instance procs in case we need to error out during initialization.
-        PFN_vkDestroyInstance DestroyInstance = nullptr;
+    PFN_vkCreateInstance CreateInstance = nullptr;
+    PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties = nullptr;
+    PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties = nullptr;
+    // DestroyInstance isn't technically a global proc but we want to be able to use it
+    // before querying the instance procs in case we need to error out during initialization.
+    PFN_vkDestroyInstance DestroyInstance = nullptr;
 
-        // Core Vulkan 1.1
-        PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion = nullptr;
+    // Core Vulkan 1.1
+    PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion = nullptr;
 
-        // ---------- Instance procs
+    // ---------- Instance procs
 
-        // Core Vulkan 1.0
-        PFN_vkCreateDevice CreateDevice = nullptr;
-        PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties = nullptr;
-        PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties = nullptr;
-        PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices = nullptr;
-        PFN_vkGetDeviceProcAddr GetDeviceProcAddr = nullptr;
-        PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures = nullptr;
-        PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties = nullptr;
-        PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties =
-            nullptr;
-        PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties = nullptr;
-        PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties = nullptr;
-        PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties =
-            nullptr;
-        PFN_vkGetPhysicalDeviceSparseImageFormatProperties
-            GetPhysicalDeviceSparseImageFormatProperties = nullptr;
-        // Not technically an instance proc but we want to be able to use it as soon as the
-        // device is created.
-        PFN_vkDestroyDevice DestroyDevice = nullptr;
+    // Core Vulkan 1.0
+    PFN_vkCreateDevice CreateDevice = nullptr;
+    PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties = nullptr;
+    PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties = nullptr;
+    PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices = nullptr;
+    PFN_vkGetDeviceProcAddr GetDeviceProcAddr = nullptr;
+    PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures = nullptr;
+    PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties = nullptr;
+    PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties = nullptr;
+    PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties = nullptr;
+    PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties = nullptr;
+    PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties = nullptr;
+    PFN_vkGetPhysicalDeviceSparseImageFormatProperties
+        GetPhysicalDeviceSparseImageFormatProperties = nullptr;
+    // Not technically an instance proc but we want to be able to use it as soon as the
+    // device is created.
+    PFN_vkDestroyDevice DestroyDevice = nullptr;
 
-        // VK_EXT_debug_utils
-        PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT = nullptr;
-        PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT = nullptr;
-        PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT = nullptr;
-        PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT = nullptr;
-        PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT = nullptr;
-        PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT = nullptr;
-        PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT = nullptr;
-        PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT = nullptr;
-        PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT = nullptr;
-        PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT = nullptr;
-        PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT = nullptr;
+    // VK_EXT_debug_utils
+    PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT = nullptr;
+    PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT = nullptr;
+    PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT = nullptr;
+    PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT = nullptr;
+    PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT = nullptr;
+    PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT = nullptr;
+    PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT = nullptr;
+    PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT = nullptr;
+    PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT = nullptr;
+    PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT = nullptr;
+    PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT = nullptr;
 
-        // VK_KHR_surface
-        PFN_vkDestroySurfaceKHR DestroySurfaceKHR = nullptr;
-        PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR = nullptr;
-        PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR =
-            nullptr;
-        PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR = nullptr;
-        PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR =
-            nullptr;
+    // VK_KHR_surface
+    PFN_vkDestroySurfaceKHR DestroySurfaceKHR = nullptr;
+    PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR = nullptr;
+    PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR = nullptr;
+    PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR = nullptr;
+    PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR = nullptr;
 
-        // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
-        // present.
+    // Core Vulkan 1.1 promoted extensions, set if either the core version or the extension is
+    // present.
 
-        // VK_KHR_external_memory_capabilities
-        PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties =
-            nullptr;
+    // VK_KHR_external_memory_capabilities
+    PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties =
+        nullptr;
 
-        // VK_KHR_external_semaphore_capabilities
-        PFN_vkGetPhysicalDeviceExternalSemaphoreProperties
-            GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
+    // VK_KHR_external_semaphore_capabilities
+    PFN_vkGetPhysicalDeviceExternalSemaphoreProperties
+        GetPhysicalDeviceExternalSemaphoreProperties = nullptr;
 
-        // VK_KHR_get_physical_device_properties2
-        PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2 = nullptr;
-        PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2 = nullptr;
-        PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2 = nullptr;
-        PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2 =
-            nullptr;
-        PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2 =
-            nullptr;
-        PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2 = nullptr;
-        PFN_vkGetPhysicalDeviceSparseImageFormatProperties2
-            GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
+    // VK_KHR_get_physical_device_properties2
+    PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2 = nullptr;
+    PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2 = nullptr;
+    PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2 = nullptr;
+    PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2 = nullptr;
+    PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2 = nullptr;
+    PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2 = nullptr;
+    PFN_vkGetPhysicalDeviceSparseImageFormatProperties2
+        GetPhysicalDeviceSparseImageFormatProperties2 = nullptr;
 
 #if defined(VK_USE_PLATFORM_FUCHSIA)
-        // FUCHSIA_image_pipe_surface
-        PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
+    // FUCHSIA_image_pipe_surface
+    PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA = nullptr;
 #endif  // defined(VK_USE_PLATFORM_FUCHSIA)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-        // EXT_metal_surface
-        PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT = nullptr;
+    // EXT_metal_surface
+    PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT = nullptr;
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-        // KHR_win32_surface
-        PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR = nullptr;
-        PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
-            GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
+    // KHR_win32_surface
+    PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR = nullptr;
+    PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR
+        GetPhysicalDeviceWin32PresentationSupportKHR = nullptr;
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 #if defined(DAWN_PLATFORM_ANDROID)
-        PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR = nullptr;
+    PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR = nullptr;
 #endif  // defined(DAWN_PLATFORM_ANDROID)
 
 #if defined(DAWN_USE_X11)
-        // KHR_xlib_surface
-        PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR = nullptr;
-        PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR
-            GetPhysicalDeviceXlibPresentationSupportKHR = nullptr;
+    // KHR_xlib_surface
+    PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR = nullptr;
+    PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR GetPhysicalDeviceXlibPresentationSupportKHR =
+        nullptr;
 
-        // KHR_xcb_surface
-        PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR = nullptr;
-        PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR
-            GetPhysicalDeviceXcbPresentationSupportKHR = nullptr;
+    // KHR_xcb_surface
+    PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR = nullptr;
+    PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR GetPhysicalDeviceXcbPresentationSupportKHR =
+        nullptr;
 #endif  // defined(DAWN_USE_X11)
 
-        // ---------- Device procs
+    // ---------- Device procs
 
-        // Core Vulkan 1.0
-        PFN_vkAllocateCommandBuffers AllocateCommandBuffers = nullptr;
-        PFN_vkAllocateDescriptorSets AllocateDescriptorSets = nullptr;
-        PFN_vkAllocateMemory AllocateMemory = nullptr;
-        PFN_vkBeginCommandBuffer BeginCommandBuffer = nullptr;
-        PFN_vkBindBufferMemory BindBufferMemory = nullptr;
-        PFN_vkBindImageMemory BindImageMemory = nullptr;
-        PFN_vkCmdBeginQuery CmdBeginQuery = nullptr;
-        PFN_vkCmdBeginRenderPass CmdBeginRenderPass = nullptr;
-        PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets = nullptr;
-        PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer = nullptr;
-        PFN_vkCmdBindPipeline CmdBindPipeline = nullptr;
-        PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers = nullptr;
-        PFN_vkCmdBlitImage CmdBlitImage = nullptr;
-        PFN_vkCmdClearAttachments CmdClearAttachments = nullptr;
-        PFN_vkCmdClearColorImage CmdClearColorImage = nullptr;
-        PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage = nullptr;
-        PFN_vkCmdCopyBuffer CmdCopyBuffer = nullptr;
-        PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage = nullptr;
-        PFN_vkCmdCopyImage CmdCopyImage = nullptr;
-        PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer = nullptr;
-        PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults = nullptr;
-        PFN_vkCmdDispatch CmdDispatch = nullptr;
-        PFN_vkCmdDispatchIndirect CmdDispatchIndirect = nullptr;
-        PFN_vkCmdDraw CmdDraw = nullptr;
-        PFN_vkCmdDrawIndexed CmdDrawIndexed = nullptr;
-        PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect = nullptr;
-        PFN_vkCmdDrawIndirect CmdDrawIndirect = nullptr;
-        PFN_vkCmdEndQuery CmdEndQuery = nullptr;
-        PFN_vkCmdEndRenderPass CmdEndRenderPass = nullptr;
-        PFN_vkCmdExecuteCommands CmdExecuteCommands = nullptr;
-        PFN_vkCmdFillBuffer CmdFillBuffer = nullptr;
-        PFN_vkCmdNextSubpass CmdNextSubpass = nullptr;
-        PFN_vkCmdPipelineBarrier CmdPipelineBarrier = nullptr;
-        PFN_vkCmdPushConstants CmdPushConstants = nullptr;
-        PFN_vkCmdResetEvent CmdResetEvent = nullptr;
-        PFN_vkCmdResetQueryPool CmdResetQueryPool = nullptr;
-        PFN_vkCmdResolveImage CmdResolveImage = nullptr;
-        PFN_vkCmdSetBlendConstants CmdSetBlendConstants = nullptr;
-        PFN_vkCmdSetDepthBias CmdSetDepthBias = nullptr;
-        PFN_vkCmdSetDepthBounds CmdSetDepthBounds = nullptr;
-        PFN_vkCmdSetEvent CmdSetEvent = nullptr;
-        PFN_vkCmdSetLineWidth CmdSetLineWidth = nullptr;
-        PFN_vkCmdSetScissor CmdSetScissor = nullptr;
-        PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask = nullptr;
-        PFN_vkCmdSetStencilReference CmdSetStencilReference = nullptr;
-        PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask = nullptr;
-        PFN_vkCmdSetViewport CmdSetViewport = nullptr;
-        PFN_vkCmdUpdateBuffer CmdUpdateBuffer = nullptr;
-        PFN_vkCmdWaitEvents CmdWaitEvents = nullptr;
-        PFN_vkCmdWriteTimestamp CmdWriteTimestamp = nullptr;
-        PFN_vkCreateBuffer CreateBuffer = nullptr;
-        PFN_vkCreateBufferView CreateBufferView = nullptr;
-        PFN_vkCreateCommandPool CreateCommandPool = nullptr;
-        PFN_vkCreateComputePipelines CreateComputePipelines = nullptr;
-        PFN_vkCreateDescriptorPool CreateDescriptorPool = nullptr;
-        PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout = nullptr;
-        PFN_vkCreateEvent CreateEvent = nullptr;
-        PFN_vkCreateFence CreateFence = nullptr;
-        PFN_vkCreateFramebuffer CreateFramebuffer = nullptr;
-        PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines = nullptr;
-        PFN_vkCreateImage CreateImage = nullptr;
-        PFN_vkCreateImageView CreateImageView = nullptr;
-        PFN_vkCreatePipelineCache CreatePipelineCache = nullptr;
-        PFN_vkCreatePipelineLayout CreatePipelineLayout = nullptr;
-        PFN_vkCreateQueryPool CreateQueryPool = nullptr;
-        PFN_vkCreateRenderPass CreateRenderPass = nullptr;
-        PFN_vkCreateSampler CreateSampler = nullptr;
-        PFN_vkCreateSemaphore CreateSemaphore = nullptr;
-        PFN_vkCreateShaderModule CreateShaderModule = nullptr;
-        PFN_vkDestroyBuffer DestroyBuffer = nullptr;
-        PFN_vkDestroyBufferView DestroyBufferView = nullptr;
-        PFN_vkDestroyCommandPool DestroyCommandPool = nullptr;
-        PFN_vkDestroyDescriptorPool DestroyDescriptorPool = nullptr;
-        PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout = nullptr;
-        PFN_vkDestroyEvent DestroyEvent = nullptr;
-        PFN_vkDestroyFence DestroyFence = nullptr;
-        PFN_vkDestroyFramebuffer DestroyFramebuffer = nullptr;
-        PFN_vkDestroyImage DestroyImage = nullptr;
-        PFN_vkDestroyImageView DestroyImageView = nullptr;
-        PFN_vkDestroyPipeline DestroyPipeline = nullptr;
-        PFN_vkDestroyPipelineCache DestroyPipelineCache = nullptr;
-        PFN_vkDestroyPipelineLayout DestroyPipelineLayout = nullptr;
-        PFN_vkDestroyQueryPool DestroyQueryPool = nullptr;
-        PFN_vkDestroyRenderPass DestroyRenderPass = nullptr;
-        PFN_vkDestroySampler DestroySampler = nullptr;
-        PFN_vkDestroySemaphore DestroySemaphore = nullptr;
-        PFN_vkDestroyShaderModule DestroyShaderModule = nullptr;
-        PFN_vkDeviceWaitIdle DeviceWaitIdle = nullptr;
-        PFN_vkEndCommandBuffer EndCommandBuffer = nullptr;
-        PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges = nullptr;
-        PFN_vkFreeCommandBuffers FreeCommandBuffers = nullptr;
-        PFN_vkFreeDescriptorSets FreeDescriptorSets = nullptr;
-        PFN_vkFreeMemory FreeMemory = nullptr;
-        PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements = nullptr;
-        PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment = nullptr;
-        PFN_vkGetDeviceQueue GetDeviceQueue = nullptr;
-        PFN_vkGetEventStatus GetEventStatus = nullptr;
-        PFN_vkGetFenceStatus GetFenceStatus = nullptr;
-        PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements = nullptr;
-        PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements = nullptr;
-        PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout = nullptr;
-        PFN_vkGetPipelineCacheData GetPipelineCacheData = nullptr;
-        PFN_vkGetQueryPoolResults GetQueryPoolResults = nullptr;
-        PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity = nullptr;
-        PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges = nullptr;
-        PFN_vkMapMemory MapMemory = nullptr;
-        PFN_vkMergePipelineCaches MergePipelineCaches = nullptr;
-        PFN_vkQueueBindSparse QueueBindSparse = nullptr;
-        PFN_vkQueueSubmit QueueSubmit = nullptr;
-        PFN_vkQueueWaitIdle QueueWaitIdle = nullptr;
-        PFN_vkResetCommandBuffer ResetCommandBuffer = nullptr;
-        PFN_vkResetCommandPool ResetCommandPool = nullptr;
-        PFN_vkResetDescriptorPool ResetDescriptorPool = nullptr;
-        PFN_vkResetEvent ResetEvent = nullptr;
-        PFN_vkResetFences ResetFences = nullptr;
-        PFN_vkSetEvent SetEvent = nullptr;
-        PFN_vkUnmapMemory UnmapMemory = nullptr;
-        PFN_vkUpdateDescriptorSets UpdateDescriptorSets = nullptr;
-        PFN_vkWaitForFences WaitForFences = nullptr;
+    // Core Vulkan 1.0
+    PFN_vkAllocateCommandBuffers AllocateCommandBuffers = nullptr;
+    PFN_vkAllocateDescriptorSets AllocateDescriptorSets = nullptr;
+    PFN_vkAllocateMemory AllocateMemory = nullptr;
+    PFN_vkBeginCommandBuffer BeginCommandBuffer = nullptr;
+    PFN_vkBindBufferMemory BindBufferMemory = nullptr;
+    PFN_vkBindImageMemory BindImageMemory = nullptr;
+    PFN_vkCmdBeginQuery CmdBeginQuery = nullptr;
+    PFN_vkCmdBeginRenderPass CmdBeginRenderPass = nullptr;
+    PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets = nullptr;
+    PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer = nullptr;
+    PFN_vkCmdBindPipeline CmdBindPipeline = nullptr;
+    PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers = nullptr;
+    PFN_vkCmdBlitImage CmdBlitImage = nullptr;
+    PFN_vkCmdClearAttachments CmdClearAttachments = nullptr;
+    PFN_vkCmdClearColorImage CmdClearColorImage = nullptr;
+    PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage = nullptr;
+    PFN_vkCmdCopyBuffer CmdCopyBuffer = nullptr;
+    PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage = nullptr;
+    PFN_vkCmdCopyImage CmdCopyImage = nullptr;
+    PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer = nullptr;
+    PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults = nullptr;
+    PFN_vkCmdDispatch CmdDispatch = nullptr;
+    PFN_vkCmdDispatchIndirect CmdDispatchIndirect = nullptr;
+    PFN_vkCmdDraw CmdDraw = nullptr;
+    PFN_vkCmdDrawIndexed CmdDrawIndexed = nullptr;
+    PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect = nullptr;
+    PFN_vkCmdDrawIndirect CmdDrawIndirect = nullptr;
+    PFN_vkCmdEndQuery CmdEndQuery = nullptr;
+    PFN_vkCmdEndRenderPass CmdEndRenderPass = nullptr;
+    PFN_vkCmdExecuteCommands CmdExecuteCommands = nullptr;
+    PFN_vkCmdFillBuffer CmdFillBuffer = nullptr;
+    PFN_vkCmdNextSubpass CmdNextSubpass = nullptr;
+    PFN_vkCmdPipelineBarrier CmdPipelineBarrier = nullptr;
+    PFN_vkCmdPushConstants CmdPushConstants = nullptr;
+    PFN_vkCmdResetEvent CmdResetEvent = nullptr;
+    PFN_vkCmdResetQueryPool CmdResetQueryPool = nullptr;
+    PFN_vkCmdResolveImage CmdResolveImage = nullptr;
+    PFN_vkCmdSetBlendConstants CmdSetBlendConstants = nullptr;
+    PFN_vkCmdSetDepthBias CmdSetDepthBias = nullptr;
+    PFN_vkCmdSetDepthBounds CmdSetDepthBounds = nullptr;
+    PFN_vkCmdSetEvent CmdSetEvent = nullptr;
+    PFN_vkCmdSetLineWidth CmdSetLineWidth = nullptr;
+    PFN_vkCmdSetScissor CmdSetScissor = nullptr;
+    PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask = nullptr;
+    PFN_vkCmdSetStencilReference CmdSetStencilReference = nullptr;
+    PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask = nullptr;
+    PFN_vkCmdSetViewport CmdSetViewport = nullptr;
+    PFN_vkCmdUpdateBuffer CmdUpdateBuffer = nullptr;
+    PFN_vkCmdWaitEvents CmdWaitEvents = nullptr;
+    PFN_vkCmdWriteTimestamp CmdWriteTimestamp = nullptr;
+    PFN_vkCreateBuffer CreateBuffer = nullptr;
+    PFN_vkCreateBufferView CreateBufferView = nullptr;
+    PFN_vkCreateCommandPool CreateCommandPool = nullptr;
+    PFN_vkCreateComputePipelines CreateComputePipelines = nullptr;
+    PFN_vkCreateDescriptorPool CreateDescriptorPool = nullptr;
+    PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout = nullptr;
+    PFN_vkCreateEvent CreateEvent = nullptr;
+    PFN_vkCreateFence CreateFence = nullptr;
+    PFN_vkCreateFramebuffer CreateFramebuffer = nullptr;
+    PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines = nullptr;
+    PFN_vkCreateImage CreateImage = nullptr;
+    PFN_vkCreateImageView CreateImageView = nullptr;
+    PFN_vkCreatePipelineCache CreatePipelineCache = nullptr;
+    PFN_vkCreatePipelineLayout CreatePipelineLayout = nullptr;
+    PFN_vkCreateQueryPool CreateQueryPool = nullptr;
+    PFN_vkCreateRenderPass CreateRenderPass = nullptr;
+    PFN_vkCreateSampler CreateSampler = nullptr;
+    PFN_vkCreateSemaphore CreateSemaphore = nullptr;
+    PFN_vkCreateShaderModule CreateShaderModule = nullptr;
+    PFN_vkDestroyBuffer DestroyBuffer = nullptr;
+    PFN_vkDestroyBufferView DestroyBufferView = nullptr;
+    PFN_vkDestroyCommandPool DestroyCommandPool = nullptr;
+    PFN_vkDestroyDescriptorPool DestroyDescriptorPool = nullptr;
+    PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout = nullptr;
+    PFN_vkDestroyEvent DestroyEvent = nullptr;
+    PFN_vkDestroyFence DestroyFence = nullptr;
+    PFN_vkDestroyFramebuffer DestroyFramebuffer = nullptr;
+    PFN_vkDestroyImage DestroyImage = nullptr;
+    PFN_vkDestroyImageView DestroyImageView = nullptr;
+    PFN_vkDestroyPipeline DestroyPipeline = nullptr;
+    PFN_vkDestroyPipelineCache DestroyPipelineCache = nullptr;
+    PFN_vkDestroyPipelineLayout DestroyPipelineLayout = nullptr;
+    PFN_vkDestroyQueryPool DestroyQueryPool = nullptr;
+    PFN_vkDestroyRenderPass DestroyRenderPass = nullptr;
+    PFN_vkDestroySampler DestroySampler = nullptr;
+    PFN_vkDestroySemaphore DestroySemaphore = nullptr;
+    PFN_vkDestroyShaderModule DestroyShaderModule = nullptr;
+    PFN_vkDeviceWaitIdle DeviceWaitIdle = nullptr;
+    PFN_vkEndCommandBuffer EndCommandBuffer = nullptr;
+    PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges = nullptr;
+    PFN_vkFreeCommandBuffers FreeCommandBuffers = nullptr;
+    PFN_vkFreeDescriptorSets FreeDescriptorSets = nullptr;
+    PFN_vkFreeMemory FreeMemory = nullptr;
+    PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements = nullptr;
+    PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment = nullptr;
+    PFN_vkGetDeviceQueue GetDeviceQueue = nullptr;
+    PFN_vkGetEventStatus GetEventStatus = nullptr;
+    PFN_vkGetFenceStatus GetFenceStatus = nullptr;
+    PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements = nullptr;
+    PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements = nullptr;
+    PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout = nullptr;
+    PFN_vkGetPipelineCacheData GetPipelineCacheData = nullptr;
+    PFN_vkGetQueryPoolResults GetQueryPoolResults = nullptr;
+    PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity = nullptr;
+    PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges = nullptr;
+    PFN_vkMapMemory MapMemory = nullptr;
+    PFN_vkMergePipelineCaches MergePipelineCaches = nullptr;
+    PFN_vkQueueBindSparse QueueBindSparse = nullptr;
+    PFN_vkQueueSubmit QueueSubmit = nullptr;
+    PFN_vkQueueWaitIdle QueueWaitIdle = nullptr;
+    PFN_vkResetCommandBuffer ResetCommandBuffer = nullptr;
+    PFN_vkResetCommandPool ResetCommandPool = nullptr;
+    PFN_vkResetDescriptorPool ResetDescriptorPool = nullptr;
+    PFN_vkResetEvent ResetEvent = nullptr;
+    PFN_vkResetFences ResetFences = nullptr;
+    PFN_vkSetEvent SetEvent = nullptr;
+    PFN_vkUnmapMemory UnmapMemory = nullptr;
+    PFN_vkUpdateDescriptorSets UpdateDescriptorSets = nullptr;
+    PFN_vkWaitForFences WaitForFences = nullptr;
 
-        // VK_KHR_external_memory_fd
-        PFN_vkGetMemoryFdKHR GetMemoryFdKHR = nullptr;
-        PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR = nullptr;
+    // VK_KHR_external_memory_fd
+    PFN_vkGetMemoryFdKHR GetMemoryFdKHR = nullptr;
+    PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR = nullptr;
 
-        // VK_KHR_external_semaphore_fd
-        PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR = nullptr;
-        PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR = nullptr;
+    // VK_KHR_external_semaphore_fd
+    PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR = nullptr;
+    PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR = nullptr;
 
-        // VK_KHR_get_memory_requirements2
-        PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2 = nullptr;
-        PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2 = nullptr;
-        PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2 = nullptr;
+    // VK_KHR_get_memory_requirements2
+    PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2 = nullptr;
+    PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2 = nullptr;
+    PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2 = nullptr;
 
-        // VK_KHR_swapchain
-        PFN_vkCreateSwapchainKHR CreateSwapchainKHR = nullptr;
-        PFN_vkDestroySwapchainKHR DestroySwapchainKHR = nullptr;
-        PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR = nullptr;
-        PFN_vkAcquireNextImageKHR AcquireNextImageKHR = nullptr;
-        PFN_vkQueuePresentKHR QueuePresentKHR = nullptr;
+    // VK_KHR_swapchain
+    PFN_vkCreateSwapchainKHR CreateSwapchainKHR = nullptr;
+    PFN_vkDestroySwapchainKHR DestroySwapchainKHR = nullptr;
+    PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR = nullptr;
+    PFN_vkAcquireNextImageKHR AcquireNextImageKHR = nullptr;
+    PFN_vkQueuePresentKHR QueuePresentKHR = nullptr;
 
 #if VK_USE_PLATFORM_FUCHSIA
-        // VK_FUCHSIA_external_memory
-        PFN_vkGetMemoryZirconHandleFUCHSIA GetMemoryZirconHandleFUCHSIA = nullptr;
-        PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA GetMemoryZirconHandlePropertiesFUCHSIA =
-            nullptr;
+    // VK_FUCHSIA_external_memory
+    PFN_vkGetMemoryZirconHandleFUCHSIA GetMemoryZirconHandleFUCHSIA = nullptr;
+    PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA GetMemoryZirconHandlePropertiesFUCHSIA = nullptr;
 
-        // VK_FUCHSIA_external_semaphore
-        PFN_vkImportSemaphoreZirconHandleFUCHSIA ImportSemaphoreZirconHandleFUCHSIA = nullptr;
-        PFN_vkGetSemaphoreZirconHandleFUCHSIA GetSemaphoreZirconHandleFUCHSIA = nullptr;
+    // VK_FUCHSIA_external_semaphore
+    PFN_vkImportSemaphoreZirconHandleFUCHSIA ImportSemaphoreZirconHandleFUCHSIA = nullptr;
+    PFN_vkGetSemaphoreZirconHandleFUCHSIA GetSemaphoreZirconHandleFUCHSIA = nullptr;
 #endif
-    };
+};
 
-    // Create a wrapper around VkResult in the dawn::native::vulkan namespace. This shadows the
-    // default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
-    // ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
-    // about handling error cases.
-    class VkResult {
-      public:
-        constexpr static VkResult WrapUnsafe(::VkResult value) {
-            return VkResult(value);
-        }
+// Create a wrapper around VkResult in the dawn::native::vulkan namespace. This shadows the
+// default VkResult (::VkResult). This ensures that assigning or creating a VkResult from a raw
+// ::VkResult uses WrapUnsafe. This makes it clear that users of VkResult must be intentional
+// about handling error cases.
+class VkResult {
+  public:
+    constexpr static VkResult WrapUnsafe(::VkResult value) { return VkResult(value); }
 
-        constexpr operator ::VkResult() const {
-            return mValue;
-        }
+    constexpr operator ::VkResult() const { return mValue; }
 
-      private:
-        // Private. Use VkResult::WrapUnsafe instead.
-        explicit constexpr VkResult(::VkResult value) : mValue(value) {
-        }
+  private:
+    // Private. Use VkResult::WrapUnsafe instead.
+    explicit constexpr VkResult(::VkResult value) : mValue(value) {}
 
-        ::VkResult mValue;
-    };
+    ::VkResult mValue;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/native/vulkan/VulkanInfo.cpp b/src/dawn/native/vulkan/VulkanInfo.cpp
index 9b33fb2..ebcf3ea 100644
--- a/src/dawn/native/vulkan/VulkanInfo.cpp
+++ b/src/dawn/native/vulkan/VulkanInfo.cpp
@@ -26,312 +26,305 @@
 
 namespace dawn::native::vulkan {
 
-    namespace {
-        ResultOrError<InstanceExtSet> GatherInstanceExtensions(
-            const char* layerName,
-            const dawn::native::vulkan::VulkanFunctions& vkFunctions,
-            const std::unordered_map<std::string, InstanceExt>& knownExts) {
-            uint32_t count = 0;
-            VkResult vkResult = VkResult::WrapUnsafe(
-                vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
-            if (vkResult != VK_SUCCESS && vkResult != VK_INCOMPLETE) {
-                return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
-            }
-
-            std::vector<VkExtensionProperties> extensions(count);
-            DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceExtensionProperties(
-                                        layerName, &count, extensions.data()),
-                                    "vkEnumerateInstanceExtensionProperties"));
-
-            InstanceExtSet result;
-            for (const VkExtensionProperties& extension : extensions) {
-                auto it = knownExts.find(extension.extensionName);
-                if (it != knownExts.end()) {
-                    result.set(it->second, true);
-                }
-            }
-
-            return result;
-        }
-
-    }  // namespace
-
-    bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
-        return extensions[ext];
+namespace {
+ResultOrError<InstanceExtSet> GatherInstanceExtensions(
+    const char* layerName,
+    const dawn::native::vulkan::VulkanFunctions& vkFunctions,
+    const std::unordered_map<std::string, InstanceExt>& knownExts) {
+    uint32_t count = 0;
+    VkResult vkResult = VkResult::WrapUnsafe(
+        vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, nullptr));
+    if (vkResult != VK_SUCCESS && vkResult != VK_INCOMPLETE) {
+        return DAWN_INTERNAL_ERROR("vkEnumerateInstanceExtensionProperties");
     }
 
-    bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
-        return extensions[ext];
+    std::vector<VkExtensionProperties> extensions(count);
+    DAWN_TRY(CheckVkSuccess(
+        vkFunctions.EnumerateInstanceExtensionProperties(layerName, &count, extensions.data()),
+        "vkEnumerateInstanceExtensionProperties"));
+
+    InstanceExtSet result;
+    for (const VkExtensionProperties& extension : extensions) {
+        auto it = knownExts.find(extension.extensionName);
+        if (it != knownExts.end()) {
+            result.set(it->second, true);
+        }
     }
 
-    ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions) {
-        VulkanGlobalInfo info = {};
-        // Gather info on available API version
-        {
-            info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
-            if (vkFunctions.EnumerateInstanceVersion != nullptr) {
-                DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceVersion(&info.apiVersion),
-                                        "vkEnumerateInstanceVersion"));
-            }
+    return result;
+}
+
+}  // namespace
+
+bool VulkanGlobalKnobs::HasExt(InstanceExt ext) const {
+    return extensions[ext];
+}
+
+bool VulkanDeviceKnobs::HasExt(DeviceExt ext) const {
+    return extensions[ext];
+}
+
+ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions) {
+    VulkanGlobalInfo info = {};
+    // Gather info on available API version
+    {
+        info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+        if (vkFunctions.EnumerateInstanceVersion != nullptr) {
+            DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateInstanceVersion(&info.apiVersion),
+                                    "vkEnumerateInstanceVersion"));
         }
-
-        // Gather the info about the instance layers
-        {
-            uint32_t count = 0;
-            VkResult result =
-                VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
-            // From the Vulkan spec result should be success if there are 0 layers,
-            // incomplete otherwise. This means that both values represent a success.
-            // This is the same for all Enumarte functions
-            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
-                return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
-            }
-
-            std::vector<VkLayerProperties> layersProperties(count);
-            DAWN_TRY(CheckVkSuccess(
-                vkFunctions.EnumerateInstanceLayerProperties(&count, layersProperties.data()),
-                "vkEnumerateInstanceLayerProperties"));
-
-            std::unordered_map<std::string, VulkanLayer> knownLayers = CreateVulkanLayerNameMap();
-            for (const VkLayerProperties& layer : layersProperties) {
-                auto it = knownLayers.find(layer.layerName);
-                if (it != knownLayers.end()) {
-                    info.layers.set(it->second, true);
-                }
-            }
-        }
-
-        // Gather the info about the instance extensions
-        {
-            std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
-
-            DAWN_TRY_ASSIGN(info.extensions,
-                            GatherInstanceExtensions(nullptr, vkFunctions, knownExts));
-            MarkPromotedExtensions(&info.extensions, info.apiVersion);
-            info.extensions = EnsureDependencies(info.extensions);
-
-            for (VulkanLayer layer : IterateBitSet(info.layers)) {
-                DAWN_TRY_ASSIGN(info.layerExtensions[layer],
-                                GatherInstanceExtensions(GetVulkanLayerInfo(layer).name,
-                                                         vkFunctions, knownExts));
-                MarkPromotedExtensions(&info.layerExtensions[layer], info.apiVersion);
-                info.layerExtensions[layer] = EnsureDependencies(info.layerExtensions[layer]);
-            }
-        }
-
-        return std::move(info);
     }
 
-    ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
-        VkInstance instance,
-        const VulkanFunctions& vkFunctions) {
+    // Gather the info about the instance layers
+    {
         uint32_t count = 0;
         VkResult result =
-            VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
+            VkResult::WrapUnsafe(vkFunctions.EnumerateInstanceLayerProperties(&count, nullptr));
+        // From the Vulkan spec result should be success if there are 0 layers,
+        // incomplete otherwise. This means that both values represent a success.
+        // This is the same for all Enumerate functions
         if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
-            return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
+            return DAWN_INTERNAL_ERROR("vkEnumerateInstanceLayerProperties");
         }
 
-        std::vector<VkPhysicalDevice> physicalDevices(count);
+        std::vector<VkLayerProperties> layersProperties(count);
         DAWN_TRY(CheckVkSuccess(
-            vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
-            "vkEnumeratePhysicalDevices"));
+            vkFunctions.EnumerateInstanceLayerProperties(&count, layersProperties.data()),
+            "vkEnumerateInstanceLayerProperties"));
 
-        return std::move(physicalDevices);
+        std::unordered_map<std::string, VulkanLayer> knownLayers = CreateVulkanLayerNameMap();
+        for (const VkLayerProperties& layer : layersProperties) {
+            auto it = knownLayers.find(layer.layerName);
+            if (it != knownLayers.end()) {
+                info.layers.set(it->second, true);
+            }
+        }
     }
 
-    ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
-        VulkanDeviceInfo info = {};
-        VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
-        const VulkanGlobalInfo& globalInfo = adapter.GetVulkanInstance()->GetGlobalInfo();
-        const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+    // Gather the info about the instance extensions
+    {
+        std::unordered_map<std::string, InstanceExt> knownExts = CreateInstanceExtNameMap();
 
-        // Query the device properties first to get the ICD's `apiVersion`
-        vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
+        DAWN_TRY_ASSIGN(info.extensions, GatherInstanceExtensions(nullptr, vkFunctions, knownExts));
+        MarkPromotedExtensions(&info.extensions, info.apiVersion);
+        info.extensions = EnsureDependencies(info.extensions);
 
-        // Gather info about device memory.
-        {
-            VkPhysicalDeviceMemoryProperties memory;
-            vkFunctions.GetPhysicalDeviceMemoryProperties(physicalDevice, &memory);
-
-            info.memoryTypes.assign(memory.memoryTypes,
-                                    memory.memoryTypes + memory.memoryTypeCount);
-            info.memoryHeaps.assign(memory.memoryHeaps,
-                                    memory.memoryHeaps + memory.memoryHeapCount);
+        for (VulkanLayer layer : IterateBitSet(info.layers)) {
+            DAWN_TRY_ASSIGN(
+                info.layerExtensions[layer],
+                GatherInstanceExtensions(GetVulkanLayerInfo(layer).name, vkFunctions, knownExts));
+            MarkPromotedExtensions(&info.layerExtensions[layer], info.apiVersion);
+            info.layerExtensions[layer] = EnsureDependencies(info.layerExtensions[layer]);
         }
-
-        // Gather info about device queue families
-        {
-            uint32_t count = 0;
-            vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
-
-            info.queueFamilies.resize(count);
-            vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count,
-                                                               info.queueFamilies.data());
-        }
-
-        // Gather the info about the device layers
-        {
-            uint32_t count = 0;
-            VkResult result = VkResult::WrapUnsafe(
-                vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
-            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
-                return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
-            }
-
-            info.layers.resize(count);
-            DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceLayerProperties(
-                                        physicalDevice, &count, info.layers.data()),
-                                    "vkEnumerateDeviceLayerProperties"));
-        }
-
-        // Gather the info about the device extensions
-        {
-            uint32_t count = 0;
-            VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
-                physicalDevice, nullptr, &count, nullptr));
-            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
-                return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
-            }
-
-            std::vector<VkExtensionProperties> extensionsProperties;
-            extensionsProperties.resize(count);
-            DAWN_TRY(
-                CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
-                                   physicalDevice, nullptr, &count, extensionsProperties.data()),
-                               "vkEnumerateDeviceExtensionProperties"));
-
-            std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
-
-            for (const VkExtensionProperties& extension : extensionsProperties) {
-                auto it = knownExts.find(extension.extensionName);
-                if (it != knownExts.end()) {
-                    info.extensions.set(it->second, true);
-                }
-            }
-
-            MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
-            info.extensions = EnsureDependencies(info.extensions, globalInfo.extensions,
-                                                 info.properties.apiVersion);
-        }
-
-        // Gather general and extension features and properties
-        //
-        // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
-        // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
-        // because these extensions (transitively) depend on it in `EnsureDependencies`
-        VkPhysicalDeviceFeatures2 features2 = {};
-        features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
-        features2.pNext = nullptr;
-        PNextChainBuilder featuresChain(&features2);
-
-        VkPhysicalDeviceProperties2 properties2 = {};
-        properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
-        features2.pNext = nullptr;
-        PNextChainBuilder propertiesChain(&properties2);
-
-        if (info.extensions[DeviceExt::ShaderFloat16Int8]) {
-            featuresChain.Add(&info.shaderFloat16Int8Features,
-                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
-        }
-
-        if (info.extensions[DeviceExt::_16BitStorage]) {
-            featuresChain.Add(&info._16BitStorageFeatures,
-                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
-        }
-
-        if (info.extensions[DeviceExt::SubgroupSizeControl]) {
-            featuresChain.Add(&info.subgroupSizeControlFeatures,
-                              VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
-            propertiesChain.Add(
-                &info.subgroupSizeControlProperties,
-                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
-        }
-
-        if (info.extensions[DeviceExt::DriverProperties]) {
-            propertiesChain.Add(&info.driverProperties,
-                                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES);
-        }
-
-        // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
-        // that features no covered by VkPhysicalDevice{Features,Properties} can be queried.
-        //
-        // Note that info.properties has already been filled at the start of this function to get
-        // `apiVersion`.
-        ASSERT(info.properties.apiVersion != 0);
-        if (info.extensions[DeviceExt::GetPhysicalDeviceProperties2]) {
-            vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
-            vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
-            info.features = features2.features;
-        } else {
-            ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
-            vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
-        }
-
-        // TODO(cwallez@chromium.org): gather info about formats
-
-        return std::move(info);
     }
 
-    ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
-                                                       VkSurfaceKHR surface) {
-        VulkanSurfaceInfo info = {};
+    return std::move(info);
+}
 
-        VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
-        const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
-
-        // Get the surface capabilities
-        DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
-                                    physicalDevice, surface, &info.capabilities),
-                                "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
-
-        // Query which queue families support presenting this surface
-        {
-            size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
-            info.supportedQueueFamilies.resize(nQueueFamilies, false);
-
-            for (uint32_t i = 0; i < nQueueFamilies; ++i) {
-                VkBool32 supported = VK_FALSE;
-                DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
-                                            physicalDevice, i, surface, &supported),
-                                        "vkGetPhysicalDeviceSurfaceSupportKHR"));
-
-                info.supportedQueueFamilies[i] = (supported == VK_TRUE);
-            }
-        }
-
-        // Gather supported formats
-        {
-            uint32_t count = 0;
-            VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
-                physicalDevice, surface, &count, nullptr));
-            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
-                return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
-            }
-
-            info.formats.resize(count);
-            DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
-                                        physicalDevice, surface, &count, info.formats.data()),
-                                    "vkGetPhysicalDeviceSurfaceFormatsKHR"));
-        }
-
-        // Gather supported presents modes
-        {
-            uint32_t count = 0;
-            VkResult result =
-                VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
-                    physicalDevice, surface, &count, nullptr));
-            if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
-                return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
-            }
-
-            info.presentModes.resize(count);
-            DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
-                                        physicalDevice, surface, &count, info.presentModes.data()),
-                                    "vkGetPhysicalDeviceSurfacePresentModesKHR"));
-        }
-
-        return std::move(info);
+ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+    VkInstance instance,
+    const VulkanFunctions& vkFunctions) {
+    uint32_t count = 0;
+    VkResult result =
+        VkResult::WrapUnsafe(vkFunctions.EnumeratePhysicalDevices(instance, &count, nullptr));
+    if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+        return DAWN_INTERNAL_ERROR("vkEnumeratePhysicalDevices");
     }
 
+    std::vector<VkPhysicalDevice> physicalDevices(count);
+    DAWN_TRY(CheckVkSuccess(
+        vkFunctions.EnumeratePhysicalDevices(instance, &count, physicalDevices.data()),
+        "vkEnumeratePhysicalDevices"));
+
+    return std::move(physicalDevices);
+}
+
+ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter) {
+    VulkanDeviceInfo info = {};
+    VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+    const VulkanGlobalInfo& globalInfo = adapter.GetVulkanInstance()->GetGlobalInfo();
+    const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+
+    // Query the device properties first to get the ICD's `apiVersion`
+    vkFunctions.GetPhysicalDeviceProperties(physicalDevice, &info.properties);
+
+    // Gather info about device memory.
+    {
+        VkPhysicalDeviceMemoryProperties memory;
+        vkFunctions.GetPhysicalDeviceMemoryProperties(physicalDevice, &memory);
+
+        info.memoryTypes.assign(memory.memoryTypes, memory.memoryTypes + memory.memoryTypeCount);
+        info.memoryHeaps.assign(memory.memoryHeaps, memory.memoryHeaps + memory.memoryHeapCount);
+    }
+
+    // Gather info about device queue families
+    {
+        uint32_t count = 0;
+        vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
+
+        info.queueFamilies.resize(count);
+        vkFunctions.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count,
+                                                           info.queueFamilies.data());
+    }
+
+    // Gather the info about the device layers
+    {
+        uint32_t count = 0;
+        VkResult result = VkResult::WrapUnsafe(
+            vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, nullptr));
+        if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+            return DAWN_INTERNAL_ERROR("vkEnumerateDeviceLayerProperties");
+        }
+
+        info.layers.resize(count);
+        DAWN_TRY(CheckVkSuccess(
+            vkFunctions.EnumerateDeviceLayerProperties(physicalDevice, &count, info.layers.data()),
+            "vkEnumerateDeviceLayerProperties"));
+    }
+
+    // Gather the info about the device extensions
+    {
+        uint32_t count = 0;
+        VkResult result = VkResult::WrapUnsafe(vkFunctions.EnumerateDeviceExtensionProperties(
+            physicalDevice, nullptr, &count, nullptr));
+        if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+            return DAWN_INTERNAL_ERROR("vkEnumerateDeviceExtensionProperties");
+        }
+
+        std::vector<VkExtensionProperties> extensionsProperties;
+        extensionsProperties.resize(count);
+        DAWN_TRY(CheckVkSuccess(vkFunctions.EnumerateDeviceExtensionProperties(
+                                    physicalDevice, nullptr, &count, extensionsProperties.data()),
+                                "vkEnumerateDeviceExtensionProperties"));
+
+        std::unordered_map<std::string, DeviceExt> knownExts = CreateDeviceExtNameMap();
+
+        for (const VkExtensionProperties& extension : extensionsProperties) {
+            auto it = knownExts.find(extension.extensionName);
+            if (it != knownExts.end()) {
+                info.extensions.set(it->second, true);
+            }
+        }
+
+        MarkPromotedExtensions(&info.extensions, info.properties.apiVersion);
+        info.extensions =
+            EnsureDependencies(info.extensions, globalInfo.extensions, info.properties.apiVersion);
+    }
+
+    // Gather general and extension features and properties
+    //
+    // Use vkGetPhysicalDevice{Features,Properties}2 if required to gather information about
+    // the extensions. DeviceExt::GetPhysicalDeviceProperties2 is guaranteed to be available
+    // because these extensions (transitively) depend on it in `EnsureDependencies`
+    VkPhysicalDeviceFeatures2 features2 = {};
+    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+    features2.pNext = nullptr;
+    PNextChainBuilder featuresChain(&features2);
+
+    VkPhysicalDeviceProperties2 properties2 = {};
+    properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+    features2.pNext = nullptr;
+    PNextChainBuilder propertiesChain(&properties2);
+
+    if (info.extensions[DeviceExt::ShaderFloat16Int8]) {
+        featuresChain.Add(&info.shaderFloat16Int8Features,
+                          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR);
+    }
+
+    if (info.extensions[DeviceExt::_16BitStorage]) {
+        featuresChain.Add(&info._16BitStorageFeatures,
+                          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES);
+    }
+
+    if (info.extensions[DeviceExt::SubgroupSizeControl]) {
+        featuresChain.Add(&info.subgroupSizeControlFeatures,
+                          VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT);
+        propertiesChain.Add(&info.subgroupSizeControlProperties,
+                            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT);
+    }
+
+    if (info.extensions[DeviceExt::DriverProperties]) {
+        propertiesChain.Add(&info.driverProperties,
+                            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES);
+    }
+
+    // If we have DeviceExt::GetPhysicalDeviceProperties2, use features2 and properties2 so
+    // that features not covered by VkPhysicalDevice{Features,Properties} can be queried.
+    //
+    // Note that info.properties has already been filled at the start of this function to get
+    // `apiVersion`.
+    ASSERT(info.properties.apiVersion != 0);
+    if (info.extensions[DeviceExt::GetPhysicalDeviceProperties2]) {
+        vkFunctions.GetPhysicalDeviceProperties2(physicalDevice, &properties2);
+        vkFunctions.GetPhysicalDeviceFeatures2(physicalDevice, &features2);
+        info.features = features2.features;
+    } else {
+        ASSERT(features2.pNext == nullptr && properties2.pNext == nullptr);
+        vkFunctions.GetPhysicalDeviceFeatures(physicalDevice, &info.features);
+    }
+
+    // TODO(cwallez@chromium.org): gather info about formats
+
+    return std::move(info);
+}
+
+ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter, VkSurfaceKHR surface) {
+    VulkanSurfaceInfo info = {};
+
+    VkPhysicalDevice physicalDevice = adapter.GetPhysicalDevice();
+    const VulkanFunctions& vkFunctions = adapter.GetVulkanInstance()->GetFunctions();
+
+    // Get the surface capabilities
+    DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceCapabilitiesKHR(
+                                physicalDevice, surface, &info.capabilities),
+                            "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
+
+    // Query which queue families support presenting this surface
+    {
+        size_t nQueueFamilies = adapter.GetDeviceInfo().queueFamilies.size();
+        info.supportedQueueFamilies.resize(nQueueFamilies, false);
+
+        for (uint32_t i = 0; i < nQueueFamilies; ++i) {
+            VkBool32 supported = VK_FALSE;
+            DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceSupportKHR(
+                                        physicalDevice, i, surface, &supported),
+                                    "vkGetPhysicalDeviceSurfaceSupportKHR"));
+
+            info.supportedQueueFamilies[i] = (supported == VK_TRUE);
+        }
+    }
+
+    // Gather supported formats
+    {
+        uint32_t count = 0;
+        VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+            physicalDevice, surface, &count, nullptr));
+        if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+            return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfaceFormatsKHR");
+        }
+
+        info.formats.resize(count);
+        DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfaceFormatsKHR(
+                                    physicalDevice, surface, &count, info.formats.data()),
+                                "vkGetPhysicalDeviceSurfaceFormatsKHR"));
+    }
+
+    // Gather supported present modes
+    {
+        uint32_t count = 0;
+        VkResult result = VkResult::WrapUnsafe(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+            physicalDevice, surface, &count, nullptr));
+        if (result != VK_SUCCESS && result != VK_INCOMPLETE) {
+            return DAWN_INTERNAL_ERROR("vkGetPhysicalDeviceSurfacePresentModesKHR");
+        }
+
+        info.presentModes.resize(count);
+        DAWN_TRY(CheckVkSuccess(vkFunctions.GetPhysicalDeviceSurfacePresentModesKHR(
+                                    physicalDevice, surface, &count, info.presentModes.data()),
+                                "vkGetPhysicalDeviceSurfacePresentModesKHR"));
+    }
+
+    return std::move(info);
+}
+
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/native/vulkan/VulkanInfo.h b/src/dawn/native/vulkan/VulkanInfo.h
index 6abb7f4..961a2ad 100644
--- a/src/dawn/native/vulkan/VulkanInfo.h
+++ b/src/dawn/native/vulkan/VulkanInfo.h
@@ -24,68 +24,66 @@
 
 namespace dawn::native::vulkan {
 
-    class Adapter;
-    class Backend;
-    struct VulkanFunctions;
+class Adapter;
+class Backend;
+struct VulkanFunctions;
 
-    // Global information - gathered before the instance is created
-    struct VulkanGlobalKnobs {
-        VulkanLayerSet layers;
-        ityp::array<VulkanLayer, InstanceExtSet, static_cast<uint32_t>(VulkanLayer::EnumCount)>
-            layerExtensions;
+// Global information - gathered before the instance is created
+struct VulkanGlobalKnobs {
+    VulkanLayerSet layers;
+    ityp::array<VulkanLayer, InstanceExtSet, static_cast<uint32_t>(VulkanLayer::EnumCount)>
+        layerExtensions;
 
-        // During information gathering `extensions` only contains the instance's extensions but
-        // during the instance creation logic it becomes the OR of the instance's extensions and
-        // the selected layers' extensions.
-        InstanceExtSet extensions;
-        bool HasExt(InstanceExt ext) const;
-    };
+    // During information gathering `extensions` only contains the instance's extensions but
+    // during the instance creation logic it becomes the OR of the instance's extensions and
+    // the selected layers' extensions.
+    InstanceExtSet extensions;
+    bool HasExt(InstanceExt ext) const;
+};
 
-    struct VulkanGlobalInfo : VulkanGlobalKnobs {
-        uint32_t apiVersion;
-    };
+struct VulkanGlobalInfo : VulkanGlobalKnobs {
+    uint32_t apiVersion;
+};
 
-    // Device information - gathered before the device is created.
-    struct VulkanDeviceKnobs {
-        VkPhysicalDeviceFeatures features;
-        VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
-        VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
-        VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
-        VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR
-            zeroInitializeWorkgroupMemoryFeatures;
+// Device information - gathered before the device is created.
+struct VulkanDeviceKnobs {
+    VkPhysicalDeviceFeatures features;
+    VkPhysicalDeviceShaderFloat16Int8FeaturesKHR shaderFloat16Int8Features;
+    VkPhysicalDevice16BitStorageFeaturesKHR _16BitStorageFeatures;
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
+    VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR zeroInitializeWorkgroupMemoryFeatures;
 
-        bool HasExt(DeviceExt ext) const;
-        DeviceExtSet extensions;
-    };
+    bool HasExt(DeviceExt ext) const;
+    DeviceExtSet extensions;
+};
 
-    struct VulkanDeviceInfo : VulkanDeviceKnobs {
-        VkPhysicalDeviceProperties properties;
-        VkPhysicalDeviceDriverProperties driverProperties;
-        VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
+struct VulkanDeviceInfo : VulkanDeviceKnobs {
+    VkPhysicalDeviceProperties properties;
+    VkPhysicalDeviceDriverProperties driverProperties;
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
 
-        std::vector<VkQueueFamilyProperties> queueFamilies;
+    std::vector<VkQueueFamilyProperties> queueFamilies;
 
-        std::vector<VkMemoryType> memoryTypes;
-        std::vector<VkMemoryHeap> memoryHeaps;
+    std::vector<VkMemoryType> memoryTypes;
+    std::vector<VkMemoryHeap> memoryHeaps;
 
-        std::vector<VkLayerProperties> layers;
-        // TODO(cwallez@chromium.org): layer instance extensions
-    };
+    std::vector<VkLayerProperties> layers;
+    // TODO(cwallez@chromium.org): layer instance extensions
+};
 
-    struct VulkanSurfaceInfo {
-        VkSurfaceCapabilitiesKHR capabilities;
-        std::vector<VkSurfaceFormatKHR> formats;
-        std::vector<VkPresentModeKHR> presentModes;
-        std::vector<bool> supportedQueueFamilies;
-    };
+struct VulkanSurfaceInfo {
+    VkSurfaceCapabilitiesKHR capabilities;
+    std::vector<VkSurfaceFormatKHR> formats;
+    std::vector<VkPresentModeKHR> presentModes;
+    std::vector<bool> supportedQueueFamilies;
+};
 
-    ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions);
-    ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
-        VkInstance instance,
-        const VulkanFunctions& vkFunctions);
-    ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
-    ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter,
-                                                       VkSurfaceKHR surface);
+ResultOrError<VulkanGlobalInfo> GatherGlobalInfo(const VulkanFunctions& vkFunctions);
+ResultOrError<std::vector<VkPhysicalDevice>> GatherPhysicalDevices(
+    VkInstance instance,
+    const VulkanFunctions& vkFunctions);
+ResultOrError<VulkanDeviceInfo> GatherDeviceInfo(const Adapter& adapter);
+ResultOrError<VulkanSurfaceInfo> GatherSurfaceInfo(const Adapter& adapter, VkSurfaceKHR surface);
 }  // namespace dawn::native::vulkan
 
 #endif  // SRC_DAWN_NATIVE_VULKAN_VULKANINFO_H_
diff --git a/src/dawn/native/vulkan/external_memory/MemoryService.h b/src/dawn/native/vulkan/external_memory/MemoryService.h
index 2b1e00b..a15309a 100644
--- a/src/dawn/native/vulkan/external_memory/MemoryService.h
+++ b/src/dawn/native/vulkan/external_memory/MemoryService.h
@@ -21,57 +21,57 @@
 #include "dawn/native/vulkan/ExternalHandle.h"
 
 namespace dawn::native::vulkan {
-    class Device;
-    struct VulkanDeviceInfo;
+class Device;
+struct VulkanDeviceInfo;
 }  // namespace dawn::native::vulkan
 
 namespace dawn::native::vulkan::external_memory {
 
-    struct MemoryImportParams {
-        VkDeviceSize allocationSize;
-        uint32_t memoryTypeIndex;
-    };
+struct MemoryImportParams {
+    VkDeviceSize allocationSize;
+    uint32_t memoryTypeIndex;
+};
 
-    class Service {
-      public:
-        explicit Service(Device* device);
-        ~Service();
+class Service {
+  public:
+    explicit Service(Device* device);
+    ~Service();
 
-        static bool CheckSupport(const VulkanDeviceInfo& deviceInfo);
+    static bool CheckSupport(const VulkanDeviceInfo& deviceInfo);
 
-        // True if the device reports it supports importing external memory.
-        bool SupportsImportMemory(VkFormat format,
-                                  VkImageType type,
-                                  VkImageTiling tiling,
-                                  VkImageUsageFlags usage,
-                                  VkImageCreateFlags flags);
+    // True if the device reports it supports importing external memory.
+    bool SupportsImportMemory(VkFormat format,
+                              VkImageType type,
+                              VkImageTiling tiling,
+                              VkImageUsageFlags usage,
+                              VkImageCreateFlags flags);
 
-        // True if the device reports it supports creating VkImages from external memory.
-        bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
-                                 VkFormat format,
-                                 VkImageUsageFlags usage,
-                                 bool* supportsDisjoint);
+    // True if the device reports it supports creating VkImages from external memory.
+    bool SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                             VkFormat format,
+                             VkImageUsageFlags usage,
+                             bool* supportsDisjoint);
 
-        // Returns the parameters required for importing memory
-        ResultOrError<MemoryImportParams> GetMemoryImportParams(
-            const ExternalImageDescriptor* descriptor,
-            VkImage image);
+    // Returns the parameters required for importing memory
+    ResultOrError<MemoryImportParams> GetMemoryImportParams(
+        const ExternalImageDescriptor* descriptor,
+        VkImage image);
 
-        // Given an external handle pointing to memory, import it into a VkDeviceMemory
-        ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
-                                                   const MemoryImportParams& importParams,
-                                                   VkImage image);
+    // Given an external handle pointing to memory, import it into a VkDeviceMemory
+    ResultOrError<VkDeviceMemory> ImportMemory(ExternalMemoryHandle handle,
+                                               const MemoryImportParams& importParams,
+                                               VkImage image);
 
-        // Create a VkImage for the given handle type
-        ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
-                                           const VkImageCreateInfo& baseCreateInfo);
+    // Create a VkImage for the given handle type
+    ResultOrError<VkImage> CreateImage(const ExternalImageDescriptor* descriptor,
+                                       const VkImageCreateInfo& baseCreateInfo);
 
-      private:
-        Device* mDevice = nullptr;
+  private:
+    Device* mDevice = nullptr;
 
-        // True if early checks pass that determine if the service is supported
-        bool mSupported = false;
-    };
+    // True if early checks pass that determine if the service is supported
+    bool mSupported = false;
+};
 
 }  // namespace dawn::native::vulkan::external_memory
 
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
index cd8a5b9..bbbd2ca 100644
--- a/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceDmaBuf.cpp
@@ -25,335 +25,329 @@
 
 namespace dawn::native::vulkan::external_memory {
 
-    namespace {
+namespace {
 
-        bool GetFormatModifierProps(const VulkanFunctions& fn,
-                                    VkPhysicalDevice physicalDevice,
-                                    VkFormat format,
-                                    uint64_t modifier,
-                                    VkDrmFormatModifierPropertiesEXT* formatModifierProps) {
-            std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierPropsVector;
-            VkFormatProperties2 formatProps = {};
-            formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
-            PNextChainBuilder formatPropsChain(&formatProps);
+bool GetFormatModifierProps(const VulkanFunctions& fn,
+                            VkPhysicalDevice physicalDevice,
+                            VkFormat format,
+                            uint64_t modifier,
+                            VkDrmFormatModifierPropertiesEXT* formatModifierProps) {
+    std::vector<VkDrmFormatModifierPropertiesEXT> formatModifierPropsVector;
+    VkFormatProperties2 formatProps = {};
+    formatProps.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2;
+    PNextChainBuilder formatPropsChain(&formatProps);
 
-            VkDrmFormatModifierPropertiesListEXT formatModifierPropsList = {};
-            formatModifierPropsList.drmFormatModifierCount = 0;
-            formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
-            formatPropsChain.Add(&formatModifierPropsList,
-                                 VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
+    VkDrmFormatModifierPropertiesListEXT formatModifierPropsList = {};
+    formatModifierPropsList.drmFormatModifierCount = 0;
+    formatModifierPropsList.pDrmFormatModifierProperties = nullptr;
+    formatPropsChain.Add(&formatModifierPropsList,
+                         VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
 
-            fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
+    fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
 
-            uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
-            formatModifierPropsVector.resize(modifierCount);
-            formatModifierPropsList.pDrmFormatModifierProperties = formatModifierPropsVector.data();
+    uint32_t modifierCount = formatModifierPropsList.drmFormatModifierCount;
+    formatModifierPropsVector.resize(modifierCount);
+    formatModifierPropsList.pDrmFormatModifierProperties = formatModifierPropsVector.data();
 
-            fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
-            for (const auto& props : formatModifierPropsVector) {
-                if (props.drmFormatModifier == modifier) {
-                    *formatModifierProps = props;
-                    return true;
-                }
-            }
+    fn.GetPhysicalDeviceFormatProperties2(physicalDevice, format, &formatProps);
+    for (const auto& props : formatModifierPropsVector) {
+        if (props.drmFormatModifier == modifier) {
+            *formatModifierProps = props;
+            return true;
+        }
+    }
+    return false;
+}
+
+// Some modifiers use multiple planes (for example, see the comment for
+// I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h).
+ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
+                                              VkPhysicalDevice physicalDevice,
+                                              VkFormat format,
+                                              uint64_t modifier) {
+    VkDrmFormatModifierPropertiesEXT props;
+    if (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props)) {
+        return static_cast<uint32_t>(props.drmFormatModifierPlaneCount);
+    }
+    return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
+}
+
+bool IsMultiPlanarVkFormat(VkFormat format) {
+    switch (format) {
+        case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+        case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+        case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
+        case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
+        case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
+        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
+        case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
+        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
+        case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
+        case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
+        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
+        case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
+        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
+        case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
+        case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
+        case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
+        case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
+        case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
+        case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
+        case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
+            return true;
+
+        default:
             return false;
-        }
+    }
+}
 
-        // Some modifiers use multiple planes (for example, see the comment for
-        // I915_FORMAT_MOD_Y_TILED_CCS in drm/drm_fourcc.h).
-        ResultOrError<uint32_t> GetModifierPlaneCount(const VulkanFunctions& fn,
-                                                      VkPhysicalDevice physicalDevice,
-                                                      VkFormat format,
-                                                      uint64_t modifier) {
-            VkDrmFormatModifierPropertiesEXT props;
-            if (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props)) {
-                return static_cast<uint32_t>(props.drmFormatModifierPlaneCount);
-            }
-            return DAWN_FORMAT_VALIDATION_ERROR("DRM format modifier not supported.");
-        }
+bool SupportsDisjoint(const VulkanFunctions& fn,
+                      VkPhysicalDevice physicalDevice,
+                      VkFormat format,
+                      uint64_t modifier) {
+    if (IsMultiPlanarVkFormat(format)) {
+        VkDrmFormatModifierPropertiesEXT props;
+        return (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props) &&
+                (props.drmFormatModifierTilingFeatures & VK_FORMAT_FEATURE_DISJOINT_BIT));
+    }
+    return false;
+}
 
-        bool IsMultiPlanarVkFormat(VkFormat format) {
-            switch (format) {
-                case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
-                case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
-                case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM:
-                case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM:
-                case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM:
-                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16:
-                case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16:
-                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16:
-                case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16:
-                case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16:
-                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16:
-                case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16:
-                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16:
-                case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16:
-                case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16:
-                case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM:
-                case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM:
-                case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM:
-                case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM:
-                case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM:
-                    return true;
+}  // namespace
 
-                default:
-                    return false;
-            }
-        }
+Service::Service(Device* device)
+    : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {}
 
-        bool SupportsDisjoint(const VulkanFunctions& fn,
-                              VkPhysicalDevice physicalDevice,
-                              VkFormat format,
-                              uint64_t modifier) {
-            if (IsMultiPlanarVkFormat(format)) {
-                VkDrmFormatModifierPropertiesEXT props;
-                return (GetFormatModifierProps(fn, physicalDevice, format, modifier, &props) &&
-                        (props.drmFormatModifierTilingFeatures & VK_FORMAT_FEATURE_DISJOINT_BIT));
-            }
-            return false;
-        }
+Service::~Service() = default;
 
-    }  // namespace
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+    return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&
+           deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
+}
 
-    Service::Service(Device* device)
-        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
+bool Service::SupportsImportMemory(VkFormat format,
+                                   VkImageType type,
+                                   VkImageTiling tiling,
+                                   VkImageUsageFlags usage,
+                                   VkImageCreateFlags flags) {
+    return mSupported && (!IsMultiPlanarVkFormat(format) ||
+                          (format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM &&
+                           mDevice->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)));
+}
+
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                  VkFormat format,
+                                  VkImageUsageFlags usage,
+                                  bool* supportsDisjoint) {
+    *supportsDisjoint = false;
+    // Early out before we try using extension functions
+    if (!mSupported) {
+        return false;
+    }
+    if (descriptor->GetType() != ExternalImageType::DmaBuf) {
+        return false;
+    }
+    const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+        static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+
+    // Verify plane count for the modifier.
+    VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+    uint32_t planeCount = 0;
+    if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
+                                                     dmaBufDescriptor->drmModifier),
+                               &planeCount)) {
+        return false;
+    }
+    if (planeCount == 0) {
+        return false;
+    }
+    // Only support the NV12 multi-planar format for now.
+    if (planeCount > 1 && format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
+        return false;
+    }
+    *supportsDisjoint =
+        SupportsDisjoint(mDevice->fn, physicalDevice, format, dmaBufDescriptor->drmModifier);
+
+    // Verify that the format modifier of the external memory and the requested Vulkan format
+    // are actually supported together in a dma-buf import.
+    VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {};
+    imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
+    imageFormatInfo.format = format;
+    imageFormatInfo.type = VK_IMAGE_TYPE_2D;
+    imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+    imageFormatInfo.usage = usage;
+    imageFormatInfo.flags = 0;
+    PNextChainBuilder imageFormatInfoChain(&imageFormatInfo);
+
+    VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo = {};
+    externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+    imageFormatInfoChain.Add(&externalImageFormatInfo,
+                             VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
+
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo = {};
+    drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+    drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    imageFormatInfoChain.Add(&drmModifierInfo,
+                             VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
+
+    // For mutable vkimage of multi-planar format, we also need to make sure the each
+    // plane's view format can be supported.
+    std::array<VkFormat, 2> viewFormats;
+    VkImageFormatListCreateInfo imageFormatListInfo = {};
+
+    if (planeCount > 1) {
+        ASSERT(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM);
+        viewFormats = {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM};
+        imageFormatListInfo.viewFormatCount = 2;
+        imageFormatListInfo.pViewFormats = viewFormats.data();
+        imageFormatInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+        imageFormatInfoChain.Add(&imageFormatListInfo,
+                                 VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
     }
 
-    Service::~Service() = default;
+    VkImageFormatProperties2 imageFormatProps = {};
+    imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
+    PNextChainBuilder imageFormatPropsChain(&imageFormatProps);
 
-    // static
-    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
-        return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD) &&
-               deviceInfo.HasExt(DeviceExt::ImageDrmFormatModifier);
+    VkExternalImageFormatProperties externalImageFormatProps = {};
+    imageFormatPropsChain.Add(&externalImageFormatProps,
+                              VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
+
+    VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+        physicalDevice, &imageFormatInfo, &imageFormatProps));
+    if (result != VK_SUCCESS) {
+        return false;
+    }
+    VkExternalMemoryFeatureFlags featureFlags =
+        externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
+    return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
+}
+
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+    const ExternalImageDescriptor* descriptor,
+    VkImage image) {
+    DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+                    "ExternalImageDescriptor is not a ExternalImageDescriptorDmaBuf.");
+
+    const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+        static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+    VkDevice device = mDevice->GetVkDevice();
+
+    // Get the valid memory types for the VkImage.
+    VkMemoryRequirements memoryRequirements;
+    mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
+
+    VkMemoryFdPropertiesKHR fdProperties;
+    fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
+    fdProperties.pNext = nullptr;
+
+    // Get the valid memory types that the external memory can be imported as.
+    mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+                                         dmaBufDescriptor->memoryFD, &fdProperties);
+    // Choose the best memory type that satisfies both the image's constraint and the
+    // import's constraint.
+    memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
+    int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
+        memoryRequirements, MemoryKind::Opaque);
+    DAWN_INVALID_IF(memoryTypeIndex == -1, "Unable to find an appropriate memory type for import.");
+
+    MemoryImportParams params = {memoryRequirements.size, static_cast<uint32_t>(memoryTypeIndex)};
+    return params;
+}
+
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                    const MemoryImportParams& importParams,
+                                                    VkImage image) {
+    DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+
+    VkMemoryAllocateInfo memoryAllocateInfo = {};
+    memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+    memoryAllocateInfo.allocationSize = importParams.allocationSize;
+    memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
+    PNextChainBuilder memoryAllocateInfoChain(&memoryAllocateInfo);
+
+    VkImportMemoryFdInfoKHR importMemoryFdInfo;
+    importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+    importMemoryFdInfo.fd = handle;
+    memoryAllocateInfoChain.Add(&importMemoryFdInfo, VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR);
+
+    VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
+    memoryDedicatedAllocateInfo.image = image;
+    memoryDedicatedAllocateInfo.buffer = VkBuffer{};
+    memoryAllocateInfoChain.Add(&memoryDedicatedAllocateInfo,
+                                VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO);
+
+    VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+    DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
+                                                       nullptr, &*allocatedMemory),
+                            "vkAllocateMemory"));
+    return allocatedMemory;
+}
+
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                            const VkImageCreateInfo& baseCreateInfo) {
+    DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
+                    "ExternalImageDescriptor is not a dma-buf descriptor.");
+
+    const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
+        static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
+    VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
+    VkDevice device = mDevice->GetVkDevice();
+
+    uint32_t planeCount;
+    DAWN_TRY_ASSIGN(planeCount,
+                    GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
+                                          dmaBufDescriptor->drmModifier));
+
+    VkImageCreateInfo createInfo = baseCreateInfo;
+    createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+    createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
+
+    PNextChainBuilder createInfoChain(&createInfo);
+
+    VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo = {};
+    externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
+    createInfoChain.Add(&externalMemoryImageCreateInfo,
+                        VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
+
+    // For single plane formats.
+    VkSubresourceLayout planeLayout = {};
+    planeLayout.offset = 0;
+    planeLayout.size = 0;  // VK_EXT_image_drm_format_modifier mandates size = 0.
+    planeLayout.rowPitch = dmaBufDescriptor->stride;
+    planeLayout.arrayPitch = 0;  // Not an array texture
+    planeLayout.depthPitch = 0;  // Not a depth texture
+
+    VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo = {};
+    explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
+    explicitCreateInfo.drmFormatModifierPlaneCount = 1;
+    explicitCreateInfo.pPlaneLayouts = &planeLayout;
+
+    // For multi-planar formats, we can't explicitly specify VkSubresourceLayout for each plane
+    // due to the lack of knowledge about the required 'offset'. Alternatively
+    // VkImageDrmFormatModifierListCreateInfoEXT can be used to create image with the DRM format
+    // modifier.
+    VkImageDrmFormatModifierListCreateInfoEXT listCreateInfo = {};
+    listCreateInfo.drmFormatModifierCount = 1;
+    listCreateInfo.pDrmFormatModifiers = &dmaBufDescriptor->drmModifier;
+
+    if (planeCount > 1) {
+        // For multi-planar formats, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT specifies that a
+        // VkImageView can be plane's format which might differ from the image's format.
+        createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+        createInfoChain.Add(&listCreateInfo,
+                            VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
+    } else {
+        createInfoChain.Add(&explicitCreateInfo,
+                            VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
     }
 
-    bool Service::SupportsImportMemory(VkFormat format,
-                                       VkImageType type,
-                                       VkImageTiling tiling,
-                                       VkImageUsageFlags usage,
-                                       VkImageCreateFlags flags) {
-        return mSupported && (!IsMultiPlanarVkFormat(format) ||
-                              (format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM &&
-                               mDevice->GetDeviceInfo().HasExt(DeviceExt::ImageFormatList)));
-    }
-
-    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
-                                      VkFormat format,
-                                      VkImageUsageFlags usage,
-                                      bool* supportsDisjoint) {
-        *supportsDisjoint = false;
-        // Early out before we try using extension functions
-        if (!mSupported) {
-            return false;
-        }
-        if (descriptor->GetType() != ExternalImageType::DmaBuf) {
-            return false;
-        }
-        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
-            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
-
-        // Verify plane count for the modifier.
-        VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
-        uint32_t planeCount = 0;
-        if (mDevice->ConsumedError(GetModifierPlaneCount(mDevice->fn, physicalDevice, format,
-                                                         dmaBufDescriptor->drmModifier),
-                                   &planeCount)) {
-            return false;
-        }
-        if (planeCount == 0) {
-            return false;
-        }
-        // Only support the NV12 multi-planar format for now.
-        if (planeCount > 1 && format != VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) {
-            return false;
-        }
-        *supportsDisjoint =
-            SupportsDisjoint(mDevice->fn, physicalDevice, format, dmaBufDescriptor->drmModifier);
-
-        // Verify that the format modifier of the external memory and the requested Vulkan format
-        // are actually supported together in a dma-buf import.
-        VkPhysicalDeviceImageFormatInfo2 imageFormatInfo = {};
-        imageFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
-        imageFormatInfo.format = format;
-        imageFormatInfo.type = VK_IMAGE_TYPE_2D;
-        imageFormatInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
-        imageFormatInfo.usage = usage;
-        imageFormatInfo.flags = 0;
-        PNextChainBuilder imageFormatInfoChain(&imageFormatInfo);
-
-        VkPhysicalDeviceExternalImageFormatInfo externalImageFormatInfo = {};
-        externalImageFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
-        imageFormatInfoChain.Add(&externalImageFormatInfo,
-                                 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
-
-        VkPhysicalDeviceImageDrmFormatModifierInfoEXT drmModifierInfo = {};
-        drmModifierInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
-        drmModifierInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-        imageFormatInfoChain.Add(
-            &drmModifierInfo, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
-
-        // For mutable vkimage of multi-planar format, we also need to make sure the each
-        // plane's view format can be supported.
-        std::array<VkFormat, 2> viewFormats;
-        VkImageFormatListCreateInfo imageFormatListInfo = {};
-
-        if (planeCount > 1) {
-            ASSERT(format == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM);
-            viewFormats = {VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM};
-            imageFormatListInfo.viewFormatCount = 2;
-            imageFormatListInfo.pViewFormats = viewFormats.data();
-            imageFormatInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
-            imageFormatInfoChain.Add(&imageFormatListInfo,
-                                     VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO);
-        }
-
-        VkImageFormatProperties2 imageFormatProps = {};
-        imageFormatProps.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
-        PNextChainBuilder imageFormatPropsChain(&imageFormatProps);
-
-        VkExternalImageFormatProperties externalImageFormatProps = {};
-        imageFormatPropsChain.Add(&externalImageFormatProps,
-                                  VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
-
-        VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
-            physicalDevice, &imageFormatInfo, &imageFormatProps));
-        if (result != VK_SUCCESS) {
-            return false;
-        }
-        VkExternalMemoryFeatureFlags featureFlags =
-            externalImageFormatProps.externalMemoryProperties.externalMemoryFeatures;
-        return featureFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
-    }
-
-    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
-        const ExternalImageDescriptor* descriptor,
-        VkImage image) {
-        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
-                        "ExternalImageDescriptor is not a ExternalImageDescriptorDmaBuf.");
-
-        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
-            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
-        VkDevice device = mDevice->GetVkDevice();
-
-        // Get the valid memory types for the VkImage.
-        VkMemoryRequirements memoryRequirements;
-        mDevice->fn.GetImageMemoryRequirements(device, image, &memoryRequirements);
-
-        VkMemoryFdPropertiesKHR fdProperties;
-        fdProperties.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
-        fdProperties.pNext = nullptr;
-
-        // Get the valid memory types that the external memory can be imported as.
-        mDevice->fn.GetMemoryFdPropertiesKHR(device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
-                                             dmaBufDescriptor->memoryFD, &fdProperties);
-        // Choose the best memory type that satisfies both the image's constraint and the
-        // import's constraint.
-        memoryRequirements.memoryTypeBits &= fdProperties.memoryTypeBits;
-        int memoryTypeIndex = mDevice->GetResourceMemoryAllocator()->FindBestTypeIndex(
-            memoryRequirements, MemoryKind::Opaque);
-        DAWN_INVALID_IF(memoryTypeIndex == -1,
-                        "Unable to find an appropriate memory type for import.");
-
-        MemoryImportParams params = {memoryRequirements.size,
-                                     static_cast<uint32_t>(memoryTypeIndex)};
-        return params;
-    }
-
-    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
-                                                        const MemoryImportParams& importParams,
-                                                        VkImage image) {
-        DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
-
-        VkMemoryAllocateInfo memoryAllocateInfo = {};
-        memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-        memoryAllocateInfo.allocationSize = importParams.allocationSize;
-        memoryAllocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-        PNextChainBuilder memoryAllocateInfoChain(&memoryAllocateInfo);
-
-        VkImportMemoryFdInfoKHR importMemoryFdInfo;
-        importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
-        importMemoryFdInfo.fd = handle;
-        memoryAllocateInfoChain.Add(&importMemoryFdInfo,
-                                    VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR);
-
-        VkMemoryDedicatedAllocateInfo memoryDedicatedAllocateInfo;
-        memoryDedicatedAllocateInfo.image = image;
-        memoryDedicatedAllocateInfo.buffer = VkBuffer{};
-        memoryAllocateInfoChain.Add(&memoryDedicatedAllocateInfo,
-                                    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO);
-
-        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
-        DAWN_TRY(
-            CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &memoryAllocateInfo,
-                                                      nullptr, &*allocatedMemory),
-                           "vkAllocateMemory"));
-        return allocatedMemory;
-    }
-
-    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
-                                                const VkImageCreateInfo& baseCreateInfo) {
-        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::DmaBuf,
-                        "ExternalImageDescriptor is not a dma-buf descriptor.");
-
-        const ExternalImageDescriptorDmaBuf* dmaBufDescriptor =
-            static_cast<const ExternalImageDescriptorDmaBuf*>(descriptor);
-        VkPhysicalDevice physicalDevice = ToBackend(mDevice->GetAdapter())->GetPhysicalDevice();
-        VkDevice device = mDevice->GetVkDevice();
-
-        uint32_t planeCount;
-        DAWN_TRY_ASSIGN(planeCount,
-                        GetModifierPlaneCount(mDevice->fn, physicalDevice, baseCreateInfo.format,
-                                              dmaBufDescriptor->drmModifier));
-
-        VkImageCreateInfo createInfo = baseCreateInfo;
-        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-        createInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
-
-        PNextChainBuilder createInfoChain(&createInfo);
-
-        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo = {};
-        externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
-        createInfoChain.Add(&externalMemoryImageCreateInfo,
-                            VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
-
-        // For single plane formats.
-        VkSubresourceLayout planeLayout = {};
-        planeLayout.offset = 0;
-        planeLayout.size = 0;  // VK_EXT_image_drm_format_modifier mandates size = 0.
-        planeLayout.rowPitch = dmaBufDescriptor->stride;
-        planeLayout.arrayPitch = 0;  // Not an array texture
-        planeLayout.depthPitch = 0;  // Not a depth texture
-
-        VkImageDrmFormatModifierExplicitCreateInfoEXT explicitCreateInfo = {};
-        explicitCreateInfo.drmFormatModifier = dmaBufDescriptor->drmModifier;
-        explicitCreateInfo.drmFormatModifierPlaneCount = 1;
-        explicitCreateInfo.pPlaneLayouts = &planeLayout;
-
-        // For multi-planar formats, we can't explicitly specify VkSubresourceLayout for each plane
-        // due to the lack of knowledge about the required 'offset'. Alternatively
-        // VkImageDrmFormatModifierListCreateInfoEXT can be used to create image with the DRM format
-        // modifier.
-        VkImageDrmFormatModifierListCreateInfoEXT listCreateInfo = {};
-        listCreateInfo.drmFormatModifierCount = 1;
-        listCreateInfo.pDrmFormatModifiers = &dmaBufDescriptor->drmModifier;
-
-        if (planeCount > 1) {
-            // For multi-planar formats, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT specifies that a
-            // VkImageView can be plane's format which might differ from the image's format.
-            createInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
-            createInfoChain.Add(&listCreateInfo,
-                                VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
-        } else {
-            createInfoChain.Add(
-                &explicitCreateInfo,
-                VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
-        }
-
-        // Create a new VkImage with tiling equal to the DRM format modifier.
-        VkImage image;
-        DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
-                                "CreateImage"));
-        return image;
-    }
+    // Create a new VkImage with tiling equal to the DRM format modifier.
+    VkImage image;
+    DAWN_TRY(CheckVkSuccess(mDevice->fn.CreateImage(device, &createInfo, nullptr, &*image),
+                            "CreateImage"));
+    return image;
+}
 
 }  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
index c2307bf..5cb8c4d 100644
--- a/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceNull.cpp
@@ -17,49 +17,49 @@
 
 namespace dawn::native::vulkan::external_memory {
 
-    Service::Service(Device* device) : mDevice(device) {
-        DAWN_UNUSED(mDevice);
-        DAWN_UNUSED(mSupported);
-    }
+Service::Service(Device* device) : mDevice(device) {
+    DAWN_UNUSED(mDevice);
+    DAWN_UNUSED(mSupported);
+}
 
-    Service::~Service() = default;
+Service::~Service() = default;
 
-    // static
-    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
-        return false;
-    }
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+    return false;
+}
 
-    bool Service::SupportsImportMemory(VkFormat format,
-                                       VkImageType type,
-                                       VkImageTiling tiling,
-                                       VkImageUsageFlags usage,
-                                       VkImageCreateFlags flags) {
-        return false;
-    }
+bool Service::SupportsImportMemory(VkFormat format,
+                                   VkImageType type,
+                                   VkImageTiling tiling,
+                                   VkImageUsageFlags usage,
+                                   VkImageCreateFlags flags) {
+    return false;
+}
 
-    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
-                                      VkFormat format,
-                                      VkImageUsageFlags usage,
-                                      bool* supportsDisjoint) {
-        *supportsDisjoint = false;
-        return false;
-    }
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                  VkFormat format,
+                                  VkImageUsageFlags usage,
+                                  bool* supportsDisjoint) {
+    *supportsDisjoint = false;
+    return false;
+}
 
-    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
-        const ExternalImageDescriptor* descriptor,
-        VkImage image) {
-        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
-    }
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+    const ExternalImageDescriptor* descriptor,
+    VkImage image) {
+    return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+}
 
-    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
-                                                        const MemoryImportParams& importParams,
-                                                        VkImage image) {
-        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
-    }
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                    const MemoryImportParams& importParams,
+                                                    VkImage image) {
+    return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+}
 
-    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
-                                                const VkImageCreateInfo& baseCreateInfo) {
-        return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
-    }
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                            const VkImageCreateInfo& baseCreateInfo) {
+    return DAWN_UNIMPLEMENTED_ERROR("Using null memory service to interop inside Vulkan");
+}
 
 }  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
index b5a780b..f6f1fe4 100644
--- a/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceOpaqueFD.cpp
@@ -23,138 +23,136 @@
 
 namespace dawn::native::vulkan::external_memory {
 
-    Service::Service(Device* device)
-        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
+Service::Service(Device* device)
+    : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {}
+
+Service::~Service() = default;
+
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+    return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD);
+}
+
+bool Service::SupportsImportMemory(VkFormat format,
+                                   VkImageType type,
+                                   VkImageTiling tiling,
+                                   VkImageUsageFlags usage,
+                                   VkImageCreateFlags flags) {
+    // Early out before we try using extension functions
+    if (!mSupported) {
+        return false;
     }
 
-    Service::~Service() = default;
+    VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+    externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+    externalFormatInfo.pNext = nullptr;
+    externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
 
-    // static
-    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
-        return deviceInfo.HasExt(DeviceExt::ExternalMemoryFD);
+    VkPhysicalDeviceImageFormatInfo2 formatInfo;
+    formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+    formatInfo.pNext = &externalFormatInfo;
+    formatInfo.format = format;
+    formatInfo.type = type;
+    formatInfo.tiling = tiling;
+    formatInfo.usage = usage;
+    formatInfo.flags = flags;
+
+    VkExternalImageFormatProperties externalFormatProperties;
+    externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+    externalFormatProperties.pNext = nullptr;
+
+    VkImageFormatProperties2 formatProperties;
+    formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+    formatProperties.pNext = &externalFormatProperties;
+
+    VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+        ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
+
+    // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+    if (result != VK_SUCCESS) {
+        return false;
     }
 
-    bool Service::SupportsImportMemory(VkFormat format,
-                                       VkImageType type,
-                                       VkImageTiling tiling,
-                                       VkImageUsageFlags usage,
-                                       VkImageCreateFlags flags) {
-        // Early out before we try using extension functions
-        if (!mSupported) {
-            return false;
-        }
+    // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+    VkFlags memoryFlags = externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+    return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+}
 
-        VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
-        externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
-        externalFormatInfo.pNext = nullptr;
-        externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                  VkFormat format,
+                                  VkImageUsageFlags usage,
+                                  bool* supportsDisjoint) {
+    *supportsDisjoint = false;
+    return mSupported;
+}
 
-        VkPhysicalDeviceImageFormatInfo2 formatInfo;
-        formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
-        formatInfo.pNext = &externalFormatInfo;
-        formatInfo.format = format;
-        formatInfo.type = type;
-        formatInfo.tiling = tiling;
-        formatInfo.usage = usage;
-        formatInfo.flags = flags;
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+    const ExternalImageDescriptor* descriptor,
+    VkImage image) {
+    DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::OpaqueFD,
+                    "ExternalImageDescriptor is not an OpaqueFD descriptor.");
 
-        VkExternalImageFormatProperties externalFormatProperties;
-        externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
-        externalFormatProperties.pNext = nullptr;
+    const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+        static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
 
-        VkImageFormatProperties2 formatProperties;
-        formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
-        formatProperties.pNext = &externalFormatProperties;
+    MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+                                 opaqueFDDescriptor->memoryTypeIndex};
+    return params;
+}
 
-        VkResult result = VkResult::WrapUnsafe(mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
-            ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties));
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                    const MemoryImportParams& importParams,
+                                                    VkImage image) {
+    DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
 
-        // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
-        if (result != VK_SUCCESS) {
-            return false;
-        }
+    VkMemoryRequirements requirements;
+    mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+    DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
+                    "Requested allocation size (%u) is smaller than the image requires (%u).",
+                    importParams.allocationSize, requirements.size);
 
-        // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
-        VkFlags memoryFlags =
-            externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
-        return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
-    }
+    VkImportMemoryFdInfoKHR importMemoryFdInfo;
+    importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
+    importMemoryFdInfo.pNext = nullptr;
+    importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
+    importMemoryFdInfo.fd = handle;
 
-    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
-                                      VkFormat format,
-                                      VkImageUsageFlags usage,
-                                      bool* supportsDisjoint) {
-        *supportsDisjoint = false;
-        return mSupported;
-    }
+    VkMemoryAllocateInfo allocateInfo;
+    allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+    allocateInfo.pNext = &importMemoryFdInfo;
+    allocateInfo.allocationSize = importParams.allocationSize;
+    allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
 
-    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
-        const ExternalImageDescriptor* descriptor,
-        VkImage image) {
-        DAWN_INVALID_IF(descriptor->GetType() != ExternalImageType::OpaqueFD,
-                        "ExternalImageDescriptor is not an OpaqueFD descriptor.");
+    VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+    DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+                                                       nullptr, &*allocatedMemory),
+                            "vkAllocateMemory"));
+    return allocatedMemory;
+}
 
-        const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
-            static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                            const VkImageCreateInfo& baseCreateInfo) {
+    VkImageCreateInfo createInfo = baseCreateInfo;
+    createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+    createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+    createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
 
-        MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
-                                     opaqueFDDescriptor->memoryTypeIndex};
-        return params;
-    }
+    VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+    externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+    externalMemoryImageCreateInfo.pNext = nullptr;
+    externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
 
-    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
-                                                        const MemoryImportParams& importParams,
-                                                        VkImage image) {
-        DAWN_INVALID_IF(handle < 0, "Importing memory with an invalid handle.");
+    PNextChainBuilder createInfoChain(&createInfo);
+    createInfoChain.Add(&externalMemoryImageCreateInfo,
+                        VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
 
-        VkMemoryRequirements requirements;
-        mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
-        DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
-                        "Requested allocation size (%u) is smaller than the image requires (%u).",
-                        importParams.allocationSize, requirements.size);
+    ASSERT(IsSampleCountSupported(mDevice, createInfo));
 
-        VkImportMemoryFdInfoKHR importMemoryFdInfo;
-        importMemoryFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
-        importMemoryFdInfo.pNext = nullptr;
-        importMemoryFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
-        importMemoryFdInfo.fd = handle;
-
-        VkMemoryAllocateInfo allocateInfo;
-        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-        allocateInfo.pNext = &importMemoryFdInfo;
-        allocateInfo.allocationSize = importParams.allocationSize;
-        allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-
-        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
-        DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
-                                                           nullptr, &*allocatedMemory),
-                                "vkAllocateMemory"));
-        return allocatedMemory;
-    }
-
-    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
-                                                const VkImageCreateInfo& baseCreateInfo) {
-        VkImageCreateInfo createInfo = baseCreateInfo;
-        createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
-        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
-        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
-        externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
-        externalMemoryImageCreateInfo.pNext = nullptr;
-        externalMemoryImageCreateInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
-
-        PNextChainBuilder createInfoChain(&createInfo);
-        createInfoChain.Add(&externalMemoryImageCreateInfo,
-                            VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
-
-        ASSERT(IsSampleCountSupported(mDevice, createInfo));
-
-        VkImage image;
-        DAWN_TRY(CheckVkSuccess(
-            mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
-            "CreateImage"));
-        return image;
-    }
+    VkImage image;
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+        "CreateImage"));
+    return image;
+}
 
 }  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp b/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
index 85fdeac..cec5cc4 100644
--- a/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
+++ b/src/dawn/native/vulkan/external_memory/MemoryServiceZirconHandle.cpp
@@ -23,140 +23,137 @@
 
 namespace dawn::native::vulkan::external_memory {
 
-    Service::Service(Device* device)
-        : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {
+Service::Service(Device* device)
+    : mDevice(device), mSupported(CheckSupport(device->GetDeviceInfo())) {}
+
+Service::~Service() = default;
+
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
+    return deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle);
+}
+
+bool Service::SupportsImportMemory(VkFormat format,
+                                   VkImageType type,
+                                   VkImageTiling tiling,
+                                   VkImageUsageFlags usage,
+                                   VkImageCreateFlags flags) {
+    // Early out before we try using extension functions
+    if (!mSupported) {
+        return false;
     }
 
-    Service::~Service() = default;
+    VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
+    externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
+    externalFormatInfo.pNext = nullptr;
+    externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
 
-    // static
-    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo) {
-        return deviceInfo.HasExt(DeviceExt::ExternalMemoryZirconHandle);
+    VkPhysicalDeviceImageFormatInfo2 formatInfo;
+    formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
+    formatInfo.pNext = &externalFormatInfo;
+    formatInfo.format = format;
+    formatInfo.type = type;
+    formatInfo.tiling = tiling;
+    formatInfo.usage = usage;
+    formatInfo.flags = flags;
+
+    VkExternalImageFormatProperties externalFormatProperties;
+    externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
+    externalFormatProperties.pNext = nullptr;
+
+    VkImageFormatProperties2 formatProperties;
+    formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
+    formatProperties.pNext = &externalFormatProperties;
+
+    VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
+        ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
+
+    // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
+    if (result != VK_SUCCESS) {
+        return false;
     }
 
-    bool Service::SupportsImportMemory(VkFormat format,
-                                       VkImageType type,
-                                       VkImageTiling tiling,
-                                       VkImageUsageFlags usage,
-                                       VkImageCreateFlags flags) {
-        // Early out before we try using extension functions
-        if (!mSupported) {
-            return false;
-        }
+    // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
+    VkFlags memoryFlags = externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
+    return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
+}
 
-        VkPhysicalDeviceExternalImageFormatInfo externalFormatInfo;
-        externalFormatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
-        externalFormatInfo.pNext = nullptr;
-        externalFormatInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
+                                  VkFormat format,
+                                  VkImageUsageFlags usage,
+                                  bool* supportsDisjoint) {
+    *supportsDisjoint = false;
+    return mSupported;
+}
 
-        VkPhysicalDeviceImageFormatInfo2 formatInfo;
-        formatInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
-        formatInfo.pNext = &externalFormatInfo;
-        formatInfo.format = format;
-        formatInfo.type = type;
-        formatInfo.tiling = tiling;
-        formatInfo.usage = usage;
-        formatInfo.flags = flags;
+ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
+    const ExternalImageDescriptor* descriptor,
+    VkImage image) {
+    DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
+                    "ExternalImageDescriptor is not an OpaqueFD descriptor.");
 
-        VkExternalImageFormatProperties externalFormatProperties;
-        externalFormatProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
-        externalFormatProperties.pNext = nullptr;
+    const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
+        static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
 
-        VkImageFormatProperties2 formatProperties;
-        formatProperties.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
-        formatProperties.pNext = &externalFormatProperties;
+    MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
+                                 opaqueFDDescriptor->memoryTypeIndex};
+    return params;
+}
 
-        VkResult result = mDevice->fn.GetPhysicalDeviceImageFormatProperties2(
-            ToBackend(mDevice->GetAdapter())->GetPhysicalDevice(), &formatInfo, &formatProperties);
+ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
+                                                    const MemoryImportParams& importParams,
+                                                    VkImage image) {
+    DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing memory with an invalid handle.");
 
-        // If handle not supported, result == VK_ERROR_FORMAT_NOT_SUPPORTED
-        if (result != VK_SUCCESS) {
-            return false;
-        }
+    VkMemoryRequirements requirements;
+    mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
+    DAWN_INVALID_IF(requirements.size > importParams.allocationSize,
+                    "Requested allocation size (%u) is smaller than the required image size (%u).",
+                    importParams.allocationSize, requirements.size);
 
-        // TODO(http://crbug.com/dawn/206): Investigate dedicated only images
-        VkFlags memoryFlags =
-            externalFormatProperties.externalMemoryProperties.externalMemoryFeatures;
-        return (memoryFlags & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) != 0;
-    }
+    VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
+    importMemoryHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
+    importMemoryHandleInfo.pNext = nullptr;
+    importMemoryHandleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
+    importMemoryHandleInfo.handle = handle;
 
-    bool Service::SupportsCreateImage(const ExternalImageDescriptor* descriptor,
-                                      VkFormat format,
-                                      VkImageUsageFlags usage,
-                                      bool* supportsDisjoint) {
-        *supportsDisjoint = false;
-        return mSupported;
-    }
+    VkMemoryAllocateInfo allocateInfo;
+    allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+    allocateInfo.pNext = &importMemoryHandleInfo;
+    allocateInfo.allocationSize = importParams.allocationSize;
+    allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
 
-    ResultOrError<MemoryImportParams> Service::GetMemoryImportParams(
-        const ExternalImageDescriptor* descriptor,
-        VkImage image) {
-        DAWN_INVALID_IF(descriptor->type != ExternalImageType::OpaqueFD,
-                        "ExternalImageDescriptor is not an OpaqueFD descriptor.");
+    VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
+    DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
+                                                       nullptr, &*allocatedMemory),
+                            "vkAllocateMemory"));
+    return allocatedMemory;
+}
 
-        const ExternalImageDescriptorOpaqueFD* opaqueFDDescriptor =
-            static_cast<const ExternalImageDescriptorOpaqueFD*>(descriptor);
+ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
+                                            const VkImageCreateInfo& baseCreateInfo) {
+    VkImageCreateInfo createInfo = baseCreateInfo;
+    createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+    createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+    createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
 
-        MemoryImportParams params = {opaqueFDDescriptor->allocationSize,
-                                     opaqueFDDescriptor->memoryTypeIndex};
-        return params;
-    }
+    VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
+    externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
+    externalMemoryImageCreateInfo.pNext = nullptr;
+    externalMemoryImageCreateInfo.handleTypes =
+        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
 
-    ResultOrError<VkDeviceMemory> Service::ImportMemory(ExternalMemoryHandle handle,
-                                                        const MemoryImportParams& importParams,
-                                                        VkImage image) {
-        DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing memory with an invalid handle.");
+    PNextChainBuilder createInfoChain(&createInfo);
+    createInfoChain.Add(&externalMemoryImageCreateInfo,
+                        VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
 
-        VkMemoryRequirements requirements;
-        mDevice->fn.GetImageMemoryRequirements(mDevice->GetVkDevice(), image, &requirements);
-        DAWN_INVALID_IF(
-            requirements.size > importParams.allocationSize,
-            "Requested allocation size (%u) is smaller than the required image size (%u).",
-            importParams.allocationSize, requirements.size);
+    ASSERT(IsSampleCountSupported(mDevice, createInfo));
 
-        VkImportMemoryZirconHandleInfoFUCHSIA importMemoryHandleInfo;
-        importMemoryHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA;
-        importMemoryHandleInfo.pNext = nullptr;
-        importMemoryHandleInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
-        importMemoryHandleInfo.handle = handle;
-
-        VkMemoryAllocateInfo allocateInfo;
-        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-        allocateInfo.pNext = &importMemoryHandleInfo;
-        allocateInfo.allocationSize = importParams.allocationSize;
-        allocateInfo.memoryTypeIndex = importParams.memoryTypeIndex;
-
-        VkDeviceMemory allocatedMemory = VK_NULL_HANDLE;
-        DAWN_TRY(CheckVkSuccess(mDevice->fn.AllocateMemory(mDevice->GetVkDevice(), &allocateInfo,
-                                                           nullptr, &*allocatedMemory),
-                                "vkAllocateMemory"));
-        return allocatedMemory;
-    }
-
-    ResultOrError<VkImage> Service::CreateImage(const ExternalImageDescriptor* descriptor,
-                                                const VkImageCreateInfo& baseCreateInfo) {
-        VkImageCreateInfo createInfo = baseCreateInfo;
-        createInfo.flags |= VK_IMAGE_CREATE_ALIAS_BIT_KHR;
-        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
-        VkExternalMemoryImageCreateInfo externalMemoryImageCreateInfo;
-        externalMemoryImageCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
-        externalMemoryImageCreateInfo.pNext = nullptr;
-        externalMemoryImageCreateInfo.handleTypes =
-            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
-
-        PNextChainBuilder createInfoChain(&createInfo);
-        createInfoChain.Add(&externalMemoryImageCreateInfo,
-                            VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
-
-        ASSERT(IsSampleCountSupported(mDevice, createInfo));
-
-        VkImage image;
-        DAWN_TRY(CheckVkSuccess(
-            mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
-            "CreateImage"));
-        return image;
-    }
+    VkImage image;
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.CreateImage(mDevice->GetVkDevice(), &createInfo, nullptr, &*image),
+        "CreateImage"));
+    return image;
+}
 
 }  // namespace dawn::native::vulkan::external_memory
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h b/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
index 2da5834..e27689f 100644
--- a/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreService.h
@@ -22,38 +22,38 @@
 #include "dawn/native/vulkan/VulkanInfo.h"
 
 namespace dawn::native::vulkan {
-    class Device;
+class Device;
 }  // namespace dawn::native::vulkan
 
 namespace dawn::native::vulkan::external_semaphore {
 
-    class Service {
-      public:
-        explicit Service(Device* device);
-        ~Service();
+class Service {
+  public:
+    explicit Service(Device* device);
+    ~Service();
 
-        static bool CheckSupport(const VulkanDeviceInfo& deviceInfo,
-                                 VkPhysicalDevice physicalDevice,
-                                 const VulkanFunctions& fn);
+    static bool CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                             VkPhysicalDevice physicalDevice,
+                             const VulkanFunctions& fn);
 
-        // True if the device reports it supports this feature
-        bool Supported();
+    // True if the device reports it supports this feature
+    bool Supported();
 
-        // Given an external handle, import it into a VkSemaphore
-        ResultOrError<VkSemaphore> ImportSemaphore(ExternalSemaphoreHandle handle);
+    // Given an external handle, import it into a VkSemaphore
+    ResultOrError<VkSemaphore> ImportSemaphore(ExternalSemaphoreHandle handle);
 
-        // Create a VkSemaphore that is exportable into an external handle later
-        ResultOrError<VkSemaphore> CreateExportableSemaphore();
+    // Create a VkSemaphore that is exportable into an external handle later
+    ResultOrError<VkSemaphore> CreateExportableSemaphore();
 
-        // Export a VkSemaphore into an external handle
-        ResultOrError<ExternalSemaphoreHandle> ExportSemaphore(VkSemaphore semaphore);
+    // Export a VkSemaphore into an external handle
+    ResultOrError<ExternalSemaphoreHandle> ExportSemaphore(VkSemaphore semaphore);
 
-      private:
-        Device* mDevice = nullptr;
+  private:
+    Device* mDevice = nullptr;
 
-        // True if early checks pass that determine if the service is supported
-        bool mSupported = false;
-    };
+    // True if early checks pass that determine if the service is supported
+    bool mSupported = false;
+};
 
 }  // namespace dawn::native::vulkan::external_semaphore
 
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
index 6909491..9c1d923 100644
--- a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceFD.cpp
@@ -29,111 +29,110 @@
 
 namespace dawn::native::vulkan::external_semaphore {
 
-    Service::Service(Device* device)
-        : mDevice(device),
-          mSupported(CheckSupport(device->GetDeviceInfo(),
-                                  ToBackend(device->GetAdapter())->GetPhysicalDevice(),
-                                  device->fn)) {
+Service::Service(Device* device)
+    : mDevice(device),
+      mSupported(CheckSupport(device->GetDeviceInfo(),
+                              ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+                              device->fn)) {}
+
+Service::~Service() = default;
+
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                           VkPhysicalDevice physicalDevice,
+                           const VulkanFunctions& fn) {
+    if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
+        return false;
     }
 
-    Service::~Service() = default;
+    VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+    semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+    semaphoreInfo.pNext = nullptr;
+    semaphoreInfo.handleType = kHandleType;
 
-    // static
-    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
-                               VkPhysicalDevice physicalDevice,
-                               const VulkanFunctions& fn) {
-        if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreFD)) {
-            return false;
-        }
+    VkExternalSemaphorePropertiesKHR semaphoreProperties;
+    semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+    semaphoreProperties.pNext = nullptr;
 
-        VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
-        semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
-        semaphoreInfo.pNext = nullptr;
-        semaphoreInfo.handleType = kHandleType;
+    fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+                                                    &semaphoreProperties);
 
-        VkExternalSemaphorePropertiesKHR semaphoreProperties;
-        semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
-        semaphoreProperties.pNext = nullptr;
+    VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+                            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
 
-        fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
-                                                        &semaphoreProperties);
+    return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+}
 
-        VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
-                                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+bool Service::Supported() {
+    return mSupported;
+}
 
-        return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+    DAWN_INVALID_IF(handle < 0, "Importing a semaphore with an invalid handle.");
+
+    VkSemaphore semaphore = VK_NULL_HANDLE;
+    VkSemaphoreCreateInfo info;
+    info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+    info.pNext = nullptr;
+    info.flags = 0;
+
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+        "vkCreateSemaphore"));
+
+    VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
+    importSemaphoreFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
+    importSemaphoreFdInfo.pNext = nullptr;
+    importSemaphoreFdInfo.semaphore = semaphore;
+    importSemaphoreFdInfo.flags = 0;
+    importSemaphoreFdInfo.handleType = kHandleType;
+    importSemaphoreFdInfo.fd = handle;
+
+    MaybeError status = CheckVkSuccess(
+        mDevice->fn.ImportSemaphoreFdKHR(mDevice->GetVkDevice(), &importSemaphoreFdInfo),
+        "vkImportSemaphoreFdKHR");
+
+    if (status.IsError()) {
+        mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+        DAWN_TRY(std::move(status));
     }
 
-    bool Service::Supported() {
-        return mSupported;
-    }
+    return semaphore;
+}
 
-    ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
-        DAWN_INVALID_IF(handle < 0, "Importing a semaphore with an invalid handle.");
+ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+    VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+    exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+    exportSemaphoreInfo.pNext = nullptr;
+    exportSemaphoreInfo.handleTypes = kHandleType;
 
-        VkSemaphore semaphore = VK_NULL_HANDLE;
-        VkSemaphoreCreateInfo info;
-        info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-        info.pNext = nullptr;
-        info.flags = 0;
+    VkSemaphoreCreateInfo semaphoreCreateInfo;
+    semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+    semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+    semaphoreCreateInfo.flags = 0;
 
-        DAWN_TRY(CheckVkSuccess(
-            mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
-            "vkCreateSemaphore"));
+    VkSemaphore signalSemaphore;
+    DAWN_TRY(
+        CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+                                                   nullptr, &*signalSemaphore),
+                       "vkCreateSemaphore"));
+    return signalSemaphore;
+}
 
-        VkImportSemaphoreFdInfoKHR importSemaphoreFdInfo;
-        importSemaphoreFdInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
-        importSemaphoreFdInfo.pNext = nullptr;
-        importSemaphoreFdInfo.semaphore = semaphore;
-        importSemaphoreFdInfo.flags = 0;
-        importSemaphoreFdInfo.handleType = kHandleType;
-        importSemaphoreFdInfo.fd = handle;
+ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+    VkSemaphoreGetFdInfoKHR semaphoreGetFdInfo;
+    semaphoreGetFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
+    semaphoreGetFdInfo.pNext = nullptr;
+    semaphoreGetFdInfo.semaphore = semaphore;
+    semaphoreGetFdInfo.handleType = kHandleType;
 
-        MaybeError status = CheckVkSuccess(
-            mDevice->fn.ImportSemaphoreFdKHR(mDevice->GetVkDevice(), &importSemaphoreFdInfo),
-            "vkImportSemaphoreFdKHR");
+    int fd = -1;
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.GetSemaphoreFdKHR(mDevice->GetVkDevice(), &semaphoreGetFdInfo, &fd),
+        "vkGetSemaphoreFdKHR"));
 
-        if (status.IsError()) {
-            mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
-            DAWN_TRY(std::move(status));
-        }
-
-        return semaphore;
-    }
-
-    ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
-        VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
-        exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
-        exportSemaphoreInfo.pNext = nullptr;
-        exportSemaphoreInfo.handleTypes = kHandleType;
-
-        VkSemaphoreCreateInfo semaphoreCreateInfo;
-        semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-        semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
-        semaphoreCreateInfo.flags = 0;
-
-        VkSemaphore signalSemaphore;
-        DAWN_TRY(
-            CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
-                                                       nullptr, &*signalSemaphore),
-                           "vkCreateSemaphore"));
-        return signalSemaphore;
-    }
-
-    ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
-        VkSemaphoreGetFdInfoKHR semaphoreGetFdInfo;
-        semaphoreGetFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
-        semaphoreGetFdInfo.pNext = nullptr;
-        semaphoreGetFdInfo.semaphore = semaphore;
-        semaphoreGetFdInfo.handleType = kHandleType;
-
-        int fd = -1;
-        DAWN_TRY(CheckVkSuccess(
-            mDevice->fn.GetSemaphoreFdKHR(mDevice->GetVkDevice(), &semaphoreGetFdInfo, &fd),
-            "vkGetSemaphoreFdKHR"));
-
-        ASSERT(fd >= 0);
-        return fd;
-    }
+    ASSERT(fd >= 0);
+    return fd;
+}
 
 }  // namespace dawn::native::vulkan::external_semaphore
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
index ba32609..1963524 100644
--- a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceNull.cpp
@@ -17,34 +17,34 @@
 
 namespace dawn::native::vulkan::external_semaphore {
 
-    Service::Service(Device* device) : mDevice(device) {
-        DAWN_UNUSED(mDevice);
-        DAWN_UNUSED(mSupported);
-    }
+Service::Service(Device* device) : mDevice(device) {
+    DAWN_UNUSED(mDevice);
+    DAWN_UNUSED(mSupported);
+}
 
-    Service::~Service() = default;
+Service::~Service() = default;
 
-    // static
-    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
-                               VkPhysicalDevice physicalDevice,
-                               const VulkanFunctions& fn) {
-        return false;
-    }
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                           VkPhysicalDevice physicalDevice,
+                           const VulkanFunctions& fn) {
+    return false;
+}
 
-    bool Service::Supported() {
-        return false;
-    }
+bool Service::Supported() {
+    return false;
+}
 
-    ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
-        return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
-    }
+ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+    return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+}
 
-    ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
-        return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
-    }
+ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+    return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+}
 
-    ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
-        return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
-    }
+ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+    return DAWN_UNIMPLEMENTED_ERROR("Using null semaphore service to interop inside Vulkan");
+}
 
 }  // namespace dawn::native::vulkan::external_semaphore
diff --git a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
index db5b800..af1f3f3 100644
--- a/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
+++ b/src/dawn/native/vulkan/external_semaphore/SemaphoreServiceZirconHandle.cpp
@@ -22,116 +22,111 @@
 
 namespace dawn::native::vulkan::external_semaphore {
 
-    Service::Service(Device* device)
-        : mDevice(device),
-          mSupported(CheckSupport(device->GetDeviceInfo(),
-                                  ToBackend(device->GetAdapter())->GetPhysicalDevice(),
-                                  device->fn)) {
+Service::Service(Device* device)
+    : mDevice(device),
+      mSupported(CheckSupport(device->GetDeviceInfo(),
+                              ToBackend(device->GetAdapter())->GetPhysicalDevice(),
+                              device->fn)) {}
+
+Service::~Service() = default;
+
+// static
+bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
+                           VkPhysicalDevice physicalDevice,
+                           const VulkanFunctions& fn) {
+    if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
+        return false;
     }
 
-    Service::~Service() = default;
+    VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
+    semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
+    semaphoreInfo.pNext = nullptr;
+    semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
 
-    // static
-    bool Service::CheckSupport(const VulkanDeviceInfo& deviceInfo,
-                               VkPhysicalDevice physicalDevice,
-                               const VulkanFunctions& fn) {
-        if (!deviceInfo.HasExt(DeviceExt::ExternalSemaphoreZirconHandle)) {
-            return false;
-        }
+    VkExternalSemaphorePropertiesKHR semaphoreProperties;
+    semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
+    semaphoreProperties.pNext = nullptr;
 
-        VkPhysicalDeviceExternalSemaphoreInfoKHR semaphoreInfo;
-        semaphoreInfo.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR;
-        semaphoreInfo.pNext = nullptr;
-        semaphoreInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+    fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
+                                                    &semaphoreProperties);
 
-        VkExternalSemaphorePropertiesKHR semaphoreProperties;
-        semaphoreProperties.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR;
-        semaphoreProperties.pNext = nullptr;
+    VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
+                            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
 
-        fn.GetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &semaphoreInfo,
-                                                        &semaphoreProperties);
+    return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+}
 
-        VkFlags requiredFlags = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
-                                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
+bool Service::Supported() {
+    return mSupported;
+}
 
-        return IsSubset(requiredFlags, semaphoreProperties.externalSemaphoreFeatures);
+ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
+    DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID, "Importing a semaphore with an invalid handle.");
+
+    VkSemaphore semaphore = VK_NULL_HANDLE;
+    VkSemaphoreCreateInfo info;
+    info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+    info.pNext = nullptr;
+    info.flags = 0;
+
+    DAWN_TRY(CheckVkSuccess(
+        mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
+        "vkCreateSemaphore"));
+
+    VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
+    importSemaphoreHandleInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
+    importSemaphoreHandleInfo.pNext = nullptr;
+    importSemaphoreHandleInfo.semaphore = semaphore;
+    importSemaphoreHandleInfo.flags = 0;
+    importSemaphoreHandleInfo.handleType =
+        VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+    importSemaphoreHandleInfo.handle = handle;
+
+    MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
+                                           mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
+                                       "vkImportSemaphoreZirconHandleFUCHSIA");
+
+    if (status.IsError()) {
+        mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
+        DAWN_TRY(std::move(status));
     }
 
-    bool Service::Supported() {
-        return mSupported;
-    }
+    return semaphore;
+}
 
-    ResultOrError<VkSemaphore> Service::ImportSemaphore(ExternalSemaphoreHandle handle) {
-        DAWN_INVALID_IF(handle == ZX_HANDLE_INVALID,
-                        "Importing a semaphore with an invalid handle.");
+ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
+    VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
+    exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
+    exportSemaphoreInfo.pNext = nullptr;
+    exportSemaphoreInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
 
-        VkSemaphore semaphore = VK_NULL_HANDLE;
-        VkSemaphoreCreateInfo info;
-        info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-        info.pNext = nullptr;
-        info.flags = 0;
+    VkSemaphoreCreateInfo semaphoreCreateInfo;
+    semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+    semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
+    semaphoreCreateInfo.flags = 0;
 
-        DAWN_TRY(CheckVkSuccess(
-            mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &info, nullptr, &*semaphore),
-            "vkCreateSemaphore"));
+    VkSemaphore signalSemaphore;
+    DAWN_TRY(
+        CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
+                                                   nullptr, &*signalSemaphore),
+                       "vkCreateSemaphore"));
+    return signalSemaphore;
+}
 
-        VkImportSemaphoreZirconHandleInfoFUCHSIA importSemaphoreHandleInfo;
-        importSemaphoreHandleInfo.sType =
-            VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA;
-        importSemaphoreHandleInfo.pNext = nullptr;
-        importSemaphoreHandleInfo.semaphore = semaphore;
-        importSemaphoreHandleInfo.flags = 0;
-        importSemaphoreHandleInfo.handleType =
-            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-        importSemaphoreHandleInfo.handle = handle;
+ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
+    VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
+    semaphoreGetHandleInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
+    semaphoreGetHandleInfo.pNext = nullptr;
+    semaphoreGetHandleInfo.semaphore = semaphore;
+    semaphoreGetHandleInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
 
-        MaybeError status = CheckVkSuccess(mDevice->fn.ImportSemaphoreZirconHandleFUCHSIA(
-                                               mDevice->GetVkDevice(), &importSemaphoreHandleInfo),
-                                           "vkImportSemaphoreZirconHandleFUCHSIA");
+    zx_handle_t handle = ZX_HANDLE_INVALID;
+    DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
+                                mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
+                            "VkSemaphoreGetZirconHandleInfoFUCHSIA"));
 
-        if (status.IsError()) {
-            mDevice->fn.DestroySemaphore(mDevice->GetVkDevice(), semaphore, nullptr);
-            DAWN_TRY(std::move(status));
-        }
-
-        return semaphore;
-    }
-
-    ResultOrError<VkSemaphore> Service::CreateExportableSemaphore() {
-        VkExportSemaphoreCreateInfoKHR exportSemaphoreInfo;
-        exportSemaphoreInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
-        exportSemaphoreInfo.pNext = nullptr;
-        exportSemaphoreInfo.handleTypes =
-            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
-        VkSemaphoreCreateInfo semaphoreCreateInfo;
-        semaphoreCreateInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-        semaphoreCreateInfo.pNext = &exportSemaphoreInfo;
-        semaphoreCreateInfo.flags = 0;
-
-        VkSemaphore signalSemaphore;
-        DAWN_TRY(
-            CheckVkSuccess(mDevice->fn.CreateSemaphore(mDevice->GetVkDevice(), &semaphoreCreateInfo,
-                                                       nullptr, &*signalSemaphore),
-                           "vkCreateSemaphore"));
-        return signalSemaphore;
-    }
-
-    ResultOrError<ExternalSemaphoreHandle> Service::ExportSemaphore(VkSemaphore semaphore) {
-        VkSemaphoreGetZirconHandleInfoFUCHSIA semaphoreGetHandleInfo;
-        semaphoreGetHandleInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA;
-        semaphoreGetHandleInfo.pNext = nullptr;
-        semaphoreGetHandleInfo.semaphore = semaphore;
-        semaphoreGetHandleInfo.handleType =
-            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
-
-        zx_handle_t handle = ZX_HANDLE_INVALID;
-        DAWN_TRY(CheckVkSuccess(mDevice->fn.GetSemaphoreZirconHandleFUCHSIA(
-                                    mDevice->GetVkDevice(), &semaphoreGetHandleInfo, &handle),
-                                "VkSemaphoreGetZirconHandleInfoFUCHSIA"));
-
-        ASSERT(handle != ZX_HANDLE_INVALID);
-        return handle;
-    }
+    ASSERT(handle != ZX_HANDLE_INVALID);
+    return handle;
+}
 
 }  // namespace dawn::native::vulkan::external_semaphore
diff --git a/src/dawn/native/webgpu_absl_format.cpp b/src/dawn/native/webgpu_absl_format.cpp
index f50d6b0..2550f4b 100644
--- a/src/dawn/native/webgpu_absl_format.cpp
+++ b/src/dawn/native/webgpu_absl_format.cpp
@@ -30,414 +30,411 @@
 
 namespace dawn::native {
 
-    //
-    // Structs
-    //
+//
+// Structs
+//
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const Color* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == nullptr) {
-            s->Append("[null]");
-            return {true};
-        }
-        s->Append(absl::StrFormat("[Color r:%f, g:%f, b:%f, a:%f]", value->r, value->g, value->b,
-                                  value->a));
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(const Color* value, const absl::FormatConversionSpec& spec, absl::FormatSink* s) {
+    if (value == nullptr) {
+        s->Append("[null]");
+        return {true};
+    }
+    s->Append(
+        absl::StrFormat("[Color r:%f, g:%f, b:%f, a:%f]", value->r, value->g, value->b, value->a));
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const Extent3D* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    if (value == nullptr) {
+        s->Append("[null]");
+        return {true};
+    }
+    s->Append(absl::StrFormat("[Extent3D width:%u, height:%u, depthOrArrayLayers:%u]", value->width,
+                              value->height, value->depthOrArrayLayers));
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const Origin3D* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    if (value == nullptr) {
+        s->Append("[null]");
+        return {true};
+    }
+    s->Append(absl::StrFormat("[Origin3D x:%u, y:%u, z:%u]", value->x, value->y, value->z));
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const BindingInfo& value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    static const auto* const fmt =
+        new absl::ParsedFormat<'u', 's', 's', 's'>("{ binding: %u, visibility: %s, %s: %s }");
+    switch (value.bindingType) {
+        case BindingInfoType::Buffer:
+            s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+                                      value.bindingType, value.buffer));
+            break;
+        case BindingInfoType::Sampler:
+            s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+                                      value.bindingType, value.sampler));
+            break;
+        case BindingInfoType::Texture:
+            s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+                                      value.bindingType, value.texture));
+            break;
+        case BindingInfoType::StorageTexture:
+            s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding), value.visibility,
+                                      value.bindingType, value.storageTexture));
+            break;
+        case BindingInfoType::ExternalTexture:
+            break;
+    }
+    return {true};
+}
+
+//
+// Objects
+//
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const DeviceBase* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    if (value == nullptr) {
+        s->Append("[null]");
+        return {true};
+    }
+    s->Append("[Device");
+    const std::string& label = value->GetLabel();
+    if (!label.empty()) {
+        s->Append(absl::StrFormat(" \"%s\"", label));
+    }
+    s->Append("]");
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const ApiObjectBase* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    if (value == nullptr) {
+        s->Append("[null]");
+        return {true};
+    }
+    s->Append("[");
+    if (value->IsError()) {
+        s->Append("Invalid ");
+    }
+    s->Append(ObjectTypeAsString(value->GetType()));
+    const std::string& label = value->GetLabel();
+    if (!label.empty()) {
+        s->Append(absl::StrFormat(" \"%s\"", label));
+    }
+    s->Append("]");
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const TextureViewBase* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    if (value == nullptr) {
+        s->Append("[null]");
+        return {true};
+    }
+    s->Append("[");
+    if (value->IsError()) {
+        s->Append("Invalid ");
+    }
+    s->Append(ObjectTypeAsString(value->GetType()));
+    const std::string& label = value->GetLabel();
+    if (!label.empty()) {
+        s->Append(absl::StrFormat(" \"%s\"", label));
+    }
+    const std::string& textureLabel = value->GetTexture()->GetLabel();
+    if (!textureLabel.empty()) {
+        s->Append(absl::StrFormat(" of Texture \"%s\"", textureLabel));
+    }
+    s->Append("]");
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const AttachmentState* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    if (value == nullptr) {
+        s->Append("[null]");
         return {true};
     }
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const Extent3D* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == nullptr) {
-            s->Append("[null]");
-            return {true};
-        }
-        s->Append(absl::StrFormat("[Extent3D width:%u, height:%u, depthOrArrayLayers:%u]",
-                                  value->width, value->height, value->depthOrArrayLayers));
-        return {true};
-    }
+    s->Append("{ colorFormats: [");
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const Origin3D* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == nullptr) {
-            s->Append("[null]");
-            return {true};
-        }
-        s->Append(absl::StrFormat("[Origin3D x:%u, y:%u, z:%u]", value->x, value->y, value->z));
-        return {true};
-    }
+    ColorAttachmentIndex nextColorIndex(uint8_t(0));
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const BindingInfo& value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        static const auto* const fmt =
-            new absl::ParsedFormat<'u', 's', 's', 's'>("{ binding: %u, visibility: %s, %s: %s }");
-        switch (value.bindingType) {
-            case BindingInfoType::Buffer:
-                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
-                                          value.visibility, value.bindingType, value.buffer));
-                break;
-            case BindingInfoType::Sampler:
-                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
-                                          value.visibility, value.bindingType, value.sampler));
-                break;
-            case BindingInfoType::Texture:
-                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
-                                          value.visibility, value.bindingType, value.texture));
-                break;
-            case BindingInfoType::StorageTexture:
-                s->Append(absl::StrFormat(*fmt, static_cast<uint32_t>(value.binding),
-                                          value.visibility, value.bindingType,
-                                          value.storageTexture));
-                break;
-            case BindingInfoType::ExternalTexture:
-                break;
-        }
-        return {true};
-    }
-
-    //
-    // Objects
-    //
-
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const DeviceBase* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == nullptr) {
-            s->Append("[null]");
-            return {true};
-        }
-        s->Append("[Device");
-        const std::string& label = value->GetLabel();
-        if (!label.empty()) {
-            s->Append(absl::StrFormat(" \"%s\"", label));
-        }
-        s->Append("]");
-        return {true};
-    }
-
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const ApiObjectBase* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == nullptr) {
-            s->Append("[null]");
-            return {true};
-        }
-        s->Append("[");
-        if (value->IsError()) {
-            s->Append("Invalid ");
-        }
-        s->Append(ObjectTypeAsString(value->GetType()));
-        const std::string& label = value->GetLabel();
-        if (!label.empty()) {
-            s->Append(absl::StrFormat(" \"%s\"", label));
-        }
-        s->Append("]");
-        return {true};
-    }
-
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const TextureViewBase* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == nullptr) {
-            s->Append("[null]");
-            return {true};
-        }
-        s->Append("[");
-        if (value->IsError()) {
-            s->Append("Invalid ");
-        }
-        s->Append(ObjectTypeAsString(value->GetType()));
-        const std::string& label = value->GetLabel();
-        if (!label.empty()) {
-            s->Append(absl::StrFormat(" \"%s\"", label));
-        }
-        const std::string& textureLabel = value->GetTexture()->GetLabel();
-        if (!textureLabel.empty()) {
-            s->Append(absl::StrFormat(" of Texture \"%s\"", textureLabel));
-        }
-        s->Append("]");
-        return {true};
-    }
-
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const AttachmentState* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == nullptr) {
-            s->Append("[null]");
-            return {true};
-        }
-
-        s->Append("{ colorFormats: [");
-
-        ColorAttachmentIndex nextColorIndex(uint8_t(0));
-
-        bool needsComma = false;
-        for (ColorAttachmentIndex i : IterateBitSet(value->GetColorAttachmentsMask())) {
-            while (nextColorIndex < i) {
-                s->Append(absl::StrFormat("%s, ", wgpu::TextureFormat::Undefined));
-                nextColorIndex++;
-                needsComma = false;
-            }
-
-            if (needsComma) {
-                s->Append(", ");
-            }
-
-            s->Append(absl::StrFormat("%s", value->GetColorAttachmentFormat(i)));
-
+    bool needsComma = false;
+    for (ColorAttachmentIndex i : IterateBitSet(value->GetColorAttachmentsMask())) {
+        while (nextColorIndex < i) {
+            s->Append(absl::StrFormat("%s, ", wgpu::TextureFormat::Undefined));
             nextColorIndex++;
-            needsComma = true;
+            needsComma = false;
         }
 
-        s->Append("], ");
-
-        if (value->HasDepthStencilAttachment()) {
-            s->Append(absl::StrFormat("depthStencilFormat: %s, ", value->GetDepthStencilFormat()));
+        if (needsComma) {
+            s->Append(", ");
         }
 
-        s->Append(absl::StrFormat("sampleCount: %u }", value->GetSampleCount()));
+        s->Append(absl::StrFormat("%s", value->GetColorAttachmentFormat(i)));
 
+        nextColorIndex++;
+        needsComma = true;
+    }
+
+    s->Append("], ");
+
+    if (value->HasDepthStencilAttachment()) {
+        s->Append(absl::StrFormat("depthStencilFormat: %s, ", value->GetDepthStencilFormat()));
+    }
+
+    s->Append(absl::StrFormat("sampleCount: %u }", value->GetSampleCount()));
+
+    return {true};
+}
+
+//
+// Enums
+//
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s) {
+    if (value == Aspect::None) {
+        s->Append("None");
         return {true};
     }
 
-    //
-    // Enums
-    //
+    bool first = true;
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
-    AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s) {
-        if (value == Aspect::None) {
-            s->Append("None");
-            return {true};
+    if (value & Aspect::Color) {
+        first = false;
+        s->Append("Color");
+        value &= ~Aspect::Color;
+    }
+
+    if (value & Aspect::Depth) {
+        if (!first) {
+            s->Append("|");
         }
+        first = false;
+        s->Append("Depth");
+        value &= ~Aspect::Depth;
+    }
 
-        bool first = true;
-
-        if (value & Aspect::Color) {
-            first = false;
-            s->Append("Color");
-            value &= ~Aspect::Color;
+    if (value & Aspect::Stencil) {
+        if (!first) {
+            s->Append("|");
         }
+        first = false;
+        s->Append("Stencil");
+        value &= ~Aspect::Stencil;
+    }
 
-        if (value & Aspect::Depth) {
-            if (!first) {
-                s->Append("|");
-            }
-            first = false;
-            s->Append("Depth");
-            value &= ~Aspect::Depth;
+    // Output any remaining flags as a hex value
+    if (static_cast<bool>(value)) {
+        if (!first) {
+            s->Append("|");
         }
+        s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
+    }
 
-        if (value & Aspect::Stencil) {
-            if (!first) {
-                s->Append("|");
-            }
-            first = false;
-            s->Append("Stencil");
-            value &= ~Aspect::Stencil;
-        }
+    return {true};
+}
 
-        // Output any remaining flags as a hex value
-        if (static_cast<bool>(value)) {
-            if (!first) {
-                s->Append("|");
-            }
-            s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
-        }
-
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    SampleTypeBit value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    if (value == SampleTypeBit::None) {
+        s->Append("None");
         return {true};
     }
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        SampleTypeBit value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        if (value == SampleTypeBit::None) {
-            s->Append("None");
-            return {true};
+    bool first = true;
+
+    if (value & SampleTypeBit::Float) {
+        first = false;
+        s->Append("Float");
+        value &= ~SampleTypeBit::Float;
+    }
+
+    if (value & SampleTypeBit::UnfilterableFloat) {
+        if (!first) {
+            s->Append("|");
         }
+        first = false;
+        s->Append("UnfilterableFloat");
+        value &= ~SampleTypeBit::UnfilterableFloat;
+    }
 
-        bool first = true;
+    if (value & SampleTypeBit::Depth) {
+        if (!first) {
+            s->Append("|");
+        }
+        first = false;
+        s->Append("Depth");
+        value &= ~SampleTypeBit::Depth;
+    }
 
-        if (value & SampleTypeBit::Float) {
-            first = false;
+    if (value & SampleTypeBit::Sint) {
+        if (!first) {
+            s->Append("|");
+        }
+        first = false;
+        s->Append("Sint");
+        value &= ~SampleTypeBit::Sint;
+    }
+
+    if (value & SampleTypeBit::Uint) {
+        if (!first) {
+            s->Append("|");
+        }
+        first = false;
+        s->Append("Uint");
+        value &= ~SampleTypeBit::Uint;
+    }
+
+    // Output any remaining flags as a hex value
+    if (static_cast<bool>(value)) {
+        if (!first) {
+            s->Append("|");
+        }
+        s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
+    }
+
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    BindingInfoType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    switch (value) {
+        case BindingInfoType::Buffer:
+            s->Append("buffer");
+            break;
+        case BindingInfoType::Sampler:
+            s->Append("sampler");
+            break;
+        case BindingInfoType::Texture:
+            s->Append("texture");
+            break;
+        case BindingInfoType::StorageTexture:
+            s->Append("storageTexture");
+            break;
+        case BindingInfoType::ExternalTexture:
+            s->Append("externalTexture");
+            break;
+    }
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    SingleShaderStage value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    switch (value) {
+        case SingleShaderStage::Compute:
+            s->Append("Compute");
+            break;
+        case SingleShaderStage::Vertex:
+            s->Append("Vertex");
+            break;
+        case SingleShaderStage::Fragment:
+            s->Append("Fragment");
+            break;
+    }
+    return {true};
+}
+
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    VertexFormatBaseType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    switch (value) {
+        case VertexFormatBaseType::Float:
             s->Append("Float");
-            value &= ~SampleTypeBit::Float;
-        }
-
-        if (value & SampleTypeBit::UnfilterableFloat) {
-            if (!first) {
-                s->Append("|");
-            }
-            first = false;
-            s->Append("UnfilterableFloat");
-            value &= ~SampleTypeBit::UnfilterableFloat;
-        }
-
-        if (value & SampleTypeBit::Depth) {
-            if (!first) {
-                s->Append("|");
-            }
-            first = false;
-            s->Append("Depth");
-            value &= ~SampleTypeBit::Depth;
-        }
-
-        if (value & SampleTypeBit::Sint) {
-            if (!first) {
-                s->Append("|");
-            }
-            first = false;
-            s->Append("Sint");
-            value &= ~SampleTypeBit::Sint;
-        }
-
-        if (value & SampleTypeBit::Uint) {
-            if (!first) {
-                s->Append("|");
-            }
-            first = false;
+            break;
+        case VertexFormatBaseType::Uint:
             s->Append("Uint");
-            value &= ~SampleTypeBit::Uint;
-        }
-
-        // Output any remaining flags as a hex value
-        if (static_cast<bool>(value)) {
-            if (!first) {
-                s->Append("|");
-            }
-            s->Append(absl::StrFormat("%x", static_cast<uint8_t>(value)));
-        }
-
-        return {true};
+            break;
+        case VertexFormatBaseType::Sint:
+            s->Append("Sint");
+            break;
     }
+    return {true};
+}
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        BindingInfoType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        switch (value) {
-            case BindingInfoType::Buffer:
-                s->Append("buffer");
-                break;
-            case BindingInfoType::Sampler:
-                s->Append("sampler");
-                break;
-            case BindingInfoType::Texture:
-                s->Append("texture");
-                break;
-            case BindingInfoType::StorageTexture:
-                s->Append("storageTexture");
-                break;
-            case BindingInfoType::ExternalTexture:
-                s->Append("externalTexture");
-                break;
-        }
-        return {true};
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    InterStageComponentType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    switch (value) {
+        case InterStageComponentType::Float:
+            s->Append("Float");
+            break;
+        case InterStageComponentType::Uint:
+            s->Append("Uint");
+            break;
+        case InterStageComponentType::Sint:
+            s->Append("Sint");
+            break;
     }
+    return {true};
+}
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        SingleShaderStage value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        switch (value) {
-            case SingleShaderStage::Compute:
-                s->Append("Compute");
-                break;
-            case SingleShaderStage::Vertex:
-                s->Append("Vertex");
-                break;
-            case SingleShaderStage::Fragment:
-                s->Append("Fragment");
-                break;
-        }
-        return {true};
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    InterpolationType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    switch (value) {
+        case InterpolationType::Perspective:
+            s->Append("Perspective");
+            break;
+        case InterpolationType::Linear:
+            s->Append("Linear");
+            break;
+        case InterpolationType::Flat:
+            s->Append("Flat");
+            break;
     }
+    return {true};
+}
 
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        VertexFormatBaseType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        switch (value) {
-            case VertexFormatBaseType::Float:
-                s->Append("Float");
-                break;
-            case VertexFormatBaseType::Uint:
-                s->Append("Uint");
-                break;
-            case VertexFormatBaseType::Sint:
-                s->Append("Sint");
-                break;
-        }
-        return {true};
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    InterpolationSampling value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s) {
+    switch (value) {
+        case InterpolationSampling::None:
+            s->Append("None");
+            break;
+        case InterpolationSampling::Center:
+            s->Append("Center");
+            break;
+        case InterpolationSampling::Centroid:
+            s->Append("Centroid");
+            break;
+        case InterpolationSampling::Sample:
+            s->Append("Sample");
+            break;
     }
-
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        InterStageComponentType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        switch (value) {
-            case InterStageComponentType::Float:
-                s->Append("Float");
-                break;
-            case InterStageComponentType::Uint:
-                s->Append("Uint");
-                break;
-            case InterStageComponentType::Sint:
-                s->Append("Sint");
-                break;
-        }
-        return {true};
-    }
-
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        InterpolationType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        switch (value) {
-            case InterpolationType::Perspective:
-                s->Append("Perspective");
-                break;
-            case InterpolationType::Linear:
-                s->Append("Linear");
-                break;
-            case InterpolationType::Flat:
-                s->Append("Flat");
-                break;
-        }
-        return {true};
-    }
-
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        InterpolationSampling value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s) {
-        switch (value) {
-            case InterpolationSampling::None:
-                s->Append("None");
-                break;
-            case InterpolationSampling::Center:
-                s->Append("Center");
-                break;
-            case InterpolationSampling::Centroid:
-                s->Append("Centroid");
-                break;
-            case InterpolationSampling::Sample:
-                s->Append("Sample");
-                break;
-        }
-        return {true};
-    }
+    return {true};
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/native/webgpu_absl_format.h b/src/dawn/native/webgpu_absl_format.h
index 0c141a3..4c0c667 100644
--- a/src/dawn/native/webgpu_absl_format.h
+++ b/src/dawn/native/webgpu_absl_format.h
@@ -21,113 +21,109 @@
 
 namespace dawn::native {
 
-    //
-    // Structs
-    //
+//
+// Structs
+//
 
-    struct Color;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const Color* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+struct Color;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(const Color* value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
 
-    struct Extent3D;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const Extent3D* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+struct Extent3D;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const Extent3D* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    struct Origin3D;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const Origin3D* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+struct Origin3D;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const Origin3D* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    struct BindingInfo;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const BindingInfo& value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+struct BindingInfo;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const BindingInfo& value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    //
-    // Objects
-    //
+//
+// Objects
+//
 
-    class DeviceBase;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const DeviceBase* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+class DeviceBase;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const DeviceBase* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    class ApiObjectBase;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const ApiObjectBase* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+class ApiObjectBase;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const ApiObjectBase* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    // Special case for TextureViews, since frequently the texture will be the
-    // thing that's labeled.
-    class TextureViewBase;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const TextureViewBase* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+// Special case for TextureViews, since frequently the texture will be the
+// thing that's labeled.
+class TextureViewBase;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const TextureViewBase* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    class AttachmentState;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        const AttachmentState* value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+class AttachmentState;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    const AttachmentState* value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    //
-    // Enums
-    //
+//
+// Enums
+//
 
-    enum class Aspect : uint8_t;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
-    AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
+enum class Aspect : uint8_t;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(Aspect value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
 
-    enum class BindingInfoType;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        BindingInfoType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+enum class BindingInfoType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    BindingInfoType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    enum class SampleTypeBit : uint8_t;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        SampleTypeBit value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+enum class SampleTypeBit : uint8_t;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString>
+AbslFormatConvert(SampleTypeBit value, const absl::FormatConversionSpec& spec, absl::FormatSink* s);
 
-    enum class SingleShaderStage;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        SingleShaderStage value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+enum class SingleShaderStage;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    SingleShaderStage value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    enum class VertexFormatBaseType;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        VertexFormatBaseType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+enum class VertexFormatBaseType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    VertexFormatBaseType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    enum class InterStageComponentType;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        InterStageComponentType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+enum class InterStageComponentType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    InterStageComponentType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    enum class InterpolationType;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        InterpolationType value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+enum class InterpolationType;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    InterpolationType value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
-    enum class InterpolationSampling;
-    absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
-        InterpolationSampling value,
-        const absl::FormatConversionSpec& spec,
-        absl::FormatSink* s);
+enum class InterpolationSampling;
+absl::FormatConvertResult<absl::FormatConversionCharSet::kString> AbslFormatConvert(
+    InterpolationSampling value,
+    const absl::FormatConversionSpec& spec,
+    absl::FormatSink* s);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/node/Module.cpp b/src/dawn/node/Module.cpp
index e5879bf..ba2ad04 100644
--- a/src/dawn/node/Module.cpp
+++ b/src/dawn/node/Module.cpp
@@ -22,33 +22,33 @@
 #include "src/dawn/node/binding/GPU.h"
 
 namespace {
-    Napi::Value CreateGPU(const Napi::CallbackInfo& info) {
-        const auto& env = info.Env();
+Napi::Value CreateGPU(const Napi::CallbackInfo& info) {
+    const auto& env = info.Env();
 
-        std::tuple<std::vector<std::string>> args;
-        auto res = wgpu::interop::FromJS(info, args);
-        if (res != wgpu::interop::Success) {
-            Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
+    std::tuple<std::vector<std::string>> args;
+    auto res = wgpu::interop::FromJS(info, args);
+    if (res != wgpu::interop::Success) {
+        Napi::Error::New(env, res.error).ThrowAsJavaScriptException();
+        return env.Undefined();
+    }
+
+    wgpu::binding::Flags flags;
+
+    // Parse out the key=value flags out of the input args array
+    for (const auto& arg : std::get<0>(args)) {
+        const size_t sep_index = arg.find("=");
+        if (sep_index == std::string::npos) {
+            Napi::Error::New(env, "Flags expected argument format is <key>=<value>")
+                .ThrowAsJavaScriptException();
             return env.Undefined();
         }
-
-        wgpu::binding::Flags flags;
-
-        // Parse out the key=value flags out of the input args array
-        for (const auto& arg : std::get<0>(args)) {
-            const size_t sep_index = arg.find("=");
-            if (sep_index == std::string::npos) {
-                Napi::Error::New(env, "Flags expected argument format is <key>=<value>")
-                    .ThrowAsJavaScriptException();
-                return env.Undefined();
-            }
-            flags.Set(arg.substr(0, sep_index), arg.substr(sep_index + 1));
-        }
-
-        // Construct a wgpu::interop::GPU interface, implemented by wgpu::bindings::GPU.
-        return wgpu::interop::GPU::Create<wgpu::binding::GPU>(env, std::move(flags));
+        flags.Set(arg.substr(0, sep_index), arg.substr(sep_index + 1));
     }
 
+    // Construct a wgpu::interop::GPU interface, implemented by wgpu::bindings::GPU.
+    return wgpu::interop::GPU::Create<wgpu::binding::GPU>(env, std::move(flags));
+}
+
 }  // namespace
 
 // Initialize() initializes the Dawn node module, registering all the WebGPU
diff --git a/src/dawn/node/NapiSymbols.cpp b/src/dawn/node/NapiSymbols.cpp
index a557eca..01a184f 100644
--- a/src/dawn/node/NapiSymbols.cpp
+++ b/src/dawn/node/NapiSymbols.cpp
@@ -22,7 +22,7 @@
 // which we can use to produce weak-symbol stubs.
 
 #ifdef _WIN32
-#    error "NapiSymbols.cpp is not used on Windows"
+#error "NapiSymbols.cpp is not used on Windows"
 #endif
 
 #define NAPI_SYMBOL(NAME)                                                              \
diff --git a/src/dawn/node/binding/AsyncRunner.cpp b/src/dawn/node/binding/AsyncRunner.cpp
index a978fa8..5926255 100644
--- a/src/dawn/node/binding/AsyncRunner.cpp
+++ b/src/dawn/node/binding/AsyncRunner.cpp
@@ -19,42 +19,41 @@
 
 namespace wgpu::binding {
 
-    AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {
-    }
+AsyncRunner::AsyncRunner(Napi::Env env, wgpu::Device device) : env_(env), device_(device) {}
 
-    void AsyncRunner::Begin() {
-        assert(count_ != std::numeric_limits<decltype(count_)>::max());
-        if (count_++ == 0) {
-            QueueTick();
-        }
+void AsyncRunner::Begin() {
+    assert(count_ != std::numeric_limits<decltype(count_)>::max());
+    if (count_++ == 0) {
+        QueueTick();
     }
+}
 
-    void AsyncRunner::End() {
-        assert(count_ > 0);
-        count_--;
-    }
+void AsyncRunner::End() {
+    assert(count_ > 0);
+    count_--;
+}
 
-    void AsyncRunner::QueueTick() {
-        // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
-        // called.
-        if (tick_queued_) {
-            return;
-        }
-        tick_queued_ = true;
-        env_.Global()
-            .Get("setImmediate")
-            .As<Napi::Function>()
-            .Call({
-                // TODO(crbug.com/dawn/1127): Create once, reuse.
-                Napi::Function::New(env_,
-                                    [this](const Napi::CallbackInfo&) {
-                                        tick_queued_ = false;
-                                        if (count_ > 0) {
-                                            device_.Tick();
-                                            QueueTick();
-                                        }
-                                    }),
-            });
+void AsyncRunner::QueueTick() {
+    // TODO(crbug.com/dawn/1127): We probably want to reduce the frequency at which this gets
+    // called.
+    if (tick_queued_) {
+        return;
     }
+    tick_queued_ = true;
+    env_.Global()
+        .Get("setImmediate")
+        .As<Napi::Function>()
+        .Call({
+            // TODO(crbug.com/dawn/1127): Create once, reuse.
+            Napi::Function::New(env_,
+                                [this](const Napi::CallbackInfo&) {
+                                    tick_queued_ = false;
+                                    if (count_ > 0) {
+                                        device_.Tick();
+                                        QueueTick();
+                                    }
+                                }),
+        });
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/AsyncRunner.h b/src/dawn/node/binding/AsyncRunner.h
index f01d394..0c733b9 100644
--- a/src/dawn/node/binding/AsyncRunner.h
+++ b/src/dawn/node/binding/AsyncRunner.h
@@ -24,55 +24,52 @@
 
 namespace wgpu::binding {
 
-    // AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
-    // tasks in flight.
-    class AsyncRunner {
-      public:
-        AsyncRunner(Napi::Env env, wgpu::Device device);
+// AsyncRunner is used to poll a wgpu::Device with calls to Tick() while there are asynchronous
+// tasks in flight.
+class AsyncRunner {
+  public:
+    AsyncRunner(Napi::Env env, wgpu::Device device);
 
-        // Begin() should be called when a new asynchronous task is started.
-        // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
-        // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
-        // thread is idle. This will be repeatedly called until the number of executing asynchronous
-        // tasks reaches 0 again.
-        void Begin();
+    // Begin() should be called when a new asynchronous task is started.
+    // If the number of executing asynchronous tasks transitions from 0 to 1, then a function
+    // will be scheduled on the main JavaScript thread to call wgpu::Device::Tick() whenever the
+    // thread is idle. This will be repeatedly called until the number of executing asynchronous
+    // tasks reaches 0 again.
+    void Begin();
 
-        // End() should be called once the asynchronous task has finished.
-        // Every call to Begin() should eventually result in a call to End().
-        void End();
+    // End() should be called once the asynchronous task has finished.
+    // Every call to Begin() should eventually result in a call to End().
+    void End();
 
-      private:
-        void QueueTick();
-        Napi::Env env_;
-        wgpu::Device const device_;
-        uint64_t count_ = 0;
-        bool tick_queued_ = false;
-    };
+  private:
+    void QueueTick();
+    Napi::Env env_;
+    wgpu::Device const device_;
+    uint64_t count_ = 0;
+    bool tick_queued_ = false;
+};
 
-    // AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
-    // AsyncRunner::End() on destruction.
-    class AsyncTask {
-      public:
-        inline AsyncTask(AsyncTask&&) = default;
+// AsyncTask is a RAII helper for calling AsyncRunner::Begin() on construction, and
+// AsyncRunner::End() on destruction.
+class AsyncTask {
+  public:
+    inline AsyncTask(AsyncTask&&) = default;
 
-        // Constructor.
-        // Calls AsyncRunner::Begin()
-        explicit inline AsyncTask(std::shared_ptr<AsyncRunner> runner)
-            : runner_(std::move(runner)) {
-            runner_->Begin();
-        }
+    // Constructor.
+    // Calls AsyncRunner::Begin()
+    explicit inline AsyncTask(std::shared_ptr<AsyncRunner> runner) : runner_(std::move(runner)) {
+        runner_->Begin();
+    }
 
-        // Destructor.
-        // Calls AsyncRunner::End()
-        inline ~AsyncTask() {
-            runner_->End();
-        }
+    // Destructor.
+    // Calls AsyncRunner::End()
+    inline ~AsyncTask() { runner_->End(); }
 
-      private:
-        AsyncTask(const AsyncTask&) = delete;
-        AsyncTask& operator=(const AsyncTask&) = delete;
-        std::shared_ptr<AsyncRunner> runner_;
-    };
+  private:
+    AsyncTask(const AsyncTask&) = delete;
+    AsyncTask& operator=(const AsyncTask&) = delete;
+    std::shared_ptr<AsyncRunner> runner_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/Converter.cpp b/src/dawn/node/binding/Converter.cpp
index 9c116f2..2d1ae29 100644
--- a/src/dawn/node/binding/Converter.cpp
+++ b/src/dawn/node/binding/Converter.cpp
@@ -24,1198 +24,1183 @@
 
 namespace wgpu::binding {
 
-    Converter::~Converter() {
-        for (auto& free : free_) {
-            free();
-        }
+Converter::~Converter() {
+    for (auto& free : free_) {
+        free();
     }
+}
 
-    bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
-        out = {};
-        if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
-            out.depthOrArrayLayers = dict->depthOrArrayLayers;
-            out.width = dict->width;
-            out.height = dict->height;
-            return true;
-        }
-        if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
-            switch (vec->size()) {
-                default:
-                case 3:
-                    out.depthOrArrayLayers = (*vec)[2];
-                case 2:  // fallthrough
-                    out.height = (*vec)[1];
-                case 1:  // fallthrough
-                    out.width = (*vec)[0];
-                    return true;
-                case 0:
-                    break;
-            }
-        }
-        Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
-        out = {};
-        out.x = in.x;
-        out.y = in.y;
-        out.z = in.z;
+bool Converter::Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in) {
+    out = {};
+    if (auto* dict = std::get_if<interop::GPUExtent3DDict>(&in)) {
+        out.depthOrArrayLayers = dict->depthOrArrayLayers;
+        out.width = dict->width;
+        out.height = dict->height;
         return true;
     }
-
-    bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
-        out = {};
-        if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
-            out.r = dict->r;
-            out.g = dict->g;
-            out.b = dict->b;
-            out.a = dict->a;
-            return true;
-        }
-        if (auto* vec = std::get_if<std::vector<double>>(&in)) {
-            switch (vec->size()) {
-                default:
-                case 4:
-                    out.a = (*vec)[3];
-                case 3:  // fallthrough
-                    out.b = (*vec)[2];
-                case 2:  // fallthrough
-                    out.g = (*vec)[1];
-                case 1:  // fallthrough
-                    out.r = (*vec)[0];
-                    return true;
-                case 0:
-                    break;
-            }
-        }
-        Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::Origin3D& out,
-                            const std::vector<interop::GPUIntegerCoordinate>& in) {
-        out = {};
-        switch (in.size()) {
+    if (auto* vec = std::get_if<std::vector<interop::GPUIntegerCoordinate>>(&in)) {
+        switch (vec->size()) {
             default:
             case 3:
-                out.z = in[2];
+                out.depthOrArrayLayers = (*vec)[2];
             case 2:  // fallthrough
-                out.y = in[1];
+                out.height = (*vec)[1];
             case 1:  // fallthrough
-                out.x = in[0];
+                out.width = (*vec)[0];
+                return true;
             case 0:
                 break;
         }
+    }
+    Napi::Error::New(env, "invalid value for GPUExtent3D").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in) {
+    out = {};
+    out.x = in.x;
+    out.y = in.y;
+    out.z = in.z;
+    return true;
+}
+
+bool Converter::Convert(wgpu::Color& out, const interop::GPUColor& in) {
+    out = {};
+    if (auto* dict = std::get_if<interop::GPUColorDict>(&in)) {
+        out.r = dict->r;
+        out.g = dict->g;
+        out.b = dict->b;
+        out.a = dict->a;
         return true;
     }
-
-    bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
-        out = wgpu::TextureAspect::All;
-        switch (in) {
-            case interop::GPUTextureAspect::kAll:
-                out = wgpu::TextureAspect::All;
+    if (auto* vec = std::get_if<std::vector<double>>(&in)) {
+        switch (vec->size()) {
+            default:
+            case 4:
+                out.a = (*vec)[3];
+            case 3:  // fallthrough
+                out.b = (*vec)[2];
+            case 2:  // fallthrough
+                out.g = (*vec)[1];
+            case 1:  // fallthrough
+                out.r = (*vec)[0];
                 return true;
-            case interop::GPUTextureAspect::kStencilOnly:
-                out = wgpu::TextureAspect::StencilOnly;
-                return true;
-            case interop::GPUTextureAspect::kDepthOnly:
-                out = wgpu::TextureAspect::DepthOnly;
-                return true;
+            case 0:
+                break;
         }
-        Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
-        return false;
     }
+    Napi::Error::New(env, "invalid value for GPUColor").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
-        out = {};
-        return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
-               Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
+bool Converter::Convert(wgpu::Origin3D& out, const std::vector<interop::GPUIntegerCoordinate>& in) {
+    out = {};
+    switch (in.size()) {
+        default:
+        case 3:
+            out.z = in[2];
+        case 2:  // fallthrough
+            out.y = in[1];
+        case 1:  // fallthrough
+            out.x = in[0];
+        case 0:
+            break;
     }
+    return true;
+}
 
-    bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
-        out = {};
-        out.buffer = *in.buffer.As<GPUBuffer>();
-        return Convert(out.layout.offset, in.offset) &&
-               Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
-               Convert(out.layout.rowsPerImage, in.rowsPerImage);
-    }
-
-    bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
-        out = {};
-        if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
-            std::visit(
-                [&](auto&& v) {
-                    auto arr = v.ArrayBuffer();
-                    out.data = arr.Data();
-                    out.size = arr.ByteLength();
-                    out.bytesPerElement = v.ElementSize();
-                },
-                *view);
+bool Converter::Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in) {
+    out = wgpu::TextureAspect::All;
+    switch (in) {
+        case interop::GPUTextureAspect::kAll:
+            out = wgpu::TextureAspect::All;
             return true;
-        }
-        if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
-            out.data = arr->Data();
-            out.size = arr->ByteLength();
-            out.bytesPerElement = 1;
+        case interop::GPUTextureAspect::kStencilOnly:
+            out = wgpu::TextureAspect::StencilOnly;
             return true;
-        }
-        Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
-        return false;
+        case interop::GPUTextureAspect::kDepthOnly:
+            out = wgpu::TextureAspect::DepthOnly;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUTextureAspect").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
-        out = {};
-        return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
-               Convert(out.rowsPerImage, in.rowsPerImage);
-    }
+bool Converter::Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in) {
+    out = {};
+    return Convert(out.texture, in.texture) && Convert(out.mipLevel, in.mipLevel) &&
+           Convert(out.origin, in.origin) && Convert(out.aspect, in.aspect);
+}
 
-    bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
-        out = wgpu::TextureFormat::Undefined;
-        switch (in) {
-            case interop::GPUTextureFormat::kR8Unorm:
-                out = wgpu::TextureFormat::R8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kR8Snorm:
-                out = wgpu::TextureFormat::R8Snorm;
-                return true;
-            case interop::GPUTextureFormat::kR8Uint:
-                out = wgpu::TextureFormat::R8Uint;
-                return true;
-            case interop::GPUTextureFormat::kR8Sint:
-                out = wgpu::TextureFormat::R8Sint;
-                return true;
-            case interop::GPUTextureFormat::kR16Uint:
-                out = wgpu::TextureFormat::R16Uint;
-                return true;
-            case interop::GPUTextureFormat::kR16Sint:
-                out = wgpu::TextureFormat::R16Sint;
-                return true;
-            case interop::GPUTextureFormat::kR16Float:
-                out = wgpu::TextureFormat::R16Float;
-                return true;
-            case interop::GPUTextureFormat::kRg8Unorm:
-                out = wgpu::TextureFormat::RG8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kRg8Snorm:
-                out = wgpu::TextureFormat::RG8Snorm;
-                return true;
-            case interop::GPUTextureFormat::kRg8Uint:
-                out = wgpu::TextureFormat::RG8Uint;
-                return true;
-            case interop::GPUTextureFormat::kRg8Sint:
-                out = wgpu::TextureFormat::RG8Sint;
-                return true;
-            case interop::GPUTextureFormat::kR32Uint:
-                out = wgpu::TextureFormat::R32Uint;
-                return true;
-            case interop::GPUTextureFormat::kR32Sint:
-                out = wgpu::TextureFormat::R32Sint;
-                return true;
-            case interop::GPUTextureFormat::kR32Float:
-                out = wgpu::TextureFormat::R32Float;
-                return true;
-            case interop::GPUTextureFormat::kRg16Uint:
-                out = wgpu::TextureFormat::RG16Uint;
-                return true;
-            case interop::GPUTextureFormat::kRg16Sint:
-                out = wgpu::TextureFormat::RG16Sint;
-                return true;
-            case interop::GPUTextureFormat::kRg16Float:
-                out = wgpu::TextureFormat::RG16Float;
-                return true;
-            case interop::GPUTextureFormat::kRgba8Unorm:
-                out = wgpu::TextureFormat::RGBA8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kRgba8UnormSrgb:
-                out = wgpu::TextureFormat::RGBA8UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kRgba8Snorm:
-                out = wgpu::TextureFormat::RGBA8Snorm;
-                return true;
-            case interop::GPUTextureFormat::kRgba8Uint:
-                out = wgpu::TextureFormat::RGBA8Uint;
-                return true;
-            case interop::GPUTextureFormat::kRgba8Sint:
-                out = wgpu::TextureFormat::RGBA8Sint;
-                return true;
-            case interop::GPUTextureFormat::kBgra8Unorm:
-                out = wgpu::TextureFormat::BGRA8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kBgra8UnormSrgb:
-                out = wgpu::TextureFormat::BGRA8UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kRgb9E5Ufloat:
-                out = wgpu::TextureFormat::RGB9E5Ufloat;
-                return true;
-            case interop::GPUTextureFormat::kRgb10A2Unorm:
-                out = wgpu::TextureFormat::RGB10A2Unorm;
-                return true;
-            case interop::GPUTextureFormat::kRg11B10Ufloat:
-                out = wgpu::TextureFormat::RG11B10Ufloat;
-                return true;
-            case interop::GPUTextureFormat::kRg32Uint:
-                out = wgpu::TextureFormat::RG32Uint;
-                return true;
-            case interop::GPUTextureFormat::kRg32Sint:
-                out = wgpu::TextureFormat::RG32Sint;
-                return true;
-            case interop::GPUTextureFormat::kRg32Float:
-                out = wgpu::TextureFormat::RG32Float;
-                return true;
-            case interop::GPUTextureFormat::kRgba16Uint:
-                out = wgpu::TextureFormat::RGBA16Uint;
-                return true;
-            case interop::GPUTextureFormat::kRgba16Sint:
-                out = wgpu::TextureFormat::RGBA16Sint;
-                return true;
-            case interop::GPUTextureFormat::kRgba16Float:
-                out = wgpu::TextureFormat::RGBA16Float;
-                return true;
-            case interop::GPUTextureFormat::kRgba32Uint:
-                out = wgpu::TextureFormat::RGBA32Uint;
-                return true;
-            case interop::GPUTextureFormat::kRgba32Sint:
-                out = wgpu::TextureFormat::RGBA32Sint;
-                return true;
-            case interop::GPUTextureFormat::kRgba32Float:
-                out = wgpu::TextureFormat::RGBA32Float;
-                return true;
-            case interop::GPUTextureFormat::kStencil8:
-                out = wgpu::TextureFormat::Stencil8;
-                return true;
-            case interop::GPUTextureFormat::kDepth16Unorm:
-                out = wgpu::TextureFormat::Depth16Unorm;
-                return true;
-            case interop::GPUTextureFormat::kDepth24Plus:
-                out = wgpu::TextureFormat::Depth24Plus;
-                return true;
-            case interop::GPUTextureFormat::kDepth24PlusStencil8:
-                out = wgpu::TextureFormat::Depth24PlusStencil8;
-                return true;
-            case interop::GPUTextureFormat::kDepth32Float:
-                out = wgpu::TextureFormat::Depth32Float;
-                return true;
-            case interop::GPUTextureFormat::kDepth24UnormStencil8:
-                out = wgpu::TextureFormat::Depth24UnormStencil8;
-                return true;
-            case interop::GPUTextureFormat::kDepth32FloatStencil8:
-                out = wgpu::TextureFormat::Depth32FloatStencil8;
-                return true;
-            case interop::GPUTextureFormat::kBc1RgbaUnorm:
-                out = wgpu::TextureFormat::BC1RGBAUnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
-                out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kBc2RgbaUnorm:
-                out = wgpu::TextureFormat::BC2RGBAUnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
-                out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kBc3RgbaUnorm:
-                out = wgpu::TextureFormat::BC3RGBAUnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
-                out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kBc4RUnorm:
-                out = wgpu::TextureFormat::BC4RUnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc4RSnorm:
-                out = wgpu::TextureFormat::BC4RSnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc5RgUnorm:
-                out = wgpu::TextureFormat::BC5RGUnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc5RgSnorm:
-                out = wgpu::TextureFormat::BC5RGSnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc6HRgbUfloat:
-                out = wgpu::TextureFormat::BC6HRGBUfloat;
-                return true;
-            case interop::GPUTextureFormat::kBc6HRgbFloat:
-                out = wgpu::TextureFormat::BC6HRGBFloat;
-                return true;
-            case interop::GPUTextureFormat::kBc7RgbaUnorm:
-                out = wgpu::TextureFormat::BC7RGBAUnorm;
-                return true;
-            case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
-                out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kEtc2Rgb8Unorm:
-                out = wgpu::TextureFormat::ETC2RGB8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kEtc2Rgb8UnormSrgb:
-                out = wgpu::TextureFormat::ETC2RGB8UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kEtc2Rgb8A1Unorm:
-                out = wgpu::TextureFormat::ETC2RGB8A1Unorm;
-                return true;
-            case interop::GPUTextureFormat::kEtc2Rgb8A1UnormSrgb:
-                out = wgpu::TextureFormat::ETC2RGB8A1UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kEtc2Rgba8Unorm:
-                out = wgpu::TextureFormat::ETC2RGBA8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kEtc2Rgba8UnormSrgb:
-                out = wgpu::TextureFormat::ETC2RGBA8UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kEacR11Unorm:
-                out = wgpu::TextureFormat::EACR11Unorm;
-                return true;
-            case interop::GPUTextureFormat::kEacR11Snorm:
-                out = wgpu::TextureFormat::EACR11Snorm;
-                return true;
-            case interop::GPUTextureFormat::kEacRg11Unorm:
-                out = wgpu::TextureFormat::EACRG11Unorm;
-                return true;
-            case interop::GPUTextureFormat::kEacRg11Snorm:
-                out = wgpu::TextureFormat::EACRG11Snorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc4X4Unorm:
-                out = wgpu::TextureFormat::ASTC4x4Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc4X4UnormSrgb:
-                out = wgpu::TextureFormat::ASTC4x4UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc5X4Unorm:
-                out = wgpu::TextureFormat::ASTC5x4Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc5X4UnormSrgb:
-                out = wgpu::TextureFormat::ASTC5x4UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc5X5Unorm:
-                out = wgpu::TextureFormat::ASTC5x5Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc5X5UnormSrgb:
-                out = wgpu::TextureFormat::ASTC5x5UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc6X5Unorm:
-                out = wgpu::TextureFormat::ASTC6x5Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc6X5UnormSrgb:
-                out = wgpu::TextureFormat::ASTC6x5UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc6X6Unorm:
-                out = wgpu::TextureFormat::ASTC6x6Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc6X6UnormSrgb:
-                out = wgpu::TextureFormat::ASTC6x6UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc8X5Unorm:
-                out = wgpu::TextureFormat::ASTC8x5Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc8X5UnormSrgb:
-                out = wgpu::TextureFormat::ASTC8x5UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc8X6Unorm:
-                out = wgpu::TextureFormat::ASTC8x6Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc8X6UnormSrgb:
-                out = wgpu::TextureFormat::ASTC8x6UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc8X8Unorm:
-                out = wgpu::TextureFormat::ASTC8x8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc8X8UnormSrgb:
-                out = wgpu::TextureFormat::ASTC8x8UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X5Unorm:
-                out = wgpu::TextureFormat::ASTC10x5Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X5UnormSrgb:
-                out = wgpu::TextureFormat::ASTC10x5UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X6Unorm:
-                out = wgpu::TextureFormat::ASTC10x6Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X6UnormSrgb:
-                out = wgpu::TextureFormat::ASTC10x6UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X8Unorm:
-                out = wgpu::TextureFormat::ASTC10x8Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X8UnormSrgb:
-                out = wgpu::TextureFormat::ASTC10x8UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X10Unorm:
-                out = wgpu::TextureFormat::ASTC10x10Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc10X10UnormSrgb:
-                out = wgpu::TextureFormat::ASTC10x10UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc12X10Unorm:
-                out = wgpu::TextureFormat::ASTC12x10Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc12X10UnormSrgb:
-                out = wgpu::TextureFormat::ASTC12x10UnormSrgb;
-                return true;
-            case interop::GPUTextureFormat::kAstc12X12Unorm:
-                out = wgpu::TextureFormat::ASTC12x12Unorm;
-                return true;
-            case interop::GPUTextureFormat::kAstc12X12UnormSrgb:
-                out = wgpu::TextureFormat::ASTC12x12UnormSrgb;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
-        return false;
-    }
+bool Converter::Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in) {
+    out = {};
+    out.buffer = *in.buffer.As<GPUBuffer>();
+    return Convert(out.layout.offset, in.offset) &&
+           Convert(out.layout.bytesPerRow, in.bytesPerRow) &&
+           Convert(out.layout.rowsPerImage, in.rowsPerImage);
+}
 
-    bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
-        out = static_cast<wgpu::TextureUsage>(in.value);
+bool Converter::Convert(BufferSource& out, interop::BufferSource in) {
+    out = {};
+    if (auto* view = std::get_if<interop::ArrayBufferView>(&in)) {
+        std::visit(
+            [&](auto&& v) {
+                auto arr = v.ArrayBuffer();
+                out.data = arr.Data();
+                out.size = arr.ByteLength();
+                out.bytesPerElement = v.ElementSize();
+            },
+            *view);
         return true;
     }
-
-    bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
-        out = static_cast<wgpu::ColorWriteMask>(in.value);
+    if (auto* arr = std::get_if<interop::ArrayBuffer>(&in)) {
+        out.data = arr->Data();
+        out.size = arr->ByteLength();
+        out.bytesPerElement = 1;
         return true;
     }
+    Napi::Error::New(env, "invalid value for BufferSource").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
-        out = static_cast<wgpu::BufferUsage>(in.value);
-        return true;
+bool Converter::Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in) {
+    out = {};
+    return Convert(out.bytesPerRow, in.bytesPerRow) && Convert(out.offset, in.offset) &&
+           Convert(out.rowsPerImage, in.rowsPerImage);
+}
+
+bool Converter::Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in) {
+    out = wgpu::TextureFormat::Undefined;
+    switch (in) {
+        case interop::GPUTextureFormat::kR8Unorm:
+            out = wgpu::TextureFormat::R8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kR8Snorm:
+            out = wgpu::TextureFormat::R8Snorm;
+            return true;
+        case interop::GPUTextureFormat::kR8Uint:
+            out = wgpu::TextureFormat::R8Uint;
+            return true;
+        case interop::GPUTextureFormat::kR8Sint:
+            out = wgpu::TextureFormat::R8Sint;
+            return true;
+        case interop::GPUTextureFormat::kR16Uint:
+            out = wgpu::TextureFormat::R16Uint;
+            return true;
+        case interop::GPUTextureFormat::kR16Sint:
+            out = wgpu::TextureFormat::R16Sint;
+            return true;
+        case interop::GPUTextureFormat::kR16Float:
+            out = wgpu::TextureFormat::R16Float;
+            return true;
+        case interop::GPUTextureFormat::kRg8Unorm:
+            out = wgpu::TextureFormat::RG8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kRg8Snorm:
+            out = wgpu::TextureFormat::RG8Snorm;
+            return true;
+        case interop::GPUTextureFormat::kRg8Uint:
+            out = wgpu::TextureFormat::RG8Uint;
+            return true;
+        case interop::GPUTextureFormat::kRg8Sint:
+            out = wgpu::TextureFormat::RG8Sint;
+            return true;
+        case interop::GPUTextureFormat::kR32Uint:
+            out = wgpu::TextureFormat::R32Uint;
+            return true;
+        case interop::GPUTextureFormat::kR32Sint:
+            out = wgpu::TextureFormat::R32Sint;
+            return true;
+        case interop::GPUTextureFormat::kR32Float:
+            out = wgpu::TextureFormat::R32Float;
+            return true;
+        case interop::GPUTextureFormat::kRg16Uint:
+            out = wgpu::TextureFormat::RG16Uint;
+            return true;
+        case interop::GPUTextureFormat::kRg16Sint:
+            out = wgpu::TextureFormat::RG16Sint;
+            return true;
+        case interop::GPUTextureFormat::kRg16Float:
+            out = wgpu::TextureFormat::RG16Float;
+            return true;
+        case interop::GPUTextureFormat::kRgba8Unorm:
+            out = wgpu::TextureFormat::RGBA8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kRgba8UnormSrgb:
+            out = wgpu::TextureFormat::RGBA8UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kRgba8Snorm:
+            out = wgpu::TextureFormat::RGBA8Snorm;
+            return true;
+        case interop::GPUTextureFormat::kRgba8Uint:
+            out = wgpu::TextureFormat::RGBA8Uint;
+            return true;
+        case interop::GPUTextureFormat::kRgba8Sint:
+            out = wgpu::TextureFormat::RGBA8Sint;
+            return true;
+        case interop::GPUTextureFormat::kBgra8Unorm:
+            out = wgpu::TextureFormat::BGRA8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kBgra8UnormSrgb:
+            out = wgpu::TextureFormat::BGRA8UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kRgb9E5Ufloat:
+            out = wgpu::TextureFormat::RGB9E5Ufloat;
+            return true;
+        case interop::GPUTextureFormat::kRgb10A2Unorm:
+            out = wgpu::TextureFormat::RGB10A2Unorm;
+            return true;
+        case interop::GPUTextureFormat::kRg11B10Ufloat:
+            out = wgpu::TextureFormat::RG11B10Ufloat;
+            return true;
+        case interop::GPUTextureFormat::kRg32Uint:
+            out = wgpu::TextureFormat::RG32Uint;
+            return true;
+        case interop::GPUTextureFormat::kRg32Sint:
+            out = wgpu::TextureFormat::RG32Sint;
+            return true;
+        case interop::GPUTextureFormat::kRg32Float:
+            out = wgpu::TextureFormat::RG32Float;
+            return true;
+        case interop::GPUTextureFormat::kRgba16Uint:
+            out = wgpu::TextureFormat::RGBA16Uint;
+            return true;
+        case interop::GPUTextureFormat::kRgba16Sint:
+            out = wgpu::TextureFormat::RGBA16Sint;
+            return true;
+        case interop::GPUTextureFormat::kRgba16Float:
+            out = wgpu::TextureFormat::RGBA16Float;
+            return true;
+        case interop::GPUTextureFormat::kRgba32Uint:
+            out = wgpu::TextureFormat::RGBA32Uint;
+            return true;
+        case interop::GPUTextureFormat::kRgba32Sint:
+            out = wgpu::TextureFormat::RGBA32Sint;
+            return true;
+        case interop::GPUTextureFormat::kRgba32Float:
+            out = wgpu::TextureFormat::RGBA32Float;
+            return true;
+        case interop::GPUTextureFormat::kStencil8:
+            out = wgpu::TextureFormat::Stencil8;
+            return true;
+        case interop::GPUTextureFormat::kDepth16Unorm:
+            out = wgpu::TextureFormat::Depth16Unorm;
+            return true;
+        case interop::GPUTextureFormat::kDepth24Plus:
+            out = wgpu::TextureFormat::Depth24Plus;
+            return true;
+        case interop::GPUTextureFormat::kDepth24PlusStencil8:
+            out = wgpu::TextureFormat::Depth24PlusStencil8;
+            return true;
+        case interop::GPUTextureFormat::kDepth32Float:
+            out = wgpu::TextureFormat::Depth32Float;
+            return true;
+        case interop::GPUTextureFormat::kDepth24UnormStencil8:
+            out = wgpu::TextureFormat::Depth24UnormStencil8;
+            return true;
+        case interop::GPUTextureFormat::kDepth32FloatStencil8:
+            out = wgpu::TextureFormat::Depth32FloatStencil8;
+            return true;
+        case interop::GPUTextureFormat::kBc1RgbaUnorm:
+            out = wgpu::TextureFormat::BC1RGBAUnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc1RgbaUnormSrgb:
+            out = wgpu::TextureFormat::BC1RGBAUnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kBc2RgbaUnorm:
+            out = wgpu::TextureFormat::BC2RGBAUnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc2RgbaUnormSrgb:
+            out = wgpu::TextureFormat::BC2RGBAUnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kBc3RgbaUnorm:
+            out = wgpu::TextureFormat::BC3RGBAUnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc3RgbaUnormSrgb:
+            out = wgpu::TextureFormat::BC3RGBAUnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kBc4RUnorm:
+            out = wgpu::TextureFormat::BC4RUnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc4RSnorm:
+            out = wgpu::TextureFormat::BC4RSnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc5RgUnorm:
+            out = wgpu::TextureFormat::BC5RGUnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc5RgSnorm:
+            out = wgpu::TextureFormat::BC5RGSnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc6HRgbUfloat:
+            out = wgpu::TextureFormat::BC6HRGBUfloat;
+            return true;
+        case interop::GPUTextureFormat::kBc6HRgbFloat:
+            out = wgpu::TextureFormat::BC6HRGBFloat;
+            return true;
+        case interop::GPUTextureFormat::kBc7RgbaUnorm:
+            out = wgpu::TextureFormat::BC7RGBAUnorm;
+            return true;
+        case interop::GPUTextureFormat::kBc7RgbaUnormSrgb:
+            out = wgpu::TextureFormat::BC7RGBAUnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kEtc2Rgb8Unorm:
+            out = wgpu::TextureFormat::ETC2RGB8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kEtc2Rgb8UnormSrgb:
+            out = wgpu::TextureFormat::ETC2RGB8UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kEtc2Rgb8A1Unorm:
+            out = wgpu::TextureFormat::ETC2RGB8A1Unorm;
+            return true;
+        case interop::GPUTextureFormat::kEtc2Rgb8A1UnormSrgb:
+            out = wgpu::TextureFormat::ETC2RGB8A1UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kEtc2Rgba8Unorm:
+            out = wgpu::TextureFormat::ETC2RGBA8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kEtc2Rgba8UnormSrgb:
+            out = wgpu::TextureFormat::ETC2RGBA8UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kEacR11Unorm:
+            out = wgpu::TextureFormat::EACR11Unorm;
+            return true;
+        case interop::GPUTextureFormat::kEacR11Snorm:
+            out = wgpu::TextureFormat::EACR11Snorm;
+            return true;
+        case interop::GPUTextureFormat::kEacRg11Unorm:
+            out = wgpu::TextureFormat::EACRG11Unorm;
+            return true;
+        case interop::GPUTextureFormat::kEacRg11Snorm:
+            out = wgpu::TextureFormat::EACRG11Snorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc4X4Unorm:
+            out = wgpu::TextureFormat::ASTC4x4Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc4X4UnormSrgb:
+            out = wgpu::TextureFormat::ASTC4x4UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc5X4Unorm:
+            out = wgpu::TextureFormat::ASTC5x4Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc5X4UnormSrgb:
+            out = wgpu::TextureFormat::ASTC5x4UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc5X5Unorm:
+            out = wgpu::TextureFormat::ASTC5x5Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc5X5UnormSrgb:
+            out = wgpu::TextureFormat::ASTC5x5UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc6X5Unorm:
+            out = wgpu::TextureFormat::ASTC6x5Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc6X5UnormSrgb:
+            out = wgpu::TextureFormat::ASTC6x5UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc6X6Unorm:
+            out = wgpu::TextureFormat::ASTC6x6Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc6X6UnormSrgb:
+            out = wgpu::TextureFormat::ASTC6x6UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc8X5Unorm:
+            out = wgpu::TextureFormat::ASTC8x5Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc8X5UnormSrgb:
+            out = wgpu::TextureFormat::ASTC8x5UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc8X6Unorm:
+            out = wgpu::TextureFormat::ASTC8x6Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc8X6UnormSrgb:
+            out = wgpu::TextureFormat::ASTC8x6UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc8X8Unorm:
+            out = wgpu::TextureFormat::ASTC8x8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc8X8UnormSrgb:
+            out = wgpu::TextureFormat::ASTC8x8UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X5Unorm:
+            out = wgpu::TextureFormat::ASTC10x5Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X5UnormSrgb:
+            out = wgpu::TextureFormat::ASTC10x5UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X6Unorm:
+            out = wgpu::TextureFormat::ASTC10x6Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X6UnormSrgb:
+            out = wgpu::TextureFormat::ASTC10x6UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X8Unorm:
+            out = wgpu::TextureFormat::ASTC10x8Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X8UnormSrgb:
+            out = wgpu::TextureFormat::ASTC10x8UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X10Unorm:
+            out = wgpu::TextureFormat::ASTC10x10Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc10X10UnormSrgb:
+            out = wgpu::TextureFormat::ASTC10x10UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc12X10Unorm:
+            out = wgpu::TextureFormat::ASTC12x10Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc12X10UnormSrgb:
+            out = wgpu::TextureFormat::ASTC12x10UnormSrgb;
+            return true;
+        case interop::GPUTextureFormat::kAstc12X12Unorm:
+            out = wgpu::TextureFormat::ASTC12x12Unorm;
+            return true;
+        case interop::GPUTextureFormat::kAstc12X12UnormSrgb:
+            out = wgpu::TextureFormat::ASTC12x12UnormSrgb;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUTextureFormat").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
-        out = static_cast<wgpu::MapMode>(in.value);
-        return true;
+bool Converter::Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in) {
+    out = static_cast<wgpu::TextureUsage>(in.value);
+    return true;
+}
+
+bool Converter::Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in) {
+    out = static_cast<wgpu::ColorWriteMask>(in.value);
+    return true;
+}
+
+bool Converter::Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in) {
+    out = static_cast<wgpu::BufferUsage>(in.value);
+    return true;
+}
+
+bool Converter::Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in) {
+    out = static_cast<wgpu::MapMode>(in.value);
+    return true;
+}
+
+bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
+    out = static_cast<wgpu::ShaderStage>(in.value);
+    return true;
+}
+
+bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
+    out = wgpu::TextureDimension::e1D;
+    switch (in) {
+        case interop::GPUTextureDimension::k1D:
+            out = wgpu::TextureDimension::e1D;
+            return true;
+        case interop::GPUTextureDimension::k2D:
+            out = wgpu::TextureDimension::e2D;
+            return true;
+        case interop::GPUTextureDimension::k3D:
+            out = wgpu::TextureDimension::e3D;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in) {
-        out = static_cast<wgpu::ShaderStage>(in.value);
-        return true;
+bool Converter::Convert(wgpu::TextureViewDimension& out,
+                        const interop::GPUTextureViewDimension& in) {
+    out = wgpu::TextureViewDimension::Undefined;
+    switch (in) {
+        case interop::GPUTextureViewDimension::k1D:
+            out = wgpu::TextureViewDimension::e1D;
+            return true;
+        case interop::GPUTextureViewDimension::k2D:
+            out = wgpu::TextureViewDimension::e2D;
+            return true;
+        case interop::GPUTextureViewDimension::k2DArray:
+            out = wgpu::TextureViewDimension::e2DArray;
+            return true;
+        case interop::GPUTextureViewDimension::kCube:
+            out = wgpu::TextureViewDimension::Cube;
+            return true;
+        case interop::GPUTextureViewDimension::kCubeArray:
+            out = wgpu::TextureViewDimension::CubeArray;
+            return true;
+        case interop::GPUTextureViewDimension::k3D:
+            out = wgpu::TextureViewDimension::e3D;
+            return true;
+        default:
+            break;
     }
+    Napi::Error::New(env, "invalid value for GPUTextureViewDimension").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in) {
-        out = wgpu::TextureDimension::e1D;
-        switch (in) {
-            case interop::GPUTextureDimension::k1D:
-                out = wgpu::TextureDimension::e1D;
-                return true;
-            case interop::GPUTextureDimension::k2D:
-                out = wgpu::TextureDimension::e2D;
-                return true;
-            case interop::GPUTextureDimension::k3D:
-                out = wgpu::TextureDimension::e3D;
-                return true;
+bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
+                        const interop::GPUProgrammableStage& in) {
+    out = {};
+    out.module = *in.module.As<GPUShaderModule>();
+
+    // Replace nulls in the entryPoint name with another character that's disallowed in
+    // identifiers. This is so that using "main\0" doesn't match an entryPoint named "main".
+    // TODO(dawn:1345): Replace with a way to size strings explicitly in webgpu.h
+    char* entryPoint = Allocate<char>(in.entryPoint.size() + 1);
+    entryPoint[in.entryPoint.size()] = '\0';
+    for (size_t i = 0; i < in.entryPoint.size(); i++) {
+        if (in.entryPoint[i] == '\0') {
+            entryPoint[i] = '#';
+        } else {
+            entryPoint[i] = in.entryPoint[i];
         }
-        Napi::Error::New(env, "invalid value for GPUTextureDimension").ThrowAsJavaScriptException();
+    }
+    out.entryPoint = entryPoint;
+
+    return Convert(out.constants, out.constantCount, in.constants);
+}
+
+bool Converter::Convert(wgpu::ConstantEntry& out,
+                        const std::string& in_name,
+                        wgpu::interop::GPUPipelineConstantValue in_value) {
+    out.key = in_name.c_str();
+    out.value = in_value;
+    return true;
+}
+
+bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
+    out = {};
+    return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
+           Convert(out.srcFactor, in.srcFactor);
+}
+
+bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
+    out = wgpu::BlendFactor::Zero;
+    switch (in) {
+        case interop::GPUBlendFactor::kZero:
+            out = wgpu::BlendFactor::Zero;
+            return true;
+        case interop::GPUBlendFactor::kOne:
+            out = wgpu::BlendFactor::One;
+            return true;
+        case interop::GPUBlendFactor::kSrc:
+            out = wgpu::BlendFactor::Src;
+            return true;
+        case interop::GPUBlendFactor::kOneMinusSrc:
+            out = wgpu::BlendFactor::OneMinusSrc;
+            return true;
+        case interop::GPUBlendFactor::kSrcAlpha:
+            out = wgpu::BlendFactor::SrcAlpha;
+            return true;
+        case interop::GPUBlendFactor::kOneMinusSrcAlpha:
+            out = wgpu::BlendFactor::OneMinusSrcAlpha;
+            return true;
+        case interop::GPUBlendFactor::kDst:
+            out = wgpu::BlendFactor::Dst;
+            return true;
+        case interop::GPUBlendFactor::kOneMinusDst:
+            out = wgpu::BlendFactor::OneMinusDst;
+            return true;
+        case interop::GPUBlendFactor::kDstAlpha:
+            out = wgpu::BlendFactor::DstAlpha;
+            return true;
+        case interop::GPUBlendFactor::kOneMinusDstAlpha:
+            out = wgpu::BlendFactor::OneMinusDstAlpha;
+            return true;
+        case interop::GPUBlendFactor::kSrcAlphaSaturated:
+            out = wgpu::BlendFactor::SrcAlphaSaturated;
+            return true;
+        case interop::GPUBlendFactor::kConstant:
+            out = wgpu::BlendFactor::Constant;
+            return true;
+        case interop::GPUBlendFactor::kOneMinusConstant:
+            out = wgpu::BlendFactor::OneMinusConstant;
+            return true;
+        default:
+            break;
+    }
+    Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
+    out = wgpu::BlendOperation::Add;
+    switch (in) {
+        case interop::GPUBlendOperation::kAdd:
+            out = wgpu::BlendOperation::Add;
+            return true;
+        case interop::GPUBlendOperation::kSubtract:
+            out = wgpu::BlendOperation::Subtract;
+            return true;
+        case interop::GPUBlendOperation::kReverseSubtract:
+            out = wgpu::BlendOperation::ReverseSubtract;
+            return true;
+        case interop::GPUBlendOperation::kMin:
+            out = wgpu::BlendOperation::Min;
+            return true;
+        case interop::GPUBlendOperation::kMax:
+            out = wgpu::BlendOperation::Max;
+            return true;
+        default:
+            break;
+    }
+    Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
+    out = {};
+    return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
+}
+
+bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
+    out = {};
+    return Convert(out.topology, in.topology) &&
+           Convert(out.stripIndexFormat, in.stripIndexFormat) &&
+           Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
+}
+
+bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
+    out = {};
+    return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
+           Convert(out.writeMask, in.writeMask);
+}
+
+bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
+    out = {};
+    return Convert(out.format, in.format) && Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
+           Convert(out.depthCompare, in.depthCompare) &&
+           Convert(out.stencilFront, in.stencilFront) && Convert(out.stencilBack, in.stencilBack) &&
+           Convert(out.stencilReadMask, in.stencilReadMask) &&
+           Convert(out.stencilWriteMask, in.stencilWriteMask) &&
+           Convert(out.depthBias, in.depthBias) &&
+           Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
+           Convert(out.depthBiasClamp, in.depthBiasClamp);
+}
+
+bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
+    out = {};
+    return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
+           Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
+}
+
+bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
+    out = {};
+    return Convert(out.targets, out.targetCount, in.targets) &&  //
+           Convert(out.module, in.module) &&                     //
+           Convert(out.entryPoint, in.entryPoint) &&             //
+           Convert(out.constants, out.constantCount, in.constants);
+}
+
+bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
+    out = wgpu::PrimitiveTopology::LineList;
+    switch (in) {
+        case interop::GPUPrimitiveTopology::kPointList:
+            out = wgpu::PrimitiveTopology::PointList;
+            return true;
+        case interop::GPUPrimitiveTopology::kLineList:
+            out = wgpu::PrimitiveTopology::LineList;
+            return true;
+        case interop::GPUPrimitiveTopology::kLineStrip:
+            out = wgpu::PrimitiveTopology::LineStrip;
+            return true;
+        case interop::GPUPrimitiveTopology::kTriangleList:
+            out = wgpu::PrimitiveTopology::TriangleList;
+            return true;
+        case interop::GPUPrimitiveTopology::kTriangleStrip:
+            out = wgpu::PrimitiveTopology::TriangleStrip;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPUPrimitiveTopology").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
+    out = wgpu::FrontFace::CW;
+    switch (in) {
+        case interop::GPUFrontFace::kCw:
+            out = wgpu::FrontFace::CW;
+            return true;
+        case interop::GPUFrontFace::kCcw:
+            out = wgpu::FrontFace::CCW;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
+    out = wgpu::CullMode::None;
+    switch (in) {
+        case interop::GPUCullMode::kNone:
+            out = wgpu::CullMode::None;
+            return true;
+        case interop::GPUCullMode::kFront:
+            out = wgpu::CullMode::Front;
+            return true;
+        case interop::GPUCullMode::kBack:
+            out = wgpu::CullMode::Back;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {
+    out = wgpu::CompareFunction::Undefined;
+    switch (in) {
+        case interop::GPUCompareFunction::kNever:
+            out = wgpu::CompareFunction::Never;
+            return true;
+        case interop::GPUCompareFunction::kLess:
+            out = wgpu::CompareFunction::Less;
+            return true;
+        case interop::GPUCompareFunction::kLessEqual:
+            out = wgpu::CompareFunction::LessEqual;
+            return true;
+        case interop::GPUCompareFunction::kGreater:
+            out = wgpu::CompareFunction::Greater;
+            return true;
+        case interop::GPUCompareFunction::kGreaterEqual:
+            out = wgpu::CompareFunction::GreaterEqual;
+            return true;
+        case interop::GPUCompareFunction::kEqual:
+            out = wgpu::CompareFunction::Equal;
+            return true;
+        case interop::GPUCompareFunction::kNotEqual:
+            out = wgpu::CompareFunction::NotEqual;
+            return true;
+        case interop::GPUCompareFunction::kAlways:
+            out = wgpu::CompareFunction::Always;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {
+    out = wgpu::IndexFormat::Undefined;
+    switch (in) {
+        case interop::GPUIndexFormat::kUint16:
+            out = wgpu::IndexFormat::Uint16;
+            return true;
+        case interop::GPUIndexFormat::kUint32:
+            out = wgpu::IndexFormat::Uint32;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {
+    out = wgpu::StencilOperation::Zero;
+    switch (in) {
+        case interop::GPUStencilOperation::kKeep:
+            out = wgpu::StencilOperation::Keep;
+            return true;
+        case interop::GPUStencilOperation::kZero:
+            out = wgpu::StencilOperation::Zero;
+            return true;
+        case interop::GPUStencilOperation::kReplace:
+            out = wgpu::StencilOperation::Replace;
+            return true;
+        case interop::GPUStencilOperation::kInvert:
+            out = wgpu::StencilOperation::Invert;
+            return true;
+        case interop::GPUStencilOperation::kIncrementClamp:
+            out = wgpu::StencilOperation::IncrementClamp;
+            return true;
+        case interop::GPUStencilOperation::kDecrementClamp:
+            out = wgpu::StencilOperation::DecrementClamp;
+            return true;
+        case interop::GPUStencilOperation::kIncrementWrap:
+            out = wgpu::StencilOperation::IncrementWrap;
+            return true;
+        case interop::GPUStencilOperation::kDecrementWrap:
+            out = wgpu::StencilOperation::DecrementWrap;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {
+    return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
+           Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
+}
+
+bool Converter::Convert(wgpu::VertexBufferLayout& out, const interop::GPUVertexBufferLayout& in) {
+    out = {};
+    return Convert(out.attributes, out.attributeCount, in.attributes) &&
+           Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
+}
+
+bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {
+    out = {};
+    return Convert(out.module, in.module) && Convert(out.buffers, out.bufferCount, in.buffers) &&
+           Convert(out.entryPoint, in.entryPoint) &&
+           Convert(out.constants, out.constantCount, in.constants);
+}
+
+bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {
+    out = wgpu::VertexStepMode::Instance;
+    switch (in) {
+        case interop::GPUVertexStepMode::kInstance:
+            out = wgpu::VertexStepMode::Instance;
+            return true;
+        case interop::GPUVertexStepMode::kVertex:
+            out = wgpu::VertexStepMode::Vertex;
+            return true;
+        default:
+            break;
+    }
+    Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {
+    return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
+           Convert(out.shaderLocation, in.shaderLocation);
+}
+
+bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {
+    out = wgpu::VertexFormat::Undefined;
+    switch (in) {
+        case interop::GPUVertexFormat::kUint8X2:
+            out = wgpu::VertexFormat::Uint8x2;
+            return true;
+        case interop::GPUVertexFormat::kUint8X4:
+            out = wgpu::VertexFormat::Uint8x4;
+            return true;
+        case interop::GPUVertexFormat::kSint8X2:
+            out = wgpu::VertexFormat::Sint8x2;
+            return true;
+        case interop::GPUVertexFormat::kSint8X4:
+            out = wgpu::VertexFormat::Sint8x4;
+            return true;
+        case interop::GPUVertexFormat::kUnorm8X2:
+            out = wgpu::VertexFormat::Unorm8x2;
+            return true;
+        case interop::GPUVertexFormat::kUnorm8X4:
+            out = wgpu::VertexFormat::Unorm8x4;
+            return true;
+        case interop::GPUVertexFormat::kSnorm8X2:
+            out = wgpu::VertexFormat::Snorm8x2;
+            return true;
+        case interop::GPUVertexFormat::kSnorm8X4:
+            out = wgpu::VertexFormat::Snorm8x4;
+            return true;
+        case interop::GPUVertexFormat::kUint16X2:
+            out = wgpu::VertexFormat::Uint16x2;
+            return true;
+        case interop::GPUVertexFormat::kUint16X4:
+            out = wgpu::VertexFormat::Uint16x4;
+            return true;
+        case interop::GPUVertexFormat::kSint16X2:
+            out = wgpu::VertexFormat::Sint16x2;
+            return true;
+        case interop::GPUVertexFormat::kSint16X4:
+            out = wgpu::VertexFormat::Sint16x4;
+            return true;
+        case interop::GPUVertexFormat::kUnorm16X2:
+            out = wgpu::VertexFormat::Unorm16x2;
+            return true;
+        case interop::GPUVertexFormat::kUnorm16X4:
+            out = wgpu::VertexFormat::Unorm16x4;
+            return true;
+        case interop::GPUVertexFormat::kSnorm16X2:
+            out = wgpu::VertexFormat::Snorm16x2;
+            return true;
+        case interop::GPUVertexFormat::kSnorm16X4:
+            out = wgpu::VertexFormat::Snorm16x4;
+            return true;
+        case interop::GPUVertexFormat::kFloat16X2:
+            out = wgpu::VertexFormat::Float16x2;
+            return true;
+        case interop::GPUVertexFormat::kFloat16X4:
+            out = wgpu::VertexFormat::Float16x4;
+            return true;
+        case interop::GPUVertexFormat::kFloat32:
+            out = wgpu::VertexFormat::Float32;
+            return true;
+        case interop::GPUVertexFormat::kFloat32X2:
+            out = wgpu::VertexFormat::Float32x2;
+            return true;
+        case interop::GPUVertexFormat::kFloat32X3:
+            out = wgpu::VertexFormat::Float32x3;
+            return true;
+        case interop::GPUVertexFormat::kFloat32X4:
+            out = wgpu::VertexFormat::Float32x4;
+            return true;
+        case interop::GPUVertexFormat::kUint32:
+            out = wgpu::VertexFormat::Uint32;
+            return true;
+        case interop::GPUVertexFormat::kUint32X2:
+            out = wgpu::VertexFormat::Uint32x2;
+            return true;
+        case interop::GPUVertexFormat::kUint32X3:
+            out = wgpu::VertexFormat::Uint32x3;
+            return true;
+        case interop::GPUVertexFormat::kUint32X4:
+            out = wgpu::VertexFormat::Uint32x4;
+            return true;
+        case interop::GPUVertexFormat::kSint32:
+            out = wgpu::VertexFormat::Sint32;
+            return true;
+        case interop::GPUVertexFormat::kSint32X2:
+            out = wgpu::VertexFormat::Sint32x2;
+            return true;
+        case interop::GPUVertexFormat::kSint32X3:
+            out = wgpu::VertexFormat::Sint32x3;
+            return true;
+        case interop::GPUVertexFormat::kSint32X4:
+            out = wgpu::VertexFormat::Sint32x4;
+            return true;
+        default:
+            break;
+    }
+    Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
+                        const interop::GPURenderPassColorAttachment& in) {
+    out = {};
+    return Convert(out.view, in.view) &&                    //
+           Convert(out.resolveTarget, in.resolveTarget) &&  //
+           Convert(out.clearValue, in.clearValue) &&        //
+           Convert(out.loadOp, in.loadOp) &&                //
+           Convert(out.storeOp, in.storeOp);
+}
+
+bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
+                        const interop::GPURenderPassDepthStencilAttachment& in) {
+    out = {};
+    return Convert(out.view, in.view) &&                            //
+           Convert(out.depthClearValue, in.depthClearValue) &&      //
+           Convert(out.depthLoadOp, in.depthLoadOp) &&              //
+           Convert(out.depthStoreOp, in.depthStoreOp) &&            //
+           Convert(out.depthReadOnly, in.depthReadOnly) &&          //
+           Convert(out.stencilClearValue, in.stencilClearValue) &&  //
+           Convert(out.stencilLoadOp, in.stencilLoadOp) &&          //
+           Convert(out.stencilStoreOp, in.stencilStoreOp) &&        //
+           Convert(out.stencilReadOnly, in.stencilReadOnly);
+}
+
+bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {
+    out = wgpu::LoadOp::Clear;
+    switch (in) {
+        case interop::GPULoadOp::kLoad:
+            out = wgpu::LoadOp::Load;
+            return true;
+        case interop::GPULoadOp::kClear:
+            out = wgpu::LoadOp::Clear;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {
+    out = wgpu::StoreOp::Store;
+    switch (in) {
+        case interop::GPUStoreOp::kStore:
+            out = wgpu::StoreOp::Store;
+            return true;
+        case interop::GPUStoreOp::kDiscard:
+            out = wgpu::StoreOp::Discard;
+            return true;
+    }
+    Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
+    return false;
+}
+
+bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {
+    out = {};
+    if (!Convert(out.binding, in.binding)) {
         return false;
     }
 
-    bool Converter::Convert(wgpu::TextureViewDimension& out,
-                            const interop::GPUTextureViewDimension& in) {
-        out = wgpu::TextureViewDimension::Undefined;
-        switch (in) {
-            case interop::GPUTextureViewDimension::k1D:
-                out = wgpu::TextureViewDimension::e1D;
-                return true;
-            case interop::GPUTextureViewDimension::k2D:
-                out = wgpu::TextureViewDimension::e2D;
-                return true;
-            case interop::GPUTextureViewDimension::k2DArray:
-                out = wgpu::TextureViewDimension::e2DArray;
-                return true;
-            case interop::GPUTextureViewDimension::kCube:
-                out = wgpu::TextureViewDimension::Cube;
-                return true;
-            case interop::GPUTextureViewDimension::kCubeArray:
-                out = wgpu::TextureViewDimension::CubeArray;
-                return true;
-            case interop::GPUTextureViewDimension::k3D:
-                out = wgpu::TextureViewDimension::e3D;
-                return true;
-            default:
-                break;
-        }
-        Napi::Error::New(env, "invalid value for GPUTextureViewDimension")
-            .ThrowAsJavaScriptException();
-        return false;
+    if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
+        return Convert(out.sampler, *res);
     }
-
-    bool Converter::Convert(wgpu::ProgrammableStageDescriptor& out,
-                            const interop::GPUProgrammableStage& in) {
-        out = {};
-        out.module = *in.module.As<GPUShaderModule>();
-
-        // Replace nulls in the entryPoint name with another character that's disallowed in
-        // identifiers. This is so that using "main\0" doesn't match an entryPoint named "main".
-        // TODO(dawn:1345): Replace with a way to size strings explicitly in webgpu.h
-        char* entryPoint = Allocate<char>(in.entryPoint.size() + 1);
-        entryPoint[in.entryPoint.size()] = '\0';
-        for (size_t i = 0; i < in.entryPoint.size(); i++) {
-            if (in.entryPoint[i] == '\0') {
-                entryPoint[i] = '#';
-            } else {
-                entryPoint[i] = in.entryPoint[i];
-            }
-        }
-        out.entryPoint = entryPoint;
-
-        return Convert(out.constants, out.constantCount, in.constants);
+    if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
+        return Convert(out.textureView, *res);
     }
-
-    bool Converter::Convert(wgpu::ConstantEntry& out,
-                            const std::string& in_name,
-                            wgpu::interop::GPUPipelineConstantValue in_value) {
-        out.key = in_name.c_str();
-        out.value = in_value;
-        return true;
-    }
-
-    bool Converter::Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in) {
-        out = {};
-        return Convert(out.operation, in.operation) && Convert(out.dstFactor, in.dstFactor) &&
-               Convert(out.srcFactor, in.srcFactor);
-    }
-
-    bool Converter::Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in) {
-        out = wgpu::BlendFactor::Zero;
-        switch (in) {
-            case interop::GPUBlendFactor::kZero:
-                out = wgpu::BlendFactor::Zero;
-                return true;
-            case interop::GPUBlendFactor::kOne:
-                out = wgpu::BlendFactor::One;
-                return true;
-            case interop::GPUBlendFactor::kSrc:
-                out = wgpu::BlendFactor::Src;
-                return true;
-            case interop::GPUBlendFactor::kOneMinusSrc:
-                out = wgpu::BlendFactor::OneMinusSrc;
-                return true;
-            case interop::GPUBlendFactor::kSrcAlpha:
-                out = wgpu::BlendFactor::SrcAlpha;
-                return true;
-            case interop::GPUBlendFactor::kOneMinusSrcAlpha:
-                out = wgpu::BlendFactor::OneMinusSrcAlpha;
-                return true;
-            case interop::GPUBlendFactor::kDst:
-                out = wgpu::BlendFactor::Dst;
-                return true;
-            case interop::GPUBlendFactor::kOneMinusDst:
-                out = wgpu::BlendFactor::OneMinusDst;
-                return true;
-            case interop::GPUBlendFactor::kDstAlpha:
-                out = wgpu::BlendFactor::DstAlpha;
-                return true;
-            case interop::GPUBlendFactor::kOneMinusDstAlpha:
-                out = wgpu::BlendFactor::OneMinusDstAlpha;
-                return true;
-            case interop::GPUBlendFactor::kSrcAlphaSaturated:
-                out = wgpu::BlendFactor::SrcAlphaSaturated;
-                return true;
-            case interop::GPUBlendFactor::kConstant:
-                out = wgpu::BlendFactor::Constant;
-                return true;
-            case interop::GPUBlendFactor::kOneMinusConstant:
-                out = wgpu::BlendFactor::OneMinusConstant;
-                return true;
-            default:
-                break;
-        }
-        Napi::Error::New(env, "invalid value for GPUBlendFactor").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in) {
-        out = wgpu::BlendOperation::Add;
-        switch (in) {
-            case interop::GPUBlendOperation::kAdd:
-                out = wgpu::BlendOperation::Add;
-                return true;
-            case interop::GPUBlendOperation::kSubtract:
-                out = wgpu::BlendOperation::Subtract;
-                return true;
-            case interop::GPUBlendOperation::kReverseSubtract:
-                out = wgpu::BlendOperation::ReverseSubtract;
-                return true;
-            case interop::GPUBlendOperation::kMin:
-                out = wgpu::BlendOperation::Min;
-                return true;
-            case interop::GPUBlendOperation::kMax:
-                out = wgpu::BlendOperation::Max;
-                return true;
-            default:
-                break;
-        }
-        Napi::Error::New(env, "invalid value for GPUBlendOperation").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::BlendState& out, const interop::GPUBlendState& in) {
-        out = {};
-        return Convert(out.alpha, in.alpha) && Convert(out.color, in.color);
-    }
-
-    bool Converter::Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in) {
-        out = {};
-        return Convert(out.topology, in.topology) &&
-               Convert(out.stripIndexFormat, in.stripIndexFormat) &&
-               Convert(out.frontFace, in.frontFace) && Convert(out.cullMode, in.cullMode);
-    }
-
-    bool Converter::Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in) {
-        out = {};
-        return Convert(out.format, in.format) && Convert(out.blend, in.blend) &&
-               Convert(out.writeMask, in.writeMask);
-    }
-
-    bool Converter::Convert(wgpu::DepthStencilState& out, const interop::GPUDepthStencilState& in) {
-        out = {};
-        return Convert(out.format, in.format) &&
-               Convert(out.depthWriteEnabled, in.depthWriteEnabled) &&
-               Convert(out.depthCompare, in.depthCompare) &&
-               Convert(out.stencilFront, in.stencilFront) &&
-               Convert(out.stencilBack, in.stencilBack) &&
-               Convert(out.stencilReadMask, in.stencilReadMask) &&
-               Convert(out.stencilWriteMask, in.stencilWriteMask) &&
-               Convert(out.depthBias, in.depthBias) &&
-               Convert(out.depthBiasSlopeScale, in.depthBiasSlopeScale) &&
-               Convert(out.depthBiasClamp, in.depthBiasClamp);
-    }
-
-    bool Converter::Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in) {
-        out = {};
-        return Convert(out.count, in.count) && Convert(out.mask, in.mask) &&
-               Convert(out.alphaToCoverageEnabled, in.alphaToCoverageEnabled);
-    }
-
-    bool Converter::Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in) {
-        out = {};
-        return Convert(out.targets, out.targetCount, in.targets) &&  //
-               Convert(out.module, in.module) &&                     //
-               Convert(out.entryPoint, in.entryPoint) &&             //
-               Convert(out.constants, out.constantCount, in.constants);
-    }
-
-    bool Converter::Convert(wgpu::PrimitiveTopology& out, const interop::GPUPrimitiveTopology& in) {
-        out = wgpu::PrimitiveTopology::LineList;
-        switch (in) {
-            case interop::GPUPrimitiveTopology::kPointList:
-                out = wgpu::PrimitiveTopology::PointList;
-                return true;
-            case interop::GPUPrimitiveTopology::kLineList:
-                out = wgpu::PrimitiveTopology::LineList;
-                return true;
-            case interop::GPUPrimitiveTopology::kLineStrip:
-                out = wgpu::PrimitiveTopology::LineStrip;
-                return true;
-            case interop::GPUPrimitiveTopology::kTriangleList:
-                out = wgpu::PrimitiveTopology::TriangleList;
-                return true;
-            case interop::GPUPrimitiveTopology::kTriangleStrip:
-                out = wgpu::PrimitiveTopology::TriangleStrip;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUPrimitiveTopology")
-            .ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in) {
-        out = wgpu::FrontFace::CW;
-        switch (in) {
-            case interop::GPUFrontFace::kCw:
-                out = wgpu::FrontFace::CW;
-                return true;
-            case interop::GPUFrontFace::kCcw:
-                out = wgpu::FrontFace::CCW;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUFrontFace").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::CullMode& out, const interop::GPUCullMode& in) {
-        out = wgpu::CullMode::None;
-        switch (in) {
-            case interop::GPUCullMode::kNone:
-                out = wgpu::CullMode::None;
-                return true;
-            case interop::GPUCullMode::kFront:
-                out = wgpu::CullMode::Front;
-                return true;
-            case interop::GPUCullMode::kBack:
-                out = wgpu::CullMode::Back;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUCullMode").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in) {
-        out = wgpu::CompareFunction::Undefined;
-        switch (in) {
-            case interop::GPUCompareFunction::kNever:
-                out = wgpu::CompareFunction::Never;
-                return true;
-            case interop::GPUCompareFunction::kLess:
-                out = wgpu::CompareFunction::Less;
-                return true;
-            case interop::GPUCompareFunction::kLessEqual:
-                out = wgpu::CompareFunction::LessEqual;
-                return true;
-            case interop::GPUCompareFunction::kGreater:
-                out = wgpu::CompareFunction::Greater;
-                return true;
-            case interop::GPUCompareFunction::kGreaterEqual:
-                out = wgpu::CompareFunction::GreaterEqual;
-                return true;
-            case interop::GPUCompareFunction::kEqual:
-                out = wgpu::CompareFunction::Equal;
-                return true;
-            case interop::GPUCompareFunction::kNotEqual:
-                out = wgpu::CompareFunction::NotEqual;
-                return true;
-            case interop::GPUCompareFunction::kAlways:
-                out = wgpu::CompareFunction::Always;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUCompareFunction").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in) {
-        out = wgpu::IndexFormat::Undefined;
-        switch (in) {
-            case interop::GPUIndexFormat::kUint16:
-                out = wgpu::IndexFormat::Uint16;
-                return true;
-            case interop::GPUIndexFormat::kUint32:
-                out = wgpu::IndexFormat::Uint32;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUIndexFormat").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in) {
-        out = wgpu::StencilOperation::Zero;
-        switch (in) {
-            case interop::GPUStencilOperation::kKeep:
-                out = wgpu::StencilOperation::Keep;
-                return true;
-            case interop::GPUStencilOperation::kZero:
-                out = wgpu::StencilOperation::Zero;
-                return true;
-            case interop::GPUStencilOperation::kReplace:
-                out = wgpu::StencilOperation::Replace;
-                return true;
-            case interop::GPUStencilOperation::kInvert:
-                out = wgpu::StencilOperation::Invert;
-                return true;
-            case interop::GPUStencilOperation::kIncrementClamp:
-                out = wgpu::StencilOperation::IncrementClamp;
-                return true;
-            case interop::GPUStencilOperation::kDecrementClamp:
-                out = wgpu::StencilOperation::DecrementClamp;
-                return true;
-            case interop::GPUStencilOperation::kIncrementWrap:
-                out = wgpu::StencilOperation::IncrementWrap;
-                return true;
-            case interop::GPUStencilOperation::kDecrementWrap:
-                out = wgpu::StencilOperation::DecrementWrap;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUStencilOperation").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in) {
-        return Convert(out.compare, in.compare) && Convert(out.failOp, in.failOp) &&
-               Convert(out.depthFailOp, in.depthFailOp) && Convert(out.passOp, in.passOp);
-    }
-
-    bool Converter::Convert(wgpu::VertexBufferLayout& out,
-                            const interop::GPUVertexBufferLayout& in) {
-        out = {};
-        return Convert(out.attributes, out.attributeCount, in.attributes) &&
-               Convert(out.arrayStride, in.arrayStride) && Convert(out.stepMode, in.stepMode);
-    }
-
-    bool Converter::Convert(wgpu::VertexState& out, const interop::GPUVertexState& in) {
-        out = {};
-        return Convert(out.module, in.module) &&
-               Convert(out.buffers, out.bufferCount, in.buffers) &&
-               Convert(out.entryPoint, in.entryPoint) &&
-               Convert(out.constants, out.constantCount, in.constants);
-    }
-
-    bool Converter::Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in) {
-        out = wgpu::VertexStepMode::Instance;
-        switch (in) {
-            case interop::GPUVertexStepMode::kInstance:
-                out = wgpu::VertexStepMode::Instance;
-                return true;
-            case interop::GPUVertexStepMode::kVertex:
-                out = wgpu::VertexStepMode::Vertex;
-                return true;
-            default:
-                break;
-        }
-        Napi::Error::New(env, "invalid value for GPUVertexStepMode").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in) {
-        return Convert(out.format, in.format) && Convert(out.offset, in.offset) &&
-               Convert(out.shaderLocation, in.shaderLocation);
-    }
-
-    bool Converter::Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in) {
-        out = wgpu::VertexFormat::Undefined;
-        switch (in) {
-            case interop::GPUVertexFormat::kUint8X2:
-                out = wgpu::VertexFormat::Uint8x2;
-                return true;
-            case interop::GPUVertexFormat::kUint8X4:
-                out = wgpu::VertexFormat::Uint8x4;
-                return true;
-            case interop::GPUVertexFormat::kSint8X2:
-                out = wgpu::VertexFormat::Sint8x2;
-                return true;
-            case interop::GPUVertexFormat::kSint8X4:
-                out = wgpu::VertexFormat::Sint8x4;
-                return true;
-            case interop::GPUVertexFormat::kUnorm8X2:
-                out = wgpu::VertexFormat::Unorm8x2;
-                return true;
-            case interop::GPUVertexFormat::kUnorm8X4:
-                out = wgpu::VertexFormat::Unorm8x4;
-                return true;
-            case interop::GPUVertexFormat::kSnorm8X2:
-                out = wgpu::VertexFormat::Snorm8x2;
-                return true;
-            case interop::GPUVertexFormat::kSnorm8X4:
-                out = wgpu::VertexFormat::Snorm8x4;
-                return true;
-            case interop::GPUVertexFormat::kUint16X2:
-                out = wgpu::VertexFormat::Uint16x2;
-                return true;
-            case interop::GPUVertexFormat::kUint16X4:
-                out = wgpu::VertexFormat::Uint16x4;
-                return true;
-            case interop::GPUVertexFormat::kSint16X2:
-                out = wgpu::VertexFormat::Sint16x2;
-                return true;
-            case interop::GPUVertexFormat::kSint16X4:
-                out = wgpu::VertexFormat::Sint16x4;
-                return true;
-            case interop::GPUVertexFormat::kUnorm16X2:
-                out = wgpu::VertexFormat::Unorm16x2;
-                return true;
-            case interop::GPUVertexFormat::kUnorm16X4:
-                out = wgpu::VertexFormat::Unorm16x4;
-                return true;
-            case interop::GPUVertexFormat::kSnorm16X2:
-                out = wgpu::VertexFormat::Snorm16x2;
-                return true;
-            case interop::GPUVertexFormat::kSnorm16X4:
-                out = wgpu::VertexFormat::Snorm16x4;
-                return true;
-            case interop::GPUVertexFormat::kFloat16X2:
-                out = wgpu::VertexFormat::Float16x2;
-                return true;
-            case interop::GPUVertexFormat::kFloat16X4:
-                out = wgpu::VertexFormat::Float16x4;
-                return true;
-            case interop::GPUVertexFormat::kFloat32:
-                out = wgpu::VertexFormat::Float32;
-                return true;
-            case interop::GPUVertexFormat::kFloat32X2:
-                out = wgpu::VertexFormat::Float32x2;
-                return true;
-            case interop::GPUVertexFormat::kFloat32X3:
-                out = wgpu::VertexFormat::Float32x3;
-                return true;
-            case interop::GPUVertexFormat::kFloat32X4:
-                out = wgpu::VertexFormat::Float32x4;
-                return true;
-            case interop::GPUVertexFormat::kUint32:
-                out = wgpu::VertexFormat::Uint32;
-                return true;
-            case interop::GPUVertexFormat::kUint32X2:
-                out = wgpu::VertexFormat::Uint32x2;
-                return true;
-            case interop::GPUVertexFormat::kUint32X3:
-                out = wgpu::VertexFormat::Uint32x3;
-                return true;
-            case interop::GPUVertexFormat::kUint32X4:
-                out = wgpu::VertexFormat::Uint32x4;
-                return true;
-            case interop::GPUVertexFormat::kSint32:
-                out = wgpu::VertexFormat::Sint32;
-                return true;
-            case interop::GPUVertexFormat::kSint32X2:
-                out = wgpu::VertexFormat::Sint32x2;
-                return true;
-            case interop::GPUVertexFormat::kSint32X3:
-                out = wgpu::VertexFormat::Sint32x3;
-                return true;
-            case interop::GPUVertexFormat::kSint32X4:
-                out = wgpu::VertexFormat::Sint32x4;
-                return true;
-            default:
-                break;
-        }
-        Napi::Error::New(env, "invalid value for GPUVertexFormat").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::RenderPassColorAttachment& out,
-                            const interop::GPURenderPassColorAttachment& in) {
-        out = {};
-        return Convert(out.view, in.view) &&                    //
-               Convert(out.resolveTarget, in.resolveTarget) &&  //
-               Convert(out.clearValue, in.clearValue) &&        //
-               Convert(out.loadOp, in.loadOp) &&                //
-               Convert(out.storeOp, in.storeOp);
-    }
-
-    bool Converter::Convert(wgpu::RenderPassDepthStencilAttachment& out,
-                            const interop::GPURenderPassDepthStencilAttachment& in) {
-        out = {};
-        return Convert(out.view, in.view) &&                            //
-               Convert(out.depthClearValue, in.depthClearValue) &&      //
-               Convert(out.depthLoadOp, in.depthLoadOp) &&              //
-               Convert(out.depthStoreOp, in.depthStoreOp) &&            //
-               Convert(out.depthReadOnly, in.depthReadOnly) &&          //
-               Convert(out.stencilClearValue, in.stencilClearValue) &&  //
-               Convert(out.stencilLoadOp, in.stencilLoadOp) &&          //
-               Convert(out.stencilStoreOp, in.stencilStoreOp) &&        //
-               Convert(out.stencilReadOnly, in.stencilReadOnly);
-    }
-
-    bool Converter::Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in) {
-        out = wgpu::LoadOp::Clear;
-        switch (in) {
-            case interop::GPULoadOp::kLoad:
-                out = wgpu::LoadOp::Load;
-                return true;
-            case interop::GPULoadOp::kClear:
-                out = wgpu::LoadOp::Clear;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPULoadOp").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in) {
-        out = wgpu::StoreOp::Store;
-        switch (in) {
-            case interop::GPUStoreOp::kStore:
-                out = wgpu::StoreOp::Store;
-                return true;
-            case interop::GPUStoreOp::kDiscard:
-                out = wgpu::StoreOp::Discard;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUStoreOp").ThrowAsJavaScriptException();
-        return false;
-    }
-
-    bool Converter::Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in) {
-        out = {};
-        if (!Convert(out.binding, in.binding)) {
+    if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
+        auto buffer = res->buffer.As<GPUBuffer>();
+        out.size = wgpu::kWholeSize;
+        if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
             return false;
         }
-
-        if (auto* res = std::get_if<interop::Interface<interop::GPUSampler>>(&in.resource)) {
-            return Convert(out.sampler, *res);
-        }
-        if (auto* res = std::get_if<interop::Interface<interop::GPUTextureView>>(&in.resource)) {
-            return Convert(out.textureView, *res);
-        }
-        if (auto* res = std::get_if<interop::GPUBufferBinding>(&in.resource)) {
-            auto buffer = res->buffer.As<GPUBuffer>();
-            out.size = wgpu::kWholeSize;
-            if (!buffer || !Convert(out.offset, res->offset) || !Convert(out.size, res->size)) {
-                return false;
-            }
-            out.buffer = *buffer;
-            return true;
-        }
-        if (auto* res =
-                std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
-            // TODO(crbug.com/dawn/1129): External textures
-            UNIMPLEMENTED();
-        }
-        Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
-            .ThrowAsJavaScriptException();
-        return false;
+        out.buffer = *buffer;
+        return true;
     }
-
-    bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
-                            const interop::GPUBindGroupLayoutEntry& in) {
+    if (auto* res = std::get_if<interop::Interface<interop::GPUExternalTexture>>(&in.resource)) {
         // TODO(crbug.com/dawn/1129): External textures
-        return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
-               Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
-               Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
+        UNIMPLEMENTED();
     }
+    Napi::Error::New(env, "invalid value for GPUBindGroupEntry.resource")
+        .ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::BufferBindingLayout& out,
-                            const interop::GPUBufferBindingLayout& in) {
-        return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
-               Convert(out.minBindingSize, in.minBindingSize);
-    }
+bool Converter::Convert(wgpu::BindGroupLayoutEntry& out,
+                        const interop::GPUBindGroupLayoutEntry& in) {
+    // TODO(crbug.com/dawn/1129): External textures
+    return Convert(out.binding, in.binding) && Convert(out.visibility, in.visibility) &&
+           Convert(out.buffer, in.buffer) && Convert(out.sampler, in.sampler) &&
+           Convert(out.texture, in.texture) && Convert(out.storageTexture, in.storageTexture);
+}
 
-    bool Converter::Convert(wgpu::SamplerBindingLayout& out,
-                            const interop::GPUSamplerBindingLayout& in) {
-        return Convert(out.type, in.type);
-    }
+bool Converter::Convert(wgpu::BufferBindingLayout& out, const interop::GPUBufferBindingLayout& in) {
+    return Convert(out.type, in.type) && Convert(out.hasDynamicOffset, in.hasDynamicOffset) &&
+           Convert(out.minBindingSize, in.minBindingSize);
+}
 
-    bool Converter::Convert(wgpu::TextureBindingLayout& out,
-                            const interop::GPUTextureBindingLayout& in) {
-        return Convert(out.sampleType, in.sampleType) &&
-               Convert(out.viewDimension, in.viewDimension) &&
-               Convert(out.multisampled, in.multisampled);
-    }
+bool Converter::Convert(wgpu::SamplerBindingLayout& out,
+                        const interop::GPUSamplerBindingLayout& in) {
+    return Convert(out.type, in.type);
+}
 
-    bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
-                            const interop::GPUStorageTextureBindingLayout& in) {
-        return Convert(out.access, in.access) && Convert(out.format, in.format) &&
-               Convert(out.viewDimension, in.viewDimension);
-    }
+bool Converter::Convert(wgpu::TextureBindingLayout& out,
+                        const interop::GPUTextureBindingLayout& in) {
+    return Convert(out.sampleType, in.sampleType) && Convert(out.viewDimension, in.viewDimension) &&
+           Convert(out.multisampled, in.multisampled);
+}
 
-    bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {
-        out = wgpu::BufferBindingType::Undefined;
-        switch (in) {
-            case interop::GPUBufferBindingType::kUniform:
-                out = wgpu::BufferBindingType::Uniform;
-                return true;
-            case interop::GPUBufferBindingType::kStorage:
-                out = wgpu::BufferBindingType::Storage;
-                return true;
-            case interop::GPUBufferBindingType::kReadOnlyStorage:
-                out = wgpu::BufferBindingType::ReadOnlyStorage;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUBufferBindingType")
-            .ThrowAsJavaScriptException();
-        return false;
-    }
+bool Converter::Convert(wgpu::StorageTextureBindingLayout& out,
+                        const interop::GPUStorageTextureBindingLayout& in) {
+    return Convert(out.access, in.access) && Convert(out.format, in.format) &&
+           Convert(out.viewDimension, in.viewDimension);
+}
 
-    bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {
-        out = wgpu::TextureSampleType::Undefined;
-        switch (in) {
-            case interop::GPUTextureSampleType::kFloat:
-                out = wgpu::TextureSampleType::Float;
-                return true;
-            case interop::GPUTextureSampleType::kUnfilterableFloat:
-                out = wgpu::TextureSampleType::UnfilterableFloat;
-                return true;
-            case interop::GPUTextureSampleType::kDepth:
-                out = wgpu::TextureSampleType::Depth;
-                return true;
-            case interop::GPUTextureSampleType::kSint:
-                out = wgpu::TextureSampleType::Sint;
-                return true;
-            case interop::GPUTextureSampleType::kUint:
-                out = wgpu::TextureSampleType::Uint;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUTextureSampleType")
-            .ThrowAsJavaScriptException();
-        return false;
+bool Converter::Convert(wgpu::BufferBindingType& out, const interop::GPUBufferBindingType& in) {
+    out = wgpu::BufferBindingType::Undefined;
+    switch (in) {
+        case interop::GPUBufferBindingType::kUniform:
+            out = wgpu::BufferBindingType::Uniform;
+            return true;
+        case interop::GPUBufferBindingType::kStorage:
+            out = wgpu::BufferBindingType::Storage;
+            return true;
+        case interop::GPUBufferBindingType::kReadOnlyStorage:
+            out = wgpu::BufferBindingType::ReadOnlyStorage;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUBufferBindingType").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::SamplerBindingType& out,
-                            const interop::GPUSamplerBindingType& in) {
-        out = wgpu::SamplerBindingType::Undefined;
-        switch (in) {
-            case interop::GPUSamplerBindingType::kFiltering:
-                out = wgpu::SamplerBindingType::Filtering;
-                return true;
-            case interop::GPUSamplerBindingType::kNonFiltering:
-                out = wgpu::SamplerBindingType::NonFiltering;
-                return true;
-            case interop::GPUSamplerBindingType::kComparison:
-                out = wgpu::SamplerBindingType::Comparison;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUSamplerBindingType")
-            .ThrowAsJavaScriptException();
-        return false;
+bool Converter::Convert(wgpu::TextureSampleType& out, const interop::GPUTextureSampleType& in) {
+    out = wgpu::TextureSampleType::Undefined;
+    switch (in) {
+        case interop::GPUTextureSampleType::kFloat:
+            out = wgpu::TextureSampleType::Float;
+            return true;
+        case interop::GPUTextureSampleType::kUnfilterableFloat:
+            out = wgpu::TextureSampleType::UnfilterableFloat;
+            return true;
+        case interop::GPUTextureSampleType::kDepth:
+            out = wgpu::TextureSampleType::Depth;
+            return true;
+        case interop::GPUTextureSampleType::kSint:
+            out = wgpu::TextureSampleType::Sint;
+            return true;
+        case interop::GPUTextureSampleType::kUint:
+            out = wgpu::TextureSampleType::Uint;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUTextureSampleType").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::StorageTextureAccess& out,
-                            const interop::GPUStorageTextureAccess& in) {
-        out = wgpu::StorageTextureAccess::Undefined;
-        switch (in) {
-            case interop::GPUStorageTextureAccess::kWriteOnly:
-                out = wgpu::StorageTextureAccess::WriteOnly;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUStorageTextureAccess")
-            .ThrowAsJavaScriptException();
-        return false;
+bool Converter::Convert(wgpu::SamplerBindingType& out, const interop::GPUSamplerBindingType& in) {
+    out = wgpu::SamplerBindingType::Undefined;
+    switch (in) {
+        case interop::GPUSamplerBindingType::kFiltering:
+            out = wgpu::SamplerBindingType::Filtering;
+            return true;
+        case interop::GPUSamplerBindingType::kNonFiltering:
+            out = wgpu::SamplerBindingType::NonFiltering;
+            return true;
+        case interop::GPUSamplerBindingType::kComparison:
+            out = wgpu::SamplerBindingType::Comparison;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUSamplerBindingType").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {
-        out = wgpu::QueryType::Occlusion;
-        switch (in) {
-            case interop::GPUQueryType::kOcclusion:
-                out = wgpu::QueryType::Occlusion;
-                return true;
-            case interop::GPUQueryType::kTimestamp:
-                out = wgpu::QueryType::Timestamp;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
-        return false;
+bool Converter::Convert(wgpu::StorageTextureAccess& out,
+                        const interop::GPUStorageTextureAccess& in) {
+    out = wgpu::StorageTextureAccess::Undefined;
+    switch (in) {
+        case interop::GPUStorageTextureAccess::kWriteOnly:
+            out = wgpu::StorageTextureAccess::WriteOnly;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUStorageTextureAccess").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {
-        out = wgpu::AddressMode::Repeat;
-        switch (in) {
-            case interop::GPUAddressMode::kClampToEdge:
-                out = wgpu::AddressMode::ClampToEdge;
-                return true;
-            case interop::GPUAddressMode::kRepeat:
-                out = wgpu::AddressMode::Repeat;
-                return true;
-            case interop::GPUAddressMode::kMirrorRepeat:
-                out = wgpu::AddressMode::MirrorRepeat;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
-        return false;
+bool Converter::Convert(wgpu::QueryType& out, const interop::GPUQueryType& in) {
+    out = wgpu::QueryType::Occlusion;
+    switch (in) {
+        case interop::GPUQueryType::kOcclusion:
+            out = wgpu::QueryType::Occlusion;
+            return true;
+        case interop::GPUQueryType::kTimestamp:
+            out = wgpu::QueryType::Timestamp;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUQueryType").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {
-        out = wgpu::FilterMode::Nearest;
-        switch (in) {
-            case interop::GPUFilterMode::kNearest:
-                out = wgpu::FilterMode::Nearest;
-                return true;
-            case interop::GPUFilterMode::kLinear:
-                out = wgpu::FilterMode::Linear;
-                return true;
-        }
-        Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
-        return false;
+bool Converter::Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in) {
+    out = wgpu::AddressMode::Repeat;
+    switch (in) {
+        case interop::GPUAddressMode::kClampToEdge:
+            out = wgpu::AddressMode::ClampToEdge;
+            return true;
+        case interop::GPUAddressMode::kRepeat:
+            out = wgpu::AddressMode::Repeat;
+            return true;
+        case interop::GPUAddressMode::kMirrorRepeat:
+            out = wgpu::AddressMode::MirrorRepeat;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUAddressMode").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
-                            const interop::GPUComputePipelineDescriptor& in) {
-        return Convert(out.label, in.label) &&    //
-               Convert(out.layout, in.layout) &&  //
-               Convert(out.compute, in.compute);
+bool Converter::Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in) {
+    out = wgpu::FilterMode::Nearest;
+    switch (in) {
+        case interop::GPUFilterMode::kNearest:
+            out = wgpu::FilterMode::Nearest;
+            return true;
+        case interop::GPUFilterMode::kLinear:
+            out = wgpu::FilterMode::Linear;
+            return true;
     }
+    Napi::Error::New(env, "invalid value for GPUFilterMode").ThrowAsJavaScriptException();
+    return false;
+}
 
-    bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
-                            const interop::GPURenderPipelineDescriptor& in) {
-        wgpu::RenderPipelineDescriptor desc{};
-        return Convert(out.label, in.label) &&                //
-               Convert(out.layout, in.layout) &&              //
-               Convert(out.vertex, in.vertex) &&              //
-               Convert(out.primitive, in.primitive) &&        //
-               Convert(out.depthStencil, in.depthStencil) &&  //
-               Convert(out.multisample, in.multisample) &&    //
-               Convert(out.fragment, in.fragment);
-    }
+bool Converter::Convert(wgpu::ComputePipelineDescriptor& out,
+                        const interop::GPUComputePipelineDescriptor& in) {
+    return Convert(out.label, in.label) &&    //
+           Convert(out.layout, in.layout) &&  //
+           Convert(out.compute, in.compute);
+}
+
+bool Converter::Convert(wgpu::RenderPipelineDescriptor& out,
+                        const interop::GPURenderPipelineDescriptor& in) {
+    wgpu::RenderPipelineDescriptor desc{};
+    return Convert(out.label, in.label) &&                //
+           Convert(out.layout, in.layout) &&              //
+           Convert(out.vertex, in.vertex) &&              //
+           Convert(out.primitive, in.primitive) &&        //
+           Convert(out.depthStencil, in.depthStencil) &&  //
+           Convert(out.multisample, in.multisample) &&    //
+           Convert(out.fragment, in.fragment);
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/Converter.h b/src/dawn/node/binding/Converter.h
index 2f08b1c..2e46f0d 100644
--- a/src/dawn/node/binding/Converter.h
+++ b/src/dawn/node/binding/Converter.h
@@ -30,13 +30,13 @@
 
 namespace wgpu::binding {
 
-    // ImplOfTraits is a traits helper that is used to associate the interop interface type to the
-    // binding implementation type.
-    template <typename T>
-    struct ImplOfTraits {};
+// ImplOfTraits is a traits helper that is used to associate the interop interface type to the
+// binding implementation type.
+template <typename T>
+struct ImplOfTraits {};
 
-    // DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
-    // `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
+// DECLARE_IMPL() is a macro that declares a specialization of ImplOfTraits so that
+// `typename ImplOfTraits<interop::NAME>::type` is equivalent to `binding::NAME`.
 #define DECLARE_IMPL(NAME)               \
     class NAME;                          \
     template <>                          \
@@ -44,371 +44,356 @@
         using type = binding::NAME;      \
     }
 
-    // Declare the interop interface to binding implementations
-    DECLARE_IMPL(GPUBindGroup);
-    DECLARE_IMPL(GPUBindGroupLayout);
-    DECLARE_IMPL(GPUBuffer);
-    DECLARE_IMPL(GPUPipelineLayout);
-    DECLARE_IMPL(GPUQuerySet);
-    DECLARE_IMPL(GPURenderBundle);
-    DECLARE_IMPL(GPURenderPipeline);
-    DECLARE_IMPL(GPUSampler);
-    DECLARE_IMPL(GPUShaderModule);
-    DECLARE_IMPL(GPUTexture);
-    DECLARE_IMPL(GPUTextureView);
+// Declare the interop interface to binding implementations
+DECLARE_IMPL(GPUBindGroup);
+DECLARE_IMPL(GPUBindGroupLayout);
+DECLARE_IMPL(GPUBuffer);
+DECLARE_IMPL(GPUPipelineLayout);
+DECLARE_IMPL(GPUQuerySet);
+DECLARE_IMPL(GPURenderBundle);
+DECLARE_IMPL(GPURenderPipeline);
+DECLARE_IMPL(GPUSampler);
+DECLARE_IMPL(GPUShaderModule);
+DECLARE_IMPL(GPUTexture);
+DECLARE_IMPL(GPUTextureView);
 #undef DECLARE_IMPL
 
-    // Helper for obtaining the binding implementation type from the interop interface type
-    template <typename T>
-    using ImplOf = typename ImplOfTraits<T>::type;
+// Helper for obtaining the binding implementation type from the interop interface type
+template <typename T>
+using ImplOf = typename ImplOfTraits<T>::type;
 
-    // Converter is a utility class for converting IDL generated interop types into Dawn types.
-    // As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
-    // heap allocations for conversions of vector or optional types. These pointers are
-    // automatically freed when the Converter is destructed.
-    class Converter {
-      public:
-        explicit Converter(Napi::Env e) : env(e) {
-        }
-        ~Converter();
+// Converter is a utility class for converting IDL generated interop types into Dawn types.
+// As the Dawn C++ API uses raw C pointers for a number of its interfaces, Converter performs
+// heap allocations for conversions of vector or optional types. These pointers are
+// automatically freed when the Converter is destructed.
+class Converter {
+  public:
+    explicit Converter(Napi::Env e) : env(e) {}
+    ~Converter();
 
-        // Conversion function. Converts the interop type IN to the Dawn type OUT.
-        // Returns true on success, false on failure.
-        template <typename OUT, typename IN>
-        [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
-            return Convert(std::forward<OUT>(out), std::forward<IN>(in));
-        }
+    // Conversion function. Converts the interop type IN to the Dawn type OUT.
+    // Returns true on success, false on failure.
+    template <typename OUT, typename IN>
+    [[nodiscard]] inline bool operator()(OUT&& out, IN&& in) {
+        return Convert(std::forward<OUT>(out), std::forward<IN>(in));
+    }
 
-        // Vector conversion function. Converts the vector of interop type IN to a pointer of
-        // elements of Dawn type OUT, which is assigned to 'out_els'.
-        // out_count is assigned the number of elements in 'in'.
-        // Returns true on success, false on failure.
-        // The pointer assigned to 'out_els' is valid until the Converter is destructed.
-        template <typename OUT, typename IN>
-        [[nodiscard]] inline bool operator()(OUT*& out_els,
-                                             uint32_t& out_count,
-                                             const std::vector<IN>& in) {
-            return Convert(out_els, out_count, in);
-        }
+    // Vector conversion function. Converts the vector of interop type IN to a pointer of
+    // elements of Dawn type OUT, which is assigned to 'out_els'.
+    // out_count is assigned the number of elements in 'in'.
+    // Returns true on success, false on failure.
+    // The pointer assigned to 'out_els' is valid until the Converter is destructed.
+    template <typename OUT, typename IN>
+    [[nodiscard]] inline bool operator()(OUT*& out_els,
+                                         uint32_t& out_count,
+                                         const std::vector<IN>& in) {
+        return Convert(out_els, out_count, in);
+    }
 
-        // Returns the Env that this Converter was constructed with.
-        inline Napi::Env Env() const {
-            return env;
-        }
+    // Returns the Env that this Converter was constructed with.
+    inline Napi::Env Env() const { return env; }
 
-        // BufferSource is the converted type of interop::BufferSource.
-        struct BufferSource {
-            void* data;
-            size_t size;             // in bytes
-            size_t bytesPerElement;  // 1 for ArrayBuffers
-        };
-
-      private:
-        // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
-        [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
-
-        [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
-
-        [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
-
-        [[nodiscard]] bool Convert(wgpu::Origin3D& out,
-                                   const std::vector<interop::GPUIntegerCoordinate>& in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
-
-        [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out,
-                                   const interop::GPUImageCopyTexture& in);
-
-        [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out,
-                                   const interop::GPUImageCopyBuffer& in);
-
-        [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out,
-                                   const interop::GPUImageDataLayout& in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureUsage& out,
-                                   const interop::GPUTextureUsageFlags& in);
-
-        [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out,
-                                   const interop::GPUColorWriteFlags& in);
-
-        [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
-
-        [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
-
-        [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureDimension& out,
-                                   const interop::GPUTextureDimension& in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
-                                   const interop::GPUTextureViewDimension& in);
-
-        [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
-                                   const interop::GPUProgrammableStage& in);
-
-        [[nodiscard]] bool Convert(wgpu::ConstantEntry& out,
-                                   const std::string& in_name,
-                                   wgpu::interop::GPUPipelineConstantValue in_value);
-
-        [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
-
-        [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
-
-        [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
-
-        [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
-
-        [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
-
-        [[nodiscard]] bool Convert(wgpu::ColorTargetState& out,
-                                   const interop::GPUColorTargetState& in);
-
-        [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
-                                   const interop::GPUDepthStencilState& in);
-
-        [[nodiscard]] bool Convert(wgpu::MultisampleState& out,
-                                   const interop::GPUMultisampleState& in);
-
-        [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
-
-        [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
-                                   const interop::GPUPrimitiveTopology& in);
-
-        [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
-
-        [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
-
-        [[nodiscard]] bool Convert(wgpu::CompareFunction& out,
-                                   const interop::GPUCompareFunction& in);
-
-        [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
-
-        [[nodiscard]] bool Convert(wgpu::StencilOperation& out,
-                                   const interop::GPUStencilOperation& in);
-
-        [[nodiscard]] bool Convert(wgpu::StencilFaceState& out,
-                                   const interop::GPUStencilFaceState& in);
-
-        [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
-
-        [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
-                                   const interop::GPUVertexBufferLayout& in);
-
-        [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
-
-        [[nodiscard]] bool Convert(wgpu::VertexAttribute& out,
-                                   const interop::GPUVertexAttribute& in);
-
-        [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
-
-        [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
-                                   const interop::GPURenderPassColorAttachment& in);
-
-        [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
-                                   const interop::GPURenderPassDepthStencilAttachment& in);
-
-        [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
-
-        [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
-
-        [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
-
-        [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
-                                   const interop::GPUBindGroupLayoutEntry& in);
-
-        [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
-                                   const interop::GPUBufferBindingLayout& in);
-
-        [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
-                                   const interop::GPUSamplerBindingLayout& in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
-                                   const interop::GPUTextureBindingLayout& in);
-
-        [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
-                                   const interop::GPUStorageTextureBindingLayout& in);
-
-        [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
-                                   const interop::GPUBufferBindingType& in);
-
-        [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
-                                   const interop::GPUSamplerBindingType& in);
-
-        [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
-                                   const interop::GPUTextureSampleType& in);
-
-        [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
-                                   const interop::GPUStorageTextureAccess& in);
-
-        [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
-
-        [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
-
-        [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
-
-        [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
-                                   const interop::GPUComputePipelineDescriptor& in);
-
-        [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
-                                   const interop::GPURenderPipelineDescriptor& in);
-
-        // std::string to C string
-        inline bool Convert(const char*& out, const std::string& in) {
-            out = in.c_str();
-            return true;
-        }
-
-        // Pass-through (no conversion)
-        template <typename T>
-        inline bool Convert(T& out, const T& in) {
-            out = in;
-            return true;
-        }
-
-        // Integral number conversion, with dynamic limit checking
-        template <typename OUT,
-                  typename IN,
-                  typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
-        inline bool Convert(OUT& out, const IN& in) {
-            out = static_cast<OUT>(in);
-            if (static_cast<IN>(out) != in) {
-                Napi::Error::New(env, "Integer value (" + std::to_string(in) +
-                                          ") cannot be converted to the Dawn data type without "
-                                          "truncation of the value")
-                    .ThrowAsJavaScriptException();
-                return false;
-            }
-            return true;
-        }
-
-        // ClampedInteger<T>
-        template <typename T>
-        inline bool Convert(T& out, const interop::ClampedInteger<T>& in) {
-            out = in;
-            return true;
-        }
-
-        // EnforceRangeInteger<T>
-        template <typename T>
-        inline bool Convert(T& out, const interop::EnforceRangeInteger<T>& in) {
-            out = in;
-            return true;
-        }
-
-        template <typename OUT, typename... IN_TYPES>
-        inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
-            return std::visit([&](auto&& i) { return Convert(out, i); }, in);
-        }
-
-        // If the std::optional does not have a value, then Convert() simply returns true and 'out'
-        // is not assigned a new value.
-        template <typename OUT, typename IN>
-        inline bool Convert(OUT& out, const std::optional<IN>& in) {
-            if (in.has_value()) {
-                return Convert(out, in.value());
-            }
-            return true;
-        }
-
-        // std::optional -> T*
-        // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
-        // whether 'in' has a value.
-        template <typename OUT,
-                  typename IN,
-                  typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
-        inline bool Convert(OUT*& out, const std::optional<IN>& in) {
-            if (in.has_value()) {
-                auto* el = Allocate<std::remove_const_t<OUT>>();
-                if (!Convert(*el, in.value())) {
-                    return false;
-                }
-                out = el;
-            } else {
-                out = nullptr;
-            }
-            return true;
-        }
-
-        // interop::Interface -> Dawn object
-        template <typename OUT, typename IN>
-        inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
-            using Impl = ImplOf<IN>;
-            out = *in.template As<Impl>();
-            if (!out) {
-                LOG("Dawn object has been destroyed. This should not happen");
-                return false;
-            }
-            return true;
-        }
-
-        // vector -> raw pointer + count
-        template <typename OUT, typename IN>
-        inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
-            if (in.size() == 0) {
-                out_els = nullptr;
-                out_count = 0;
-                return true;
-            }
-            auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
-            for (size_t i = 0; i < in.size(); i++) {
-                if (!Convert(els[i], in[i])) {
-                    return false;
-                }
-            }
-            out_els = els;
-            return Convert(out_count, in.size());
-        }
-
-        // unordered_map -> raw pointer + count
-        template <typename OUT, typename IN_KEY, typename IN_VALUE>
-        inline bool Convert(OUT*& out_els,
-                            uint32_t& out_count,
-                            const std::unordered_map<IN_KEY, IN_VALUE>& in) {
-            if (in.size() == 0) {
-                out_els = nullptr;
-                out_count = 0;
-                return true;
-            }
-            auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
-            size_t i = 0;
-            for (auto& [key, value] : in) {
-                if (!Convert(els[i++], key, value)) {
-                    return false;
-                }
-            }
-            out_els = els;
-            return Convert(out_count, in.size());
-        }
-
-        // std::optional<T> -> raw pointer + count
-        template <typename OUT, typename IN>
-        inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::optional<IN>& in) {
-            if (!in.has_value()) {
-                out_els = nullptr;
-                out_count = 0;
-                return true;
-            }
-            return Convert(out_els, out_count, in.value());
-        }
-
-        Napi::Env env;
-
-        // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
-        // the first element. The array is freed when the Converter is destructed.
-        template <typename T>
-        T* Allocate(size_t n = 1) {
-            auto* ptr = new T[n]{};
-            free_.emplace_back([ptr] { delete[] ptr; });
-            return ptr;
-        }
-
-        std::vector<std::function<void()>> free_;
+    // BufferSource is the converted type of interop::BufferSource.
+    struct BufferSource {
+        void* data;
+        size_t size;             // in bytes
+        size_t bytesPerElement;  // 1 for ArrayBuffers
     };
 
+  private:
+    // Below are the various overloads of Convert() used to convert the interop -> Dawn types.
+    [[nodiscard]] bool Convert(wgpu::Extent3D& out, const interop::GPUExtent3D& in);
+
+    [[nodiscard]] bool Convert(wgpu::Origin3D& out, const interop::GPUOrigin3DDict& in);
+
+    [[nodiscard]] bool Convert(wgpu::Color& out, const interop::GPUColor& in);
+
+    [[nodiscard]] bool Convert(wgpu::Origin3D& out,
+                               const std::vector<interop::GPUIntegerCoordinate>& in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureAspect& out, const interop::GPUTextureAspect& in);
+
+    [[nodiscard]] bool Convert(wgpu::ImageCopyTexture& out, const interop::GPUImageCopyTexture& in);
+
+    [[nodiscard]] bool Convert(wgpu::ImageCopyBuffer& out, const interop::GPUImageCopyBuffer& in);
+
+    [[nodiscard]] bool Convert(BufferSource& out, interop::BufferSource in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureDataLayout& out, const interop::GPUImageDataLayout& in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureFormat& out, const interop::GPUTextureFormat& in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureUsage& out, const interop::GPUTextureUsageFlags& in);
+
+    [[nodiscard]] bool Convert(wgpu::ColorWriteMask& out, const interop::GPUColorWriteFlags& in);
+
+    [[nodiscard]] bool Convert(wgpu::BufferUsage& out, const interop::GPUBufferUsageFlags& in);
+
+    [[nodiscard]] bool Convert(wgpu::MapMode& out, const interop::GPUMapModeFlags& in);
+
+    [[nodiscard]] bool Convert(wgpu::ShaderStage& out, const interop::GPUShaderStageFlags& in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureDimension& out, const interop::GPUTextureDimension& in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureViewDimension& out,
+                               const interop::GPUTextureViewDimension& in);
+
+    [[nodiscard]] bool Convert(wgpu::ProgrammableStageDescriptor& out,
+                               const interop::GPUProgrammableStage& in);
+
+    [[nodiscard]] bool Convert(wgpu::ConstantEntry& out,
+                               const std::string& in_name,
+                               wgpu::interop::GPUPipelineConstantValue in_value);
+
+    [[nodiscard]] bool Convert(wgpu::BlendComponent& out, const interop::GPUBlendComponent& in);
+
+    [[nodiscard]] bool Convert(wgpu::BlendFactor& out, const interop::GPUBlendFactor& in);
+
+    [[nodiscard]] bool Convert(wgpu::BlendOperation& out, const interop::GPUBlendOperation& in);
+
+    [[nodiscard]] bool Convert(wgpu::BlendState& out, const interop::GPUBlendState& in);
+
+    [[nodiscard]] bool Convert(wgpu::PrimitiveState& out, const interop::GPUPrimitiveState& in);
+
+    [[nodiscard]] bool Convert(wgpu::ColorTargetState& out, const interop::GPUColorTargetState& in);
+
+    [[nodiscard]] bool Convert(wgpu::DepthStencilState& out,
+                               const interop::GPUDepthStencilState& in);
+
+    [[nodiscard]] bool Convert(wgpu::MultisampleState& out, const interop::GPUMultisampleState& in);
+
+    [[nodiscard]] bool Convert(wgpu::FragmentState& out, const interop::GPUFragmentState& in);
+
+    [[nodiscard]] bool Convert(wgpu::PrimitiveTopology& out,
+                               const interop::GPUPrimitiveTopology& in);
+
+    [[nodiscard]] bool Convert(wgpu::FrontFace& out, const interop::GPUFrontFace& in);
+
+    [[nodiscard]] bool Convert(wgpu::CullMode& out, const interop::GPUCullMode& in);
+
+    [[nodiscard]] bool Convert(wgpu::CompareFunction& out, const interop::GPUCompareFunction& in);
+
+    [[nodiscard]] bool Convert(wgpu::IndexFormat& out, const interop::GPUIndexFormat& in);
+
+    [[nodiscard]] bool Convert(wgpu::StencilOperation& out, const interop::GPUStencilOperation& in);
+
+    [[nodiscard]] bool Convert(wgpu::StencilFaceState& out, const interop::GPUStencilFaceState& in);
+
+    [[nodiscard]] bool Convert(wgpu::VertexState& out, const interop::GPUVertexState& in);
+
+    [[nodiscard]] bool Convert(wgpu::VertexBufferLayout& out,
+                               const interop::GPUVertexBufferLayout& in);
+
+    [[nodiscard]] bool Convert(wgpu::VertexStepMode& out, const interop::GPUVertexStepMode& in);
+
+    [[nodiscard]] bool Convert(wgpu::VertexAttribute& out, const interop::GPUVertexAttribute& in);
+
+    [[nodiscard]] bool Convert(wgpu::VertexFormat& out, const interop::GPUVertexFormat& in);
+
+    [[nodiscard]] bool Convert(wgpu::RenderPassColorAttachment& out,
+                               const interop::GPURenderPassColorAttachment& in);
+
+    [[nodiscard]] bool Convert(wgpu::RenderPassDepthStencilAttachment& out,
+                               const interop::GPURenderPassDepthStencilAttachment& in);
+
+    [[nodiscard]] bool Convert(wgpu::LoadOp& out, const interop::GPULoadOp& in);
+
+    [[nodiscard]] bool Convert(wgpu::StoreOp& out, const interop::GPUStoreOp& in);
+
+    [[nodiscard]] bool Convert(wgpu::BindGroupEntry& out, const interop::GPUBindGroupEntry& in);
+
+    [[nodiscard]] bool Convert(wgpu::BindGroupLayoutEntry& out,
+                               const interop::GPUBindGroupLayoutEntry& in);
+
+    [[nodiscard]] bool Convert(wgpu::BufferBindingLayout& out,
+                               const interop::GPUBufferBindingLayout& in);
+
+    [[nodiscard]] bool Convert(wgpu::SamplerBindingLayout& out,
+                               const interop::GPUSamplerBindingLayout& in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureBindingLayout& out,
+                               const interop::GPUTextureBindingLayout& in);
+
+    [[nodiscard]] bool Convert(wgpu::StorageTextureBindingLayout& out,
+                               const interop::GPUStorageTextureBindingLayout& in);
+
+    [[nodiscard]] bool Convert(wgpu::BufferBindingType& out,
+                               const interop::GPUBufferBindingType& in);
+
+    [[nodiscard]] bool Convert(wgpu::SamplerBindingType& out,
+                               const interop::GPUSamplerBindingType& in);
+
+    [[nodiscard]] bool Convert(wgpu::TextureSampleType& out,
+                               const interop::GPUTextureSampleType& in);
+
+    [[nodiscard]] bool Convert(wgpu::StorageTextureAccess& out,
+                               const interop::GPUStorageTextureAccess& in);
+
+    [[nodiscard]] bool Convert(wgpu::QueryType& out, const interop::GPUQueryType& in);
+
+    [[nodiscard]] bool Convert(wgpu::AddressMode& out, const interop::GPUAddressMode& in);
+
+    [[nodiscard]] bool Convert(wgpu::FilterMode& out, const interop::GPUFilterMode& in);
+
+    [[nodiscard]] bool Convert(wgpu::ComputePipelineDescriptor& out,
+                               const interop::GPUComputePipelineDescriptor& in);
+
+    [[nodiscard]] bool Convert(wgpu::RenderPipelineDescriptor& out,
+                               const interop::GPURenderPipelineDescriptor& in);
+
+    // std::string to C string
+    inline bool Convert(const char*& out, const std::string& in) {
+        out = in.c_str();
+        return true;
+    }
+
+    // Pass-through (no conversion)
+    template <typename T>
+    inline bool Convert(T& out, const T& in) {
+        out = in;
+        return true;
+    }
+
+    // Integral number conversion, with dynamic limit checking
+    template <typename OUT,
+              typename IN,
+              typename = std::enable_if_t<std::is_integral_v<IN> && std::is_integral_v<OUT>>>
+    inline bool Convert(OUT& out, const IN& in) {
+        out = static_cast<OUT>(in);
+        if (static_cast<IN>(out) != in) {
+            Napi::Error::New(env, "Integer value (" + std::to_string(in) +
+                                      ") cannot be converted to the Dawn data type without "
+                                      "truncation of the value")
+                .ThrowAsJavaScriptException();
+            return false;
+        }
+        return true;
+    }
+
+    // ClampedInteger<T>
+    template <typename T>
+    inline bool Convert(T& out, const interop::ClampedInteger<T>& in) {
+        out = in;
+        return true;
+    }
+
+    // EnforceRangeInteger<T>
+    template <typename T>
+    inline bool Convert(T& out, const interop::EnforceRangeInteger<T>& in) {
+        out = in;
+        return true;
+    }
+
+    template <typename OUT, typename... IN_TYPES>
+    inline bool Convert(OUT& out, const std::variant<IN_TYPES...>& in) {
+        return std::visit([&](auto&& i) { return Convert(out, i); }, in);
+    }
+
+    // If the std::optional does not have a value, then Convert() simply returns true and 'out'
+    // is not assigned a new value.
+    template <typename OUT, typename IN>
+    inline bool Convert(OUT& out, const std::optional<IN>& in) {
+        if (in.has_value()) {
+            return Convert(out, in.value());
+        }
+        return true;
+    }
+
+    // std::optional -> T*
+    // OUT* is assigned either a pointer to the converted value, or nullptr, depending on
+    // whether 'in' has a value.
+    template <typename OUT,
+              typename IN,
+              typename _ = std::enable_if_t<!std::is_same_v<IN, std::string>>>
+    inline bool Convert(OUT*& out, const std::optional<IN>& in) {
+        if (in.has_value()) {
+            auto* el = Allocate<std::remove_const_t<OUT>>();
+            if (!Convert(*el, in.value())) {
+                return false;
+            }
+            out = el;
+        } else {
+            out = nullptr;
+        }
+        return true;
+    }
+
+    // interop::Interface -> Dawn object
+    template <typename OUT, typename IN>
+    inline bool Convert(OUT& out, const interop::Interface<IN>& in) {
+        using Impl = ImplOf<IN>;
+        out = *in.template As<Impl>();
+        if (!out) {
+            LOG("Dawn object has been destroyed. This should not happen");
+            return false;
+        }
+        return true;
+    }
+
+    // vector -> raw pointer + count
+    template <typename OUT, typename IN>
+    inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::vector<IN>& in) {
+        if (in.size() == 0) {
+            out_els = nullptr;
+            out_count = 0;
+            return true;
+        }
+        auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+        for (size_t i = 0; i < in.size(); i++) {
+            if (!Convert(els[i], in[i])) {
+                return false;
+            }
+        }
+        out_els = els;
+        return Convert(out_count, in.size());
+    }
+
+    // unordered_map -> raw pointer + count
+    template <typename OUT, typename IN_KEY, typename IN_VALUE>
+    inline bool Convert(OUT*& out_els,
+                        uint32_t& out_count,
+                        const std::unordered_map<IN_KEY, IN_VALUE>& in) {
+        if (in.size() == 0) {
+            out_els = nullptr;
+            out_count = 0;
+            return true;
+        }
+        auto* els = Allocate<std::remove_const_t<OUT>>(in.size());
+        size_t i = 0;
+        for (auto& [key, value] : in) {
+            if (!Convert(els[i++], key, value)) {
+                return false;
+            }
+        }
+        out_els = els;
+        return Convert(out_count, in.size());
+    }
+
+    // std::optional<T> -> raw pointer + count
+    template <typename OUT, typename IN>
+    inline bool Convert(OUT*& out_els, uint32_t& out_count, const std::optional<IN>& in) {
+        if (!in.has_value()) {
+            out_els = nullptr;
+            out_count = 0;
+            return true;
+        }
+        return Convert(out_els, out_count, in.value());
+    }
+
+    Napi::Env env;
+
+    // Allocate() allocates and constructs an array of 'n' elements, and returns a pointer to
+    // the first element. The array is freed when the Converter is destructed.
+    template <typename T>
+    T* Allocate(size_t n = 1) {
+        auto* ptr = new T[n]{};
+        free_.emplace_back([ptr] { delete[] ptr; });
+        return ptr;
+    }
+
+    std::vector<std::function<void()>> free_;
+};
+
 }  // namespace wgpu::binding
 
 #endif  // SRC_DAWN_NODE_BINDING_CONVERTER_H_
diff --git a/src/dawn/node/binding/Errors.cpp b/src/dawn/node/binding/Errors.cpp
index fd13ba6..4efa489 100644
--- a/src/dawn/node/binding/Errors.cpp
+++ b/src/dawn/node/binding/Errors.cpp
@@ -16,164 +16,161 @@
 
 namespace wgpu::binding {
 
-    namespace {
-        constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
-        constexpr char kWrongDocumentError[] = "WrongDocumentError";
-        constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
-        constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
-        constexpr char kNotFoundError[] = "NotFoundError";
-        constexpr char kNotSupportedError[] = "NotSupportedError";
-        constexpr char kInUseAttributeError[] = "InUseAttributeError";
-        constexpr char kInvalidStateError[] = "InvalidStateError";
-        constexpr char kSyntaxError[] = "SyntaxError";
-        constexpr char kInvalidModificationError[] = "InvalidModificationError";
-        constexpr char kNamespaceError[] = "NamespaceError";
-        constexpr char kSecurityError[] = "SecurityError";
-        constexpr char kNetworkError[] = "NetworkError";
-        constexpr char kAbortError[] = "AbortError";
-        constexpr char kURLMismatchError[] = "URLMismatchError";
-        constexpr char kQuotaExceededError[] = "QuotaExceededError";
-        constexpr char kTimeoutError[] = "TimeoutError";
-        constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
-        constexpr char kDataCloneError[] = "DataCloneError";
-        constexpr char kEncodingError[] = "EncodingError";
-        constexpr char kNotReadableError[] = "NotReadableError";
-        constexpr char kUnknownError[] = "UnknownError";
-        constexpr char kConstraintError[] = "ConstraintError";
-        constexpr char kDataError[] = "DataError";
-        constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
-        constexpr char kReadOnlyError[] = "ReadOnlyError";
-        constexpr char kVersionError[] = "VersionError";
-        constexpr char kOperationError[] = "OperationError";
-        constexpr char kNotAllowedError[] = "NotAllowedError";
+namespace {
+constexpr char kHierarchyRequestError[] = "HierarchyRequestError";
+constexpr char kWrongDocumentError[] = "WrongDocumentError";
+constexpr char kInvalidCharacterError[] = "InvalidCharacterError";
+constexpr char kNoModificationAllowedError[] = "NoModificationAllowedError";
+constexpr char kNotFoundError[] = "NotFoundError";
+constexpr char kNotSupportedError[] = "NotSupportedError";
+constexpr char kInUseAttributeError[] = "InUseAttributeError";
+constexpr char kInvalidStateError[] = "InvalidStateError";
+constexpr char kSyntaxError[] = "SyntaxError";
+constexpr char kInvalidModificationError[] = "InvalidModificationError";
+constexpr char kNamespaceError[] = "NamespaceError";
+constexpr char kSecurityError[] = "SecurityError";
+constexpr char kNetworkError[] = "NetworkError";
+constexpr char kAbortError[] = "AbortError";
+constexpr char kURLMismatchError[] = "URLMismatchError";
+constexpr char kQuotaExceededError[] = "QuotaExceededError";
+constexpr char kTimeoutError[] = "TimeoutError";
+constexpr char kInvalidNodeTypeError[] = "InvalidNodeTypeError";
+constexpr char kDataCloneError[] = "DataCloneError";
+constexpr char kEncodingError[] = "EncodingError";
+constexpr char kNotReadableError[] = "NotReadableError";
+constexpr char kUnknownError[] = "UnknownError";
+constexpr char kConstraintError[] = "ConstraintError";
+constexpr char kDataError[] = "DataError";
+constexpr char kTransactionInactiveError[] = "TransactionInactiveError";
+constexpr char kReadOnlyError[] = "ReadOnlyError";
+constexpr char kVersionError[] = "VersionError";
+constexpr char kOperationError[] = "OperationError";
+constexpr char kNotAllowedError[] = "NotAllowedError";
 
-        static Napi::Error New(Napi::Env env,
-                               std::string name,
-                               std::string message,
-                               uint16_t code = 0) {
-            auto err = Napi::Error::New(env);
-            err.Set("name", name);
-            err.Set("message", message.empty() ? name : message);
-            err.Set("code", static_cast<double>(code));
-            return err;
-        }
+static Napi::Error New(Napi::Env env, std::string name, std::string message, uint16_t code = 0) {
+    auto err = Napi::Error::New(env);
+    err.Set("name", name);
+    err.Set("message", message.empty() ? name : message);
+    err.Set("code", static_cast<double>(code));
+    return err;
+}
 
-    }  // namespace
+}  // namespace
 
-    Napi::Error Errors::HierarchyRequestError(Napi::Env env, std::string message) {
-        return New(env, kHierarchyRequestError, message);
-    }
+Napi::Error Errors::HierarchyRequestError(Napi::Env env, std::string message) {
+    return New(env, kHierarchyRequestError, message);
+}
 
-    Napi::Error Errors::WrongDocumentError(Napi::Env env, std::string message) {
-        return New(env, kWrongDocumentError, message);
-    }
+Napi::Error Errors::WrongDocumentError(Napi::Env env, std::string message) {
+    return New(env, kWrongDocumentError, message);
+}
 
-    Napi::Error Errors::InvalidCharacterError(Napi::Env env, std::string message) {
-        return New(env, kInvalidCharacterError, message);
-    }
+Napi::Error Errors::InvalidCharacterError(Napi::Env env, std::string message) {
+    return New(env, kInvalidCharacterError, message);
+}
 
-    Napi::Error Errors::NoModificationAllowedError(Napi::Env env, std::string message) {
-        return New(env, kNoModificationAllowedError, message);
-    }
+Napi::Error Errors::NoModificationAllowedError(Napi::Env env, std::string message) {
+    return New(env, kNoModificationAllowedError, message);
+}
 
-    Napi::Error Errors::NotFoundError(Napi::Env env, std::string message) {
-        return New(env, kNotFoundError, message);
-    }
+Napi::Error Errors::NotFoundError(Napi::Env env, std::string message) {
+    return New(env, kNotFoundError, message);
+}
 
-    Napi::Error Errors::NotSupportedError(Napi::Env env, std::string message) {
-        return New(env, kNotSupportedError, message);
-    }
+Napi::Error Errors::NotSupportedError(Napi::Env env, std::string message) {
+    return New(env, kNotSupportedError, message);
+}
 
-    Napi::Error Errors::InUseAttributeError(Napi::Env env, std::string message) {
-        return New(env, kInUseAttributeError, message);
-    }
+Napi::Error Errors::InUseAttributeError(Napi::Env env, std::string message) {
+    return New(env, kInUseAttributeError, message);
+}
 
-    Napi::Error Errors::InvalidStateError(Napi::Env env, std::string message) {
-        return New(env, kInvalidStateError, message);
-    }
+Napi::Error Errors::InvalidStateError(Napi::Env env, std::string message) {
+    return New(env, kInvalidStateError, message);
+}
 
-    Napi::Error Errors::SyntaxError(Napi::Env env, std::string message) {
-        return New(env, kSyntaxError, message);
-    }
+Napi::Error Errors::SyntaxError(Napi::Env env, std::string message) {
+    return New(env, kSyntaxError, message);
+}
 
-    Napi::Error Errors::InvalidModificationError(Napi::Env env, std::string message) {
-        return New(env, kInvalidModificationError, message);
-    }
+Napi::Error Errors::InvalidModificationError(Napi::Env env, std::string message) {
+    return New(env, kInvalidModificationError, message);
+}
 
-    Napi::Error Errors::NamespaceError(Napi::Env env, std::string message) {
-        return New(env, kNamespaceError, message);
-    }
+Napi::Error Errors::NamespaceError(Napi::Env env, std::string message) {
+    return New(env, kNamespaceError, message);
+}
 
-    Napi::Error Errors::SecurityError(Napi::Env env, std::string message) {
-        return New(env, kSecurityError, message);
-    }
+Napi::Error Errors::SecurityError(Napi::Env env, std::string message) {
+    return New(env, kSecurityError, message);
+}
 
-    Napi::Error Errors::NetworkError(Napi::Env env, std::string message) {
-        return New(env, kNetworkError, message);
-    }
+Napi::Error Errors::NetworkError(Napi::Env env, std::string message) {
+    return New(env, kNetworkError, message);
+}
 
-    Napi::Error Errors::AbortError(Napi::Env env, std::string message) {
-        return New(env, kAbortError, message);
-    }
+Napi::Error Errors::AbortError(Napi::Env env, std::string message) {
+    return New(env, kAbortError, message);
+}
 
-    Napi::Error Errors::URLMismatchError(Napi::Env env, std::string message) {
-        return New(env, kURLMismatchError, message);
-    }
+Napi::Error Errors::URLMismatchError(Napi::Env env, std::string message) {
+    return New(env, kURLMismatchError, message);
+}
 
-    Napi::Error Errors::QuotaExceededError(Napi::Env env, std::string message) {
-        return New(env, kQuotaExceededError, message);
-    }
+Napi::Error Errors::QuotaExceededError(Napi::Env env, std::string message) {
+    return New(env, kQuotaExceededError, message);
+}
 
-    Napi::Error Errors::TimeoutError(Napi::Env env, std::string message) {
-        return New(env, kTimeoutError, message);
-    }
+Napi::Error Errors::TimeoutError(Napi::Env env, std::string message) {
+    return New(env, kTimeoutError, message);
+}
 
-    Napi::Error Errors::InvalidNodeTypeError(Napi::Env env, std::string message) {
-        return New(env, kInvalidNodeTypeError, message);
-    }
+Napi::Error Errors::InvalidNodeTypeError(Napi::Env env, std::string message) {
+    return New(env, kInvalidNodeTypeError, message);
+}
 
-    Napi::Error Errors::DataCloneError(Napi::Env env, std::string message) {
-        return New(env, kDataCloneError, message);
-    }
+Napi::Error Errors::DataCloneError(Napi::Env env, std::string message) {
+    return New(env, kDataCloneError, message);
+}
 
-    Napi::Error Errors::EncodingError(Napi::Env env, std::string message) {
-        return New(env, kEncodingError, message);
-    }
+Napi::Error Errors::EncodingError(Napi::Env env, std::string message) {
+    return New(env, kEncodingError, message);
+}
 
-    Napi::Error Errors::NotReadableError(Napi::Env env, std::string message) {
-        return New(env, kNotReadableError, message);
-    }
+Napi::Error Errors::NotReadableError(Napi::Env env, std::string message) {
+    return New(env, kNotReadableError, message);
+}
 
-    Napi::Error Errors::UnknownError(Napi::Env env, std::string message) {
-        return New(env, kUnknownError, message);
-    }
+Napi::Error Errors::UnknownError(Napi::Env env, std::string message) {
+    return New(env, kUnknownError, message);
+}
 
-    Napi::Error Errors::ConstraintError(Napi::Env env, std::string message) {
-        return New(env, kConstraintError, message);
-    }
+Napi::Error Errors::ConstraintError(Napi::Env env, std::string message) {
+    return New(env, kConstraintError, message);
+}
 
-    Napi::Error Errors::DataError(Napi::Env env, std::string message) {
-        return New(env, kDataError, message);
-    }
+Napi::Error Errors::DataError(Napi::Env env, std::string message) {
+    return New(env, kDataError, message);
+}
 
-    Napi::Error Errors::TransactionInactiveError(Napi::Env env, std::string message) {
-        return New(env, kTransactionInactiveError, message);
-    }
+Napi::Error Errors::TransactionInactiveError(Napi::Env env, std::string message) {
+    return New(env, kTransactionInactiveError, message);
+}
 
-    Napi::Error Errors::ReadOnlyError(Napi::Env env, std::string message) {
-        return New(env, kReadOnlyError, message);
-    }
+Napi::Error Errors::ReadOnlyError(Napi::Env env, std::string message) {
+    return New(env, kReadOnlyError, message);
+}
 
-    Napi::Error Errors::VersionError(Napi::Env env, std::string message) {
-        return New(env, kVersionError, message);
-    }
+Napi::Error Errors::VersionError(Napi::Env env, std::string message) {
+    return New(env, kVersionError, message);
+}
 
-    Napi::Error Errors::OperationError(Napi::Env env, std::string message) {
-        return New(env, kOperationError, message);
-    }
+Napi::Error Errors::OperationError(Napi::Env env, std::string message) {
+    return New(env, kOperationError, message);
+}
 
-    Napi::Error Errors::NotAllowedError(Napi::Env env, std::string message) {
-        return New(env, kNotAllowedError, message);
-    }
+Napi::Error Errors::NotAllowedError(Napi::Env env, std::string message) {
+    return New(env, kNotAllowedError, message);
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/Errors.h b/src/dawn/node/binding/Errors.h
index 6d68275..9e30ef7 100644
--- a/src/dawn/node/binding/Errors.h
+++ b/src/dawn/node/binding/Errors.h
@@ -21,41 +21,41 @@
 
 namespace wgpu::binding {
 
-    // Errors contains static helper methods for creating DOMException error
-    // messages as documented at:
-    // https://heycam.github.io/webidl/#idl-DOMException-error-names
-    class Errors {
-      public:
-        static Napi::Error HierarchyRequestError(Napi::Env, std::string message = {});
-        static Napi::Error WrongDocumentError(Napi::Env, std::string message = {});
-        static Napi::Error InvalidCharacterError(Napi::Env, std::string message = {});
-        static Napi::Error NoModificationAllowedError(Napi::Env, std::string message = {});
-        static Napi::Error NotFoundError(Napi::Env, std::string message = {});
-        static Napi::Error NotSupportedError(Napi::Env, std::string message = {});
-        static Napi::Error InUseAttributeError(Napi::Env, std::string message = {});
-        static Napi::Error InvalidStateError(Napi::Env, std::string message = {});
-        static Napi::Error SyntaxError(Napi::Env, std::string message = {});
-        static Napi::Error InvalidModificationError(Napi::Env, std::string message = {});
-        static Napi::Error NamespaceError(Napi::Env, std::string message = {});
-        static Napi::Error SecurityError(Napi::Env, std::string message = {});
-        static Napi::Error NetworkError(Napi::Env, std::string message = {});
-        static Napi::Error AbortError(Napi::Env, std::string message = {});
-        static Napi::Error URLMismatchError(Napi::Env, std::string message = {});
-        static Napi::Error QuotaExceededError(Napi::Env, std::string message = {});
-        static Napi::Error TimeoutError(Napi::Env, std::string message = {});
-        static Napi::Error InvalidNodeTypeError(Napi::Env, std::string message = {});
-        static Napi::Error DataCloneError(Napi::Env, std::string message = {});
-        static Napi::Error EncodingError(Napi::Env, std::string message = {});
-        static Napi::Error NotReadableError(Napi::Env, std::string message = {});
-        static Napi::Error UnknownError(Napi::Env, std::string message = {});
-        static Napi::Error ConstraintError(Napi::Env, std::string message = {});
-        static Napi::Error DataError(Napi::Env, std::string message = {});
-        static Napi::Error TransactionInactiveError(Napi::Env, std::string message = {});
-        static Napi::Error ReadOnlyError(Napi::Env, std::string message = {});
-        static Napi::Error VersionError(Napi::Env, std::string message = {});
-        static Napi::Error OperationError(Napi::Env, std::string message = {});
-        static Napi::Error NotAllowedError(Napi::Env, std::string message = {});
-    };
+// Errors contains static helper methods for creating DOMException error
+// messages as documented at:
+// https://heycam.github.io/webidl/#idl-DOMException-error-names
+class Errors {
+  public:
+    static Napi::Error HierarchyRequestError(Napi::Env, std::string message = {});
+    static Napi::Error WrongDocumentError(Napi::Env, std::string message = {});
+    static Napi::Error InvalidCharacterError(Napi::Env, std::string message = {});
+    static Napi::Error NoModificationAllowedError(Napi::Env, std::string message = {});
+    static Napi::Error NotFoundError(Napi::Env, std::string message = {});
+    static Napi::Error NotSupportedError(Napi::Env, std::string message = {});
+    static Napi::Error InUseAttributeError(Napi::Env, std::string message = {});
+    static Napi::Error InvalidStateError(Napi::Env, std::string message = {});
+    static Napi::Error SyntaxError(Napi::Env, std::string message = {});
+    static Napi::Error InvalidModificationError(Napi::Env, std::string message = {});
+    static Napi::Error NamespaceError(Napi::Env, std::string message = {});
+    static Napi::Error SecurityError(Napi::Env, std::string message = {});
+    static Napi::Error NetworkError(Napi::Env, std::string message = {});
+    static Napi::Error AbortError(Napi::Env, std::string message = {});
+    static Napi::Error URLMismatchError(Napi::Env, std::string message = {});
+    static Napi::Error QuotaExceededError(Napi::Env, std::string message = {});
+    static Napi::Error TimeoutError(Napi::Env, std::string message = {});
+    static Napi::Error InvalidNodeTypeError(Napi::Env, std::string message = {});
+    static Napi::Error DataCloneError(Napi::Env, std::string message = {});
+    static Napi::Error EncodingError(Napi::Env, std::string message = {});
+    static Napi::Error NotReadableError(Napi::Env, std::string message = {});
+    static Napi::Error UnknownError(Napi::Env, std::string message = {});
+    static Napi::Error ConstraintError(Napi::Env, std::string message = {});
+    static Napi::Error DataError(Napi::Env, std::string message = {});
+    static Napi::Error TransactionInactiveError(Napi::Env, std::string message = {});
+    static Napi::Error ReadOnlyError(Napi::Env, std::string message = {});
+    static Napi::Error VersionError(Napi::Env, std::string message = {});
+    static Napi::Error OperationError(Napi::Env, std::string message = {});
+    static Napi::Error NotAllowedError(Napi::Env, std::string message = {});
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/Flags.cpp b/src/dawn/node/binding/Flags.cpp
index 40b0560..75fd2ce 100644
--- a/src/dawn/node/binding/Flags.cpp
+++ b/src/dawn/node/binding/Flags.cpp
@@ -15,15 +15,15 @@
 #include "src/dawn/node/binding/Flags.h"
 
 namespace wgpu::binding {
-    void Flags::Set(const std::string& key, const std::string& value) {
-        flags_[key] = value;
-    }
+void Flags::Set(const std::string& key, const std::string& value) {
+    flags_[key] = value;
+}
 
-    std::optional<std::string> Flags::Get(const std::string& key) const {
-        auto iter = flags_.find(key);
-        if (iter != flags_.end()) {
-            return iter->second;
-        }
-        return {};
+std::optional<std::string> Flags::Get(const std::string& key) const {
+    auto iter = flags_.find(key);
+    if (iter != flags_.end()) {
+        return iter->second;
     }
+    return {};
+}
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/Flags.h b/src/dawn/node/binding/Flags.h
index b4d4e29..2989868 100644
--- a/src/dawn/node/binding/Flags.h
+++ b/src/dawn/node/binding/Flags.h
@@ -20,16 +20,16 @@
 #include <unordered_map>
 
 namespace wgpu::binding {
-    // Flags maintains a key-value mapping of input flags passed into the module's create()
-    // function, used to configure dawn_node.
-    class Flags {
-      public:
-        void Set(const std::string& key, const std::string& value);
-        std::optional<std::string> Get(const std::string& key) const;
+// Flags maintains a key-value mapping of input flags passed into the module's create()
+// function, used to configure dawn_node.
+class Flags {
+  public:
+    void Set(const std::string& key, const std::string& value);
+    std::optional<std::string> Get(const std::string& key) const;
 
-      private:
-        std::unordered_map<std::string, std::string> flags_;
-    };
+  private:
+    std::unordered_map<std::string, std::string> flags_;
+};
 }  // namespace wgpu::binding
 
 #endif  // SRC_DAWN_NODE_BINDING_FLAGS_H_
diff --git a/src/dawn/node/binding/GPU.cpp b/src/dawn/node/binding/GPU.cpp
index 55be416..1b147ed 100644
--- a/src/dawn/node/binding/GPU.cpp
+++ b/src/dawn/node/binding/GPU.cpp
@@ -22,147 +22,147 @@
 #include "src/dawn/node/binding/GPUAdapter.h"
 
 #if defined(_WIN32)
-#    include <Windows.h>
+#include <Windows.h>
 #endif
 
 namespace {
-    std::string GetEnvVar(const char* varName) {
+std::string GetEnvVar(const char* varName) {
 #if defined(_WIN32)
-        // Use _dupenv_s to avoid unsafe warnings about std::getenv
-        char* value = nullptr;
-        _dupenv_s(&value, nullptr, varName);
-        if (value) {
-            std::string result = value;
-            free(value);
-            return result;
-        }
-        return "";
+    // Use _dupenv_s to avoid unsafe warnings about std::getenv
+    char* value = nullptr;
+    _dupenv_s(&value, nullptr, varName);
+    if (value) {
+        std::string result = value;
+        free(value);
+        return result;
+    }
+    return "";
 #else
-        if (auto* val = std::getenv(varName)) {
-            return val;
-        }
-        return "";
-#endif
+    if (auto* val = std::getenv(varName)) {
+        return val;
     }
+    return "";
+#endif
+}
 
-    void SetDllDir(const char* dir) {
-        (void)dir;
+void SetDllDir(const char* dir) {
+    (void)dir;
 #if defined(_WIN32)
-        ::SetDllDirectory(dir);
+    ::SetDllDirectory(dir);
 #endif
-    }
+}
 
 }  // namespace
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPU
-    ////////////////////////////////////////////////////////////////////////////////
-    GPU::GPU(Flags flags) : flags_(std::move(flags)) {
-        // TODO(dawn:1123): Disable in 'release'
-        instance_.EnableBackendValidation(true);
-        instance_.SetBackendValidationLevel(dawn::native::BackendValidationLevel::Full);
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPU
+////////////////////////////////////////////////////////////////////////////////
+GPU::GPU(Flags flags) : flags_(std::move(flags)) {
+    // TODO(dawn:1123): Disable in 'release'
+    instance_.EnableBackendValidation(true);
+    instance_.SetBackendValidationLevel(dawn::native::BackendValidationLevel::Full);
 
-        // Setting the DllDir changes where we load adapter DLLs from (e.g. d3dcompiler_47.dll)
-        if (auto dir = flags_.Get("dlldir")) {
-            SetDllDir(dir->c_str());
-        }
-        instance_.DiscoverDefaultAdapters();
+    // Setting the DllDir changes where we load adapter DLLs from (e.g. d3dcompiler_47.dll)
+    if (auto dir = flags_.Get("dlldir")) {
+        SetDllDir(dir->c_str());
     }
+    instance_.DiscoverDefaultAdapters();
+}
 
-    interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
-        Napi::Env env,
-        interop::GPURequestAdapterOptions options) {
-        auto promise = interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(
-            env, PROMISE_INFO);
+interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> GPU::requestAdapter(
+    Napi::Env env,
+    interop::GPURequestAdapterOptions options) {
+    auto promise =
+        interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>>(env, PROMISE_INFO);
 
-        if (options.forceFallbackAdapter) {
-            // Software adapters are not currently supported.
-            promise.Resolve({});
-            return promise;
-        }
-
-        auto adapters = instance_.GetAdapters();
-        if (adapters.empty()) {
-            promise.Resolve({});
-            return promise;
-        }
-
-#if defined(_WIN32)
-        constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
-#elif defined(__linux__)
-        constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
-#elif defined(__APPLE__)
-        constexpr auto defaultBackendType = wgpu::BackendType::Metal;
-#else
-#    error "Unsupported platform"
-#endif
-
-        auto targetBackendType = defaultBackendType;
-        std::string forceBackend;
-
-        // Check for override from env var
-        if (std::string envVar = GetEnvVar("DAWNNODE_BACKEND"); !envVar.empty()) {
-            forceBackend = envVar;
-        }
-
-        // Check for override from flag
-        if (auto f = flags_.Get("dawn-backend")) {
-            forceBackend = *f;
-        }
-
-        std::transform(forceBackend.begin(), forceBackend.end(), forceBackend.begin(),
-                       [](char c) { return std::tolower(c); });
-
-        // Default to first adapter if a backend is not specified
-        size_t adapterIndex = 0;
-
-        if (!forceBackend.empty()) {
-            if (forceBackend == "null") {
-                targetBackendType = wgpu::BackendType::Null;
-            } else if (forceBackend == "webgpu") {
-                targetBackendType = wgpu::BackendType::WebGPU;
-            } else if (forceBackend == "d3d11") {
-                targetBackendType = wgpu::BackendType::D3D11;
-            } else if (forceBackend == "d3d12" || forceBackend == "d3d") {
-                targetBackendType = wgpu::BackendType::D3D12;
-            } else if (forceBackend == "metal") {
-                targetBackendType = wgpu::BackendType::Metal;
-            } else if (forceBackend == "vulkan" || forceBackend == "vk") {
-                targetBackendType = wgpu::BackendType::Vulkan;
-            } else if (forceBackend == "opengl" || forceBackend == "gl") {
-                targetBackendType = wgpu::BackendType::OpenGL;
-            } else if (forceBackend == "opengles" || forceBackend == "gles") {
-                targetBackendType = wgpu::BackendType::OpenGLES;
-            } else {
-                promise.Reject("unknown backend '" + forceBackend + "'");
-                return promise;
-            }
-        }
-
-        bool found = false;
-        for (size_t i = 0; i < adapters.size(); ++i) {
-            wgpu::AdapterProperties props;
-            adapters[i].GetProperties(&props);
-            if (props.backendType == targetBackendType) {
-                adapterIndex = i;
-                found = true;
-                break;
-            }
-        }
-        if (!found) {
-            if (!forceBackend.empty()) {
-                promise.Reject("backend '" + forceBackend + "' not found");
-            } else {
-                promise.Reject("no suitable backends found");
-            }
-            return promise;
-        }
-
-        auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex], flags_);
-        promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
+    if (options.forceFallbackAdapter) {
+        // Software adapters are not currently supported.
+        promise.Resolve({});
         return promise;
     }
 
+    auto adapters = instance_.GetAdapters();
+    if (adapters.empty()) {
+        promise.Resolve({});
+        return promise;
+    }
+
+#if defined(_WIN32)
+    constexpr auto defaultBackendType = wgpu::BackendType::D3D12;
+#elif defined(__linux__)
+    constexpr auto defaultBackendType = wgpu::BackendType::Vulkan;
+#elif defined(__APPLE__)
+    constexpr auto defaultBackendType = wgpu::BackendType::Metal;
+#else
+#error "Unsupported platform"
+#endif
+
+    auto targetBackendType = defaultBackendType;
+    std::string forceBackend;
+
+    // Check for override from env var
+    if (std::string envVar = GetEnvVar("DAWNNODE_BACKEND"); !envVar.empty()) {
+        forceBackend = envVar;
+    }
+
+    // Check for override from flag
+    if (auto f = flags_.Get("dawn-backend")) {
+        forceBackend = *f;
+    }
+
+    std::transform(forceBackend.begin(), forceBackend.end(), forceBackend.begin(),
+                   [](char c) { return std::tolower(c); });
+
+    // Default to first adapter if a backend is not specified
+    size_t adapterIndex = 0;
+
+    if (!forceBackend.empty()) {
+        if (forceBackend == "null") {
+            targetBackendType = wgpu::BackendType::Null;
+        } else if (forceBackend == "webgpu") {
+            targetBackendType = wgpu::BackendType::WebGPU;
+        } else if (forceBackend == "d3d11") {
+            targetBackendType = wgpu::BackendType::D3D11;
+        } else if (forceBackend == "d3d12" || forceBackend == "d3d") {
+            targetBackendType = wgpu::BackendType::D3D12;
+        } else if (forceBackend == "metal") {
+            targetBackendType = wgpu::BackendType::Metal;
+        } else if (forceBackend == "vulkan" || forceBackend == "vk") {
+            targetBackendType = wgpu::BackendType::Vulkan;
+        } else if (forceBackend == "opengl" || forceBackend == "gl") {
+            targetBackendType = wgpu::BackendType::OpenGL;
+        } else if (forceBackend == "opengles" || forceBackend == "gles") {
+            targetBackendType = wgpu::BackendType::OpenGLES;
+        } else {
+            promise.Reject("unknown backend '" + forceBackend + "'");
+            return promise;
+        }
+    }
+
+    bool found = false;
+    for (size_t i = 0; i < adapters.size(); ++i) {
+        wgpu::AdapterProperties props;
+        adapters[i].GetProperties(&props);
+        if (props.backendType == targetBackendType) {
+            adapterIndex = i;
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        if (!forceBackend.empty()) {
+            promise.Reject("backend '" + forceBackend + "' not found");
+        } else {
+            promise.Reject("no suitable backends found");
+        }
+        return promise;
+    }
+
+    auto adapter = GPUAdapter::Create<GPUAdapter>(env, adapters[adapterIndex], flags_);
+    promise.Resolve(std::optional<interop::Interface<interop::GPUAdapter>>(adapter));
+    return promise;
+}
+
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPU.h b/src/dawn/node/binding/GPU.h
index b6ac9f3..8ccafff 100644
--- a/src/dawn/node/binding/GPU.h
+++ b/src/dawn/node/binding/GPU.h
@@ -23,20 +23,20 @@
 #include "src/dawn/node/interop/WebGPU.h"
 
 namespace wgpu::binding {
-    // GPU is an implementation of interop::GPU that wraps a dawn::native::Instance.
-    class GPU final : public interop::GPU {
-      public:
-        GPU(Flags flags);
+// GPU is an implementation of interop::GPU that wraps a dawn::native::Instance.
+class GPU final : public interop::GPU {
+  public:
+    GPU(Flags flags);
 
-        // interop::GPU interface compliance
-        interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
-            Napi::Env env,
-            interop::GPURequestAdapterOptions options) override;
+    // interop::GPU interface compliance
+    interop::Promise<std::optional<interop::Interface<interop::GPUAdapter>>> requestAdapter(
+        Napi::Env env,
+        interop::GPURequestAdapterOptions options) override;
 
-      private:
-        const Flags flags_;
-        dawn::native::Instance instance_;
-    };
+  private:
+    const Flags flags_;
+    dawn::native::Instance instance_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUAdapter.cpp b/src/dawn/node/binding/GPUAdapter.cpp
index 00ff953..bbed35d 100644
--- a/src/dawn/node/binding/GPUAdapter.cpp
+++ b/src/dawn/node/binding/GPUAdapter.cpp
@@ -23,31 +23,31 @@
 #include "src/dawn/node/binding/GPUSupportedLimits.h"
 
 namespace {
-    // TODO(amaiorano): Move to utility header
-    std::vector<std::string> Split(const std::string& s, char delim) {
-        if (s.empty())
-            return {};
+// TODO(amaiorano): Move to utility header
+std::vector<std::string> Split(const std::string& s, char delim) {
+    if (s.empty())
+        return {};
 
-        std::vector<std::string> result;
-        const size_t lastIndex = s.length() - 1;
-        size_t startIndex = 0;
-        size_t i = startIndex;
+    std::vector<std::string> result;
+    const size_t lastIndex = s.length() - 1;
+    size_t startIndex = 0;
+    size_t i = startIndex;
 
-        while (i <= lastIndex) {
-            if (s[i] == delim) {
-                auto token = s.substr(startIndex, i - startIndex);
-                if (!token.empty())  // Discard empty tokens
-                    result.push_back(token);
-                startIndex = i + 1;
-            } else if (i == lastIndex) {
-                auto token = s.substr(startIndex, i - startIndex + 1);
-                if (!token.empty())  // Discard empty tokens
-                    result.push_back(token);
-            }
-            ++i;
+    while (i <= lastIndex) {
+        if (s[i] == delim) {
+            auto token = s.substr(startIndex, i - startIndex);
+            if (!token.empty())  // Discard empty tokens
+                result.push_back(token);
+            startIndex = i + 1;
+        } else if (i == lastIndex) {
+            auto token = s.substr(startIndex, i - startIndex + 1);
+            if (!token.empty())  // Discard empty tokens
+                result.push_back(token);
         }
-        return result;
+        ++i;
     }
+    return result;
+}
 }  // namespace
 
 #define FOR_EACH_LIMIT(X)                        \
@@ -80,193 +80,188 @@
 
 namespace wgpu::binding {
 
-    namespace {
+namespace {
 
-        ////////////////////////////////////////////////////////////////////////////////
-        // wgpu::binding::<anon>::Features
-        // Implements interop::GPUSupportedFeatures
-        ////////////////////////////////////////////////////////////////////////////////
-        class Features : public interop::GPUSupportedFeatures {
-          public:
-            explicit Features(WGPUDeviceProperties properties) {
-                if (properties.depth24UnormStencil8) {
-                    enabled_.emplace(interop::GPUFeatureName::kDepth24UnormStencil8);
-                }
-                if (properties.depth32FloatStencil8) {
-                    enabled_.emplace(interop::GPUFeatureName::kDepth32FloatStencil8);
-                }
-                if (properties.timestampQuery) {
-                    enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
-                }
-                if (properties.textureCompressionBC) {
-                    enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
-                }
-                if (properties.textureCompressionETC2) {
-                    enabled_.emplace(interop::GPUFeatureName::kTextureCompressionEtc2);
-                }
-                if (properties.textureCompressionASTC) {
-                    enabled_.emplace(interop::GPUFeatureName::kTextureCompressionAstc);
-                }
-                if (properties.timestampQuery) {
-                    enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
-                }
-
-                // TODO(dawn:1123) add support for these extensions when possible.
-                // wgpu::interop::GPUFeatureName::kIndirectFirstInstance
-                // wgpu::interop::GPUFeatureName::kDepthClipControl
-            }
-
-            bool has(interop::GPUFeatureName feature) {
-                return enabled_.count(feature) != 0;
-            }
-
-            // interop::GPUSupportedFeatures compliance
-            bool has(Napi::Env, std::string name) override {
-                interop::GPUFeatureName feature;
-                if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
-                    return has(feature);
-                }
-                return false;
-            }
-            std::vector<std::string> keys(Napi::Env) override {
-                std::vector<std::string> out;
-                out.reserve(enabled_.size());
-                for (auto feature : enabled_) {
-                    out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
-                }
-                return out;
-            }
-
-          private:
-            std::unordered_set<interop::GPUFeatureName> enabled_;
-        };
-
-    }  // namespace
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUAdapter
-    // TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUAdapter::GPUAdapter(dawn::native::Adapter a, const Flags& flags)
-        : adapter_(a), flags_(flags) {
-    }
-
-    std::string GPUAdapter::getName(Napi::Env) {
-        return "dawn-adapter";
-    }
-
-    interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
-        return interop::GPUSupportedFeatures::Create<Features>(env,
-                                                               adapter_.GetAdapterProperties());
-    }
-
-    interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
-        WGPUSupportedLimits limits{};
-        if (!adapter_.GetLimits(&limits)) {
-            Napi::Error::New(env, "failed to get adapter limits").ThrowAsJavaScriptException();
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::binding::<anon>::Features
+// Implements interop::GPUSupportedFeatures
+////////////////////////////////////////////////////////////////////////////////
+class Features : public interop::GPUSupportedFeatures {
+  public:
+    explicit Features(WGPUDeviceProperties properties) {
+        if (properties.depth24UnormStencil8) {
+            enabled_.emplace(interop::GPUFeatureName::kDepth24UnormStencil8);
+        }
+        if (properties.depth32FloatStencil8) {
+            enabled_.emplace(interop::GPUFeatureName::kDepth32FloatStencil8);
+        }
+        if (properties.timestampQuery) {
+            enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
+        }
+        if (properties.textureCompressionBC) {
+            enabled_.emplace(interop::GPUFeatureName::kTextureCompressionBc);
+        }
+        if (properties.textureCompressionETC2) {
+            enabled_.emplace(interop::GPUFeatureName::kTextureCompressionEtc2);
+        }
+        if (properties.textureCompressionASTC) {
+            enabled_.emplace(interop::GPUFeatureName::kTextureCompressionAstc);
+        }
+        if (properties.timestampQuery) {
+            enabled_.emplace(interop::GPUFeatureName::kTimestampQuery);
         }
 
-        wgpu::SupportedLimits wgpuLimits{};
+        // TODO(dawn:1123) add support for these extensions when possible.
+        // wgpu::interop::GPUFeatureName::kIndirectFirstInstance
+        // wgpu::interop::GPUFeatureName::kDepthClipControl
+    }
+
+    bool has(interop::GPUFeatureName feature) { return enabled_.count(feature) != 0; }
+
+    // interop::GPUSupportedFeatures compliance
+    bool has(Napi::Env, std::string name) override {
+        interop::GPUFeatureName feature;
+        if (interop::Converter<interop::GPUFeatureName>::FromString(name, feature)) {
+            return has(feature);
+        }
+        return false;
+    }
+    std::vector<std::string> keys(Napi::Env) override {
+        std::vector<std::string> out;
+        out.reserve(enabled_.size());
+        for (auto feature : enabled_) {
+            out.push_back(interop::Converter<interop::GPUFeatureName>::ToString(feature));
+        }
+        return out;
+    }
+
+  private:
+    std::unordered_set<interop::GPUFeatureName> enabled_;
+};
+
+}  // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUAdapter
+// TODO(crbug.com/dawn/1133): This is a stub implementation. Properly implement.
+////////////////////////////////////////////////////////////////////////////////
+GPUAdapter::GPUAdapter(dawn::native::Adapter a, const Flags& flags) : adapter_(a), flags_(flags) {}
+
+std::string GPUAdapter::getName(Napi::Env) {
+    return "dawn-adapter";
+}
+
+interop::Interface<interop::GPUSupportedFeatures> GPUAdapter::getFeatures(Napi::Env env) {
+    return interop::GPUSupportedFeatures::Create<Features>(env, adapter_.GetAdapterProperties());
+}
+
+interop::Interface<interop::GPUSupportedLimits> GPUAdapter::getLimits(Napi::Env env) {
+    WGPUSupportedLimits limits{};
+    if (!adapter_.GetLimits(&limits)) {
+        Napi::Error::New(env, "failed to get adapter limits").ThrowAsJavaScriptException();
+    }
+
+    wgpu::SupportedLimits wgpuLimits{};
 
 #define COPY_LIMIT(LIMIT) wgpuLimits.limits.LIMIT = limits.limits.LIMIT;
-        FOR_EACH_LIMIT(COPY_LIMIT)
+    FOR_EACH_LIMIT(COPY_LIMIT)
 #undef COPY_LIMIT
 
-        return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, wgpuLimits);
-    }
+    return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, wgpuLimits);
+}
 
-    bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+bool GPUAdapter::getIsFallbackAdapter(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
-        Napi::Env env,
-        interop::GPUDeviceDescriptor descriptor) {
-        wgpu::DeviceDescriptor desc{};  // TODO(crbug.com/dawn/1133): Fill in.
-        interop::Promise<interop::Interface<interop::GPUDevice>> promise(env, PROMISE_INFO);
+interop::Promise<interop::Interface<interop::GPUDevice>> GPUAdapter::requestDevice(
+    Napi::Env env,
+    interop::GPUDeviceDescriptor descriptor) {
+    wgpu::DeviceDescriptor desc{};  // TODO(crbug.com/dawn/1133): Fill in.
+    interop::Promise<interop::Interface<interop::GPUDevice>> promise(env, PROMISE_INFO);
 
-        std::vector<wgpu::FeatureName> requiredFeatures;
-        // See src/dawn/native/Features.cpp for enum <-> string mappings.
-        for (auto required : descriptor.requiredFeatures) {
-            switch (required) {
-                case interop::GPUFeatureName::kTextureCompressionBc:
-                    requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionBC);
-                    continue;
-                case interop::GPUFeatureName::kTextureCompressionEtc2:
-                    requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionETC2);
-                    continue;
-                case interop::GPUFeatureName::kTextureCompressionAstc:
-                    requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionASTC);
-                    continue;
-                case interop::GPUFeatureName::kTimestampQuery:
-                    requiredFeatures.emplace_back(wgpu::FeatureName::TimestampQuery);
-                    continue;
-                case interop::GPUFeatureName::kDepth24UnormStencil8:
-                    requiredFeatures.emplace_back(wgpu::FeatureName::Depth24UnormStencil8);
-                    continue;
-                case interop::GPUFeatureName::kDepth32FloatStencil8:
-                    requiredFeatures.emplace_back(wgpu::FeatureName::Depth32FloatStencil8);
-                    continue;
-                case interop::GPUFeatureName::kDepthClipControl:
-                case interop::GPUFeatureName::kIndirectFirstInstance:
-                    // TODO(dawn:1123) Add support for these extensions when possible.
-                    continue;
-            }
-            UNIMPLEMENTED("required: ", required);
+    std::vector<wgpu::FeatureName> requiredFeatures;
+    // See src/dawn/native/Features.cpp for enum <-> string mappings.
+    for (auto required : descriptor.requiredFeatures) {
+        switch (required) {
+            case interop::GPUFeatureName::kTextureCompressionBc:
+                requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionBC);
+                continue;
+            case interop::GPUFeatureName::kTextureCompressionEtc2:
+                requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionETC2);
+                continue;
+            case interop::GPUFeatureName::kTextureCompressionAstc:
+                requiredFeatures.emplace_back(wgpu::FeatureName::TextureCompressionASTC);
+                continue;
+            case interop::GPUFeatureName::kTimestampQuery:
+                requiredFeatures.emplace_back(wgpu::FeatureName::TimestampQuery);
+                continue;
+            case interop::GPUFeatureName::kDepth24UnormStencil8:
+                requiredFeatures.emplace_back(wgpu::FeatureName::Depth24UnormStencil8);
+                continue;
+            case interop::GPUFeatureName::kDepth32FloatStencil8:
+                requiredFeatures.emplace_back(wgpu::FeatureName::Depth32FloatStencil8);
+                continue;
+            case interop::GPUFeatureName::kDepthClipControl:
+            case interop::GPUFeatureName::kIndirectFirstInstance:
+                // TODO(dawn:1123) Add support for these extensions when possible.
+                continue;
         }
+        UNIMPLEMENTED("required: ", required);
+    }
 
-        wgpu::RequiredLimits limits;
+    wgpu::RequiredLimits limits;
 #define COPY_LIMIT(LIMIT)                                        \
     if (descriptor.requiredLimits.count(#LIMIT)) {               \
         limits.limits.LIMIT = descriptor.requiredLimits[#LIMIT]; \
         descriptor.requiredLimits.erase(#LIMIT);                 \
     }
-        FOR_EACH_LIMIT(COPY_LIMIT)
+    FOR_EACH_LIMIT(COPY_LIMIT)
 #undef COPY_LIMIT
 
-        for (auto [key, _] : descriptor.requiredLimits) {
-            promise.Reject(binding::Errors::OperationError(env, "Unknown limit \"" + key + "\""));
-            return promise;
-        }
-
-        // Propogate enabled/disabled dawn features
-        // Note: DawnDeviceTogglesDescriptor::forceEnabledToggles and forceDisabledToggles are
-        // vectors of 'const char*', so we make sure the parsed strings survive the CreateDevice()
-        // call by storing them on the stack.
-        std::vector<std::string> enabledToggles;
-        std::vector<std::string> disabledToggles;
-        std::vector<const char*> forceEnabledToggles;
-        std::vector<const char*> forceDisabledToggles;
-        if (auto values = flags_.Get("enable-dawn-features")) {
-            enabledToggles = Split(*values, ',');
-            for (auto& t : enabledToggles) {
-                forceEnabledToggles.emplace_back(t.c_str());
-            }
-        }
-        if (auto values = flags_.Get("disable-dawn-features")) {
-            disabledToggles = Split(*values, ',');
-            for (auto& t : disabledToggles) {
-                forceDisabledToggles.emplace_back(t.c_str());
-            }
-        }
-
-        desc.requiredFeaturesCount = requiredFeatures.size();
-        desc.requiredFeatures = requiredFeatures.data();
-        desc.requiredLimits = &limits;
-
-        DawnTogglesDeviceDescriptor togglesDesc = {};
-        desc.nextInChain = &togglesDesc;
-        togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
-        togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
-        togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
-        togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
-
-        auto wgpu_device = adapter_.CreateDevice(&desc);
-        if (wgpu_device) {
-            promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
-        } else {
-            promise.Reject(binding::Errors::OperationError(env, "failed to create device"));
-        }
+    for (auto [key, _] : descriptor.requiredLimits) {
+        promise.Reject(binding::Errors::OperationError(env, "Unknown limit \"" + key + "\""));
         return promise;
     }
+
+    // Propagate enabled/disabled dawn features
+    // Note: DawnDeviceTogglesDescriptor::forceEnabledToggles and forceDisabledToggles are
+    // vectors of 'const char*', so we make sure the parsed strings survive the CreateDevice()
+    // call by storing them on the stack.
+    std::vector<std::string> enabledToggles;
+    std::vector<std::string> disabledToggles;
+    std::vector<const char*> forceEnabledToggles;
+    std::vector<const char*> forceDisabledToggles;
+    if (auto values = flags_.Get("enable-dawn-features")) {
+        enabledToggles = Split(*values, ',');
+        for (auto& t : enabledToggles) {
+            forceEnabledToggles.emplace_back(t.c_str());
+        }
+    }
+    if (auto values = flags_.Get("disable-dawn-features")) {
+        disabledToggles = Split(*values, ',');
+        for (auto& t : disabledToggles) {
+            forceDisabledToggles.emplace_back(t.c_str());
+        }
+    }
+
+    desc.requiredFeaturesCount = requiredFeatures.size();
+    desc.requiredFeatures = requiredFeatures.data();
+    desc.requiredLimits = &limits;
+
+    DawnTogglesDeviceDescriptor togglesDesc = {};
+    desc.nextInChain = &togglesDesc;
+    togglesDesc.forceEnabledTogglesCount = forceEnabledToggles.size();
+    togglesDesc.forceEnabledToggles = forceEnabledToggles.data();
+    togglesDesc.forceDisabledTogglesCount = forceDisabledToggles.size();
+    togglesDesc.forceDisabledToggles = forceDisabledToggles.data();
+
+    auto wgpu_device = adapter_.CreateDevice(&desc);
+    if (wgpu_device) {
+        promise.Resolve(interop::GPUDevice::Create<GPUDevice>(env, env, wgpu_device));
+    } else {
+        promise.Reject(binding::Errors::OperationError(env, "failed to create device"));
+    }
+    return promise;
+}
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUAdapter.h b/src/dawn/node/binding/GPUAdapter.h
index e3ea737..3249232e 100644
--- a/src/dawn/node/binding/GPUAdapter.h
+++ b/src/dawn/node/binding/GPUAdapter.h
@@ -23,26 +23,26 @@
 #include "src/dawn/node/interop/WebGPU.h"
 
 namespace wgpu::binding {
-    class Flags;
+class Flags;
 
-    // GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn::native::Adapter.
-    class GPUAdapter final : public interop::GPUAdapter {
-      public:
-        GPUAdapter(dawn::native::Adapter a, const Flags& flags);
+// GPUAdapter is an implementation of interop::GPUAdapter that wraps a dawn::native::Adapter.
+class GPUAdapter final : public interop::GPUAdapter {
+  public:
+    GPUAdapter(dawn::native::Adapter a, const Flags& flags);
 
-        // interop::GPUAdapter interface compliance
-        std::string getName(Napi::Env) override;
-        interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
-        interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
-        bool getIsFallbackAdapter(Napi::Env) override;
-        interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
-            Napi::Env env,
-            interop::GPUDeviceDescriptor descriptor) override;
+    // interop::GPUAdapter interface compliance
+    std::string getName(Napi::Env) override;
+    interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+    interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+    bool getIsFallbackAdapter(Napi::Env) override;
+    interop::Promise<interop::Interface<interop::GPUDevice>> requestDevice(
+        Napi::Env env,
+        interop::GPUDeviceDescriptor descriptor) override;
 
-      private:
-        dawn::native::Adapter adapter_;
-        const Flags& flags_;
-    };
+  private:
+    dawn::native::Adapter adapter_;
+    const Flags& flags_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUBindGroup.cpp b/src/dawn/node/binding/GPUBindGroup.cpp
index 20b87d7..7b379b6 100644
--- a/src/dawn/node/binding/GPUBindGroup.cpp
+++ b/src/dawn/node/binding/GPUBindGroup.cpp
@@ -20,19 +20,17 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUBindGroup
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUBindGroup
+////////////////////////////////////////////////////////////////////////////////
+GPUBindGroup::GPUBindGroup(wgpu::BindGroup group) : group_(std::move(group)) {}
 
-    std::variant<std::string, interop::UndefinedType> GPUBindGroup::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUBindGroup::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUBindGroup::setLabel(Napi::Env,
-                                std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUBindGroup::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUBindGroup.h b/src/dawn/node/binding/GPUBindGroup.h
index ebd40cd..a8e4c0f 100644
--- a/src/dawn/node/binding/GPUBindGroup.h
+++ b/src/dawn/node/binding/GPUBindGroup.h
@@ -24,23 +24,21 @@
 
 namespace wgpu::binding {
 
-    // GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
-    class GPUBindGroup final : public interop::GPUBindGroup {
-      public:
-        explicit GPUBindGroup(wgpu::BindGroup group);
+// GPUBindGroup is an implementation of interop::GPUBindGroup that wraps a wgpu::BindGroup.
+class GPUBindGroup final : public interop::GPUBindGroup {
+  public:
+    explicit GPUBindGroup(wgpu::BindGroup group);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::BindGroup&() const {
-            return group_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::BindGroup&() const { return group_; }
 
-        // interop::GPUBindGroup interface compliance
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUBindGroup interface compliance
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::BindGroup group_;
-    };
+  private:
+    wgpu::BindGroup group_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUBindGroupLayout.cpp b/src/dawn/node/binding/GPUBindGroupLayout.cpp
index 64f0ef9..84c519a 100644
--- a/src/dawn/node/binding/GPUBindGroupLayout.cpp
+++ b/src/dawn/node/binding/GPUBindGroupLayout.cpp
@@ -20,20 +20,18 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUBindGroupLayout
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout)
-        : layout_(std::move(layout)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUBindGroupLayout
+////////////////////////////////////////////////////////////////////////////////
+GPUBindGroupLayout::GPUBindGroupLayout(wgpu::BindGroupLayout layout) : layout_(std::move(layout)) {}
 
-    std::variant<std::string, interop::UndefinedType> GPUBindGroupLayout::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUBindGroupLayout::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUBindGroupLayout::setLabel(Napi::Env,
-                                      std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUBindGroupLayout::setLabel(Napi::Env,
+                                  std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUBindGroupLayout.h b/src/dawn/node/binding/GPUBindGroupLayout.h
index 3d8855d..484b1bd 100644
--- a/src/dawn/node/binding/GPUBindGroupLayout.h
+++ b/src/dawn/node/binding/GPUBindGroupLayout.h
@@ -24,24 +24,22 @@
 
 namespace wgpu::binding {
 
-    // GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
-    // wgpu::BindGroupLayout.
-    class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
-      public:
-        explicit GPUBindGroupLayout(wgpu::BindGroupLayout layout);
+// GPUBindGroupLayout is an implementation of interop::GPUBindGroupLayout that wraps a
+// wgpu::BindGroupLayout.
+class GPUBindGroupLayout final : public interop::GPUBindGroupLayout {
+  public:
+    explicit GPUBindGroupLayout(wgpu::BindGroupLayout layout);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::BindGroupLayout&() const {
-            return layout_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::BindGroupLayout&() const { return layout_; }
 
-        // interop::GPUBindGroupLayout interface compliance
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUBindGroupLayout interface compliance
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::BindGroupLayout layout_;
-    };
+  private:
+    wgpu::BindGroupLayout layout_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUBuffer.cpp b/src/dawn/node/binding/GPUBuffer.cpp
index 2fd330c..d9054a5 100644
--- a/src/dawn/node/binding/GPUBuffer.cpp
+++ b/src/dawn/node/binding/GPUBuffer.cpp
@@ -23,158 +23,158 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUBuffer
-    // TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once CTS is
-    // robustly passing, pull out validation and see what / if breaks.
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
-                         wgpu::BufferDescriptor desc,
-                         wgpu::Device device,
-                         std::shared_ptr<AsyncRunner> async)
-        : buffer_(std::move(buffer)),
-          desc_(desc),
-          device_(std::move(device)),
-          async_(std::move(async)) {
-        if (desc.mappedAtCreation) {
-            state_ = State::MappedAtCreation;
-        }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUBuffer
+// TODO(crbug.com/dawn/1134): We may be doing more validation here than necessary. Once CTS is
+// robustly passing, pull out validation and see what / if breaks.
+////////////////////////////////////////////////////////////////////////////////
+GPUBuffer::GPUBuffer(wgpu::Buffer buffer,
+                     wgpu::BufferDescriptor desc,
+                     wgpu::Device device,
+                     std::shared_ptr<AsyncRunner> async)
+    : buffer_(std::move(buffer)),
+      desc_(desc),
+      device_(std::move(device)),
+      async_(std::move(async)) {
+    if (desc.mappedAtCreation) {
+        state_ = State::MappedAtCreation;
     }
+}
 
-    interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
-                                               interop::GPUMapModeFlags mode,
-                                               interop::GPUSize64 offset,
-                                               std::optional<interop::GPUSize64> size) {
-        wgpu::MapMode md{};
-        Converter conv(env);
-        if (!conv(md, mode)) {
-            interop::Promise<void> promise(env, PROMISE_INFO);
-            promise.Reject(Errors::OperationError(env));
-            return promise;
-        }
-
-        if (state_ != State::Unmapped) {
-            interop::Promise<void> promise(env, PROMISE_INFO);
-            promise.Reject(Errors::OperationError(env));
-            device_.InjectError(wgpu::ErrorType::Validation,
-                                "mapAsync called on buffer that is not in the unmapped state");
-            return promise;
-        }
-
-        struct Context {
-            Napi::Env env;
-            interop::Promise<void> promise;
-            AsyncTask task;
-            State& state;
-        };
-        auto ctx =
-            new Context{env, interop::Promise<void>(env, PROMISE_INFO), AsyncTask(async_), state_};
-        auto promise = ctx->promise;
-
-        uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
-
-        state_ = State::MappingPending;
-
-        buffer_.MapAsync(
-            md, offset, s,
-            [](WGPUBufferMapAsyncStatus status, void* userdata) {
-                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-                c->state = State::Unmapped;
-                switch (status) {
-                    case WGPUBufferMapAsyncStatus_Force32:
-                        UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
-                        break;
-                    case WGPUBufferMapAsyncStatus_Success:
-                        c->promise.Resolve();
-                        c->state = State::Mapped;
-                        break;
-                    case WGPUBufferMapAsyncStatus_Error:
-                        c->promise.Reject(Errors::OperationError(c->env));
-                        break;
-                    case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
-                    case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
-                        c->promise.Reject(Errors::AbortError(c->env));
-                        break;
-                    case WGPUBufferMapAsyncStatus_Unknown:
-                    case WGPUBufferMapAsyncStatus_DeviceLost:
-                        // TODO(dawn:1123): The spec is a bit vague around what the promise should
-                        // do here.
-                        c->promise.Reject(Errors::UnknownError(c->env));
-                        break;
-                }
-            },
-            ctx);
-
+interop::Promise<void> GPUBuffer::mapAsync(Napi::Env env,
+                                           interop::GPUMapModeFlags mode,
+                                           interop::GPUSize64 offset,
+                                           std::optional<interop::GPUSize64> size) {
+    wgpu::MapMode md{};
+    Converter conv(env);
+    if (!conv(md, mode)) {
+        interop::Promise<void> promise(env, PROMISE_INFO);
+        promise.Reject(Errors::OperationError(env));
         return promise;
     }
 
-    interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
-                                                   interop::GPUSize64 offset,
-                                                   std::optional<interop::GPUSize64> size) {
-        if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
-            Errors::OperationError(env).ThrowAsJavaScriptException();
-            return {};
-        }
+    if (state_ != State::Unmapped) {
+        interop::Promise<void> promise(env, PROMISE_INFO);
+        promise.Reject(Errors::OperationError(env));
+        device_.InjectError(wgpu::ErrorType::Validation,
+                            "mapAsync called on buffer that is not in the unmapped state");
+        return promise;
+    }
 
-        uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
+    struct Context {
+        Napi::Env env;
+        interop::Promise<void> promise;
+        AsyncTask task;
+        State& state;
+    };
+    auto ctx =
+        new Context{env, interop::Promise<void>(env, PROMISE_INFO), AsyncTask(async_), state_};
+    auto promise = ctx->promise;
 
-        uint64_t start = offset;
-        uint64_t end = offset + s;
-        for (auto& mapping : mapped_) {
-            if (mapping.Intersects(start, end)) {
-                Errors::OperationError(env).ThrowAsJavaScriptException();
-                return {};
+    uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
+
+    state_ = State::MappingPending;
+
+    buffer_.MapAsync(
+        md, offset, s,
+        [](WGPUBufferMapAsyncStatus status, void* userdata) {
+            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+            c->state = State::Unmapped;
+            switch (status) {
+                case WGPUBufferMapAsyncStatus_Force32:
+                    UNREACHABLE("WGPUBufferMapAsyncStatus_Force32");
+                    break;
+                case WGPUBufferMapAsyncStatus_Success:
+                    c->promise.Resolve();
+                    c->state = State::Mapped;
+                    break;
+                case WGPUBufferMapAsyncStatus_Error:
+                    c->promise.Reject(Errors::OperationError(c->env));
+                    break;
+                case WGPUBufferMapAsyncStatus_UnmappedBeforeCallback:
+                case WGPUBufferMapAsyncStatus_DestroyedBeforeCallback:
+                    c->promise.Reject(Errors::AbortError(c->env));
+                    break;
+                case WGPUBufferMapAsyncStatus_Unknown:
+                case WGPUBufferMapAsyncStatus_DeviceLost:
+                    // TODO(dawn:1123): The spec is a bit vague around what the promise should
+                    // do here.
+                    c->promise.Reject(Errors::UnknownError(c->env));
+                    break;
             }
-        }
+        },
+        ctx);
 
-        auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
-                        ? buffer_.GetMappedRange(offset, s)
-                        : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
-        if (!ptr) {
+    return promise;
+}
+
+interop::ArrayBuffer GPUBuffer::getMappedRange(Napi::Env env,
+                                               interop::GPUSize64 offset,
+                                               std::optional<interop::GPUSize64> size) {
+    if (state_ != State::Mapped && state_ != State::MappedAtCreation) {
+        Errors::OperationError(env).ThrowAsJavaScriptException();
+        return {};
+    }
+
+    uint64_t s = size.has_value() ? size.value().value : (desc_.size - offset);
+
+    uint64_t start = offset;
+    uint64_t end = offset + s;
+    for (auto& mapping : mapped_) {
+        if (mapping.Intersects(start, end)) {
             Errors::OperationError(env).ThrowAsJavaScriptException();
             return {};
         }
-        auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
-        // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
-        mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
-        return array_buffer;
     }
 
-    void GPUBuffer::unmap(Napi::Env env) {
-        buffer_.Unmap();
+    auto* ptr = (desc_.usage & wgpu::BufferUsage::MapWrite)
+                    ? buffer_.GetMappedRange(offset, s)
+                    : const_cast<void*>(buffer_.GetConstMappedRange(offset, s));
+    if (!ptr) {
+        Errors::OperationError(env).ThrowAsJavaScriptException();
+        return {};
+    }
+    auto array_buffer = Napi::ArrayBuffer::New(env, ptr, s);
+    // TODO(crbug.com/dawn/1135): Ownership here is the wrong way around.
+    mapped_.emplace_back(Mapping{start, end, Napi::Persistent(array_buffer)});
+    return array_buffer;
+}
 
-        if (state_ != State::Destroyed && state_ != State::Unmapped) {
-            DetachMappings();
-            state_ = State::Unmapped;
-        }
+void GPUBuffer::unmap(Napi::Env env) {
+    buffer_.Unmap();
+
+    if (state_ != State::Destroyed && state_ != State::Unmapped) {
+        DetachMappings();
+        state_ = State::Unmapped;
+    }
+}
+
+void GPUBuffer::destroy(Napi::Env) {
+    if (state_ == State::Destroyed) {
+        return;
     }
 
-    void GPUBuffer::destroy(Napi::Env) {
-        if (state_ == State::Destroyed) {
-            return;
-        }
-
-        if (state_ != State::Unmapped) {
-            DetachMappings();
-        }
-
-        buffer_.Destroy();
-        state_ = State::Destroyed;
+    if (state_ != State::Unmapped) {
+        DetachMappings();
     }
 
-    void GPUBuffer::DetachMappings() {
-        for (auto& mapping : mapped_) {
-            mapping.buffer.Value().Detach();
-        }
-        mapped_.clear();
-    }
+    buffer_.Destroy();
+    state_ = State::Destroyed;
+}
 
-    std::variant<std::string, interop::UndefinedType> GPUBuffer::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
+void GPUBuffer::DetachMappings() {
+    for (auto& mapping : mapped_) {
+        mapping.buffer.Value().Detach();
     }
+    mapped_.clear();
+}
 
-    void GPUBuffer::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUBuffer::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
+
+void GPUBuffer::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUBuffer.h b/src/dawn/node/binding/GPUBuffer.h
index e62900c..7f96eeb 100644
--- a/src/dawn/node/binding/GPUBuffer.h
+++ b/src/dawn/node/binding/GPUBuffer.h
@@ -27,66 +27,60 @@
 
 namespace wgpu::binding {
 
-    // GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
-    class GPUBuffer final : public interop::GPUBuffer {
-      public:
-        GPUBuffer(wgpu::Buffer buffer,
-                  wgpu::BufferDescriptor desc,
-                  wgpu::Device device,
-                  std::shared_ptr<AsyncRunner> async);
+// GPUBuffer is an implementation of interop::GPUBuffer that wraps a wgpu::Buffer.
+class GPUBuffer final : public interop::GPUBuffer {
+  public:
+    GPUBuffer(wgpu::Buffer buffer,
+              wgpu::BufferDescriptor desc,
+              wgpu::Device device,
+              std::shared_ptr<AsyncRunner> async);
 
-        // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
-        const wgpu::BufferDescriptor& Desc() const {
-            return desc_;
-        }
+    // Desc() returns the wgpu::BufferDescriptor used to construct the buffer
+    const wgpu::BufferDescriptor& Desc() const { return desc_; }
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::Buffer&() const {
-            return buffer_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::Buffer&() const { return buffer_; }
 
-        // interop::GPUBuffer interface compliance
-        interop::Promise<void> mapAsync(Napi::Env env,
-                                        interop::GPUMapModeFlags mode,
+    // interop::GPUBuffer interface compliance
+    interop::Promise<void> mapAsync(Napi::Env env,
+                                    interop::GPUMapModeFlags mode,
+                                    interop::GPUSize64 offset,
+                                    std::optional<interop::GPUSize64> size) override;
+    interop::ArrayBuffer getMappedRange(Napi::Env env,
                                         interop::GPUSize64 offset,
                                         std::optional<interop::GPUSize64> size) override;
-        interop::ArrayBuffer getMappedRange(Napi::Env env,
-                                            interop::GPUSize64 offset,
-                                            std::optional<interop::GPUSize64> size) override;
-        void unmap(Napi::Env) override;
-        void destroy(Napi::Env) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    void unmap(Napi::Env) override;
+    void destroy(Napi::Env) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        void DetachMappings();
+  private:
+    void DetachMappings();
 
-        struct Mapping {
-            uint64_t start;
-            uint64_t end;
-            inline bool Intersects(uint64_t s, uint64_t e) const {
-                return s < end && e > start;
-            }
-            Napi::Reference<interop::ArrayBuffer> buffer;
-        };
-
-        // https://www.w3.org/TR/webgpu/#buffer-interface
-        enum class State {
-            Unmapped,
-            Mapped,
-            MappedAtCreation,
-            MappingPending,
-            Destroyed,
-        };
-
-        wgpu::Buffer buffer_;
-        wgpu::BufferDescriptor const desc_;
-        wgpu::Device const device_;
-        std::shared_ptr<AsyncRunner> async_;
-        State state_ = State::Unmapped;
-        std::vector<Mapping> mapped_;
+    struct Mapping {
+        uint64_t start;
+        uint64_t end;
+        inline bool Intersects(uint64_t s, uint64_t e) const { return s < end && e > start; }
+        Napi::Reference<interop::ArrayBuffer> buffer;
     };
 
+    // https://www.w3.org/TR/webgpu/#buffer-interface
+    enum class State {
+        Unmapped,
+        Mapped,
+        MappedAtCreation,
+        MappingPending,
+        Destroyed,
+    };
+
+    wgpu::Buffer buffer_;
+    wgpu::BufferDescriptor const desc_;
+    wgpu::Device const device_;
+    std::shared_ptr<AsyncRunner> async_;
+    State state_ = State::Unmapped;
+    std::vector<Mapping> mapped_;
+};
+
 }  // namespace wgpu::binding
 
 #endif  // SRC_DAWN_NODE_BINDING_GPUBUFFER_H_
diff --git a/src/dawn/node/binding/GPUCommandBuffer.cpp b/src/dawn/node/binding/GPUCommandBuffer.cpp
index 5ee35a3..7aa58bd 100644
--- a/src/dawn/node/binding/GPUCommandBuffer.cpp
+++ b/src/dawn/node/binding/GPUCommandBuffer.cpp
@@ -20,20 +20,19 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUCommandBuffer
-    ////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUCommandBuffer
+////////////////////////////////////////////////////////////////////////////////
 
-    GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {
-    }
+GPUCommandBuffer::GPUCommandBuffer(wgpu::CommandBuffer cmd_buf) : cmd_buf_(std::move(cmd_buf)) {}
 
-    std::variant<std::string, interop::UndefinedType> GPUCommandBuffer::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUCommandBuffer::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUCommandBuffer::setLabel(Napi::Env,
-                                    std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUCommandBuffer::setLabel(Napi::Env,
+                                std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUCommandBuffer.h b/src/dawn/node/binding/GPUCommandBuffer.h
index ce4fee4..febb2af 100644
--- a/src/dawn/node/binding/GPUCommandBuffer.h
+++ b/src/dawn/node/binding/GPUCommandBuffer.h
@@ -24,24 +24,22 @@
 
 namespace wgpu::binding {
 
-    // GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
-    // wgpu::CommandBuffer.
-    class GPUCommandBuffer final : public interop::GPUCommandBuffer {
-      public:
-        explicit GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
+// GPUCommandBuffer is an implementation of interop::GPUCommandBuffer that wraps a
+// wgpu::CommandBuffer.
+class GPUCommandBuffer final : public interop::GPUCommandBuffer {
+  public:
+    explicit GPUCommandBuffer(wgpu::CommandBuffer cmd_buf);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::CommandBuffer&() const {
-            return cmd_buf_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::CommandBuffer&() const { return cmd_buf_; }
 
-        // interop::GPUCommandBuffer interface compliance
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUCommandBuffer interface compliance
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::CommandBuffer cmd_buf_;
-    };
+  private:
+    wgpu::CommandBuffer cmd_buf_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUCommandEncoder.cpp b/src/dawn/node/binding/GPUCommandEncoder.cpp
index 90d8d6c..6d403bc 100644
--- a/src/dawn/node/binding/GPUCommandEncoder.cpp
+++ b/src/dawn/node/binding/GPUCommandEncoder.cpp
@@ -28,191 +28,190 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUCommandEncoder
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUCommandEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPUCommandEncoder::GPUCommandEncoder(wgpu::CommandEncoder enc) : enc_(std::move(enc)) {}
+
+interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
+    Napi::Env env,
+    interop::GPURenderPassDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::RenderPassDescriptor desc{};
+    // TODO(dawn:1250) handle timestampWrites
+    if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
+        !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
+        !conv(desc.label, descriptor.label) ||
+        !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
+        return {};
     }
 
-    interop::Interface<interop::GPURenderPassEncoder> GPUCommandEncoder::beginRenderPass(
-        Napi::Env env,
-        interop::GPURenderPassDescriptor descriptor) {
-        Converter conv(env);
+    return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(env,
+                                                                       enc_.BeginRenderPass(&desc));
+}
 
-        wgpu::RenderPassDescriptor desc{};
-        // TODO(dawn:1250) handle timestampWrites
-        if (!conv(desc.colorAttachments, desc.colorAttachmentCount, descriptor.colorAttachments) ||
-            !conv(desc.depthStencilAttachment, descriptor.depthStencilAttachment) ||
-            !conv(desc.label, descriptor.label) ||
-            !conv(desc.occlusionQuerySet, descriptor.occlusionQuerySet)) {
-            return {};
-        }
+interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
+    Napi::Env env,
+    interop::GPUComputePassDescriptor descriptor) {
+    wgpu::ComputePassDescriptor desc{};
+    // TODO(dawn:1250) handle timestampWrites
+    return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
+        env, enc_.BeginComputePass(&desc));
+}
 
-        return interop::GPURenderPassEncoder::Create<GPURenderPassEncoder>(
-            env, enc_.BeginRenderPass(&desc));
+void GPUCommandEncoder::clearBuffer(Napi::Env env,
+                                    interop::Interface<interop::GPUBuffer> buffer,
+                                    interop::GPUSize64 offset,
+                                    std::optional<interop::GPUSize64> size) {
+    Converter conv(env);
+
+    wgpu::Buffer b{};
+    uint64_t s = wgpu::kWholeSize;
+    if (!conv(b, buffer) ||  //
+        !conv(s, size)) {
+        return;
     }
 
-    interop::Interface<interop::GPUComputePassEncoder> GPUCommandEncoder::beginComputePass(
-        Napi::Env env,
-        interop::GPUComputePassDescriptor descriptor) {
-        wgpu::ComputePassDescriptor desc{};
-        // TODO(dawn:1250) handle timestampWrites
-        return interop::GPUComputePassEncoder::Create<GPUComputePassEncoder>(
-            env, enc_.BeginComputePass(&desc));
+    enc_.ClearBuffer(b, offset, s);
+}
+
+void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
+                                           interop::Interface<interop::GPUBuffer> source,
+                                           interop::GPUSize64 sourceOffset,
+                                           interop::Interface<interop::GPUBuffer> destination,
+                                           interop::GPUSize64 destinationOffset,
+                                           interop::GPUSize64 size) {
+    Converter conv(env);
+
+    wgpu::Buffer src{};
+    wgpu::Buffer dst{};
+    if (!conv(src, source) ||  //
+        !conv(dst, destination)) {
+        return;
     }
 
-    void GPUCommandEncoder::clearBuffer(Napi::Env env,
-                                        interop::Interface<interop::GPUBuffer> buffer,
-                                        interop::GPUSize64 offset,
-                                        std::optional<interop::GPUSize64> size) {
-        Converter conv(env);
+    enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
+}
 
-        wgpu::Buffer b{};
-        uint64_t s = wgpu::kWholeSize;
-        if (!conv(b, buffer) ||  //
-            !conv(s, size)) {
-            return;
-        }
+void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
+                                            interop::GPUImageCopyBuffer source,
+                                            interop::GPUImageCopyTexture destination,
+                                            interop::GPUExtent3D copySize) {
+    Converter conv(env);
 
-        enc_.ClearBuffer(b, offset, s);
+    wgpu::ImageCopyBuffer src{};
+    wgpu::ImageCopyTexture dst{};
+    wgpu::Extent3D size{};
+    if (!conv(src, source) ||       //
+        !conv(dst, destination) ||  //
+        !conv(size, copySize)) {
+        return;
     }
 
-    void GPUCommandEncoder::copyBufferToBuffer(Napi::Env env,
-                                               interop::Interface<interop::GPUBuffer> source,
-                                               interop::GPUSize64 sourceOffset,
-                                               interop::Interface<interop::GPUBuffer> destination,
-                                               interop::GPUSize64 destinationOffset,
-                                               interop::GPUSize64 size) {
-        Converter conv(env);
+    enc_.CopyBufferToTexture(&src, &dst, &size);
+}
 
-        wgpu::Buffer src{};
-        wgpu::Buffer dst{};
-        if (!conv(src, source) ||  //
-            !conv(dst, destination)) {
-            return;
-        }
+void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
+                                            interop::GPUImageCopyTexture source,
+                                            interop::GPUImageCopyBuffer destination,
+                                            interop::GPUExtent3D copySize) {
+    Converter conv(env);
 
-        enc_.CopyBufferToBuffer(src, sourceOffset, dst, destinationOffset, size);
+    wgpu::ImageCopyTexture src{};
+    wgpu::ImageCopyBuffer dst{};
+    wgpu::Extent3D size{};
+    if (!conv(src, source) ||       //
+        !conv(dst, destination) ||  //
+        !conv(size, copySize)) {
+        return;
     }
 
-    void GPUCommandEncoder::copyBufferToTexture(Napi::Env env,
-                                                interop::GPUImageCopyBuffer source,
-                                                interop::GPUImageCopyTexture destination,
-                                                interop::GPUExtent3D copySize) {
-        Converter conv(env);
+    enc_.CopyTextureToBuffer(&src, &dst, &size);
+}
 
-        wgpu::ImageCopyBuffer src{};
-        wgpu::ImageCopyTexture dst{};
-        wgpu::Extent3D size{};
-        if (!conv(src, source) ||       //
-            !conv(dst, destination) ||  //
-            !conv(size, copySize)) {
-            return;
-        }
+void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
+                                             interop::GPUImageCopyTexture source,
+                                             interop::GPUImageCopyTexture destination,
+                                             interop::GPUExtent3D copySize) {
+    Converter conv(env);
 
-        enc_.CopyBufferToTexture(&src, &dst, &size);
+    wgpu::ImageCopyTexture src{};
+    wgpu::ImageCopyTexture dst{};
+    wgpu::Extent3D size{};
+    if (!conv(src, source) ||       //
+        !conv(dst, destination) ||  //
+        !conv(size, copySize)) {
+        return;
     }
 
-    void GPUCommandEncoder::copyTextureToBuffer(Napi::Env env,
-                                                interop::GPUImageCopyTexture source,
-                                                interop::GPUImageCopyBuffer destination,
-                                                interop::GPUExtent3D copySize) {
-        Converter conv(env);
+    enc_.CopyTextureToTexture(&src, &dst, &size);
+}
 
-        wgpu::ImageCopyTexture src{};
-        wgpu::ImageCopyBuffer dst{};
-        wgpu::Extent3D size{};
-        if (!conv(src, source) ||       //
-            !conv(dst, destination) ||  //
-            !conv(size, copySize)) {
-            return;
-        }
+void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+    enc_.PushDebugGroup(groupLabel.c_str());
+}
 
-        enc_.CopyTextureToBuffer(&src, &dst, &size);
+void GPUCommandEncoder::popDebugGroup(Napi::Env) {
+    enc_.PopDebugGroup();
+}
+
+void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+    enc_.InsertDebugMarker(markerLabel.c_str());
+}
+
+void GPUCommandEncoder::writeTimestamp(Napi::Env env,
+                                       interop::Interface<interop::GPUQuerySet> querySet,
+                                       interop::GPUSize32 queryIndex) {
+    Converter conv(env);
+
+    wgpu::QuerySet q{};
+    if (!conv(q, querySet)) {
+        return;
     }
 
-    void GPUCommandEncoder::copyTextureToTexture(Napi::Env env,
-                                                 interop::GPUImageCopyTexture source,
-                                                 interop::GPUImageCopyTexture destination,
-                                                 interop::GPUExtent3D copySize) {
-        Converter conv(env);
+    enc_.WriteTimestamp(q, queryIndex);
+}
 
-        wgpu::ImageCopyTexture src{};
-        wgpu::ImageCopyTexture dst{};
-        wgpu::Extent3D size{};
-        if (!conv(src, source) ||       //
-            !conv(dst, destination) ||  //
-            !conv(size, copySize)) {
-            return;
-        }
+void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
+                                        interop::Interface<interop::GPUQuerySet> querySet,
+                                        interop::GPUSize32 firstQuery,
+                                        interop::GPUSize32 queryCount,
+                                        interop::Interface<interop::GPUBuffer> destination,
+                                        interop::GPUSize64 destinationOffset) {
+    Converter conv(env);
 
-        enc_.CopyTextureToTexture(&src, &dst, &size);
+    wgpu::QuerySet q{};
+    uint32_t f = 0;
+    uint32_t c = 0;
+    wgpu::Buffer b{};
+    uint64_t o = 0;
+
+    if (!conv(q, querySet) ||     //
+        !conv(f, firstQuery) ||   //
+        !conv(c, queryCount) ||   //
+        !conv(b, destination) ||  //
+        !conv(o, destinationOffset)) {
+        return;
     }
 
-    void GPUCommandEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
-        enc_.PushDebugGroup(groupLabel.c_str());
-    }
+    enc_.ResolveQuerySet(q, f, c, b, o);
+}
 
-    void GPUCommandEncoder::popDebugGroup(Napi::Env) {
-        enc_.PopDebugGroup();
-    }
+interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
+    Napi::Env env,
+    interop::GPUCommandBufferDescriptor descriptor) {
+    wgpu::CommandBufferDescriptor desc{};
+    return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
+}
 
-    void GPUCommandEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
-        enc_.InsertDebugMarker(markerLabel.c_str());
-    }
+std::variant<std::string, interop::UndefinedType> GPUCommandEncoder::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUCommandEncoder::writeTimestamp(Napi::Env env,
-                                           interop::Interface<interop::GPUQuerySet> querySet,
-                                           interop::GPUSize32 queryIndex) {
-        Converter conv(env);
-
-        wgpu::QuerySet q{};
-        if (!conv(q, querySet)) {
-            return;
-        }
-
-        enc_.WriteTimestamp(q, queryIndex);
-    }
-
-    void GPUCommandEncoder::resolveQuerySet(Napi::Env env,
-                                            interop::Interface<interop::GPUQuerySet> querySet,
-                                            interop::GPUSize32 firstQuery,
-                                            interop::GPUSize32 queryCount,
-                                            interop::Interface<interop::GPUBuffer> destination,
-                                            interop::GPUSize64 destinationOffset) {
-        Converter conv(env);
-
-        wgpu::QuerySet q{};
-        uint32_t f = 0;
-        uint32_t c = 0;
-        wgpu::Buffer b{};
-        uint64_t o = 0;
-
-        if (!conv(q, querySet) ||     //
-            !conv(f, firstQuery) ||   //
-            !conv(c, queryCount) ||   //
-            !conv(b, destination) ||  //
-            !conv(o, destinationOffset)) {
-            return;
-        }
-
-        enc_.ResolveQuerySet(q, f, c, b, o);
-    }
-
-    interop::Interface<interop::GPUCommandBuffer> GPUCommandEncoder::finish(
-        Napi::Env env,
-        interop::GPUCommandBufferDescriptor descriptor) {
-        wgpu::CommandBufferDescriptor desc{};
-        return interop::GPUCommandBuffer::Create<GPUCommandBuffer>(env, enc_.Finish(&desc));
-    }
-
-    std::variant<std::string, interop::UndefinedType> GPUCommandEncoder::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
-
-    void GPUCommandEncoder::setLabel(Napi::Env,
-                                     std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUCommandEncoder::setLabel(Napi::Env,
+                                 std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUCommandEncoder.h b/src/dawn/node/binding/GPUCommandEncoder.h
index aec60c0..d33882e 100644
--- a/src/dawn/node/binding/GPUCommandEncoder.h
+++ b/src/dawn/node/binding/GPUCommandEncoder.h
@@ -24,62 +24,62 @@
 
 namespace wgpu::binding {
 
-    // GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
-    // wgpu::CommandEncoder.
-    class GPUCommandEncoder final : public interop::GPUCommandEncoder {
-      public:
-        explicit GPUCommandEncoder(wgpu::CommandEncoder enc);
+// GPUCommandEncoder is an implementation of interop::GPUCommandEncoder that wraps a
+// wgpu::CommandEncoder.
+class GPUCommandEncoder final : public interop::GPUCommandEncoder {
+  public:
+    explicit GPUCommandEncoder(wgpu::CommandEncoder enc);
 
-        // interop::GPUCommandEncoder interface compliance
-        interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
-            Napi::Env,
-            interop::GPURenderPassDescriptor descriptor) override;
-        interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
-            Napi::Env,
-            interop::GPUComputePassDescriptor descriptor) override;
-        void clearBuffer(Napi::Env,
-                         interop::Interface<interop::GPUBuffer> buffer,
-                         interop::GPUSize64 offset,
-                         std::optional<interop::GPUSize64> size) override;
-        void copyBufferToBuffer(Napi::Env,
-                                interop::Interface<interop::GPUBuffer> source,
-                                interop::GPUSize64 sourceOffset,
-                                interop::Interface<interop::GPUBuffer> destination,
-                                interop::GPUSize64 destinationOffset,
-                                interop::GPUSize64 size) override;
-        void copyBufferToTexture(Napi::Env,
-                                 interop::GPUImageCopyBuffer source,
-                                 interop::GPUImageCopyTexture destination,
-                                 interop::GPUExtent3D copySize) override;
-        void copyTextureToBuffer(Napi::Env,
-                                 interop::GPUImageCopyTexture source,
-                                 interop::GPUImageCopyBuffer destination,
-                                 interop::GPUExtent3D copySize) override;
-        void copyTextureToTexture(Napi::Env,
-                                  interop::GPUImageCopyTexture source,
-                                  interop::GPUImageCopyTexture destination,
-                                  interop::GPUExtent3D copySize) override;
-        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
-        void popDebugGroup(Napi::Env) override;
-        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
-        void writeTimestamp(Napi::Env,
-                            interop::Interface<interop::GPUQuerySet> querySet,
-                            interop::GPUSize32 queryIndex) override;
-        void resolveQuerySet(Napi::Env,
-                             interop::Interface<interop::GPUQuerySet> querySet,
-                             interop::GPUSize32 firstQuery,
-                             interop::GPUSize32 queryCount,
-                             interop::Interface<interop::GPUBuffer> destination,
-                             interop::GPUSize64 destinationOffset) override;
-        interop::Interface<interop::GPUCommandBuffer> finish(
-            Napi::Env env,
-            interop::GPUCommandBufferDescriptor descriptor) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUCommandEncoder interface compliance
+    interop::Interface<interop::GPURenderPassEncoder> beginRenderPass(
+        Napi::Env,
+        interop::GPURenderPassDescriptor descriptor) override;
+    interop::Interface<interop::GPUComputePassEncoder> beginComputePass(
+        Napi::Env,
+        interop::GPUComputePassDescriptor descriptor) override;
+    void clearBuffer(Napi::Env,
+                     interop::Interface<interop::GPUBuffer> buffer,
+                     interop::GPUSize64 offset,
+                     std::optional<interop::GPUSize64> size) override;
+    void copyBufferToBuffer(Napi::Env,
+                            interop::Interface<interop::GPUBuffer> source,
+                            interop::GPUSize64 sourceOffset,
+                            interop::Interface<interop::GPUBuffer> destination,
+                            interop::GPUSize64 destinationOffset,
+                            interop::GPUSize64 size) override;
+    void copyBufferToTexture(Napi::Env,
+                             interop::GPUImageCopyBuffer source,
+                             interop::GPUImageCopyTexture destination,
+                             interop::GPUExtent3D copySize) override;
+    void copyTextureToBuffer(Napi::Env,
+                             interop::GPUImageCopyTexture source,
+                             interop::GPUImageCopyBuffer destination,
+                             interop::GPUExtent3D copySize) override;
+    void copyTextureToTexture(Napi::Env,
+                              interop::GPUImageCopyTexture source,
+                              interop::GPUImageCopyTexture destination,
+                              interop::GPUExtent3D copySize) override;
+    void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+    void popDebugGroup(Napi::Env) override;
+    void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+    void writeTimestamp(Napi::Env,
+                        interop::Interface<interop::GPUQuerySet> querySet,
+                        interop::GPUSize32 queryIndex) override;
+    void resolveQuerySet(Napi::Env,
+                         interop::Interface<interop::GPUQuerySet> querySet,
+                         interop::GPUSize32 firstQuery,
+                         interop::GPUSize32 queryCount,
+                         interop::Interface<interop::GPUBuffer> destination,
+                         interop::GPUSize64 destinationOffset) override;
+    interop::Interface<interop::GPUCommandBuffer> finish(
+        Napi::Env env,
+        interop::GPUCommandBufferDescriptor descriptor) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::CommandEncoder enc_;
-    };
+  private:
+    wgpu::CommandEncoder enc_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUComputePassEncoder.cpp b/src/dawn/node/binding/GPUComputePassEncoder.cpp
index 7caf387..b8ce0f5 100644
--- a/src/dawn/node/binding/GPUComputePassEncoder.cpp
+++ b/src/dawn/node/binding/GPUComputePassEncoder.cpp
@@ -25,106 +25,100 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUComputePassEncoder
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc)
-        : enc_(std::move(enc)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUComputePassEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPUComputePassEncoder::GPUComputePassEncoder(wgpu::ComputePassEncoder enc) : enc_(std::move(enc)) {}
+
+void GPUComputePassEncoder::setPipeline(Napi::Env,
+                                        interop::Interface<interop::GPUComputePipeline> pipeline) {
+    enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
+}
+
+void GPUComputePassEncoder::dispatch(Napi::Env,
+                                     interop::GPUSize32 workgroupCountX,
+                                     interop::GPUSize32 workgroupCountY,
+                                     interop::GPUSize32 workgroupCountZ) {
+    enc_.Dispatch(workgroupCountX, workgroupCountY, workgroupCountZ);
+}
+
+void GPUComputePassEncoder::dispatchIndirect(Napi::Env,
+                                             interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                             interop::GPUSize64 indirectOffset) {
+    enc_.DispatchIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
+}
+
+void GPUComputePassEncoder::end(Napi::Env) {
+    enc_.End();
+}
+
+void GPUComputePassEncoder::setBindGroup(
+    Napi::Env env,
+    interop::GPUIndex32 index,
+    interop::Interface<interop::GPUBindGroup> bindGroup,
+    std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+    Converter conv(env);
+
+    wgpu::BindGroup bg{};
+    uint32_t* offsets = nullptr;
+    uint32_t num_offsets = 0;
+    if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+        return;
     }
 
-    void GPUComputePassEncoder::setPipeline(
-        Napi::Env,
-        interop::Interface<interop::GPUComputePipeline> pipeline) {
-        enc_.SetPipeline(*pipeline.As<GPUComputePipeline>());
+    enc_.SetBindGroup(index, bg, num_offsets, offsets);
+}
+
+void GPUComputePassEncoder::setBindGroup(Napi::Env env,
+                                         interop::GPUIndex32 index,
+                                         interop::Interface<interop::GPUBindGroup> bindGroup,
+                                         interop::Uint32Array dynamicOffsetsData,
+                                         interop::GPUSize64 dynamicOffsetsDataStart,
+                                         interop::GPUSize32 dynamicOffsetsDataLength) {
+    Converter conv(env);
+
+    wgpu::BindGroup bg{};
+    if (!conv(bg, bindGroup)) {
+        return;
     }
 
-    void GPUComputePassEncoder::dispatch(Napi::Env,
-                                         interop::GPUSize32 workgroupCountX,
-                                         interop::GPUSize32 workgroupCountY,
-                                         interop::GPUSize32 workgroupCountZ) {
-        enc_.Dispatch(workgroupCountX, workgroupCountY, workgroupCountZ);
+    if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
+        Napi::RangeError::New(env, "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
+            .ThrowAsJavaScriptException();
+        return;
     }
 
-    void GPUComputePassEncoder::dispatchIndirect(
-        Napi::Env,
-        interop::Interface<interop::GPUBuffer> indirectBuffer,
-        interop::GPUSize64 indirectOffset) {
-        enc_.DispatchIndirect(*indirectBuffer.As<GPUBuffer>(), indirectOffset);
+    if (dynamicOffsetsDataLength > dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
+        Napi::RangeError::New(env,
+                              "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
+                              "bound of dynamicOffsetData")
+            .ThrowAsJavaScriptException();
+        return;
     }
 
-    void GPUComputePassEncoder::end(Napi::Env) {
-        enc_.End();
-    }
+    enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+                      dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+}
 
-    void GPUComputePassEncoder::setBindGroup(
-        Napi::Env env,
-        interop::GPUIndex32 index,
-        interop::Interface<interop::GPUBindGroup> bindGroup,
-        std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
-        Converter conv(env);
+void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+    enc_.PushDebugGroup(groupLabel.c_str());
+}
 
-        wgpu::BindGroup bg{};
-        uint32_t* offsets = nullptr;
-        uint32_t num_offsets = 0;
-        if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
-            return;
-        }
+void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
+    enc_.PopDebugGroup();
+}
 
-        enc_.SetBindGroup(index, bg, num_offsets, offsets);
-    }
+void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+    enc_.InsertDebugMarker(markerLabel.c_str());
+}
 
-    void GPUComputePassEncoder::setBindGroup(Napi::Env env,
-                                             interop::GPUIndex32 index,
-                                             interop::Interface<interop::GPUBindGroup> bindGroup,
-                                             interop::Uint32Array dynamicOffsetsData,
-                                             interop::GPUSize64 dynamicOffsetsDataStart,
-                                             interop::GPUSize32 dynamicOffsetsDataLength) {
-        Converter conv(env);
+std::variant<std::string, interop::UndefinedType> GPUComputePassEncoder::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-        wgpu::BindGroup bg{};
-        if (!conv(bg, bindGroup)) {
-            return;
-        }
-
-        if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
-            Napi::RangeError::New(env,
-                                  "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
-                .ThrowAsJavaScriptException();
-            return;
-        }
-
-        if (dynamicOffsetsDataLength >
-            dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
-            Napi::RangeError::New(env,
-                                  "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
-                                  "bound of dynamicOffsetData")
-                .ThrowAsJavaScriptException();
-            return;
-        }
-
-        enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
-                          dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
-    }
-
-    void GPUComputePassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
-        enc_.PushDebugGroup(groupLabel.c_str());
-    }
-
-    void GPUComputePassEncoder::popDebugGroup(Napi::Env) {
-        enc_.PopDebugGroup();
-    }
-
-    void GPUComputePassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
-        enc_.InsertDebugMarker(markerLabel.c_str());
-    }
-
-    std::variant<std::string, interop::UndefinedType> GPUComputePassEncoder::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
-
-    void GPUComputePassEncoder::setLabel(Napi::Env,
-                                         std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUComputePassEncoder::setLabel(Napi::Env,
+                                     std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUComputePassEncoder.h b/src/dawn/node/binding/GPUComputePassEncoder.h
index 7a01482..ab9a9e5 100644
--- a/src/dawn/node/binding/GPUComputePassEncoder.h
+++ b/src/dawn/node/binding/GPUComputePassEncoder.h
@@ -25,47 +25,44 @@
 
 namespace wgpu::binding {
 
-    // GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
-    // wgpu::ComputePassEncoder.
-    class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
-      public:
-        explicit GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
+// GPUComputePassEncoder is an implementation of interop::GPUComputePassEncoder that wraps a
+// wgpu::ComputePassEncoder.
+class GPUComputePassEncoder final : public interop::GPUComputePassEncoder {
+  public:
+    explicit GPUComputePassEncoder(wgpu::ComputePassEncoder enc);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::ComputePassEncoder&() const {
-            return enc_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::ComputePassEncoder&() const { return enc_; }
 
-        // interop::GPUComputePassEncoder interface compliance
-        void setPipeline(Napi::Env,
-                         interop::Interface<interop::GPUComputePipeline> pipeline) override;
-        void dispatch(Napi::Env,
-                      interop::GPUSize32 workgroupCountX,
-                      interop::GPUSize32 workgroupCountY,
-                      interop::GPUSize32 workgroupCountZ) override;
-        void dispatchIndirect(Napi::Env,
-                              interop::Interface<interop::GPUBuffer> indirectBuffer,
-                              interop::GPUSize64 indirectOffset) override;
-        void end(Napi::Env) override;
-        void setBindGroup(Napi::Env,
-                          interop::GPUIndex32 index,
-                          interop::Interface<interop::GPUBindGroup> bindGroup,
-                          std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
-        void setBindGroup(Napi::Env,
-                          interop::GPUIndex32 index,
-                          interop::Interface<interop::GPUBindGroup> bindGroup,
-                          interop::Uint32Array dynamicOffsetsData,
-                          interop::GPUSize64 dynamicOffsetsDataStart,
-                          interop::GPUSize32 dynamicOffsetsDataLength) override;
-        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
-        void popDebugGroup(Napi::Env) override;
-        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUComputePassEncoder interface compliance
+    void setPipeline(Napi::Env, interop::Interface<interop::GPUComputePipeline> pipeline) override;
+    void dispatch(Napi::Env,
+                  interop::GPUSize32 workgroupCountX,
+                  interop::GPUSize32 workgroupCountY,
+                  interop::GPUSize32 workgroupCountZ) override;
+    void dispatchIndirect(Napi::Env,
+                          interop::Interface<interop::GPUBuffer> indirectBuffer,
+                          interop::GPUSize64 indirectOffset) override;
+    void end(Napi::Env) override;
+    void setBindGroup(Napi::Env,
+                      interop::GPUIndex32 index,
+                      interop::Interface<interop::GPUBindGroup> bindGroup,
+                      std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+    void setBindGroup(Napi::Env,
+                      interop::GPUIndex32 index,
+                      interop::Interface<interop::GPUBindGroup> bindGroup,
+                      interop::Uint32Array dynamicOffsetsData,
+                      interop::GPUSize64 dynamicOffsetsDataStart,
+                      interop::GPUSize32 dynamicOffsetsDataLength) override;
+    void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+    void popDebugGroup(Napi::Env) override;
+    void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::ComputePassEncoder enc_;
-    };
+  private:
+    wgpu::ComputePassEncoder enc_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUComputePipeline.cpp b/src/dawn/node/binding/GPUComputePipeline.cpp
index c4376bc..8883aba 100644
--- a/src/dawn/node/binding/GPUComputePipeline.cpp
+++ b/src/dawn/node/binding/GPUComputePipeline.cpp
@@ -22,27 +22,26 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUComputePipeline
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
-        : pipeline_(std::move(pipeline)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUComputePipeline
+////////////////////////////////////////////////////////////////////////////////
+GPUComputePipeline::GPUComputePipeline(wgpu::ComputePipeline pipeline)
+    : pipeline_(std::move(pipeline)) {}
 
-    interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
-        Napi::Env env,
-        uint32_t index) {
-        return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
-            env, pipeline_.GetBindGroupLayout(index));
-    }
+interop::Interface<interop::GPUBindGroupLayout> GPUComputePipeline::getBindGroupLayout(
+    Napi::Env env,
+    uint32_t index) {
+    return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+        env, pipeline_.GetBindGroupLayout(index));
+}
 
-    std::variant<std::string, interop::UndefinedType> GPUComputePipeline::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUComputePipeline::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUComputePipeline::setLabel(Napi::Env,
-                                      std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUComputePipeline::setLabel(Napi::Env,
+                                  std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUComputePipeline.h b/src/dawn/node/binding/GPUComputePipeline.h
index 25d0e47..0afb1ac 100644
--- a/src/dawn/node/binding/GPUComputePipeline.h
+++ b/src/dawn/node/binding/GPUComputePipeline.h
@@ -24,26 +24,24 @@
 
 namespace wgpu::binding {
 
-    // GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
-    // wgpu::ComputePipeline.
-    class GPUComputePipeline final : public interop::GPUComputePipeline {
-      public:
-        explicit GPUComputePipeline(wgpu::ComputePipeline pipeline);
+// GPUComputePipeline is an implementation of interop::GPUComputePipeline that wraps a
+// wgpu::ComputePipeline.
+class GPUComputePipeline final : public interop::GPUComputePipeline {
+  public:
+    explicit GPUComputePipeline(wgpu::ComputePipeline pipeline);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::ComputePipeline&() const {
-            return pipeline_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::ComputePipeline&() const { return pipeline_; }
 
-        // interop::GPUComputePipeline interface compliance
-        interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
-                                                                           uint32_t index) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUComputePipeline interface compliance
+    interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+                                                                       uint32_t index) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::ComputePipeline pipeline_;
-    };
+  private:
+    wgpu::ComputePipeline pipeline_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUDevice.cpp b/src/dawn/node/binding/GPUDevice.cpp
index fa91124..c1aba64 100644
--- a/src/dawn/node/binding/GPUDevice.cpp
+++ b/src/dawn/node/binding/GPUDevice.cpp
@@ -39,494 +39,480 @@
 
 namespace wgpu::binding {
 
-    namespace {
+namespace {
 
-        class DeviceLostInfo : public interop::GPUDeviceLostInfo {
-          public:
-            DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
-                : reason_(reason), message_(message) {
+class DeviceLostInfo : public interop::GPUDeviceLostInfo {
+  public:
+    DeviceLostInfo(interop::GPUDeviceLostReason reason, std::string message)
+        : reason_(reason), message_(message) {}
+    std::variant<interop::GPUDeviceLostReason, interop::UndefinedType> getReason(
+        Napi::Env env) override {
+        return reason_;
+    }
+    std::string getMessage(Napi::Env) override { return message_; }
+
+  private:
+    interop::GPUDeviceLostReason reason_;
+    std::string message_;
+};
+
+class OOMError : public interop::GPUOutOfMemoryError {};
+class ValidationError : public interop::GPUValidationError {
+  public:
+    explicit ValidationError(std::string message) : message_(std::move(message)) {}
+
+    std::string getMessage(Napi::Env) override { return message_; };
+
+  private:
+    std::string message_;
+};
+
+}  // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUDevice
+////////////////////////////////////////////////////////////////////////////////
+GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
+    : env_(env),
+      device_(device),
+      async_(std::make_shared<AsyncRunner>(env, device)),
+      lost_promise_(env, PROMISE_INFO) {
+    device_.SetLoggingCallback(
+        [](WGPULoggingType type, char const* message, void* userdata) {
+            std::cout << type << ": " << message << std::endl;
+        },
+        nullptr);
+    device_.SetUncapturedErrorCallback(
+        [](WGPUErrorType type, char const* message, void* userdata) {
+            std::cout << type << ": " << message << std::endl;
+        },
+        nullptr);
+
+    device_.SetDeviceLostCallback(
+        [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
+            auto r = interop::GPUDeviceLostReason::kDestroyed;
+            switch (reason) {
+                case WGPUDeviceLostReason_Force32:
+                    UNREACHABLE("WGPUDeviceLostReason_Force32");
+                    break;
+                case WGPUDeviceLostReason_Destroyed:
+                case WGPUDeviceLostReason_Undefined:
+                    r = interop::GPUDeviceLostReason::kDestroyed;
+                    break;
             }
-            std::variant<interop::GPUDeviceLostReason, interop::UndefinedType> getReason(
-                Napi::Env env) override {
-                return reason_;
+            auto* self = static_cast<GPUDevice*>(userdata);
+            if (self->lost_promise_.GetState() == interop::PromiseState::Pending) {
+                self->lost_promise_.Resolve(
+                    interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
             }
-            std::string getMessage(Napi::Env) override {
-                return message_;
-            }
+        },
+        this);
+}
 
-          private:
-            interop::GPUDeviceLostReason reason_;
-            std::string message_;
-        };
+GPUDevice::~GPUDevice() {}
 
-        class OOMError : public interop::GPUOutOfMemoryError {};
-        class ValidationError : public interop::GPUValidationError {
-          public:
-            explicit ValidationError(std::string message) : message_(std::move(message)) {
-            }
+interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
+    class Features : public interop::GPUSupportedFeatures {
+      public:
+        bool has(Napi::Env, std::string feature) override { UNIMPLEMENTED(); }
+        std::vector<std::string> keys(Napi::Env) override { UNIMPLEMENTED(); }
+    };
+    return interop::GPUSupportedFeatures::Create<Features>(env);
+}
 
-            std::string getMessage(Napi::Env) override {
-                return message_;
-            };
+interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
+    wgpu::SupportedLimits limits{};
+    if (!device_.GetLimits(&limits)) {
+        Napi::Error::New(env, "failed to get device limits").ThrowAsJavaScriptException();
+    }
+    return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, limits);
+}
 
-          private:
-            std::string message_;
-        };
+interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
+    return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
+}
 
-    }  // namespace
+void GPUDevice::destroy(Napi::Env env) {
+    if (lost_promise_.GetState() == interop::PromiseState::Pending) {
+        lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
+            env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
+    }
+    device_.Destroy();
+}
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUDevice
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUDevice::GPUDevice(Napi::Env env, wgpu::Device device)
-        : env_(env),
-          device_(device),
-          async_(std::make_shared<AsyncRunner>(env, device)),
-          lost_promise_(env, PROMISE_INFO) {
-        device_.SetLoggingCallback(
-            [](WGPULoggingType type, char const* message, void* userdata) {
-                std::cout << type << ": " << message << std::endl;
-            },
-            nullptr);
-        device_.SetUncapturedErrorCallback(
-            [](WGPUErrorType type, char const* message, void* userdata) {
-                std::cout << type << ": " << message << std::endl;
-            },
-            nullptr);
+interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
+    Napi::Env env,
+    interop::GPUBufferDescriptor descriptor) {
+    Converter conv(env);
 
-        device_.SetDeviceLostCallback(
-            [](WGPUDeviceLostReason reason, char const* message, void* userdata) {
-                auto r = interop::GPUDeviceLostReason::kDestroyed;
-                switch (reason) {
-                    case WGPUDeviceLostReason_Force32:
-                        UNREACHABLE("WGPUDeviceLostReason_Force32");
-                        break;
-                    case WGPUDeviceLostReason_Destroyed:
-                    case WGPUDeviceLostReason_Undefined:
-                        r = interop::GPUDeviceLostReason::kDestroyed;
-                        break;
-                }
-                auto* self = static_cast<GPUDevice*>(userdata);
-                if (self->lost_promise_.GetState() == interop::PromiseState::Pending) {
-                    self->lost_promise_.Resolve(
-                        interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(self->env_, r, message));
-                }
-            },
-            this);
+    wgpu::BufferDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) ||
+        !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
+        !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
+        return {};
+    }
+    return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc, device_,
+                                                 async_);
+}
+
+interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
+    Napi::Env env,
+    interop::GPUTextureDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::TextureDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) ||  //
+        !conv(desc.size, descriptor.size) ||                                           //
+        !conv(desc.dimension, descriptor.dimension) ||                                 //
+        !conv(desc.mipLevelCount, descriptor.mipLevelCount) ||                         //
+        !conv(desc.sampleCount, descriptor.sampleCount) ||                             //
+        !conv(desc.format, descriptor.format)) {
+        return {};
+    }
+    return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
+}
+
+interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
+    Napi::Env env,
+    interop::GPUSamplerDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::SamplerDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) ||                //
+        !conv(desc.addressModeU, descriptor.addressModeU) ||  //
+        !conv(desc.addressModeV, descriptor.addressModeV) ||  //
+        !conv(desc.addressModeW, descriptor.addressModeW) ||  //
+        !conv(desc.magFilter, descriptor.magFilter) ||        //
+        !conv(desc.minFilter, descriptor.minFilter) ||        //
+        !conv(desc.mipmapFilter, descriptor.mipmapFilter) ||  //
+        !conv(desc.lodMinClamp, descriptor.lodMinClamp) ||    //
+        !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) ||    //
+        !conv(desc.compare, descriptor.compare) ||            //
+        !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
+        return {};
+    }
+    return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
+}
+
+interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
+    Napi::Env,
+    interop::GPUExternalTextureDescriptor descriptor) {
+    UNIMPLEMENTED();
+}
+
+interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
+    Napi::Env env,
+    interop::GPUBindGroupLayoutDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::BindGroupLayoutDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) ||
+        !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+        return {};
     }
 
-    GPUDevice::~GPUDevice() {
+    return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+        env, device_.CreateBindGroupLayout(&desc));
+}
+
+interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
+    Napi::Env env,
+    interop::GPUPipelineLayoutDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::PipelineLayoutDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) ||
+        !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
+        return {};
     }
 
-    interop::Interface<interop::GPUSupportedFeatures> GPUDevice::getFeatures(Napi::Env env) {
-        class Features : public interop::GPUSupportedFeatures {
-          public:
-            bool has(Napi::Env, std::string feature) override {
-                UNIMPLEMENTED();
-            }
-            std::vector<std::string> keys(Napi::Env) override {
-                UNIMPLEMENTED();
-            }
-        };
-        return interop::GPUSupportedFeatures::Create<Features>(env);
+    return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
+        env, device_.CreatePipelineLayout(&desc));
+}
+
+interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
+    Napi::Env env,
+    interop::GPUBindGroupDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::BindGroupDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
+        !conv(desc.entries, desc.entryCount, descriptor.entries)) {
+        return {};
     }
 
-    interop::Interface<interop::GPUSupportedLimits> GPUDevice::getLimits(Napi::Env env) {
-        wgpu::SupportedLimits limits{};
-        if (!device_.GetLimits(&limits)) {
-            Napi::Error::New(env, "failed to get device limits").ThrowAsJavaScriptException();
-        }
-        return interop::GPUSupportedLimits::Create<GPUSupportedLimits>(env, limits);
+    return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
+}
+
+interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
+    Napi::Env env,
+    interop::GPUShaderModuleDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
+    wgpu::ShaderModuleDescriptor sm_desc{};
+    if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
+        return {};
+    }
+    sm_desc.nextInChain = &wgsl_desc;
+
+    return interop::GPUShaderModule::Create<GPUShaderModule>(
+        env, device_.CreateShaderModule(&sm_desc), async_);
+}
+
+interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
+    Napi::Env env,
+    interop::GPUComputePipelineDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::ComputePipelineDescriptor desc{};
+    if (!conv(desc, descriptor)) {
+        return {};
     }
 
-    interop::Interface<interop::GPUQueue> GPUDevice::getQueue(Napi::Env env) {
-        return interop::GPUQueue::Create<GPUQueue>(env, device_.GetQueue(), async_);
+    return interop::GPUComputePipeline::Create<GPUComputePipeline>(
+        env, device_.CreateComputePipeline(&desc));
+}
+
+interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
+    Napi::Env env,
+    interop::GPURenderPipelineDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::RenderPipelineDescriptor desc{};
+    if (!conv(desc, descriptor)) {
+        return {};
     }
 
-    void GPUDevice::destroy(Napi::Env env) {
-        if (lost_promise_.GetState() == interop::PromiseState::Pending) {
-            lost_promise_.Resolve(interop::GPUDeviceLostInfo::Create<DeviceLostInfo>(
-                env_, interop::GPUDeviceLostReason::kDestroyed, "device was destroyed"));
-        }
-        device_.Destroy();
-    }
+    return interop::GPURenderPipeline::Create<GPURenderPipeline>(
+        env, device_.CreateRenderPipeline(&desc));
+}
 
-    interop::Interface<interop::GPUBuffer> GPUDevice::createBuffer(
-        Napi::Env env,
-        interop::GPUBufferDescriptor descriptor) {
-        Converter conv(env);
+interop::Promise<interop::Interface<interop::GPUComputePipeline>>
+GPUDevice::createComputePipelineAsync(Napi::Env env,
+                                      interop::GPUComputePipelineDescriptor descriptor) {
+    using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
 
-        wgpu::BufferDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) ||
-            !conv(desc.mappedAtCreation, descriptor.mappedAtCreation) ||
-            !conv(desc.size, descriptor.size) || !conv(desc.usage, descriptor.usage)) {
-            return {};
-        }
-        return interop::GPUBuffer::Create<GPUBuffer>(env, device_.CreateBuffer(&desc), desc,
-                                                     device_, async_);
-    }
+    Converter conv(env);
 
-    interop::Interface<interop::GPUTexture> GPUDevice::createTexture(
-        Napi::Env env,
-        interop::GPUTextureDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::TextureDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) || !conv(desc.usage, descriptor.usage) ||  //
-            !conv(desc.size, descriptor.size) ||                                           //
-            !conv(desc.dimension, descriptor.dimension) ||                                 //
-            !conv(desc.mipLevelCount, descriptor.mipLevelCount) ||                         //
-            !conv(desc.sampleCount, descriptor.sampleCount) ||                             //
-            !conv(desc.format, descriptor.format)) {
-            return {};
-        }
-        return interop::GPUTexture::Create<GPUTexture>(env, device_.CreateTexture(&desc));
-    }
-
-    interop::Interface<interop::GPUSampler> GPUDevice::createSampler(
-        Napi::Env env,
-        interop::GPUSamplerDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::SamplerDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) ||                //
-            !conv(desc.addressModeU, descriptor.addressModeU) ||  //
-            !conv(desc.addressModeV, descriptor.addressModeV) ||  //
-            !conv(desc.addressModeW, descriptor.addressModeW) ||  //
-            !conv(desc.magFilter, descriptor.magFilter) ||        //
-            !conv(desc.minFilter, descriptor.minFilter) ||        //
-            !conv(desc.mipmapFilter, descriptor.mipmapFilter) ||  //
-            !conv(desc.lodMinClamp, descriptor.lodMinClamp) ||    //
-            !conv(desc.lodMaxClamp, descriptor.lodMaxClamp) ||    //
-            !conv(desc.compare, descriptor.compare) ||            //
-            !conv(desc.maxAnisotropy, descriptor.maxAnisotropy)) {
-            return {};
-        }
-        return interop::GPUSampler::Create<GPUSampler>(env, device_.CreateSampler(&desc));
-    }
-
-    interop::Interface<interop::GPUExternalTexture> GPUDevice::importExternalTexture(
-        Napi::Env,
-        interop::GPUExternalTextureDescriptor descriptor) {
-        UNIMPLEMENTED();
-    }
-
-    interop::Interface<interop::GPUBindGroupLayout> GPUDevice::createBindGroupLayout(
-        Napi::Env env,
-        interop::GPUBindGroupLayoutDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::BindGroupLayoutDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) ||
-            !conv(desc.entries, desc.entryCount, descriptor.entries)) {
-            return {};
-        }
-
-        return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
-            env, device_.CreateBindGroupLayout(&desc));
-    }
-
-    interop::Interface<interop::GPUPipelineLayout> GPUDevice::createPipelineLayout(
-        Napi::Env env,
-        interop::GPUPipelineLayoutDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::PipelineLayoutDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) ||
-            !conv(desc.bindGroupLayouts, desc.bindGroupLayoutCount, descriptor.bindGroupLayouts)) {
-            return {};
-        }
-
-        return interop::GPUPipelineLayout::Create<GPUPipelineLayout>(
-            env, device_.CreatePipelineLayout(&desc));
-    }
-
-    interop::Interface<interop::GPUBindGroup> GPUDevice::createBindGroup(
-        Napi::Env env,
-        interop::GPUBindGroupDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::BindGroupDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) || !conv(desc.layout, descriptor.layout) ||
-            !conv(desc.entries, desc.entryCount, descriptor.entries)) {
-            return {};
-        }
-
-        return interop::GPUBindGroup::Create<GPUBindGroup>(env, device_.CreateBindGroup(&desc));
-    }
-
-    interop::Interface<interop::GPUShaderModule> GPUDevice::createShaderModule(
-        Napi::Env env,
-        interop::GPUShaderModuleDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::ShaderModuleWGSLDescriptor wgsl_desc{};
-        wgpu::ShaderModuleDescriptor sm_desc{};
-        if (!conv(wgsl_desc.source, descriptor.code) || !conv(sm_desc.label, descriptor.label)) {
-            return {};
-        }
-        sm_desc.nextInChain = &wgsl_desc;
-
-        return interop::GPUShaderModule::Create<GPUShaderModule>(
-            env, device_.CreateShaderModule(&sm_desc), async_);
-    }
-
-    interop::Interface<interop::GPUComputePipeline> GPUDevice::createComputePipeline(
-        Napi::Env env,
-        interop::GPUComputePipelineDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::ComputePipelineDescriptor desc{};
-        if (!conv(desc, descriptor)) {
-            return {};
-        }
-
-        return interop::GPUComputePipeline::Create<GPUComputePipeline>(
-            env, device_.CreateComputePipeline(&desc));
-    }
-
-    interop::Interface<interop::GPURenderPipeline> GPUDevice::createRenderPipeline(
-        Napi::Env env,
-        interop::GPURenderPipelineDescriptor descriptor) {
-        Converter conv(env);
-
-        wgpu::RenderPipelineDescriptor desc{};
-        if (!conv(desc, descriptor)) {
-            return {};
-        }
-
-        return interop::GPURenderPipeline::Create<GPURenderPipeline>(
-            env, device_.CreateRenderPipeline(&desc));
-    }
-
-    interop::Promise<interop::Interface<interop::GPUComputePipeline>>
-    GPUDevice::createComputePipelineAsync(Napi::Env env,
-                                          interop::GPUComputePipelineDescriptor descriptor) {
-        using Promise = interop::Promise<interop::Interface<interop::GPUComputePipeline>>;
-
-        Converter conv(env);
-
-        wgpu::ComputePipelineDescriptor desc{};
-        if (!conv(desc, descriptor)) {
-            Promise promise(env, PROMISE_INFO);
-            promise.Reject(Errors::OperationError(env));
-            return promise;
-        }
-
-        struct Context {
-            Napi::Env env;
-            Promise promise;
-            AsyncTask task;
-        };
-        auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
-        auto promise = ctx->promise;
-
-        device_.CreateComputePipelineAsync(
-            &desc,
-            [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
-               char const* message, void* userdata) {
-                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
-                switch (status) {
-                    case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
-                        c->promise.Resolve(interop::GPUComputePipeline::Create<GPUComputePipeline>(
-                            c->env, pipeline));
-                        break;
-                    default:
-                        c->promise.Reject(Errors::OperationError(c->env));
-                        break;
-                }
-            },
-            ctx);
-
+    wgpu::ComputePipelineDescriptor desc{};
+    if (!conv(desc, descriptor)) {
+        Promise promise(env, PROMISE_INFO);
+        promise.Reject(Errors::OperationError(env));
         return promise;
     }
 
-    interop::Promise<interop::Interface<interop::GPURenderPipeline>>
-    GPUDevice::createRenderPipelineAsync(Napi::Env env,
-                                         interop::GPURenderPipelineDescriptor descriptor) {
-        using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
+    struct Context {
+        Napi::Env env;
+        Promise promise;
+        AsyncTask task;
+    };
+    auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+    auto promise = ctx->promise;
 
-        Converter conv(env);
+    device_.CreateComputePipelineAsync(
+        &desc,
+        [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline, char const* message,
+           void* userdata) {
+            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
 
-        wgpu::RenderPipelineDescriptor desc{};
-        if (!conv(desc, descriptor)) {
-            Promise promise(env, PROMISE_INFO);
-            promise.Reject(Errors::OperationError(env));
-            return promise;
-        }
+            switch (status) {
+                case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+                    c->promise.Resolve(
+                        interop::GPUComputePipeline::Create<GPUComputePipeline>(c->env, pipeline));
+                    break;
+                default:
+                    c->promise.Reject(Errors::OperationError(c->env));
+                    break;
+            }
+        },
+        ctx);
 
-        struct Context {
-            Napi::Env env;
-            Promise promise;
-            AsyncTask task;
-        };
-        auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
-        auto promise = ctx->promise;
+    return promise;
+}
 
-        device_.CreateRenderPipelineAsync(
-            &desc,
-            [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
-               char const* message, void* userdata) {
-                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+interop::Promise<interop::Interface<interop::GPURenderPipeline>>
+GPUDevice::createRenderPipelineAsync(Napi::Env env,
+                                     interop::GPURenderPipelineDescriptor descriptor) {
+    using Promise = interop::Promise<interop::Interface<interop::GPURenderPipeline>>;
 
-                switch (status) {
-                    case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
-                        c->promise.Resolve(interop::GPURenderPipeline::Create<GPURenderPipeline>(
-                            c->env, pipeline));
-                        break;
-                    default:
-                        c->promise.Reject(Errors::OperationError(c->env));
-                        break;
-                }
-            },
-            ctx);
+    Converter conv(env);
 
+    wgpu::RenderPipelineDescriptor desc{};
+    if (!conv(desc, descriptor)) {
+        Promise promise(env, PROMISE_INFO);
+        promise.Reject(Errors::OperationError(env));
         return promise;
     }
 
-    interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
-        Napi::Env env,
-        interop::GPUCommandEncoderDescriptor descriptor) {
-        wgpu::CommandEncoderDescriptor desc{};
-        return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
-            env, device_.CreateCommandEncoder(&desc));
+    struct Context {
+        Napi::Env env;
+        Promise promise;
+        AsyncTask task;
+    };
+    auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+    auto promise = ctx->promise;
+
+    device_.CreateRenderPipelineAsync(
+        &desc,
+        [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline, char const* message,
+           void* userdata) {
+            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+            switch (status) {
+                case WGPUCreatePipelineAsyncStatus::WGPUCreatePipelineAsyncStatus_Success:
+                    c->promise.Resolve(
+                        interop::GPURenderPipeline::Create<GPURenderPipeline>(c->env, pipeline));
+                    break;
+                default:
+                    c->promise.Reject(Errors::OperationError(c->env));
+                    break;
+            }
+        },
+        ctx);
+
+    return promise;
+}
+
+interop::Interface<interop::GPUCommandEncoder> GPUDevice::createCommandEncoder(
+    Napi::Env env,
+    interop::GPUCommandEncoderDescriptor descriptor) {
+    wgpu::CommandEncoderDescriptor desc{};
+    return interop::GPUCommandEncoder::Create<GPUCommandEncoder>(
+        env, device_.CreateCommandEncoder(&desc));
+}
+
+interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
+    Napi::Env env,
+    interop::GPURenderBundleEncoderDescriptor descriptor) {
+    Converter conv(env);
+
+    wgpu::RenderBundleEncoderDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) ||
+        !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
+        !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
+        !conv(desc.sampleCount, descriptor.sampleCount) ||
+        !conv(desc.depthReadOnly, descriptor.depthReadOnly) ||
+        !conv(desc.stencilReadOnly, descriptor.stencilReadOnly)) {
+        return {};
     }
 
-    interop::Interface<interop::GPURenderBundleEncoder> GPUDevice::createRenderBundleEncoder(
-        Napi::Env env,
-        interop::GPURenderBundleEncoderDescriptor descriptor) {
-        Converter conv(env);
+    return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
+        env, device_.CreateRenderBundleEncoder(&desc));
+}
 
-        wgpu::RenderBundleEncoderDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) ||
-            !conv(desc.colorFormats, desc.colorFormatsCount, descriptor.colorFormats) ||
-            !conv(desc.depthStencilFormat, descriptor.depthStencilFormat) ||
-            !conv(desc.sampleCount, descriptor.sampleCount) ||
-            !conv(desc.depthReadOnly, descriptor.depthReadOnly) ||
-            !conv(desc.stencilReadOnly, descriptor.stencilReadOnly)) {
-            return {};
-        }
+interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
+    Napi::Env env,
+    interop::GPUQuerySetDescriptor descriptor) {
+    Converter conv(env);
 
-        return interop::GPURenderBundleEncoder::Create<GPURenderBundleEncoder>(
-            env, device_.CreateRenderBundleEncoder(&desc));
+    wgpu::QuerySetDescriptor desc{};
+    if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
+        !conv(desc.count, descriptor.count)) {
+        return {};
     }
 
-    interop::Interface<interop::GPUQuerySet> GPUDevice::createQuerySet(
-        Napi::Env env,
-        interop::GPUQuerySetDescriptor descriptor) {
-        Converter conv(env);
+    return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
+}
 
-        wgpu::QuerySetDescriptor desc{};
-        if (!conv(desc.label, descriptor.label) || !conv(desc.type, descriptor.type) ||
-            !conv(desc.count, descriptor.count)) {
-            return {};
-        }
+interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(Napi::Env env) {
+    return lost_promise_;
+}
 
-        return interop::GPUQuerySet::Create<GPUQuerySet>(env, device_.CreateQuerySet(&desc));
+void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
+    wgpu::ErrorFilter f;
+    switch (filter) {
+        case interop::GPUErrorFilter::kOutOfMemory:
+            f = wgpu::ErrorFilter::OutOfMemory;
+            break;
+        case interop::GPUErrorFilter::kValidation:
+            f = wgpu::ErrorFilter::Validation;
+            break;
+        default:
+            Napi::Error::New(env, "unhandled GPUErrorFilter value").ThrowAsJavaScriptException();
+            return;
     }
+    device_.PushErrorScope(f);
+}
 
-    interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> GPUDevice::getLost(
-        Napi::Env env) {
-        return lost_promise_;
-    }
+interop::Promise<std::optional<interop::GPUError>> GPUDevice::popErrorScope(Napi::Env env) {
+    using Promise = interop::Promise<std::optional<interop::GPUError>>;
+    struct Context {
+        Napi::Env env;
+        Promise promise;
+        AsyncTask task;
+    };
+    auto* ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+    auto promise = ctx->promise;
 
-    void GPUDevice::pushErrorScope(Napi::Env env, interop::GPUErrorFilter filter) {
-        wgpu::ErrorFilter f;
-        switch (filter) {
-            case interop::GPUErrorFilter::kOutOfMemory:
-                f = wgpu::ErrorFilter::OutOfMemory;
-                break;
-            case interop::GPUErrorFilter::kValidation:
-                f = wgpu::ErrorFilter::Validation;
-                break;
-            default:
-                Napi::Error::New(env, "unhandled GPUErrorFilter value")
-                    .ThrowAsJavaScriptException();
-                return;
-        }
-        device_.PushErrorScope(f);
-    }
+    device_.PopErrorScope(
+        [](WGPUErrorType type, char const* message, void* userdata) {
+            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+            auto env = c->env;
+            switch (type) {
+                case WGPUErrorType::WGPUErrorType_NoError:
+                    c->promise.Resolve({});
+                    break;
+                case WGPUErrorType::WGPUErrorType_OutOfMemory:
+                    c->promise.Resolve(interop::GPUOutOfMemoryError::Create<OOMError>(env));
+                    break;
+                case WGPUErrorType::WGPUErrorType_Validation:
+                    c->promise.Resolve(
+                        interop::GPUValidationError::Create<ValidationError>(env, message));
+                    break;
+                case WGPUErrorType::WGPUErrorType_Unknown:
+                case WGPUErrorType::WGPUErrorType_DeviceLost:
+                    c->promise.Reject(Errors::OperationError(env, message));
+                    break;
+                default:
+                    c->promise.Reject("unhandled error type");
+                    break;
+            }
+        },
+        ctx);
 
-    interop::Promise<std::optional<interop::GPUError>> GPUDevice::popErrorScope(Napi::Env env) {
-        using Promise = interop::Promise<std::optional<interop::GPUError>>;
-        struct Context {
-            Napi::Env env;
-            Promise promise;
-            AsyncTask task;
-        };
-        auto* ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
-        auto promise = ctx->promise;
+    return promise;
+}
 
-        device_.PopErrorScope(
-            [](WGPUErrorType type, char const* message, void* userdata) {
-                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-                auto env = c->env;
-                switch (type) {
-                    case WGPUErrorType::WGPUErrorType_NoError:
-                        c->promise.Resolve({});
-                        break;
-                    case WGPUErrorType::WGPUErrorType_OutOfMemory:
-                        c->promise.Resolve(interop::GPUOutOfMemoryError::Create<OOMError>(env));
-                        break;
-                    case WGPUErrorType::WGPUErrorType_Validation:
-                        c->promise.Resolve(
-                            interop::GPUValidationError::Create<ValidationError>(env, message));
-                        break;
-                    case WGPUErrorType::WGPUErrorType_Unknown:
-                    case WGPUErrorType::WGPUErrorType_DeviceLost:
-                        c->promise.Reject(Errors::OperationError(env, message));
-                        break;
-                    default:
-                        c->promise.Reject("unhandled error type");
-                        break;
-                }
-            },
-            ctx);
+std::variant<std::string, interop::UndefinedType> GPUDevice::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-        return promise;
-    }
+void GPUDevice::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
-    std::variant<std::string, interop::UndefinedType> GPUDevice::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
+    // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+    UNIMPLEMENTED();
+}
 
-    void GPUDevice::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUDevice::setOnuncapturederror(Napi::Env, interop::Interface<interop::EventHandler> value) {
+    // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+    UNIMPLEMENTED();
+}
 
-    interop::Interface<interop::EventHandler> GPUDevice::getOnuncapturederror(Napi::Env) {
-        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
-        UNIMPLEMENTED();
-    }
+void GPUDevice::addEventListener(
+    Napi::Env,
+    std::string type,
+    std::optional<interop::Interface<interop::EventListener>> callback,
+    std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
+    // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+    UNIMPLEMENTED();
+}
 
-    void GPUDevice::setOnuncapturederror(Napi::Env,
-                                         interop::Interface<interop::EventHandler> value) {
-        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
-        UNIMPLEMENTED();
-    }
+void GPUDevice::removeEventListener(
+    Napi::Env,
+    std::string type,
+    std::optional<interop::Interface<interop::EventListener>> callback,
+    std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
+    // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+    UNIMPLEMENTED();
+}
 
-    void GPUDevice::addEventListener(
-        Napi::Env,
-        std::string type,
-        std::optional<interop::Interface<interop::EventListener>> callback,
-        std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) {
-        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
-        UNIMPLEMENTED();
-    }
-
-    void GPUDevice::removeEventListener(
-        Napi::Env,
-        std::string type,
-        std::optional<interop::Interface<interop::EventListener>> callback,
-        std::optional<std::variant<interop::EventListenerOptions, bool>> options) {
-        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
-        UNIMPLEMENTED();
-    }
-
-    bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
-        // TODO(dawn:1348): Implement support for the "unhandlederror" event.
-        UNIMPLEMENTED();
-    }
+bool GPUDevice::dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) {
+    // TODO(dawn:1348): Implement support for the "unhandlederror" event.
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUDevice.h b/src/dawn/node/binding/GPUDevice.h
index eb456d7..3e04855 100644
--- a/src/dawn/node/binding/GPUDevice.h
+++ b/src/dawn/node/binding/GPUDevice.h
@@ -24,94 +24,93 @@
 #include "src/dawn/node/interop/WebGPU.h"
 
 namespace wgpu::binding {
-    // GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
-    class GPUDevice final : public interop::GPUDevice {
-      public:
-        GPUDevice(Napi::Env env, wgpu::Device device);
-        ~GPUDevice();
+// GPUDevice is an implementation of interop::GPUDevice that wraps a wgpu::Device.
+class GPUDevice final : public interop::GPUDevice {
+  public:
+    GPUDevice(Napi::Env env, wgpu::Device device);
+    ~GPUDevice();
 
-        // interop::GPUDevice interface compliance
-        interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
-        interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
-        interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
-        void destroy(Napi::Env) override;
-        interop::Interface<interop::GPUBuffer> createBuffer(
-            Napi::Env env,
-            interop::GPUBufferDescriptor descriptor) override;
-        interop::Interface<interop::GPUTexture> createTexture(
-            Napi::Env,
-            interop::GPUTextureDescriptor descriptor) override;
-        interop::Interface<interop::GPUSampler> createSampler(
-            Napi::Env,
-            interop::GPUSamplerDescriptor descriptor) override;
-        interop::Interface<interop::GPUExternalTexture> importExternalTexture(
-            Napi::Env,
-            interop::GPUExternalTextureDescriptor descriptor) override;
-        interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
-            Napi::Env,
-            interop::GPUBindGroupLayoutDescriptor descriptor) override;
-        interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
-            Napi::Env,
-            interop::GPUPipelineLayoutDescriptor descriptor) override;
-        interop::Interface<interop::GPUBindGroup> createBindGroup(
-            Napi::Env,
-            interop::GPUBindGroupDescriptor descriptor) override;
-        interop::Interface<interop::GPUShaderModule> createShaderModule(
-            Napi::Env,
-            interop::GPUShaderModuleDescriptor descriptor) override;
-        interop::Interface<interop::GPUComputePipeline> createComputePipeline(
-            Napi::Env,
-            interop::GPUComputePipelineDescriptor descriptor) override;
-        interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
-            Napi::Env,
-            interop::GPURenderPipelineDescriptor descriptor) override;
-        interop::Promise<interop::Interface<interop::GPUComputePipeline>>
-        createComputePipelineAsync(Napi::Env env,
-                                   interop::GPUComputePipelineDescriptor descriptor) override;
-        interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
-            Napi::Env env,
-            interop::GPURenderPipelineDescriptor descriptor) override;
-        interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
-            Napi::Env env,
-            interop::GPUCommandEncoderDescriptor descriptor) override;
-        interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
-            Napi::Env,
-            interop::GPURenderBundleEncoderDescriptor descriptor) override;
-        interop::Interface<interop::GPUQuerySet> createQuerySet(
-            Napi::Env,
-            interop::GPUQuerySetDescriptor descriptor) override;
-        interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
-            Napi::Env env) override;
-        void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
-        interop::Promise<std::optional<interop::GPUError>> popErrorScope(Napi::Env env) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
-        interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
-        void setOnuncapturederror(Napi::Env,
-                                  interop::Interface<interop::EventHandler> value) override;
-        void addEventListener(
-            Napi::Env,
-            std::string type,
-            std::optional<interop::Interface<interop::EventListener>> callback,
-            std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
-        void removeEventListener(
-            Napi::Env,
-            std::string type,
-            std::optional<interop::Interface<interop::EventListener>> callback,
-            std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
-        bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
+    // interop::GPUDevice interface compliance
+    interop::Interface<interop::GPUSupportedFeatures> getFeatures(Napi::Env) override;
+    interop::Interface<interop::GPUSupportedLimits> getLimits(Napi::Env) override;
+    interop::Interface<interop::GPUQueue> getQueue(Napi::Env env) override;
+    void destroy(Napi::Env) override;
+    interop::Interface<interop::GPUBuffer> createBuffer(
+        Napi::Env env,
+        interop::GPUBufferDescriptor descriptor) override;
+    interop::Interface<interop::GPUTexture> createTexture(
+        Napi::Env,
+        interop::GPUTextureDescriptor descriptor) override;
+    interop::Interface<interop::GPUSampler> createSampler(
+        Napi::Env,
+        interop::GPUSamplerDescriptor descriptor) override;
+    interop::Interface<interop::GPUExternalTexture> importExternalTexture(
+        Napi::Env,
+        interop::GPUExternalTextureDescriptor descriptor) override;
+    interop::Interface<interop::GPUBindGroupLayout> createBindGroupLayout(
+        Napi::Env,
+        interop::GPUBindGroupLayoutDescriptor descriptor) override;
+    interop::Interface<interop::GPUPipelineLayout> createPipelineLayout(
+        Napi::Env,
+        interop::GPUPipelineLayoutDescriptor descriptor) override;
+    interop::Interface<interop::GPUBindGroup> createBindGroup(
+        Napi::Env,
+        interop::GPUBindGroupDescriptor descriptor) override;
+    interop::Interface<interop::GPUShaderModule> createShaderModule(
+        Napi::Env,
+        interop::GPUShaderModuleDescriptor descriptor) override;
+    interop::Interface<interop::GPUComputePipeline> createComputePipeline(
+        Napi::Env,
+        interop::GPUComputePipelineDescriptor descriptor) override;
+    interop::Interface<interop::GPURenderPipeline> createRenderPipeline(
+        Napi::Env,
+        interop::GPURenderPipelineDescriptor descriptor) override;
+    interop::Promise<interop::Interface<interop::GPUComputePipeline>> createComputePipelineAsync(
+        Napi::Env env,
+        interop::GPUComputePipelineDescriptor descriptor) override;
+    interop::Promise<interop::Interface<interop::GPURenderPipeline>> createRenderPipelineAsync(
+        Napi::Env env,
+        interop::GPURenderPipelineDescriptor descriptor) override;
+    interop::Interface<interop::GPUCommandEncoder> createCommandEncoder(
+        Napi::Env env,
+        interop::GPUCommandEncoderDescriptor descriptor) override;
+    interop::Interface<interop::GPURenderBundleEncoder> createRenderBundleEncoder(
+        Napi::Env,
+        interop::GPURenderBundleEncoderDescriptor descriptor) override;
+    interop::Interface<interop::GPUQuerySet> createQuerySet(
+        Napi::Env,
+        interop::GPUQuerySetDescriptor descriptor) override;
+    interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> getLost(
+        Napi::Env env) override;
+    void pushErrorScope(Napi::Env, interop::GPUErrorFilter filter) override;
+    interop::Promise<std::optional<interop::GPUError>> popErrorScope(Napi::Env env) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    interop::Interface<interop::EventHandler> getOnuncapturederror(Napi::Env) override;
+    void setOnuncapturederror(Napi::Env, interop::Interface<interop::EventHandler> value) override;
+    void addEventListener(
+        Napi::Env,
+        std::string type,
+        std::optional<interop::Interface<interop::EventListener>> callback,
+        std::optional<std::variant<interop::AddEventListenerOptions, bool>> options) override;
+    void removeEventListener(
+        Napi::Env,
+        std::string type,
+        std::optional<interop::Interface<interop::EventListener>> callback,
+        std::optional<std::variant<interop::EventListenerOptions, bool>> options) override;
+    bool dispatchEvent(Napi::Env, interop::Interface<interop::Event> event) override;
 
-      private:
-        void QueueTick();
+  private:
+    void QueueTick();
 
-        Napi::Env env_;
-        wgpu::Device device_;
-        std::shared_ptr<AsyncRunner> async_;
+    Napi::Env env_;
+    wgpu::Device device_;
+    std::shared_ptr<AsyncRunner> async_;
 
-        // This promise's JS object lives as long as the device because it is stored in .lost
-        // of the wrapper JS object.
-        interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> lost_promise_;
-    };
+    // This promise's JS object lives as long as the device because it is stored in .lost
+    // of the wrapper JS object.
+    interop::Promise<interop::Interface<interop::GPUDeviceLostInfo>> lost_promise_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUPipelineLayout.cpp b/src/dawn/node/binding/GPUPipelineLayout.cpp
index 91e75f6..4d6027c 100644
--- a/src/dawn/node/binding/GPUPipelineLayout.cpp
+++ b/src/dawn/node/binding/GPUPipelineLayout.cpp
@@ -20,19 +20,18 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUPipelineLayout
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUPipelineLayout
+////////////////////////////////////////////////////////////////////////////////
+GPUPipelineLayout::GPUPipelineLayout(wgpu::PipelineLayout layout) : layout_(std::move(layout)) {}
 
-    std::variant<std::string, interop::UndefinedType> GPUPipelineLayout::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUPipelineLayout::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUPipelineLayout::setLabel(Napi::Env,
-                                     std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUPipelineLayout::setLabel(Napi::Env,
+                                 std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUPipelineLayout.h b/src/dawn/node/binding/GPUPipelineLayout.h
index 676ce27..acdd404 100644
--- a/src/dawn/node/binding/GPUPipelineLayout.h
+++ b/src/dawn/node/binding/GPUPipelineLayout.h
@@ -24,24 +24,22 @@
 
 namespace wgpu::binding {
 
-    // GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
-    // wgpu::PipelineLayout.
-    class GPUPipelineLayout final : public interop::GPUPipelineLayout {
-      public:
-        explicit GPUPipelineLayout(wgpu::PipelineLayout layout);
+// GPUPipelineLayout is an implementation of interop::GPUPipelineLayout that wraps a
+// wgpu::PipelineLayout.
+class GPUPipelineLayout final : public interop::GPUPipelineLayout {
+  public:
+    explicit GPUPipelineLayout(wgpu::PipelineLayout layout);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::PipelineLayout&() const {
-            return layout_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::PipelineLayout&() const { return layout_; }
 
-        // interop::GPUPipelineLayout interface compliance
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUPipelineLayout interface compliance
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::PipelineLayout layout_;
-    };
+  private:
+    wgpu::PipelineLayout layout_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUQuerySet.cpp b/src/dawn/node/binding/GPUQuerySet.cpp
index fbf6874..8f1e935 100644
--- a/src/dawn/node/binding/GPUQuerySet.cpp
+++ b/src/dawn/node/binding/GPUQuerySet.cpp
@@ -20,22 +20,21 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUQuerySet
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUQuerySet
+////////////////////////////////////////////////////////////////////////////////
+GPUQuerySet::GPUQuerySet(wgpu::QuerySet query_set) : query_set_(std::move(query_set)) {}
 
-    void GPUQuerySet::destroy(Napi::Env) {
-        query_set_.Destroy();
-    }
+void GPUQuerySet::destroy(Napi::Env) {
+    query_set_.Destroy();
+}
 
-    std::variant<std::string, interop::UndefinedType> GPUQuerySet::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUQuerySet::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUQuerySet::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUQuerySet::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUQuerySet.h b/src/dawn/node/binding/GPUQuerySet.h
index 721fe3a..27944d1 100644
--- a/src/dawn/node/binding/GPUQuerySet.h
+++ b/src/dawn/node/binding/GPUQuerySet.h
@@ -24,24 +24,22 @@
 
 namespace wgpu::binding {
 
-    // GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
-    class GPUQuerySet final : public interop::GPUQuerySet {
-      public:
-        explicit GPUQuerySet(wgpu::QuerySet query_set);
+// GPUQuerySet is an implementation of interop::GPUQuerySet that wraps a wgpu::QuerySet.
+class GPUQuerySet final : public interop::GPUQuerySet {
+  public:
+    explicit GPUQuerySet(wgpu::QuerySet query_set);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::QuerySet&() const {
-            return query_set_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::QuerySet&() const { return query_set_; }
 
-        // interop::GPUQuerySet interface compliance
-        void destroy(Napi::Env) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUQuerySet interface compliance
+    void destroy(Napi::Env) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::QuerySet query_set_;
-    };
+  private:
+    wgpu::QuerySet query_set_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUQueue.cpp b/src/dawn/node/binding/GPUQueue.cpp
index 5afffb3..7650f72 100644
--- a/src/dawn/node/binding/GPUQueue.cpp
+++ b/src/dawn/node/binding/GPUQueue.cpp
@@ -26,137 +26,134 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUQueue
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
-        : queue_(std::move(queue)), async_(std::move(async)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUQueue
+////////////////////////////////////////////////////////////////////////////////
+GPUQueue::GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async)
+    : queue_(std::move(queue)), async_(std::move(async)) {}
+
+void GPUQueue::submit(Napi::Env env,
+                      std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
+    std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
+    for (size_t i = 0; i < commandBuffers.size(); i++) {
+        bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
     }
-
-    void GPUQueue::submit(
-        Napi::Env env,
-        std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) {
-        std::vector<wgpu::CommandBuffer> bufs(commandBuffers.size());
-        for (size_t i = 0; i < commandBuffers.size(); i++) {
-            bufs[i] = *commandBuffers[i].As<GPUCommandBuffer>();
-        }
-        Converter conv(env);
-        uint32_t bufs_size;
-        if (!conv(bufs_size, bufs.size())) {
-            return;
-        }
-        queue_.Submit(bufs_size, bufs.data());
+    Converter conv(env);
+    uint32_t bufs_size;
+    if (!conv(bufs_size, bufs.size())) {
+        return;
     }
+    queue_.Submit(bufs_size, bufs.data());
+}
 
-    interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
-        struct Context {
-            Napi::Env env;
-            interop::Promise<void> promise;
-            AsyncTask task;
-        };
-        auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), AsyncTask(async_)};
-        auto promise = ctx->promise;
+interop::Promise<void> GPUQueue::onSubmittedWorkDone(Napi::Env env) {
+    struct Context {
+        Napi::Env env;
+        interop::Promise<void> promise;
+        AsyncTask task;
+    };
+    auto ctx = new Context{env, interop::Promise<void>(env, PROMISE_INFO), AsyncTask(async_)};
+    auto promise = ctx->promise;
 
-        queue_.OnSubmittedWorkDone(
-            0,
-            [](WGPUQueueWorkDoneStatus status, void* userdata) {
-                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-                if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
-                    Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
-                        .ThrowAsJavaScriptException();
-                }
-                c->promise.Resolve();
-            },
-            ctx);
-
-        return promise;
-    }
-
-    void GPUQueue::writeBuffer(Napi::Env env,
-                               interop::Interface<interop::GPUBuffer> buffer,
-                               interop::GPUSize64 bufferOffset,
-                               interop::BufferSource data,
-                               interop::GPUSize64 dataOffsetElements,
-                               std::optional<interop::GPUSize64> sizeElements) {
-        wgpu::Buffer buf = *buffer.As<GPUBuffer>();
-        Converter::BufferSource src{};
-        Converter conv(env);
-        if (!conv(src, data)) {
-            return;
-        }
-
-        // Note that in the JS semantics of WebGPU, writeBuffer works in number of elements of the
-        // typed arrays.
-        if (dataOffsetElements > uint64_t(src.size / src.bytesPerElement)) {
-            binding::Errors::OperationError(env, "dataOffset is larger than data's size.")
-                .ThrowAsJavaScriptException();
-            return;
-        }
-        uint64_t dataOffset = dataOffsetElements * src.bytesPerElement;
-        src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
-        src.size -= dataOffset;
-
-        // Size defaults to dataSize - dataOffset. Instead of computing in elements, we directly
-        // use it in bytes, and convert the provided value, if any, in bytes.
-        uint64_t size64 = uint64_t(src.size);
-        if (sizeElements.has_value()) {
-            if (sizeElements.value() > std::numeric_limits<uint64_t>::max() / src.bytesPerElement) {
-                binding::Errors::OperationError(env, "size overflows.")
+    queue_.OnSubmittedWorkDone(
+        0,
+        [](WGPUQueueWorkDoneStatus status, void* userdata) {
+            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+            if (status != WGPUQueueWorkDoneStatus::WGPUQueueWorkDoneStatus_Success) {
+                Napi::Error::New(c->env, "onSubmittedWorkDone() failed")
                     .ThrowAsJavaScriptException();
-                return;
             }
-            size64 = sizeElements.value() * src.bytesPerElement;
-        }
+            c->promise.Resolve();
+        },
+        ctx);
 
-        if (size64 > uint64_t(src.size)) {
-            binding::Errors::OperationError(env, "size + dataOffset is larger than data's size.")
-                .ThrowAsJavaScriptException();
+    return promise;
+}
+
+void GPUQueue::writeBuffer(Napi::Env env,
+                           interop::Interface<interop::GPUBuffer> buffer,
+                           interop::GPUSize64 bufferOffset,
+                           interop::BufferSource data,
+                           interop::GPUSize64 dataOffsetElements,
+                           std::optional<interop::GPUSize64> sizeElements) {
+    wgpu::Buffer buf = *buffer.As<GPUBuffer>();
+    Converter::BufferSource src{};
+    Converter conv(env);
+    if (!conv(src, data)) {
+        return;
+    }
+
+    // Note that in the JS semantics of WebGPU, writeBuffer works in number of elements of the
+    // typed arrays.
+    if (dataOffsetElements > uint64_t(src.size / src.bytesPerElement)) {
+        binding::Errors::OperationError(env, "dataOffset is larger than data's size.")
+            .ThrowAsJavaScriptException();
+        return;
+    }
+    uint64_t dataOffset = dataOffsetElements * src.bytesPerElement;
+    src.data = reinterpret_cast<uint8_t*>(src.data) + dataOffset;
+    src.size -= dataOffset;
+
+    // Size defaults to dataSize - dataOffset. Instead of computing in elements, we directly
+    // use it in bytes, and convert the provided value, if any, in bytes.
+    uint64_t size64 = uint64_t(src.size);
+    if (sizeElements.has_value()) {
+        if (sizeElements.value() > std::numeric_limits<uint64_t>::max() / src.bytesPerElement) {
+            binding::Errors::OperationError(env, "size overflows.").ThrowAsJavaScriptException();
             return;
         }
-
-        if (size64 % 4 != 0) {
-            binding::Errors::OperationError(env, "size is not a multiple of 4 bytes.")
-                .ThrowAsJavaScriptException();
-            return;
-        }
-
-        assert(size64 <= std::numeric_limits<size_t>::max());
-        queue_.WriteBuffer(buf, bufferOffset, src.data, static_cast<size_t>(size64));
+        size64 = sizeElements.value() * src.bytesPerElement;
     }
 
-    void GPUQueue::writeTexture(Napi::Env env,
-                                interop::GPUImageCopyTexture destination,
-                                interop::BufferSource data,
-                                interop::GPUImageDataLayout dataLayout,
-                                interop::GPUExtent3D size) {
-        wgpu::ImageCopyTexture dst{};
-        Converter::BufferSource src{};
-        wgpu::TextureDataLayout layout{};
-        wgpu::Extent3D sz{};
-        Converter conv(env);
-        if (!conv(dst, destination) ||    //
-            !conv(src, data) ||           //
-            !conv(layout, dataLayout) ||  //
-            !conv(sz, size)) {
-            return;
-        }
-
-        queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
+    if (size64 > uint64_t(src.size)) {
+        binding::Errors::OperationError(env, "size + dataOffset is larger than data's size.")
+            .ThrowAsJavaScriptException();
+        return;
     }
 
-    void GPUQueue::copyExternalImageToTexture(Napi::Env,
-                                              interop::GPUImageCopyExternalImage source,
-                                              interop::GPUImageCopyTextureTagged destination,
-                                              interop::GPUExtent3D copySize) {
-        UNIMPLEMENTED();
+    if (size64 % 4 != 0) {
+        binding::Errors::OperationError(env, "size is not a multiple of 4 bytes.")
+            .ThrowAsJavaScriptException();
+        return;
     }
 
-    std::variant<std::string, interop::UndefinedType> GPUQueue::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
+    assert(size64 <= std::numeric_limits<size_t>::max());
+    queue_.WriteBuffer(buf, bufferOffset, src.data, static_cast<size_t>(size64));
+}
+
+void GPUQueue::writeTexture(Napi::Env env,
+                            interop::GPUImageCopyTexture destination,
+                            interop::BufferSource data,
+                            interop::GPUImageDataLayout dataLayout,
+                            interop::GPUExtent3D size) {
+    wgpu::ImageCopyTexture dst{};
+    Converter::BufferSource src{};
+    wgpu::TextureDataLayout layout{};
+    wgpu::Extent3D sz{};
+    Converter conv(env);
+    if (!conv(dst, destination) ||    //
+        !conv(src, data) ||           //
+        !conv(layout, dataLayout) ||  //
+        !conv(sz, size)) {
+        return;
     }
 
-    void GPUQueue::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+    queue_.WriteTexture(&dst, src.data, src.size, &layout, &sz);
+}
+
+void GPUQueue::copyExternalImageToTexture(Napi::Env,
+                                          interop::GPUImageCopyExternalImage source,
+                                          interop::GPUImageCopyTextureTagged destination,
+                                          interop::GPUExtent3D copySize) {
+    UNIMPLEMENTED();
+}
+
+std::variant<std::string, interop::UndefinedType> GPUQueue::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
+
+void GPUQueue::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUQueue.h b/src/dawn/node/binding/GPUQueue.h
index b579497..cffb491 100644
--- a/src/dawn/node/binding/GPUQueue.h
+++ b/src/dawn/node/binding/GPUQueue.h
@@ -27,38 +27,37 @@
 
 namespace wgpu::binding {
 
-    // GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
-    class GPUQueue final : public interop::GPUQueue {
-      public:
-        GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
+// GPUQueue is an implementation of interop::GPUQueue that wraps a wgpu::Queue.
+class GPUQueue final : public interop::GPUQueue {
+  public:
+    GPUQueue(wgpu::Queue queue, std::shared_ptr<AsyncRunner> async);
 
-        // interop::GPUQueue interface compliance
-        void submit(
-            Napi::Env,
-            std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
-        interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
-        void writeBuffer(Napi::Env,
-                         interop::Interface<interop::GPUBuffer> buffer,
-                         interop::GPUSize64 bufferOffset,
-                         interop::BufferSource data,
-                         interop::GPUSize64 dataOffset,
-                         std::optional<interop::GPUSize64> size) override;
-        void writeTexture(Napi::Env,
-                          interop::GPUImageCopyTexture destination,
-                          interop::BufferSource data,
-                          interop::GPUImageDataLayout dataLayout,
-                          interop::GPUExtent3D size) override;
-        void copyExternalImageToTexture(Napi::Env,
-                                        interop::GPUImageCopyExternalImage source,
-                                        interop::GPUImageCopyTextureTagged destination,
-                                        interop::GPUExtent3D copySize) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUQueue interface compliance
+    void submit(Napi::Env,
+                std::vector<interop::Interface<interop::GPUCommandBuffer>> commandBuffers) override;
+    interop::Promise<void> onSubmittedWorkDone(Napi::Env) override;
+    void writeBuffer(Napi::Env,
+                     interop::Interface<interop::GPUBuffer> buffer,
+                     interop::GPUSize64 bufferOffset,
+                     interop::BufferSource data,
+                     interop::GPUSize64 dataOffset,
+                     std::optional<interop::GPUSize64> size) override;
+    void writeTexture(Napi::Env,
+                      interop::GPUImageCopyTexture destination,
+                      interop::BufferSource data,
+                      interop::GPUImageDataLayout dataLayout,
+                      interop::GPUExtent3D size) override;
+    void copyExternalImageToTexture(Napi::Env,
+                                    interop::GPUImageCopyExternalImage source,
+                                    interop::GPUImageCopyTextureTagged destination,
+                                    interop::GPUExtent3D copySize) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::Queue queue_;
-        std::shared_ptr<AsyncRunner> async_;
-    };
+  private:
+    wgpu::Queue queue_;
+    std::shared_ptr<AsyncRunner> async_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPURenderBundle.cpp b/src/dawn/node/binding/GPURenderBundle.cpp
index 9ebc2c4..ef3a0da 100644
--- a/src/dawn/node/binding/GPURenderBundle.cpp
+++ b/src/dawn/node/binding/GPURenderBundle.cpp
@@ -23,19 +23,17 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPURenderBundle
-    ////////////////////////////////////////////////////////////////////////////////
-    GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderBundle
+////////////////////////////////////////////////////////////////////////////////
+GPURenderBundle::GPURenderBundle(wgpu::RenderBundle bundle) : bundle_(std::move(bundle)) {}
 
-    std::variant<std::string, interop::UndefinedType> GPURenderBundle::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPURenderBundle::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPURenderBundle::setLabel(Napi::Env,
-                                   std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPURenderBundle::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderBundle.h b/src/dawn/node/binding/GPURenderBundle.h
index dfdc127..bd31f6b 100644
--- a/src/dawn/node/binding/GPURenderBundle.h
+++ b/src/dawn/node/binding/GPURenderBundle.h
@@ -24,24 +24,22 @@
 
 namespace wgpu::binding {
 
-    // GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
-    // wgpu::RenderBundle.
-    class GPURenderBundle final : public interop::GPURenderBundle {
-      public:
-        explicit GPURenderBundle(wgpu::RenderBundle bundle);
+// GPURenderBundle is an implementation of interop::GPURenderBundle that wraps a
+// wgpu::RenderBundle.
+class GPURenderBundle final : public interop::GPURenderBundle {
+  public:
+    explicit GPURenderBundle(wgpu::RenderBundle bundle);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::RenderBundle&() const {
-            return bundle_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::RenderBundle&() const { return bundle_; }
 
-        // interop::GPURenderBundle interface compliance
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPURenderBundle interface compliance
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::RenderBundle bundle_;
-    };
+  private:
+    wgpu::RenderBundle bundle_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPURenderBundleEncoder.cpp b/src/dawn/node/binding/GPURenderBundleEncoder.cpp
index b48d649..f07db2f 100644
--- a/src/dawn/node/binding/GPURenderBundleEncoder.cpp
+++ b/src/dawn/node/binding/GPURenderBundleEncoder.cpp
@@ -25,171 +25,169 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPURenderBundleEncoder
-    ////////////////////////////////////////////////////////////////////////////////
-    GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
-        : enc_(std::move(enc)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderBundleEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPURenderBundleEncoder::GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc)
+    : enc_(std::move(enc)) {}
+
+interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
+    Napi::Env env,
+    interop::GPURenderBundleDescriptor descriptor) {
+    wgpu::RenderBundleDescriptor desc{};
+
+    return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
+}
+
+void GPURenderBundleEncoder::setBindGroup(
+    Napi::Env env,
+    interop::GPUIndex32 index,
+    interop::Interface<interop::GPUBindGroup> bindGroup,
+    std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+    Converter conv(env);
+
+    wgpu::BindGroup bg{};
+    uint32_t* offsets = nullptr;
+    uint32_t num_offsets = 0;
+    if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+        return;
     }
 
-    interop::Interface<interop::GPURenderBundle> GPURenderBundleEncoder::finish(
-        Napi::Env env,
-        interop::GPURenderBundleDescriptor descriptor) {
-        wgpu::RenderBundleDescriptor desc{};
+    enc_.SetBindGroup(index, bg, num_offsets, offsets);
+}
 
-        return interop::GPURenderBundle::Create<GPURenderBundle>(env, enc_.Finish(&desc));
+void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
+                                          interop::GPUIndex32 index,
+                                          interop::Interface<interop::GPUBindGroup> bindGroup,
+                                          interop::Uint32Array dynamicOffsetsData,
+                                          interop::GPUSize64 dynamicOffsetsDataStart,
+                                          interop::GPUSize32 dynamicOffsetsDataLength) {
+    Converter conv(env);
+
+    wgpu::BindGroup bg{};
+    if (!conv(bg, bindGroup)) {
+        return;
     }
 
-    void GPURenderBundleEncoder::setBindGroup(
-        Napi::Env env,
-        interop::GPUIndex32 index,
-        interop::Interface<interop::GPUBindGroup> bindGroup,
-        std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
-        Converter conv(env);
+    enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+                      dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+}
 
-        wgpu::BindGroup bg{};
-        uint32_t* offsets = nullptr;
-        uint32_t num_offsets = 0;
-        if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
-            return;
-        }
+void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+    enc_.PushDebugGroup(groupLabel.c_str());
+}
 
-        enc_.SetBindGroup(index, bg, num_offsets, offsets);
+void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
+    enc_.PopDebugGroup();
+}
+
+void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+    enc_.InsertDebugMarker(markerLabel.c_str());
+}
+
+void GPURenderBundleEncoder::setPipeline(Napi::Env env,
+                                         interop::Interface<interop::GPURenderPipeline> pipeline) {
+    Converter conv(env);
+
+    wgpu::RenderPipeline p{};
+    if (!conv(p, pipeline)) {
+        return;
     }
 
-    void GPURenderBundleEncoder::setBindGroup(Napi::Env env,
-                                              interop::GPUIndex32 index,
-                                              interop::Interface<interop::GPUBindGroup> bindGroup,
-                                              interop::Uint32Array dynamicOffsetsData,
-                                              interop::GPUSize64 dynamicOffsetsDataStart,
-                                              interop::GPUSize32 dynamicOffsetsDataLength) {
-        Converter conv(env);
+    enc_.SetPipeline(p);
+}
 
-        wgpu::BindGroup bg{};
-        if (!conv(bg, bindGroup)) {
-            return;
-        }
+void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
+                                            interop::Interface<interop::GPUBuffer> buffer,
+                                            interop::GPUIndexFormat indexFormat,
+                                            interop::GPUSize64 offset,
+                                            std::optional<interop::GPUSize64> size) {
+    Converter conv(env);
 
-        enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
-                          dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+    wgpu::Buffer b{};
+    wgpu::IndexFormat f{};
+    uint64_t o = 0;
+    uint64_t s = wgpu::kWholeSize;
+    if (!conv(b, buffer) ||       //
+        !conv(f, indexFormat) ||  //
+        !conv(o, offset) ||       //
+        !conv(s, size)) {
+        return;
     }
 
-    void GPURenderBundleEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
-        enc_.PushDebugGroup(groupLabel.c_str());
+    enc_.SetIndexBuffer(b, f, o, s);
+}
+
+void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
+                                             interop::GPUIndex32 slot,
+                                             interop::Interface<interop::GPUBuffer> buffer,
+                                             interop::GPUSize64 offset,
+                                             std::optional<interop::GPUSize64> size) {
+    Converter conv(env);
+
+    wgpu::Buffer b{};
+    uint64_t s = wgpu::kWholeSize;
+    if (!conv(b, buffer) || !conv(s, size)) {
+        return;
     }
+    enc_.SetVertexBuffer(slot, b, offset, s);
+}
 
-    void GPURenderBundleEncoder::popDebugGroup(Napi::Env) {
-        enc_.PopDebugGroup();
+void GPURenderBundleEncoder::draw(Napi::Env env,
+                                  interop::GPUSize32 vertexCount,
+                                  interop::GPUSize32 instanceCount,
+                                  interop::GPUSize32 firstVertex,
+                                  interop::GPUSize32 firstInstance) {
+    enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+}
+
+void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
+                                         interop::GPUSize32 indexCount,
+                                         interop::GPUSize32 instanceCount,
+                                         interop::GPUSize32 firstIndex,
+                                         interop::GPUSignedOffset32 baseVertex,
+                                         interop::GPUSize32 firstInstance) {
+    enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+}
+
+void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
+                                          interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                          interop::GPUSize64 indirectOffset) {
+    Converter conv(env);
+
+    wgpu::Buffer b{};
+    uint64_t o = 0;
+
+    if (!conv(b, indirectBuffer) ||  //
+        !conv(o, indirectOffset)) {
+        return;
     }
+    enc_.DrawIndirect(b, o);
+}
 
-    void GPURenderBundleEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
-        enc_.InsertDebugMarker(markerLabel.c_str());
+void GPURenderBundleEncoder::drawIndexedIndirect(
+    Napi::Env env,
+    interop::Interface<interop::GPUBuffer> indirectBuffer,
+    interop::GPUSize64 indirectOffset) {
+    Converter conv(env);
+
+    wgpu::Buffer b{};
+    uint64_t o = 0;
+
+    if (!conv(b, indirectBuffer) ||  //
+        !conv(o, indirectOffset)) {
+        return;
     }
+    enc_.DrawIndexedIndirect(b, o);
+}
 
-    void GPURenderBundleEncoder::setPipeline(
-        Napi::Env env,
-        interop::Interface<interop::GPURenderPipeline> pipeline) {
-        Converter conv(env);
+std::variant<std::string, interop::UndefinedType> GPURenderBundleEncoder::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-        wgpu::RenderPipeline p{};
-        if (!conv(p, pipeline)) {
-            return;
-        }
-
-        enc_.SetPipeline(p);
-    }
-
-    void GPURenderBundleEncoder::setIndexBuffer(Napi::Env env,
-                                                interop::Interface<interop::GPUBuffer> buffer,
-                                                interop::GPUIndexFormat indexFormat,
-                                                interop::GPUSize64 offset,
-                                                std::optional<interop::GPUSize64> size) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        wgpu::IndexFormat f{};
-        uint64_t o = 0;
-        uint64_t s = wgpu::kWholeSize;
-        if (!conv(b, buffer) ||       //
-            !conv(f, indexFormat) ||  //
-            !conv(o, offset) ||       //
-            !conv(s, size)) {
-            return;
-        }
-
-        enc_.SetIndexBuffer(b, f, o, s);
-    }
-
-    void GPURenderBundleEncoder::setVertexBuffer(Napi::Env env,
-                                                 interop::GPUIndex32 slot,
-                                                 interop::Interface<interop::GPUBuffer> buffer,
-                                                 interop::GPUSize64 offset,
-                                                 std::optional<interop::GPUSize64> size) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        uint64_t s = wgpu::kWholeSize;
-        if (!conv(b, buffer) || !conv(s, size)) {
-            return;
-        }
-        enc_.SetVertexBuffer(slot, b, offset, s);
-    }
-
-    void GPURenderBundleEncoder::draw(Napi::Env env,
-                                      interop::GPUSize32 vertexCount,
-                                      interop::GPUSize32 instanceCount,
-                                      interop::GPUSize32 firstVertex,
-                                      interop::GPUSize32 firstInstance) {
-        enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
-    }
-
-    void GPURenderBundleEncoder::drawIndexed(Napi::Env env,
-                                             interop::GPUSize32 indexCount,
-                                             interop::GPUSize32 instanceCount,
-                                             interop::GPUSize32 firstIndex,
-                                             interop::GPUSignedOffset32 baseVertex,
-                                             interop::GPUSize32 firstInstance) {
-        enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
-    }
-
-    void GPURenderBundleEncoder::drawIndirect(Napi::Env env,
-                                              interop::Interface<interop::GPUBuffer> indirectBuffer,
-                                              interop::GPUSize64 indirectOffset) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        uint64_t o = 0;
-
-        if (!conv(b, indirectBuffer) ||  //
-            !conv(o, indirectOffset)) {
-            return;
-        }
-        enc_.DrawIndirect(b, o);
-    }
-
-    void GPURenderBundleEncoder::drawIndexedIndirect(
-        Napi::Env env,
-        interop::Interface<interop::GPUBuffer> indirectBuffer,
-        interop::GPUSize64 indirectOffset) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        uint64_t o = 0;
-
-        if (!conv(b, indirectBuffer) ||  //
-            !conv(o, indirectOffset)) {
-            return;
-        }
-        enc_.DrawIndexedIndirect(b, o);
-    }
-
-    std::variant<std::string, interop::UndefinedType> GPURenderBundleEncoder::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
-
-    void GPURenderBundleEncoder::setLabel(Napi::Env,
-                                          std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPURenderBundleEncoder::setLabel(Napi::Env,
+                                      std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderBundleEncoder.h b/src/dawn/node/binding/GPURenderBundleEncoder.h
index d90f081..c65e60a 100644
--- a/src/dawn/node/binding/GPURenderBundleEncoder.h
+++ b/src/dawn/node/binding/GPURenderBundleEncoder.h
@@ -25,64 +25,63 @@
 
 namespace wgpu::binding {
 
-    // GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
-    // wgpu::RenderBundleEncoder.
-    class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
-      public:
-        explicit GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
+// GPURenderBundleEncoder is an implementation of interop::GPURenderBundleEncoder that wraps a
+// wgpu::RenderBundleEncoder.
+class GPURenderBundleEncoder final : public interop::GPURenderBundleEncoder {
+  public:
+    explicit GPURenderBundleEncoder(wgpu::RenderBundleEncoder enc);
 
-        // interop::GPURenderBundleEncoder interface compliance
-        interop::Interface<interop::GPURenderBundle> finish(
-            Napi::Env,
-            interop::GPURenderBundleDescriptor descriptor) override;
-        void setBindGroup(Napi::Env,
-                          interop::GPUIndex32 index,
-                          interop::Interface<interop::GPUBindGroup> bindGroup,
-                          std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
-        void setBindGroup(Napi::Env,
-                          interop::GPUIndex32 index,
-                          interop::Interface<interop::GPUBindGroup> bindGroup,
-                          interop::Uint32Array dynamicOffsetsData,
-                          interop::GPUSize64 dynamicOffsetsDataStart,
-                          interop::GPUSize32 dynamicOffsetsDataLength) override;
-        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
-        void popDebugGroup(Napi::Env) override;
-        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
-        void setPipeline(Napi::Env,
-                         interop::Interface<interop::GPURenderPipeline> pipeline) override;
-        void setIndexBuffer(Napi::Env,
-                            interop::Interface<interop::GPUBuffer> buffer,
-                            interop::GPUIndexFormat indexFormat,
-                            interop::GPUSize64 offset,
-                            std::optional<interop::GPUSize64> size) override;
-        void setVertexBuffer(Napi::Env,
-                             interop::GPUIndex32 slot,
-                             interop::Interface<interop::GPUBuffer> buffer,
-                             interop::GPUSize64 offset,
-                             std::optional<interop::GPUSize64> size) override;
-        void draw(Napi::Env,
-                  interop::GPUSize32 vertexCount,
-                  interop::GPUSize32 instanceCount,
-                  interop::GPUSize32 firstVertex,
-                  interop::GPUSize32 firstInstance) override;
-        void drawIndexed(Napi::Env,
-                         interop::GPUSize32 indexCount,
-                         interop::GPUSize32 instanceCount,
-                         interop::GPUSize32 firstIndex,
-                         interop::GPUSignedOffset32 baseVertex,
-                         interop::GPUSize32 firstInstance) override;
-        void drawIndirect(Napi::Env,
-                          interop::Interface<interop::GPUBuffer> indirectBuffer,
-                          interop::GPUSize64 indirectOffset) override;
-        void drawIndexedIndirect(Napi::Env,
-                                 interop::Interface<interop::GPUBuffer> indirectBuffer,
-                                 interop::GPUSize64 indirectOffset) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPURenderBundleEncoder interface compliance
+    interop::Interface<interop::GPURenderBundle> finish(
+        Napi::Env,
+        interop::GPURenderBundleDescriptor descriptor) override;
+    void setBindGroup(Napi::Env,
+                      interop::GPUIndex32 index,
+                      interop::Interface<interop::GPUBindGroup> bindGroup,
+                      std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+    void setBindGroup(Napi::Env,
+                      interop::GPUIndex32 index,
+                      interop::Interface<interop::GPUBindGroup> bindGroup,
+                      interop::Uint32Array dynamicOffsetsData,
+                      interop::GPUSize64 dynamicOffsetsDataStart,
+                      interop::GPUSize32 dynamicOffsetsDataLength) override;
+    void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+    void popDebugGroup(Napi::Env) override;
+    void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+    void setPipeline(Napi::Env, interop::Interface<interop::GPURenderPipeline> pipeline) override;
+    void setIndexBuffer(Napi::Env,
+                        interop::Interface<interop::GPUBuffer> buffer,
+                        interop::GPUIndexFormat indexFormat,
+                        interop::GPUSize64 offset,
+                        std::optional<interop::GPUSize64> size) override;
+    void setVertexBuffer(Napi::Env,
+                         interop::GPUIndex32 slot,
+                         interop::Interface<interop::GPUBuffer> buffer,
+                         interop::GPUSize64 offset,
+                         std::optional<interop::GPUSize64> size) override;
+    void draw(Napi::Env,
+              interop::GPUSize32 vertexCount,
+              interop::GPUSize32 instanceCount,
+              interop::GPUSize32 firstVertex,
+              interop::GPUSize32 firstInstance) override;
+    void drawIndexed(Napi::Env,
+                     interop::GPUSize32 indexCount,
+                     interop::GPUSize32 instanceCount,
+                     interop::GPUSize32 firstIndex,
+                     interop::GPUSignedOffset32 baseVertex,
+                     interop::GPUSize32 firstInstance) override;
+    void drawIndirect(Napi::Env,
+                      interop::Interface<interop::GPUBuffer> indirectBuffer,
+                      interop::GPUSize64 indirectOffset) override;
+    void drawIndexedIndirect(Napi::Env,
+                             interop::Interface<interop::GPUBuffer> indirectBuffer,
+                             interop::GPUSize64 indirectOffset) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::RenderBundleEncoder enc_;
-    };
+  private:
+    wgpu::RenderBundleEncoder enc_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPURenderPassEncoder.cpp b/src/dawn/node/binding/GPURenderPassEncoder.cpp
index 0edbb90..46997dc 100644
--- a/src/dawn/node/binding/GPURenderPassEncoder.cpp
+++ b/src/dawn/node/binding/GPURenderPassEncoder.cpp
@@ -26,232 +26,228 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPURenderPassEncoder
-    ////////////////////////////////////////////////////////////////////////////////
-    GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderPassEncoder
+////////////////////////////////////////////////////////////////////////////////
+GPURenderPassEncoder::GPURenderPassEncoder(wgpu::RenderPassEncoder enc) : enc_(std::move(enc)) {}
+
+void GPURenderPassEncoder::setViewport(Napi::Env,
+                                       float x,
+                                       float y,
+                                       float width,
+                                       float height,
+                                       float minDepth,
+                                       float maxDepth) {
+    enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
+}
+
+void GPURenderPassEncoder::setScissorRect(Napi::Env,
+                                          interop::GPUIntegerCoordinate x,
+                                          interop::GPUIntegerCoordinate y,
+                                          interop::GPUIntegerCoordinate width,
+                                          interop::GPUIntegerCoordinate height) {
+    enc_.SetScissorRect(x, y, width, height);
+}
+
+void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
+    Converter conv(env);
+
+    wgpu::Color c{};
+    if (!conv(c, color)) {
+        return;
     }
 
-    void GPURenderPassEncoder::setViewport(Napi::Env,
-                                           float x,
-                                           float y,
-                                           float width,
-                                           float height,
-                                           float minDepth,
-                                           float maxDepth) {
-        enc_.SetViewport(x, y, width, height, minDepth, maxDepth);
+    enc_.SetBlendConstant(&c);
+}
+
+void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
+    enc_.SetStencilReference(reference);
+}
+
+void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
+    enc_.BeginOcclusionQuery(queryIndex);
+}
+
+void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
+    enc_.EndOcclusionQuery();
+}
+
+void GPURenderPassEncoder::executeBundles(
+    Napi::Env env,
+    std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
+    Converter conv(env);
+
+    wgpu::RenderBundle* bundles = nullptr;
+    uint32_t bundleCount = 0;
+    if (!conv(bundles, bundleCount, bundles_in)) {
+        return;
     }
 
-    void GPURenderPassEncoder::setScissorRect(Napi::Env,
-                                              interop::GPUIntegerCoordinate x,
-                                              interop::GPUIntegerCoordinate y,
-                                              interop::GPUIntegerCoordinate width,
-                                              interop::GPUIntegerCoordinate height) {
-        enc_.SetScissorRect(x, y, width, height);
+    enc_.ExecuteBundles(bundleCount, bundles);
+}
+
+void GPURenderPassEncoder::end(Napi::Env) {
+    enc_.End();
+}
+
+void GPURenderPassEncoder::setBindGroup(
+    Napi::Env env,
+    interop::GPUIndex32 index,
+    interop::Interface<interop::GPUBindGroup> bindGroup,
+    std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
+    Converter conv(env);
+
+    wgpu::BindGroup bg{};
+    uint32_t* offsets = nullptr;
+    uint32_t num_offsets = 0;
+    if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
+        return;
     }
 
-    void GPURenderPassEncoder::setBlendConstant(Napi::Env env, interop::GPUColor color) {
-        Converter conv(env);
+    enc_.SetBindGroup(index, bg, num_offsets, offsets);
+}
 
-        wgpu::Color c{};
-        if (!conv(c, color)) {
-            return;
-        }
+void GPURenderPassEncoder::setBindGroup(Napi::Env env,
+                                        interop::GPUIndex32 index,
+                                        interop::Interface<interop::GPUBindGroup> bindGroup,
+                                        interop::Uint32Array dynamicOffsetsData,
+                                        interop::GPUSize64 dynamicOffsetsDataStart,
+                                        interop::GPUSize32 dynamicOffsetsDataLength) {
+    Converter conv(env);
 
-        enc_.SetBlendConstant(&c);
+    wgpu::BindGroup bg{};
+    if (!conv(bg, bindGroup)) {
+        return;
     }
 
-    void GPURenderPassEncoder::setStencilReference(Napi::Env, interop::GPUStencilValue reference) {
-        enc_.SetStencilReference(reference);
+    if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
+        Napi::RangeError::New(env, "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
+            .ThrowAsJavaScriptException();
+        return;
     }
 
-    void GPURenderPassEncoder::beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) {
-        enc_.BeginOcclusionQuery(queryIndex);
+    if (dynamicOffsetsDataLength > dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
+        Napi::RangeError::New(env,
+                              "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
+                              "bound of dynamicOffsetData")
+            .ThrowAsJavaScriptException();
+        return;
     }
 
-    void GPURenderPassEncoder::endOcclusionQuery(Napi::Env) {
-        enc_.EndOcclusionQuery();
+    enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
+                      dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+}
+
+void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
+    enc_.PushDebugGroup(groupLabel.c_str());
+}
+
+void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
+    enc_.PopDebugGroup();
+}
+
+void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
+    enc_.InsertDebugMarker(markerLabel.c_str());
+}
+
+void GPURenderPassEncoder::setPipeline(Napi::Env env,
+                                       interop::Interface<interop::GPURenderPipeline> pipeline) {
+    Converter conv(env);
+    wgpu::RenderPipeline rp{};
+    if (!conv(rp, pipeline)) {
+        return;
     }
+    enc_.SetPipeline(rp);
+}
 
-    void GPURenderPassEncoder::executeBundles(
-        Napi::Env env,
-        std::vector<interop::Interface<interop::GPURenderBundle>> bundles_in) {
-        Converter conv(env);
+void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
+                                          interop::Interface<interop::GPUBuffer> buffer,
+                                          interop::GPUIndexFormat indexFormat,
+                                          interop::GPUSize64 offset,
+                                          std::optional<interop::GPUSize64> size) {
+    Converter conv(env);
 
-        wgpu::RenderBundle* bundles = nullptr;
-        uint32_t bundleCount = 0;
-        if (!conv(bundles, bundleCount, bundles_in)) {
-            return;
-        }
-
-        enc_.ExecuteBundles(bundleCount, bundles);
+    wgpu::Buffer b{};
+    wgpu::IndexFormat f;
+    uint64_t s = wgpu::kWholeSize;
+    if (!conv(b, buffer) ||       //
+        !conv(f, indexFormat) ||  //
+        !conv(s, size)) {
+        return;
     }
+    enc_.SetIndexBuffer(b, f, offset, s);
+}
 
-    void GPURenderPassEncoder::end(Napi::Env) {
-        enc_.End();
+void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
+                                           interop::GPUIndex32 slot,
+                                           interop::Interface<interop::GPUBuffer> buffer,
+                                           interop::GPUSize64 offset,
+                                           std::optional<interop::GPUSize64> size) {
+    Converter conv(env);
+
+    wgpu::Buffer b{};
+    uint64_t s = wgpu::kWholeSize;
+    if (!conv(b, buffer) || !conv(s, size)) {
+        return;
     }
+    enc_.SetVertexBuffer(slot, b, offset, s);
+}
 
-    void GPURenderPassEncoder::setBindGroup(
-        Napi::Env env,
-        interop::GPUIndex32 index,
-        interop::Interface<interop::GPUBindGroup> bindGroup,
-        std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) {
-        Converter conv(env);
+void GPURenderPassEncoder::draw(Napi::Env env,
+                                interop::GPUSize32 vertexCount,
+                                interop::GPUSize32 instanceCount,
+                                interop::GPUSize32 firstVertex,
+                                interop::GPUSize32 firstInstance) {
+    enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+}
 
-        wgpu::BindGroup bg{};
-        uint32_t* offsets = nullptr;
-        uint32_t num_offsets = 0;
-        if (!conv(bg, bindGroup) || !conv(offsets, num_offsets, dynamicOffsets)) {
-            return;
-        }
+void GPURenderPassEncoder::drawIndexed(Napi::Env env,
+                                       interop::GPUSize32 indexCount,
+                                       interop::GPUSize32 instanceCount,
+                                       interop::GPUSize32 firstIndex,
+                                       interop::GPUSignedOffset32 baseVertex,
+                                       interop::GPUSize32 firstInstance) {
+    enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+}
 
-        enc_.SetBindGroup(index, bg, num_offsets, offsets);
+void GPURenderPassEncoder::drawIndirect(Napi::Env env,
+                                        interop::Interface<interop::GPUBuffer> indirectBuffer,
+                                        interop::GPUSize64 indirectOffset) {
+    Converter conv(env);
+
+    wgpu::Buffer b{};
+    uint64_t o = 0;
+
+    if (!conv(b, indirectBuffer) ||  //
+        !conv(o, indirectOffset)) {
+        return;
     }
+    enc_.DrawIndirect(b, o);
+}
 
-    void GPURenderPassEncoder::setBindGroup(Napi::Env env,
-                                            interop::GPUIndex32 index,
-                                            interop::Interface<interop::GPUBindGroup> bindGroup,
-                                            interop::Uint32Array dynamicOffsetsData,
-                                            interop::GPUSize64 dynamicOffsetsDataStart,
-                                            interop::GPUSize32 dynamicOffsetsDataLength) {
-        Converter conv(env);
+void GPURenderPassEncoder::drawIndexedIndirect(
+    Napi::Env env,
+    interop::Interface<interop::GPUBuffer> indirectBuffer,
+    interop::GPUSize64 indirectOffset) {
+    Converter conv(env);
 
-        wgpu::BindGroup bg{};
-        if (!conv(bg, bindGroup)) {
-            return;
-        }
+    wgpu::Buffer b{};
+    uint64_t o = 0;
 
-        if (dynamicOffsetsDataStart > dynamicOffsetsData.ElementLength()) {
-            Napi::RangeError::New(env,
-                                  "dynamicOffsetsDataStart is out of bound of dynamicOffsetData")
-                .ThrowAsJavaScriptException();
-            return;
-        }
-
-        if (dynamicOffsetsDataLength >
-            dynamicOffsetsData.ElementLength() - dynamicOffsetsDataStart) {
-            Napi::RangeError::New(env,
-                                  "dynamicOffsetsDataLength + dynamicOffsetsDataStart is out of "
-                                  "bound of dynamicOffsetData")
-                .ThrowAsJavaScriptException();
-            return;
-        }
-
-        enc_.SetBindGroup(index, bg, dynamicOffsetsDataLength,
-                          dynamicOffsetsData.Data() + dynamicOffsetsDataStart);
+    if (!conv(b, indirectBuffer) ||  //
+        !conv(o, indirectOffset)) {
+        return;
     }
+    enc_.DrawIndexedIndirect(b, o);
+}
 
-    void GPURenderPassEncoder::pushDebugGroup(Napi::Env, std::string groupLabel) {
-        enc_.PushDebugGroup(groupLabel.c_str());
-    }
+std::variant<std::string, interop::UndefinedType> GPURenderPassEncoder::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPURenderPassEncoder::popDebugGroup(Napi::Env) {
-        enc_.PopDebugGroup();
-    }
-
-    void GPURenderPassEncoder::insertDebugMarker(Napi::Env, std::string markerLabel) {
-        enc_.InsertDebugMarker(markerLabel.c_str());
-    }
-
-    void GPURenderPassEncoder::setPipeline(
-        Napi::Env env,
-        interop::Interface<interop::GPURenderPipeline> pipeline) {
-        Converter conv(env);
-        wgpu::RenderPipeline rp{};
-        if (!conv(rp, pipeline)) {
-            return;
-        }
-        enc_.SetPipeline(rp);
-    }
-
-    void GPURenderPassEncoder::setIndexBuffer(Napi::Env env,
-                                              interop::Interface<interop::GPUBuffer> buffer,
-                                              interop::GPUIndexFormat indexFormat,
-                                              interop::GPUSize64 offset,
-                                              std::optional<interop::GPUSize64> size) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        wgpu::IndexFormat f;
-        uint64_t s = wgpu::kWholeSize;
-        if (!conv(b, buffer) ||       //
-            !conv(f, indexFormat) ||  //
-            !conv(s, size)) {
-            return;
-        }
-        enc_.SetIndexBuffer(b, f, offset, s);
-    }
-
-    void GPURenderPassEncoder::setVertexBuffer(Napi::Env env,
-                                               interop::GPUIndex32 slot,
-                                               interop::Interface<interop::GPUBuffer> buffer,
-                                               interop::GPUSize64 offset,
-                                               std::optional<interop::GPUSize64> size) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        uint64_t s = wgpu::kWholeSize;
-        if (!conv(b, buffer) || !conv(s, size)) {
-            return;
-        }
-        enc_.SetVertexBuffer(slot, b, offset, s);
-    }
-
-    void GPURenderPassEncoder::draw(Napi::Env env,
-                                    interop::GPUSize32 vertexCount,
-                                    interop::GPUSize32 instanceCount,
-                                    interop::GPUSize32 firstVertex,
-                                    interop::GPUSize32 firstInstance) {
-        enc_.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
-    }
-
-    void GPURenderPassEncoder::drawIndexed(Napi::Env env,
-                                           interop::GPUSize32 indexCount,
-                                           interop::GPUSize32 instanceCount,
-                                           interop::GPUSize32 firstIndex,
-                                           interop::GPUSignedOffset32 baseVertex,
-                                           interop::GPUSize32 firstInstance) {
-        enc_.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
-    }
-
-    void GPURenderPassEncoder::drawIndirect(Napi::Env env,
-                                            interop::Interface<interop::GPUBuffer> indirectBuffer,
-                                            interop::GPUSize64 indirectOffset) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        uint64_t o = 0;
-
-        if (!conv(b, indirectBuffer) ||  //
-            !conv(o, indirectOffset)) {
-            return;
-        }
-        enc_.DrawIndirect(b, o);
-    }
-
-    void GPURenderPassEncoder::drawIndexedIndirect(
-        Napi::Env env,
-        interop::Interface<interop::GPUBuffer> indirectBuffer,
-        interop::GPUSize64 indirectOffset) {
-        Converter conv(env);
-
-        wgpu::Buffer b{};
-        uint64_t o = 0;
-
-        if (!conv(b, indirectBuffer) ||  //
-            !conv(o, indirectOffset)) {
-            return;
-        }
-        enc_.DrawIndexedIndirect(b, o);
-    }
-
-    std::variant<std::string, interop::UndefinedType> GPURenderPassEncoder::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
-
-    void GPURenderPassEncoder::setLabel(Napi::Env,
-                                        std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPURenderPassEncoder::setLabel(Napi::Env,
+                                    std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderPassEncoder.h b/src/dawn/node/binding/GPURenderPassEncoder.h
index 73d07bc..7fed340 100644
--- a/src/dawn/node/binding/GPURenderPassEncoder.h
+++ b/src/dawn/node/binding/GPURenderPassEncoder.h
@@ -25,86 +25,82 @@
 
 namespace wgpu::binding {
 
-    // GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
-    // wgpu::RenderPassEncoder.
-    class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
-      public:
-        explicit GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
+// GPURenderPassEncoder is an implementation of interop::GPURenderPassEncoder that wraps a
+// wgpu::RenderPassEncoder.
+class GPURenderPassEncoder final : public interop::GPURenderPassEncoder {
+  public:
+    explicit GPURenderPassEncoder(wgpu::RenderPassEncoder enc);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::RenderPassEncoder&() const {
-            return enc_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::RenderPassEncoder&() const { return enc_; }
 
-        // interop::GPURenderPassEncoder interface compliance
-        void setViewport(Napi::Env,
-                         float x,
-                         float y,
-                         float width,
-                         float height,
-                         float minDepth,
-                         float maxDepth) override;
-        void setScissorRect(Napi::Env,
-                            interop::GPUIntegerCoordinate x,
-                            interop::GPUIntegerCoordinate y,
-                            interop::GPUIntegerCoordinate width,
-                            interop::GPUIntegerCoordinate height) override;
-        void setBlendConstant(Napi::Env, interop::GPUColor color) override;
-        void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
-        void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
-        void endOcclusionQuery(Napi::Env) override;
-        void executeBundles(
-            Napi::Env,
-            std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
-        void end(Napi::Env) override;
-        void setBindGroup(Napi::Env,
-                          interop::GPUIndex32 index,
-                          interop::Interface<interop::GPUBindGroup> bindGroup,
-                          std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
-        void setBindGroup(Napi::Env,
-                          interop::GPUIndex32 index,
-                          interop::Interface<interop::GPUBindGroup> bindGroup,
-                          interop::Uint32Array dynamicOffsetsData,
-                          interop::GPUSize64 dynamicOffsetsDataStart,
-                          interop::GPUSize32 dynamicOffsetsDataLength) override;
-        void pushDebugGroup(Napi::Env, std::string groupLabel) override;
-        void popDebugGroup(Napi::Env) override;
-        void insertDebugMarker(Napi::Env, std::string markerLabel) override;
-        void setPipeline(Napi::Env,
-                         interop::Interface<interop::GPURenderPipeline> pipeline) override;
-        void setIndexBuffer(Napi::Env,
-                            interop::Interface<interop::GPUBuffer> buffer,
-                            interop::GPUIndexFormat indexFormat,
-                            interop::GPUSize64 offset,
-                            std::optional<interop::GPUSize64> size) override;
-        void setVertexBuffer(Napi::Env,
-                             interop::GPUIndex32 slot,
-                             interop::Interface<interop::GPUBuffer> buffer,
-                             interop::GPUSize64 offset,
-                             std::optional<interop::GPUSize64> size) override;
-        void draw(Napi::Env,
-                  interop::GPUSize32 vertexCount,
-                  interop::GPUSize32 instanceCount,
-                  interop::GPUSize32 firstVertex,
-                  interop::GPUSize32 firstInstance) override;
-        void drawIndexed(Napi::Env,
-                         interop::GPUSize32 indexCount,
-                         interop::GPUSize32 instanceCount,
-                         interop::GPUSize32 firstIndex,
-                         interop::GPUSignedOffset32 baseVertex,
-                         interop::GPUSize32 firstInstance) override;
-        void drawIndirect(Napi::Env,
-                          interop::Interface<interop::GPUBuffer> indirectBuffer,
-                          interop::GPUSize64 indirectOffset) override;
-        void drawIndexedIndirect(Napi::Env,
-                                 interop::Interface<interop::GPUBuffer> indirectBuffer,
-                                 interop::GPUSize64 indirectOffset) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPURenderPassEncoder interface compliance
+    void setViewport(Napi::Env,
+                     float x,
+                     float y,
+                     float width,
+                     float height,
+                     float minDepth,
+                     float maxDepth) override;
+    void setScissorRect(Napi::Env,
+                        interop::GPUIntegerCoordinate x,
+                        interop::GPUIntegerCoordinate y,
+                        interop::GPUIntegerCoordinate width,
+                        interop::GPUIntegerCoordinate height) override;
+    void setBlendConstant(Napi::Env, interop::GPUColor color) override;
+    void setStencilReference(Napi::Env, interop::GPUStencilValue reference) override;
+    void beginOcclusionQuery(Napi::Env, interop::GPUSize32 queryIndex) override;
+    void endOcclusionQuery(Napi::Env) override;
+    void executeBundles(Napi::Env,
+                        std::vector<interop::Interface<interop::GPURenderBundle>> bundles) override;
+    void end(Napi::Env) override;
+    void setBindGroup(Napi::Env,
+                      interop::GPUIndex32 index,
+                      interop::Interface<interop::GPUBindGroup> bindGroup,
+                      std::vector<interop::GPUBufferDynamicOffset> dynamicOffsets) override;
+    void setBindGroup(Napi::Env,
+                      interop::GPUIndex32 index,
+                      interop::Interface<interop::GPUBindGroup> bindGroup,
+                      interop::Uint32Array dynamicOffsetsData,
+                      interop::GPUSize64 dynamicOffsetsDataStart,
+                      interop::GPUSize32 dynamicOffsetsDataLength) override;
+    void pushDebugGroup(Napi::Env, std::string groupLabel) override;
+    void popDebugGroup(Napi::Env) override;
+    void insertDebugMarker(Napi::Env, std::string markerLabel) override;
+    void setPipeline(Napi::Env, interop::Interface<interop::GPURenderPipeline> pipeline) override;
+    void setIndexBuffer(Napi::Env,
+                        interop::Interface<interop::GPUBuffer> buffer,
+                        interop::GPUIndexFormat indexFormat,
+                        interop::GPUSize64 offset,
+                        std::optional<interop::GPUSize64> size) override;
+    void setVertexBuffer(Napi::Env,
+                         interop::GPUIndex32 slot,
+                         interop::Interface<interop::GPUBuffer> buffer,
+                         interop::GPUSize64 offset,
+                         std::optional<interop::GPUSize64> size) override;
+    void draw(Napi::Env,
+              interop::GPUSize32 vertexCount,
+              interop::GPUSize32 instanceCount,
+              interop::GPUSize32 firstVertex,
+              interop::GPUSize32 firstInstance) override;
+    void drawIndexed(Napi::Env,
+                     interop::GPUSize32 indexCount,
+                     interop::GPUSize32 instanceCount,
+                     interop::GPUSize32 firstIndex,
+                     interop::GPUSignedOffset32 baseVertex,
+                     interop::GPUSize32 firstInstance) override;
+    void drawIndirect(Napi::Env,
+                      interop::Interface<interop::GPUBuffer> indirectBuffer,
+                      interop::GPUSize64 indirectOffset) override;
+    void drawIndexedIndirect(Napi::Env,
+                             interop::Interface<interop::GPUBuffer> indirectBuffer,
+                             interop::GPUSize64 indirectOffset) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::RenderPassEncoder enc_;
-    };
+  private:
+    wgpu::RenderPassEncoder enc_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPURenderPipeline.cpp b/src/dawn/node/binding/GPURenderPipeline.cpp
index 31bb0e4..d45386e 100644
--- a/src/dawn/node/binding/GPURenderPipeline.cpp
+++ b/src/dawn/node/binding/GPURenderPipeline.cpp
@@ -22,27 +22,26 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPURenderPipeline
-    ////////////////////////////////////////////////////////////////////////////////
-    GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
-        : pipeline_(std::move(pipeline)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPURenderPipeline
+////////////////////////////////////////////////////////////////////////////////
+GPURenderPipeline::GPURenderPipeline(wgpu::RenderPipeline pipeline)
+    : pipeline_(std::move(pipeline)) {}
 
-    interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
-        Napi::Env env,
-        uint32_t index) {
-        return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
-            env, pipeline_.GetBindGroupLayout(index));
-    }
+interop::Interface<interop::GPUBindGroupLayout> GPURenderPipeline::getBindGroupLayout(
+    Napi::Env env,
+    uint32_t index) {
+    return interop::GPUBindGroupLayout::Create<GPUBindGroupLayout>(
+        env, pipeline_.GetBindGroupLayout(index));
+}
 
-    std::variant<std::string, interop::UndefinedType> GPURenderPipeline::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPURenderPipeline::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPURenderPipeline::setLabel(Napi::Env,
-                                     std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPURenderPipeline::setLabel(Napi::Env,
+                                 std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPURenderPipeline.h b/src/dawn/node/binding/GPURenderPipeline.h
index a279bf5..b37f435 100644
--- a/src/dawn/node/binding/GPURenderPipeline.h
+++ b/src/dawn/node/binding/GPURenderPipeline.h
@@ -24,26 +24,24 @@
 
 namespace wgpu::binding {
 
-    // GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
-    // wgpu::RenderPipeline.
-    class GPURenderPipeline final : public interop::GPURenderPipeline {
-      public:
-        explicit GPURenderPipeline(wgpu::RenderPipeline pipeline);
+// GPURenderPipeline is an implementation of interop::GPURenderPipeline that wraps a
+// wgpu::RenderPipeline.
+class GPURenderPipeline final : public interop::GPURenderPipeline {
+  public:
+    explicit GPURenderPipeline(wgpu::RenderPipeline pipeline);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::RenderPipeline&() const {
-            return pipeline_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::RenderPipeline&() const { return pipeline_; }
 
-        // interop::GPURenderPipeline interface compliance
-        interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
-                                                                           uint32_t index) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPURenderPipeline interface compliance
+    interop::Interface<interop::GPUBindGroupLayout> getBindGroupLayout(Napi::Env,
+                                                                       uint32_t index) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::RenderPipeline pipeline_;
-    };
+  private:
+    wgpu::RenderPipeline pipeline_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUSampler.cpp b/src/dawn/node/binding/GPUSampler.cpp
index 6bca632..a2f95b7 100644
--- a/src/dawn/node/binding/GPUSampler.cpp
+++ b/src/dawn/node/binding/GPUSampler.cpp
@@ -21,18 +21,17 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUSampler
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUSampler
+////////////////////////////////////////////////////////////////////////////////
+GPUSampler::GPUSampler(wgpu::Sampler sampler) : sampler_(std::move(sampler)) {}
 
-    std::variant<std::string, interop::UndefinedType> GPUSampler::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUSampler::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUSampler::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUSampler::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUSampler.h b/src/dawn/node/binding/GPUSampler.h
index d5908f5..a15897c 100644
--- a/src/dawn/node/binding/GPUSampler.h
+++ b/src/dawn/node/binding/GPUSampler.h
@@ -23,23 +23,21 @@
 #include "src/dawn/node/interop/WebGPU.h"
 
 namespace wgpu::binding {
-    // GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
-    class GPUSampler final : public interop::GPUSampler {
-      public:
-        explicit GPUSampler(wgpu::Sampler sampler);
+// GPUSampler is an implementation of interop::GPUSampler that wraps a wgpu::Sampler.
+class GPUSampler final : public interop::GPUSampler {
+  public:
+    explicit GPUSampler(wgpu::Sampler sampler);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::Sampler&() const {
-            return sampler_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::Sampler&() const { return sampler_; }
 
-        // interop::GPUSampler interface compliance
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUSampler interface compliance
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::Sampler sampler_;
-    };
+  private:
+    wgpu::Sampler sampler_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUShaderModule.cpp b/src/dawn/node/binding/GPUShaderModule.cpp
index d0fe1e4..8f44982 100644
--- a/src/dawn/node/binding/GPUShaderModule.cpp
+++ b/src/dawn/node/binding/GPUShaderModule.cpp
@@ -22,107 +22,94 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUShaderModule
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
-        : shader_(std::move(shader)), async_(std::move(async)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUShaderModule
+////////////////////////////////////////////////////////////////////////////////
+GPUShaderModule::GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async)
+    : shader_(std::move(shader)), async_(std::move(async)) {}
 
-    interop::Promise<interop::Interface<interop::GPUCompilationInfo>>
-    GPUShaderModule::compilationInfo(Napi::Env env) {
-        struct GPUCompilationMessage : public interop::GPUCompilationMessage {
-            WGPUCompilationMessage message;
+interop::Promise<interop::Interface<interop::GPUCompilationInfo>> GPUShaderModule::compilationInfo(
+    Napi::Env env) {
+    struct GPUCompilationMessage : public interop::GPUCompilationMessage {
+        WGPUCompilationMessage message;
 
-            explicit GPUCompilationMessage(const WGPUCompilationMessage& m) : message(m) {
+        explicit GPUCompilationMessage(const WGPUCompilationMessage& m) : message(m) {}
+        std::string getMessage(Napi::Env) override { return message.message; }
+        interop::GPUCompilationMessageType getType(Napi::Env) override {
+            switch (message.type) {
+                case WGPUCompilationMessageType_Error:
+                    return interop::GPUCompilationMessageType::kError;
+                case WGPUCompilationMessageType_Warning:
+                    return interop::GPUCompilationMessageType::kWarning;
+                case WGPUCompilationMessageType_Info:
+                    return interop::GPUCompilationMessageType::kInfo;
+                default:
+                    UNIMPLEMENTED();
             }
-            std::string getMessage(Napi::Env) override {
-                return message.message;
+        }
+        uint64_t getLineNum(Napi::Env) override { return message.lineNum; }
+        uint64_t getLinePos(Napi::Env) override { return message.linePos; }
+        uint64_t getOffset(Napi::Env) override { return message.offset; }
+        uint64_t getLength(Napi::Env) override { return message.length; }
+    };
+
+    using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
+
+    struct GPUCompilationInfo : public interop::GPUCompilationInfo {
+        std::vector<Napi::ObjectReference> messages;
+
+        GPUCompilationInfo(Napi::Env env, Messages msgs) {
+            messages.reserve(msgs.size());
+            for (auto& msg : msgs) {
+                messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
             }
-            interop::GPUCompilationMessageType getType(Napi::Env) override {
-                switch (message.type) {
-                    case WGPUCompilationMessageType_Error:
-                        return interop::GPUCompilationMessageType::kError;
-                    case WGPUCompilationMessageType_Warning:
-                        return interop::GPUCompilationMessageType::kWarning;
-                    case WGPUCompilationMessageType_Info:
-                        return interop::GPUCompilationMessageType::kInfo;
-                    default:
-                        UNIMPLEMENTED();
-                }
+        }
+        Messages getMessages(Napi::Env) override {
+            Messages out;
+            out.reserve(messages.size());
+            for (auto& msg : messages) {
+                out.emplace_back(msg.Value());
             }
-            uint64_t getLineNum(Napi::Env) override {
-                return message.lineNum;
+            return out;
+        }
+    };
+
+    using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
+
+    struct Context {
+        Napi::Env env;
+        Promise promise;
+        AsyncTask task;
+    };
+    auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
+    auto promise = ctx->promise;
+
+    shader_.GetCompilationInfo(
+        [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
+           void* userdata) {
+            auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
+
+            Messages messages(compilationInfo->messageCount);
+            for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
+                auto& msg = compilationInfo->messages[i];
+                messages[i] =
+                    interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
             }
-            uint64_t getLinePos(Napi::Env) override {
-                return message.linePos;
-            }
-            uint64_t getOffset(Napi::Env) override {
-                return message.offset;
-            }
-            uint64_t getLength(Napi::Env) override {
-                return message.length;
-            }
-        };
 
-        using Messages = std::vector<interop::Interface<interop::GPUCompilationMessage>>;
+            c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
+                c->env, c->env, std::move(messages)));
+        },
+        ctx);
 
-        struct GPUCompilationInfo : public interop::GPUCompilationInfo {
-            std::vector<Napi::ObjectReference> messages;
+    return promise;
+}
 
-            GPUCompilationInfo(Napi::Env env, Messages msgs) {
-                messages.reserve(msgs.size());
-                for (auto& msg : msgs) {
-                    messages.emplace_back(Napi::Persistent(Napi::Object(env, msg)));
-                }
-            }
-            Messages getMessages(Napi::Env) override {
-                Messages out;
-                out.reserve(messages.size());
-                for (auto& msg : messages) {
-                    out.emplace_back(msg.Value());
-                }
-                return out;
-            }
-        };
+std::variant<std::string, interop::UndefinedType> GPUShaderModule::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-        using Promise = interop::Promise<interop::Interface<interop::GPUCompilationInfo>>;
-
-        struct Context {
-            Napi::Env env;
-            Promise promise;
-            AsyncTask task;
-        };
-        auto ctx = new Context{env, Promise(env, PROMISE_INFO), AsyncTask(async_)};
-        auto promise = ctx->promise;
-
-        shader_.GetCompilationInfo(
-            [](WGPUCompilationInfoRequestStatus status, WGPUCompilationInfo const* compilationInfo,
-               void* userdata) {
-                auto c = std::unique_ptr<Context>(static_cast<Context*>(userdata));
-
-                Messages messages(compilationInfo->messageCount);
-                for (uint32_t i = 0; i < compilationInfo->messageCount; i++) {
-                    auto& msg = compilationInfo->messages[i];
-                    messages[i] =
-                        interop::GPUCompilationMessage::Create<GPUCompilationMessage>(c->env, msg);
-                }
-
-                c->promise.Resolve(interop::GPUCompilationInfo::Create<GPUCompilationInfo>(
-                    c->env, c->env, std::move(messages)));
-            },
-            ctx);
-
-        return promise;
-    }
-
-    std::variant<std::string, interop::UndefinedType> GPUShaderModule::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
-
-    void GPUShaderModule::setLabel(Napi::Env,
-                                   std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUShaderModule::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUShaderModule.h b/src/dawn/node/binding/GPUShaderModule.h
index a18e758..de4c258 100644
--- a/src/dawn/node/binding/GPUShaderModule.h
+++ b/src/dawn/node/binding/GPUShaderModule.h
@@ -26,27 +26,25 @@
 
 namespace wgpu::binding {
 
-    // GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
-    // wgpu::ShaderModule.
-    class GPUShaderModule final : public interop::GPUShaderModule {
-      public:
-        GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
+// GPUShaderModule is an implementation of interop::GPUShaderModule that wraps a
+// wgpu::ShaderModule.
+class GPUShaderModule final : public interop::GPUShaderModule {
+  public:
+    GPUShaderModule(wgpu::ShaderModule shader, std::shared_ptr<AsyncRunner> async);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::ShaderModule&() const {
-            return shader_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::ShaderModule&() const { return shader_; }
 
-        // interop::GPUShaderModule interface compliance
-        interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
-            Napi::Env) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUShaderModule interface compliance
+    interop::Promise<interop::Interface<interop::GPUCompilationInfo>> compilationInfo(
+        Napi::Env) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::ShaderModule shader_;
-        std::shared_ptr<AsyncRunner> async_;
-    };
+  private:
+    wgpu::ShaderModule shader_;
+    std::shared_ptr<AsyncRunner> async_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUSupportedLimits.cpp b/src/dawn/node/binding/GPUSupportedLimits.cpp
index 41e1592..341385f 100644
--- a/src/dawn/node/binding/GPUSupportedLimits.cpp
+++ b/src/dawn/node/binding/GPUSupportedLimits.cpp
@@ -18,116 +18,114 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUSupportedLimits
-    ////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUSupportedLimits
+////////////////////////////////////////////////////////////////////////////////
 
-    GPUSupportedLimits::GPUSupportedLimits(wgpu::SupportedLimits limits)
-        : limits_(std::move(limits)) {
-    }
+GPUSupportedLimits::GPUSupportedLimits(wgpu::SupportedLimits limits) : limits_(std::move(limits)) {}
 
-    uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
-        return limits_.limits.maxTextureDimension1D;
-    }
+uint32_t GPUSupportedLimits::getMaxTextureDimension1D(Napi::Env) {
+    return limits_.limits.maxTextureDimension1D;
+}
 
-    uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
-        return limits_.limits.maxTextureDimension2D;
-    }
+uint32_t GPUSupportedLimits::getMaxTextureDimension2D(Napi::Env) {
+    return limits_.limits.maxTextureDimension2D;
+}
 
-    uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
-        return limits_.limits.maxTextureDimension3D;
-    }
+uint32_t GPUSupportedLimits::getMaxTextureDimension3D(Napi::Env) {
+    return limits_.limits.maxTextureDimension3D;
+}
 
-    uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
-        return limits_.limits.maxTextureArrayLayers;
-    }
+uint32_t GPUSupportedLimits::getMaxTextureArrayLayers(Napi::Env) {
+    return limits_.limits.maxTextureArrayLayers;
+}
 
-    uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
-        return limits_.limits.maxBindGroups;
-    }
+uint32_t GPUSupportedLimits::getMaxBindGroups(Napi::Env) {
+    return limits_.limits.maxBindGroups;
+}
 
-    uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
-        return limits_.limits.maxDynamicUniformBuffersPerPipelineLayout;
-    }
+uint32_t GPUSupportedLimits::getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) {
+    return limits_.limits.maxDynamicUniformBuffersPerPipelineLayout;
+}
 
-    uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
-        return limits_.limits.maxDynamicStorageBuffersPerPipelineLayout;
-    }
+uint32_t GPUSupportedLimits::getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) {
+    return limits_.limits.maxDynamicStorageBuffersPerPipelineLayout;
+}
 
-    uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
-        return limits_.limits.maxSampledTexturesPerShaderStage;
-    }
+uint32_t GPUSupportedLimits::getMaxSampledTexturesPerShaderStage(Napi::Env) {
+    return limits_.limits.maxSampledTexturesPerShaderStage;
+}
 
-    uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
-        return limits_.limits.maxSamplersPerShaderStage;
-    }
+uint32_t GPUSupportedLimits::getMaxSamplersPerShaderStage(Napi::Env) {
+    return limits_.limits.maxSamplersPerShaderStage;
+}
 
-    uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
-        return limits_.limits.maxStorageBuffersPerShaderStage;
-    }
+uint32_t GPUSupportedLimits::getMaxStorageBuffersPerShaderStage(Napi::Env) {
+    return limits_.limits.maxStorageBuffersPerShaderStage;
+}
 
-    uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
-        return limits_.limits.maxStorageTexturesPerShaderStage;
-    }
+uint32_t GPUSupportedLimits::getMaxStorageTexturesPerShaderStage(Napi::Env) {
+    return limits_.limits.maxStorageTexturesPerShaderStage;
+}
 
-    uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
-        return limits_.limits.maxUniformBuffersPerShaderStage;
-    }
+uint32_t GPUSupportedLimits::getMaxUniformBuffersPerShaderStage(Napi::Env) {
+    return limits_.limits.maxUniformBuffersPerShaderStage;
+}
 
-    uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
-        return limits_.limits.maxUniformBufferBindingSize;
-    }
+uint64_t GPUSupportedLimits::getMaxUniformBufferBindingSize(Napi::Env) {
+    return limits_.limits.maxUniformBufferBindingSize;
+}
 
-    uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
-        return limits_.limits.maxStorageBufferBindingSize;
-    }
+uint64_t GPUSupportedLimits::getMaxStorageBufferBindingSize(Napi::Env) {
+    return limits_.limits.maxStorageBufferBindingSize;
+}
 
-    uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
-        return limits_.limits.minUniformBufferOffsetAlignment;
-    }
+uint32_t GPUSupportedLimits::getMinUniformBufferOffsetAlignment(Napi::Env) {
+    return limits_.limits.minUniformBufferOffsetAlignment;
+}
 
-    uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
-        return limits_.limits.minStorageBufferOffsetAlignment;
-    }
+uint32_t GPUSupportedLimits::getMinStorageBufferOffsetAlignment(Napi::Env) {
+    return limits_.limits.minStorageBufferOffsetAlignment;
+}
 
-    uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
-        return limits_.limits.maxVertexBuffers;
-    }
+uint32_t GPUSupportedLimits::getMaxVertexBuffers(Napi::Env) {
+    return limits_.limits.maxVertexBuffers;
+}
 
-    uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
-        return limits_.limits.maxVertexAttributes;
-    }
+uint32_t GPUSupportedLimits::getMaxVertexAttributes(Napi::Env) {
+    return limits_.limits.maxVertexAttributes;
+}
 
-    uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
-        return limits_.limits.maxVertexBufferArrayStride;
-    }
+uint32_t GPUSupportedLimits::getMaxVertexBufferArrayStride(Napi::Env) {
+    return limits_.limits.maxVertexBufferArrayStride;
+}
 
-    uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
-        return limits_.limits.maxInterStageShaderComponents;
-    }
+uint32_t GPUSupportedLimits::getMaxInterStageShaderComponents(Napi::Env) {
+    return limits_.limits.maxInterStageShaderComponents;
+}
 
-    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
-        return limits_.limits.maxComputeWorkgroupStorageSize;
-    }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupStorageSize(Napi::Env) {
+    return limits_.limits.maxComputeWorkgroupStorageSize;
+}
 
-    uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
-        return limits_.limits.maxComputeInvocationsPerWorkgroup;
-    }
+uint32_t GPUSupportedLimits::getMaxComputeInvocationsPerWorkgroup(Napi::Env) {
+    return limits_.limits.maxComputeInvocationsPerWorkgroup;
+}
 
-    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
-        return limits_.limits.maxComputeWorkgroupSizeX;
-    }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeX(Napi::Env) {
+    return limits_.limits.maxComputeWorkgroupSizeX;
+}
 
-    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
-        return limits_.limits.maxComputeWorkgroupSizeY;
-    }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeY(Napi::Env) {
+    return limits_.limits.maxComputeWorkgroupSizeY;
+}
 
-    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
-        return limits_.limits.maxComputeWorkgroupSizeZ;
-    }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupSizeZ(Napi::Env) {
+    return limits_.limits.maxComputeWorkgroupSizeZ;
+}
 
-    uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
-        return limits_.limits.maxComputeWorkgroupsPerDimension;
-    }
+uint32_t GPUSupportedLimits::getMaxComputeWorkgroupsPerDimension(Napi::Env) {
+    return limits_.limits.maxComputeWorkgroupsPerDimension;
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUSupportedLimits.h b/src/dawn/node/binding/GPUSupportedLimits.h
index 53dcb62..337db00 100644
--- a/src/dawn/node/binding/GPUSupportedLimits.h
+++ b/src/dawn/node/binding/GPUSupportedLimits.h
@@ -23,42 +23,42 @@
 
 namespace wgpu::binding {
 
-    // GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
-    class GPUSupportedLimits final : public interop::GPUSupportedLimits {
-      public:
-        explicit GPUSupportedLimits(wgpu::SupportedLimits);
+// GPUSupportedLimits is an implementation of interop::GPUSupportedLimits.
+class GPUSupportedLimits final : public interop::GPUSupportedLimits {
+  public:
+    explicit GPUSupportedLimits(wgpu::SupportedLimits);
 
-        // interop::GPUSupportedLimits interface compliance
-        uint32_t getMaxTextureDimension1D(Napi::Env) override;
-        uint32_t getMaxTextureDimension2D(Napi::Env) override;
-        uint32_t getMaxTextureDimension3D(Napi::Env) override;
-        uint32_t getMaxTextureArrayLayers(Napi::Env) override;
-        uint32_t getMaxBindGroups(Napi::Env) override;
-        uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
-        uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
-        uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
-        uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
-        uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
-        uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
-        uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
-        uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
-        uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
-        uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
-        uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
-        uint32_t getMaxVertexBuffers(Napi::Env) override;
-        uint32_t getMaxVertexAttributes(Napi::Env) override;
-        uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
-        uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
-        uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
-        uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
-        uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
-        uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
-        uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
-        uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
+    // interop::GPUSupportedLimits interface compliance
+    uint32_t getMaxTextureDimension1D(Napi::Env) override;
+    uint32_t getMaxTextureDimension2D(Napi::Env) override;
+    uint32_t getMaxTextureDimension3D(Napi::Env) override;
+    uint32_t getMaxTextureArrayLayers(Napi::Env) override;
+    uint32_t getMaxBindGroups(Napi::Env) override;
+    uint32_t getMaxDynamicUniformBuffersPerPipelineLayout(Napi::Env) override;
+    uint32_t getMaxDynamicStorageBuffersPerPipelineLayout(Napi::Env) override;
+    uint32_t getMaxSampledTexturesPerShaderStage(Napi::Env) override;
+    uint32_t getMaxSamplersPerShaderStage(Napi::Env) override;
+    uint32_t getMaxStorageBuffersPerShaderStage(Napi::Env) override;
+    uint32_t getMaxStorageTexturesPerShaderStage(Napi::Env) override;
+    uint32_t getMaxUniformBuffersPerShaderStage(Napi::Env) override;
+    uint64_t getMaxUniformBufferBindingSize(Napi::Env) override;
+    uint64_t getMaxStorageBufferBindingSize(Napi::Env) override;
+    uint32_t getMinUniformBufferOffsetAlignment(Napi::Env) override;
+    uint32_t getMinStorageBufferOffsetAlignment(Napi::Env) override;
+    uint32_t getMaxVertexBuffers(Napi::Env) override;
+    uint32_t getMaxVertexAttributes(Napi::Env) override;
+    uint32_t getMaxVertexBufferArrayStride(Napi::Env) override;
+    uint32_t getMaxInterStageShaderComponents(Napi::Env) override;
+    uint32_t getMaxComputeWorkgroupStorageSize(Napi::Env) override;
+    uint32_t getMaxComputeInvocationsPerWorkgroup(Napi::Env) override;
+    uint32_t getMaxComputeWorkgroupSizeX(Napi::Env) override;
+    uint32_t getMaxComputeWorkgroupSizeY(Napi::Env) override;
+    uint32_t getMaxComputeWorkgroupSizeZ(Napi::Env) override;
+    uint32_t getMaxComputeWorkgroupsPerDimension(Napi::Env) override;
 
-      private:
-        wgpu::SupportedLimits limits_;
-    };
+  private:
+    wgpu::SupportedLimits limits_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUTexture.cpp b/src/dawn/node/binding/GPUTexture.cpp
index 59d4ad9..26f354a 100644
--- a/src/dawn/node/binding/GPUTexture.cpp
+++ b/src/dawn/node/binding/GPUTexture.cpp
@@ -23,44 +23,43 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUTexture
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUTexture
+////////////////////////////////////////////////////////////////////////////////
+GPUTexture::GPUTexture(wgpu::Texture texture) : texture_(std::move(texture)) {}
+
+interop::Interface<interop::GPUTextureView> GPUTexture::createView(
+    Napi::Env env,
+    interop::GPUTextureViewDescriptor descriptor) {
+    if (!texture_) {
+        Errors::OperationError(env).ThrowAsJavaScriptException();
+        return {};
     }
 
-    interop::Interface<interop::GPUTextureView> GPUTexture::createView(
-        Napi::Env env,
-        interop::GPUTextureViewDescriptor descriptor) {
-        if (!texture_) {
-            Errors::OperationError(env).ThrowAsJavaScriptException();
-            return {};
-        }
-
-        wgpu::TextureViewDescriptor desc{};
-        Converter conv(env);
-        if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) ||        //
-            !conv(desc.mipLevelCount, descriptor.mipLevelCount) ||      //
-            !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) ||    //
-            !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) ||  //
-            !conv(desc.format, descriptor.format) ||                    //
-            !conv(desc.dimension, descriptor.dimension) ||              //
-            !conv(desc.aspect, descriptor.aspect)) {
-            return {};
-        }
-        return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
+    wgpu::TextureViewDescriptor desc{};
+    Converter conv(env);
+    if (!conv(desc.baseMipLevel, descriptor.baseMipLevel) ||        //
+        !conv(desc.mipLevelCount, descriptor.mipLevelCount) ||      //
+        !conv(desc.baseArrayLayer, descriptor.baseArrayLayer) ||    //
+        !conv(desc.arrayLayerCount, descriptor.arrayLayerCount) ||  //
+        !conv(desc.format, descriptor.format) ||                    //
+        !conv(desc.dimension, descriptor.dimension) ||              //
+        !conv(desc.aspect, descriptor.aspect)) {
+        return {};
     }
+    return interop::GPUTextureView::Create<GPUTextureView>(env, texture_.CreateView(&desc));
+}
 
-    void GPUTexture::destroy(Napi::Env) {
-        texture_.Destroy();
-    }
+void GPUTexture::destroy(Napi::Env) {
+    texture_.Destroy();
+}
 
-    std::variant<std::string, interop::UndefinedType> GPUTexture::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUTexture::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUTexture::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUTexture::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUTexture.h b/src/dawn/node/binding/GPUTexture.h
index 8bf82c6..95d2a1d 100644
--- a/src/dawn/node/binding/GPUTexture.h
+++ b/src/dawn/node/binding/GPUTexture.h
@@ -24,27 +24,25 @@
 
 namespace wgpu::binding {
 
-    // GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
-    class GPUTexture final : public interop::GPUTexture {
-      public:
-        explicit GPUTexture(wgpu::Texture texture);
+// GPUTexture is an implementation of interop::GPUTexture that wraps a wgpu::Texture.
+class GPUTexture final : public interop::GPUTexture {
+  public:
+    explicit GPUTexture(wgpu::Texture texture);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::Texture&() const {
-            return texture_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::Texture&() const { return texture_; }
 
-        // interop::GPUTexture interface compliance
-        interop::Interface<interop::GPUTextureView> createView(
-            Napi::Env,
-            interop::GPUTextureViewDescriptor descriptor) override;
-        void destroy(Napi::Env) override;
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUTexture interface compliance
+    interop::Interface<interop::GPUTextureView> createView(
+        Napi::Env,
+        interop::GPUTextureViewDescriptor descriptor) override;
+    void destroy(Napi::Env) override;
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::Texture texture_;
-    };
+  private:
+    wgpu::Texture texture_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/binding/GPUTextureView.cpp b/src/dawn/node/binding/GPUTextureView.cpp
index c2b5a06..df7514a 100644
--- a/src/dawn/node/binding/GPUTextureView.cpp
+++ b/src/dawn/node/binding/GPUTextureView.cpp
@@ -20,19 +20,17 @@
 
 namespace wgpu::binding {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // wgpu::bindings::GPUTextureView
-    ////////////////////////////////////////////////////////////////////////////////
-    GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {
-    }
+////////////////////////////////////////////////////////////////////////////////
+// wgpu::bindings::GPUTextureView
+////////////////////////////////////////////////////////////////////////////////
+GPUTextureView::GPUTextureView(wgpu::TextureView view) : view_(std::move(view)) {}
 
-    std::variant<std::string, interop::UndefinedType> GPUTextureView::getLabel(Napi::Env) {
-        UNIMPLEMENTED();
-    }
+std::variant<std::string, interop::UndefinedType> GPUTextureView::getLabel(Napi::Env) {
+    UNIMPLEMENTED();
+}
 
-    void GPUTextureView::setLabel(Napi::Env,
-                                  std::variant<std::string, interop::UndefinedType> value) {
-        UNIMPLEMENTED();
-    }
+void GPUTextureView::setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) {
+    UNIMPLEMENTED();
+}
 
 }  // namespace wgpu::binding
diff --git a/src/dawn/node/binding/GPUTextureView.h b/src/dawn/node/binding/GPUTextureView.h
index 004d2c9..93917bc 100644
--- a/src/dawn/node/binding/GPUTextureView.h
+++ b/src/dawn/node/binding/GPUTextureView.h
@@ -24,24 +24,22 @@
 
 namespace wgpu::binding {
 
-    // GPUTextureView is an implementation of interop::GPUTextureView that wraps a
-    // wgpu::TextureView.
-    class GPUTextureView final : public interop::GPUTextureView {
-      public:
-        explicit GPUTextureView(wgpu::TextureView view);
+// GPUTextureView is an implementation of interop::GPUTextureView that wraps a
+// wgpu::TextureView.
+class GPUTextureView final : public interop::GPUTextureView {
+  public:
+    explicit GPUTextureView(wgpu::TextureView view);
 
-        // Implicit cast operator to Dawn GPU object
-        inline operator const wgpu::TextureView&() const {
-            return view_;
-        }
+    // Implicit cast operator to Dawn GPU object
+    inline operator const wgpu::TextureView&() const { return view_; }
 
-        // interop::GPUTextureView interface compliance
-        std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
-        void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
+    // interop::GPUTextureView interface compliance
+    std::variant<std::string, interop::UndefinedType> getLabel(Napi::Env) override;
+    void setLabel(Napi::Env, std::variant<std::string, interop::UndefinedType> value) override;
 
-      private:
-        wgpu::TextureView view_;
-    };
+  private:
+    wgpu::TextureView view_;
+};
 
 }  // namespace wgpu::binding
 
diff --git a/src/dawn/node/interop/Core.cpp b/src/dawn/node/interop/Core.cpp
index 151d852..8303516 100644
--- a/src/dawn/node/interop/Core.cpp
+++ b/src/dawn/node/interop/Core.cpp
@@ -16,155 +16,155 @@
 
 namespace wgpu::interop {
 
-    Result Success;
+Result Success;
 
-    Result Error(std::string msg) {
-        return {msg};
-    }
+Result Error(std::string msg) {
+    return {msg};
+}
 
-    Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
-        if (value.IsBoolean()) {
-            out = value.ToBoolean();
-            return Success;
-        }
-        return Error("value is not a boolean");
+Result Converter<bool>::FromJS(Napi::Env env, Napi::Value value, bool& out) {
+    if (value.IsBoolean()) {
+        out = value.ToBoolean();
+        return Success;
     }
-    Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a boolean");
+}
+Napi::Value Converter<bool>::ToJS(Napi::Env env, bool value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
-        if (value.IsString()) {
-            out = value.ToString();
-            return Success;
-        }
-        return Error("value is not a string");
+Result Converter<std::string>::FromJS(Napi::Env env, Napi::Value value, std::string& out) {
+    if (value.IsString()) {
+        out = value.ToString();
+        return Success;
     }
-    Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a string");
+}
+Napi::Value Converter<std::string>::ToJS(Napi::Env env, std::string value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().Int32Value();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<int8_t>::FromJS(Napi::Env env, Napi::Value value, int8_t& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().Int32Value();
+        return Success;
     }
-    Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<int8_t>::ToJS(Napi::Env env, int8_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().Uint32Value();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<uint8_t>::FromJS(Napi::Env env, Napi::Value value, uint8_t& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().Uint32Value();
+        return Success;
     }
-    Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<uint8_t>::ToJS(Napi::Env env, uint8_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().Int32Value();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<int16_t>::FromJS(Napi::Env env, Napi::Value value, int16_t& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().Int32Value();
+        return Success;
     }
-    Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<int16_t>::ToJS(Napi::Env env, int16_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().Uint32Value();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<uint16_t>::FromJS(Napi::Env env, Napi::Value value, uint16_t& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().Uint32Value();
+        return Success;
     }
-    Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<uint16_t>::ToJS(Napi::Env env, uint16_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().Int32Value();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<int32_t>::FromJS(Napi::Env env, Napi::Value value, int32_t& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().Int32Value();
+        return Success;
     }
-    Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<int32_t>::ToJS(Napi::Env env, int32_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().Uint32Value();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<uint32_t>::FromJS(Napi::Env env, Napi::Value value, uint32_t& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().Uint32Value();
+        return Success;
     }
-    Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<uint32_t>::ToJS(Napi::Env env, uint32_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().Int64Value();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<int64_t>::FromJS(Napi::Env env, Napi::Value value, int64_t& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().Int64Value();
+        return Success;
     }
-    Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<int64_t>::ToJS(Napi::Env env, int64_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
-        if (value.IsNumber()) {
-            // Note that the JS Number type only stores doubles, so the max integer
-            // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
-            // with 1 implicit bit). This is why there's no UInt64Value() function.
-            out = static_cast<uint64_t>(value.ToNumber().Int64Value());
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<uint64_t>::FromJS(Napi::Env env, Napi::Value value, uint64_t& out) {
+    if (value.IsNumber()) {
+        // Note that the JS Number type only stores doubles, so the max integer
+        // range of values without precision loss is -2^53 to 2^53 (52 bit mantissa
+        // with 1 implicit bit). This is why there's no UInt64Value() function.
+        out = static_cast<uint64_t>(value.ToNumber().Int64Value());
+        return Success;
     }
-    Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<uint64_t>::ToJS(Napi::Env env, uint64_t value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().FloatValue();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<float>::FromJS(Napi::Env env, Napi::Value value, float& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().FloatValue();
+        return Success;
     }
-    Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<float>::ToJS(Napi::Env env, float value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
-        if (value.IsNumber()) {
-            out = value.ToNumber().DoubleValue();
-            return Success;
-        }
-        return Error("value is not a number");
+Result Converter<double>::FromJS(Napi::Env env, Napi::Value value, double& out) {
+    if (value.IsNumber()) {
+        out = value.ToNumber().DoubleValue();
+        return Success;
     }
-    Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
-        return Napi::Value::From(env, value);
-    }
+    return Error("value is not a number");
+}
+Napi::Value Converter<double>::ToJS(Napi::Env env, double value) {
+    return Napi::Value::From(env, value);
+}
 
-    Result Converter<UndefinedType>::FromJS(Napi::Env, Napi::Value value, UndefinedType&) {
-        if (value.IsUndefined()) {
-            return Success;
-        }
-        return Error("value is undefined");
+Result Converter<UndefinedType>::FromJS(Napi::Env, Napi::Value value, UndefinedType&) {
+    if (value.IsUndefined()) {
+        return Success;
     }
-    Napi::Value Converter<UndefinedType>::ToJS(Napi::Env env, UndefinedType) {
-        return env.Undefined();
-    }
+    return Error("value is undefined");
+}
+Napi::Value Converter<UndefinedType>::ToJS(Napi::Env env, UndefinedType) {
+    return env.Undefined();
+}
 
 }  // namespace wgpu::interop
diff --git a/src/dawn/node/interop/Core.h b/src/dawn/node/interop/Core.h
index 189eae1..4bf9ee7 100644
--- a/src/dawn/node/interop/Core.h
+++ b/src/dawn/node/interop/Core.h
@@ -36,340 +36,293 @@
 #define ENABLE_INTEROP_LOGGING 0  // Enable for verbose interop logging
 
 #if ENABLE_INTEROP_LOGGING
-#    define INTEROP_LOG(...) LOG(__VA_ARGS__)
+#define INTEROP_LOG(...) LOG(__VA_ARGS__)
 #else
-#    define INTEROP_LOG(...)
+#define INTEROP_LOG(...)
 #endif
 
 // A helper macro for constructing a PromiseInfo with the current file, function and line.
 // See PromiseInfo
-#define PROMISE_INFO                     \
-    ::wgpu::interop::PromiseInfo {       \
-        __FILE__, __FUNCTION__, __LINE__ \
-    }
+#define PROMISE_INFO \
+    ::wgpu::interop::PromiseInfo { __FILE__, __FUNCTION__, __LINE__ }
 
 namespace wgpu::interop {
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // Primitive JavaScript types
-    ////////////////////////////////////////////////////////////////////////////////
-    using Object = Napi::Object;
-    using ArrayBuffer = Napi::ArrayBuffer;
-    using Int8Array = Napi::TypedArrayOf<int8_t>;
-    using Int16Array = Napi::TypedArrayOf<int16_t>;
-    using Int32Array = Napi::TypedArrayOf<int32_t>;
-    using Uint8Array = Napi::TypedArrayOf<uint8_t>;
-    using Uint16Array = Napi::TypedArrayOf<uint16_t>;
-    using Uint32Array = Napi::TypedArrayOf<uint32_t>;
-    using Float32Array = Napi::TypedArrayOf<float>;
-    using Float64Array = Napi::TypedArrayOf<double>;
-    using DataView = Napi::TypedArray;
+////////////////////////////////////////////////////////////////////////////////
+// Primitive JavaScript types
+////////////////////////////////////////////////////////////////////////////////
+using Object = Napi::Object;
+using ArrayBuffer = Napi::ArrayBuffer;
+using Int8Array = Napi::TypedArrayOf<int8_t>;
+using Int16Array = Napi::TypedArrayOf<int16_t>;
+using Int32Array = Napi::TypedArrayOf<int32_t>;
+using Uint8Array = Napi::TypedArrayOf<uint8_t>;
+using Uint16Array = Napi::TypedArrayOf<uint16_t>;
+using Uint32Array = Napi::TypedArrayOf<uint32_t>;
+using Float32Array = Napi::TypedArrayOf<float>;
+using Float64Array = Napi::TypedArrayOf<double>;
+using DataView = Napi::TypedArray;
 
-    // Datatype used for undefined values.
-    struct UndefinedType {};
-    static constexpr UndefinedType Undefined;
+// Datatype used for undefined values.
+struct UndefinedType {};
+static constexpr UndefinedType Undefined;
 
-    template <typename T>
-    using FrozenArray = std::vector<T>;
+template <typename T>
+using FrozenArray = std::vector<T>;
 
-    // A wrapper class for integers that's as transparent as possible and is used to distinguish
-    // that the type is tagged with the [Clamp] WebIDL attribute.
-    template <typename T>
-    struct ClampedInteger {
-        static_assert(std::is_integral_v<T>);
+// A wrapper class for integers that's as transparent as possible and is used to distinguish
+// that the type is tagged with the [Clamp] WebIDL attribute.
+template <typename T>
+struct ClampedInteger {
+    static_assert(std::is_integral_v<T>);
 
-        using IntegerType = T;
-        ClampedInteger() : value(0) {
+    using IntegerType = T;
+    ClampedInteger() : value(0) {}
+    // NOLINTNEXTLINE(runtime/explicit)
+    ClampedInteger(T value) : value(value) {}
+    operator T() const { return value; }
+    T value;
+};
+
+// A wrapper class for integers that's as transparent as possible and is used to distinguish
+// that the type is tagged with the [EnforceRange] WebIDL attribute.
+template <typename T>
+struct EnforceRangeInteger {
+    static_assert(std::is_integral_v<T>);
+
+    using IntegerType = T;
+    EnforceRangeInteger() : value(0) {}
+    // NOLINTNEXTLINE(runtime/explicit)
+    EnforceRangeInteger(T value) : value(value) {}
+    operator T() const { return value; }
+    T value;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Result
+////////////////////////////////////////////////////////////////////////////////
+
+// Result is used to hold a success / error state by functions that perform JS <-> C++
+// conversion
+struct [[nodiscard]] Result {
+    // Returns true if the operation succeeded, false if there was an error
+    inline operator bool() const { return error.empty(); }
+
+    // If Result is an error, then a new Error is returned with the
+    // stringified values appended to the error message.
+    // If Result is a success, then a success Result is returned.
+    template <typename... VALUES>
+    Result Append(VALUES&&... values) {
+        if (*this) {
+            return *this;
         }
-        // NOLINTNEXTLINE(runtime/explicit)
-        ClampedInteger(T value) : value(value) {
-        }
-        operator T() const {
-            return value;
-        }
-        T value;
+        std::stringstream ss;
+        ss << error << "\n";
+        utils::Write(ss, std::forward<VALUES>(values)...);
+        return {ss.str()};
+    }
+
+    // The error message, if the operation failed.
+    std::string error;
+};
+
+// A successful result
+extern Result Success;
+
+// Returns a Result with the given error message
+Result Error(std::string msg);
+
+////////////////////////////////////////////////////////////////////////////////
+// Interface<T>
+////////////////////////////////////////////////////////////////////////////////
+
+// Interface<T> is a templated wrapper around a JavaScript object, which
+// implements the template-generated interface type T. Interfaces are returned
+// by either calling T::Bind() or T::Create().
+template <typename T>
+class Interface {
+  public:
+    // Constructs an Interface with no JS object.
+    inline Interface() {}
+
+    // Constructs an Interface wrapping the given JS object.
+    // The JS object must have been created with a call to T::Bind().
+    explicit inline Interface(Napi::Object o) : object(o) {}
+
+    // Implicit conversion operators to Napi objects.
+    inline operator napi_value() const { return object; }
+    inline operator const Napi::Value&() const { return object; }
+    inline operator const Napi::Object&() const { return object; }
+
+    // Member and dereference operators
+    inline T* operator->() const { return T::Unwrap(object); }
+    inline T* operator*() const { return T::Unwrap(object); }
+
+    // As<IMPL>() returns the unwrapped object cast to the implementation type.
+    // The interface implementation *must* be of the template type IMPL.
+    template <typename IMPL>
+    inline IMPL* As() const {
+        return static_cast<IMPL*>(T::Unwrap(object));
+    }
+
+  private:
+    Napi::Object object;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Promise<T>
+////////////////////////////////////////////////////////////////////////////////
+
+// Info holds details about where the promise was constructed.
+// Used for printing debug messages when a promise is finalized without being resolved
+// or rejected.
+// Use the PROMISE_INFO macro to populate this structure.
+struct PromiseInfo {
+    const char* file = nullptr;
+    const char* function = nullptr;
+    int line = 0;
+};
+
+enum class PromiseState {
+    Pending,
+    Resolved,
+    Rejected,
+};
+
+namespace detail {
+// Base class for Promise<T> specializations.
+class PromiseBase {
+  public:
+    // Implicit conversion operators to Napi promises.
+    inline operator napi_value() const { return state_->deferred.Promise(); }
+    inline operator Napi::Value() const { return state_->deferred.Promise(); }
+    inline operator Napi::Promise() const { return state_->deferred.Promise(); }
+
+    // Reject() rejects the promise with the given failure value.
+    void Reject(Napi::Value value) const {
+        state_->deferred.Reject(value);
+        state_->state = PromiseState::Rejected;
+    }
+    void Reject(Napi::Error err) const { Reject(err.Value()); }
+    void Reject(std::string err) const { Reject(Napi::Error::New(state_->deferred.Env(), err)); }
+
+    PromiseState GetState() const { return state_->state; }
+
+  protected:
+    void Resolve(Napi::Value value) const {
+        state_->deferred.Resolve(value);
+        state_->state = PromiseState::Resolved;
+    }
+
+    struct State {
+        Napi::Promise::Deferred deferred;
+        PromiseInfo info;
+        PromiseState state = PromiseState::Pending;
     };
 
-    // A wrapper class for integers that's as transparent as possible and is used to distinguish
-    // that the type is tagged with the [EnforceRange] WebIDL attribute.
-    template <typename T>
-    struct EnforceRangeInteger {
-        static_assert(std::is_integral_v<T>);
+    PromiseBase(Napi::Env env, const PromiseInfo& info)
+        : state_(new State{Napi::Promise::Deferred::New(env), info}) {
+        state_->deferred.Promise().AddFinalizer(
+            [](Napi::Env, State* state) {
+                if (state->state == PromiseState::Pending) {
+                    ::wgpu::utils::Fatal("Promise not resolved or rejected", state->info.file,
+                                         state->info.line, state->info.function);
+                }
+                delete state;
+            },
+            state_);
+    }
 
-        using IntegerType = T;
-        EnforceRangeInteger() : value(0) {
+    State* const state_;
+};
+}  // namespace detail
+
+// Promise<T> is a templated wrapper around a JavaScript promise, which can
+// resolve to the template type T.
+template <typename T>
+class Promise : public detail::PromiseBase {
+  public:
+    // Constructor
+    Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {}
+
+    // Resolve() fulfills the promise with the given value.
+    void Resolve(T&& value) const {
+        PromiseBase::Resolve(ToJS(state_->deferred.Env(), std::forward<T>(value)));
+    }
+};
+
+// Specialization for Promises that resolve with no value
+template <>
+class Promise<void> : public detail::PromiseBase {
+  public:
+    // Constructor
+    Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {}
+
+    // Resolve() fulfills the promise.
+    void Resolve() const { PromiseBase::Resolve(state_->deferred.Env().Undefined()); }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Converter<T>
+////////////////////////////////////////////////////////////////////////////////
+
+// Converter<T> is specialized for each type T which can be converted from C++
+// to JavaScript, or JavaScript to C++.
+// Each specialization of Converter<T> is expected to have two static methods
+// with the signatures:
+//
+//  // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
+//  static Result FromJS(Napi::Env, Napi::Value in, T& out);
+//
+//  // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
+//  // this value.
+//  static Napi::Value ToJS(Napi::Env, T in);
+template <typename T>
+class Converter {};
+
+template <>
+class Converter<Napi::Object> {
+  public:
+    static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
+        if (value.IsObject()) {
+            out = value.ToObject();
+            return Success;
         }
-        // NOLINTNEXTLINE(runtime/explicit)
-        EnforceRangeInteger(T value) : value(value) {
+        return Error("value is not an object");
+    }
+    static inline Napi::Value ToJS(Napi::Env, Napi::Object value) { return value; }
+};
+
+template <>
+class Converter<ArrayBuffer> {
+  public:
+    static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
+        if (value.IsArrayBuffer()) {
+            out = value.As<ArrayBuffer>();
+            return Success;
         }
-        operator T() const {
-            return value;
+        return Error("value is not a ArrayBuffer");
+    }
+    static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) { return value; }
+};
+
+template <>
+class Converter<Napi::TypedArray> {
+  public:
+    static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
+        if (value.IsTypedArray()) {
+            out = value.As<Napi::TypedArray>();
+            return Success;
         }
-        T value;
-    };
+        return Error("value is not a TypedArray");
+    }
+    static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) { return value; }
+};
 
-    ////////////////////////////////////////////////////////////////////////////////
-    // Result
-    ////////////////////////////////////////////////////////////////////////////////
-
-    // Result is used to hold an success / error state by functions that perform JS <-> C++
-    // conversion
-    struct [[nodiscard]] Result {
-        // Returns true if the operation succeeded, false if there was an error
-        inline operator bool() const {
-            return error.empty();
-        }
-
-        // If Result is an error, then a new Error is returned with the
-        // stringified values append to the error message.
-        // If Result is a success, then a success Result is returned.
-        template <typename... VALUES>
-        Result Append(VALUES && ... values) {
-            if (*this) {
-                return *this;
-            }
-            std::stringstream ss;
-            ss << error << "\n";
-            utils::Write(ss, std::forward<VALUES>(values)...);
-            return {ss.str()};
-        }
-
-        // The error message, if the operation failed.
-        std::string error;
-    };
-
-    // A successful result
-    extern Result Success;
-
-    // Returns a Result with the given error message
-    Result Error(std::string msg);
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Interface<T>
-    ////////////////////////////////////////////////////////////////////////////////
-
-    // Interface<T> is a templated wrapper around a JavaScript object, which
-    // implements the template-generated interface type T. Interfaces are returned
-    // by either calling T::Bind() or T::Create().
-    template <typename T>
-    class Interface {
-      public:
-        // Constructs an Interface with no JS object.
-        inline Interface() {
-        }
-
-        // Constructs an Interface wrapping the given JS object.
-        // The JS object must have been created with a call to T::Bind().
-        explicit inline Interface(Napi::Object o) : object(o) {
-        }
-
-        // Implicit conversion operators to Napi objects.
-        inline operator napi_value() const {
-            return object;
-        }
-        inline operator const Napi::Value&() const {
-            return object;
-        }
-        inline operator const Napi::Object&() const {
-            return object;
-        }
-
-        // Member and dereference operators
-        inline T* operator->() const {
-            return T::Unwrap(object);
-        }
-        inline T* operator*() const {
-            return T::Unwrap(object);
-        }
-
-        // As<IMPL>() returns the unwrapped object cast to the implementation type.
-        // The interface implementation *must* be of the template type IMPL.
-        template <typename IMPL>
-        inline IMPL* As() const {
-            return static_cast<IMPL*>(T::Unwrap(object));
-        }
-
-      private:
-        Napi::Object object;
-    };
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Promise<T>
-    ////////////////////////////////////////////////////////////////////////////////
-
-    // Info holds details about where the promise was constructed.
-    // Used for printing debug messages when a promise is finalized without being resolved
-    // or rejected.
-    // Use the PROMISE_INFO macro to populate this structure.
-    struct PromiseInfo {
-        const char* file = nullptr;
-        const char* function = nullptr;
-        int line = 0;
-    };
-
-    enum class PromiseState {
-        Pending,
-        Resolved,
-        Rejected,
-    };
-
-    namespace detail {
-        // Base class for Promise<T> specializations.
-        class PromiseBase {
-          public:
-            // Implicit conversion operators to Napi promises.
-            inline operator napi_value() const {
-                return state_->deferred.Promise();
-            }
-            inline operator Napi::Value() const {
-                return state_->deferred.Promise();
-            }
-            inline operator Napi::Promise() const {
-                return state_->deferred.Promise();
-            }
-
-            // Reject() rejects the promise with the given failure value.
-            void Reject(Napi::Value value) const {
-                state_->deferred.Reject(value);
-                state_->state = PromiseState::Rejected;
-            }
-            void Reject(Napi::Error err) const {
-                Reject(err.Value());
-            }
-            void Reject(std::string err) const {
-                Reject(Napi::Error::New(state_->deferred.Env(), err));
-            }
-
-            PromiseState GetState() const {
-                return state_->state;
-            }
-
-          protected:
-            void Resolve(Napi::Value value) const {
-                state_->deferred.Resolve(value);
-                state_->state = PromiseState::Resolved;
-            }
-
-            struct State {
-                Napi::Promise::Deferred deferred;
-                PromiseInfo info;
-                PromiseState state = PromiseState::Pending;
-            };
-
-            PromiseBase(Napi::Env env, const PromiseInfo& info)
-                : state_(new State{Napi::Promise::Deferred::New(env), info}) {
-                state_->deferred.Promise().AddFinalizer(
-                    [](Napi::Env, State* state) {
-                        if (state->state == PromiseState::Pending) {
-                            ::wgpu::utils::Fatal("Promise not resolved or rejected",
-                                                 state->info.file, state->info.line,
-                                                 state->info.function);
-                        }
-                        delete state;
-                    },
-                    state_);
-            }
-
-            State* const state_;
-        };
-    }  // namespace detail
-
-    // Promise<T> is a templated wrapper around a JavaScript promise, which can
-    // resolve to the template type T.
-    template <typename T>
-    class Promise : public detail::PromiseBase {
-      public:
-        // Constructor
-        Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
-        }
-
-        // Resolve() fulfills the promise with the given value.
-        void Resolve(T&& value) const {
-            PromiseBase::Resolve(ToJS(state_->deferred.Env(), std::forward<T>(value)));
-        }
-    };
-
-    // Specialization for Promises that resolve with no value
-    template <>
-    class Promise<void> : public detail::PromiseBase {
-      public:
-        // Constructor
-        Promise(Napi::Env env, const PromiseInfo& info) : PromiseBase(env, info) {
-        }
-
-        // Resolve() fulfills the promise.
-        void Resolve() const {
-            PromiseBase::Resolve(state_->deferred.Env().Undefined());
-        }
-    };
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Converter<T>
-    ////////////////////////////////////////////////////////////////////////////////
-
-    // Converter<T> is specialized for each type T which can be converted from C++
-    // to JavaScript, or JavaScript to C++.
-    // Each specialization of Converter<T> is expected to have two static methods
-    // with the signatures:
-    //
-    //  // FromJS() converts the JavaScript value 'in' to the C++ value 'out'.
-    //  static Result FromJS(Napi::Env, Napi::Value in, T& out);
-    //
-    //  // ToJS() converts the C++ value 'in' to a JavaScript value, and returns
-    //  // this value.
-    //  static Napi::Value ToJS(Napi::Env, T in);
-    template <typename T>
-    class Converter {};
-
-    template <>
-    class Converter<Napi::Object> {
-      public:
-        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::Object& out) {
-            if (value.IsObject()) {
-                out = value.ToObject();
-                return Success;
-            }
-            return Error("value is not an object");
-        }
-        static inline Napi::Value ToJS(Napi::Env, Napi::Object value) {
-            return value;
-        }
-    };
-
-    template <>
-    class Converter<ArrayBuffer> {
-      public:
-        static inline Result FromJS(Napi::Env, Napi::Value value, ArrayBuffer& out) {
-            if (value.IsArrayBuffer()) {
-                out = value.As<ArrayBuffer>();
-                return Success;
-            }
-            return Error("value is not a ArrayBuffer");
-        }
-        static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
-            return value;
-        }
-    };
-
-    template <>
-    class Converter<Napi::TypedArray> {
-      public:
-        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArray& out) {
-            if (value.IsTypedArray()) {
-                out = value.As<Napi::TypedArray>();
-                return Success;
-            }
-            return Error("value is not a TypedArray");
-        }
-        static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
-            return value;
-        }
-    };
-
-    template <typename T>
-    class Converter<Napi::TypedArrayOf<T>> {
-      public:
-        // clang-format off
+template <typename T>
+class Converter<Napi::TypedArrayOf<T>> {
+  public:
+    // clang-format off
         // The Napi element type of T
         static constexpr napi_typedarray_type element_type =
               std::is_same<T, int8_t>::value   ? napi_int8_array
@@ -383,432 +336,416 @@
             : std::is_same<T, int64_t>::value  ? napi_bigint64_array
             : std::is_same<T, uint64_t>::value ? napi_biguint64_array
             : static_cast<napi_typedarray_type>(-1);
-        // clang-format on
-        static_assert(static_cast<int>(element_type) >= 0,
-                      "unsupported T type for Napi::TypedArrayOf<T>");
-        static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
-            if (value.IsTypedArray()) {
-                auto arr = value.As<Napi::TypedArrayOf<T>>();
-                if (arr.TypedArrayType() == element_type) {
-                    out = arr;
-                    return Success;
-                }
-                return Error("value is not a TypedArray of the correct element type");
-            }
-            return Error("value is not a TypedArray");
-        }
-        static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) {
-            return value;
-        }
-    };
-
-    template <>
-    class Converter<std::string> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, std::string&);
-        static Napi::Value ToJS(Napi::Env, std::string);
-    };
-
-    template <>
-    class Converter<bool> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, bool&);
-        static Napi::Value ToJS(Napi::Env, bool);
-    };
-
-    template <>
-    class Converter<int8_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, int8_t&);
-        static Napi::Value ToJS(Napi::Env, int8_t);
-    };
-
-    template <>
-    class Converter<uint8_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
-        static Napi::Value ToJS(Napi::Env, uint8_t);
-    };
-
-    template <>
-    class Converter<int16_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, int16_t&);
-        static Napi::Value ToJS(Napi::Env, int16_t);
-    };
-
-    template <>
-    class Converter<uint16_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
-        static Napi::Value ToJS(Napi::Env, uint16_t);
-    };
-
-    template <>
-    class Converter<int32_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, int32_t&);
-        static Napi::Value ToJS(Napi::Env, int32_t);
-    };
-
-    template <>
-    class Converter<uint32_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
-        static Napi::Value ToJS(Napi::Env, uint32_t);
-    };
-
-    template <>
-    class Converter<int64_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, int64_t&);
-        static Napi::Value ToJS(Napi::Env, int64_t);
-    };
-
-    template <>
-    class Converter<uint64_t> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
-        static Napi::Value ToJS(Napi::Env, uint64_t);
-    };
-
-    template <>
-    class Converter<float> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, float&);
-        static Napi::Value ToJS(Napi::Env, float);
-    };
-
-    template <>
-    class Converter<double> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, double&);
-        static Napi::Value ToJS(Napi::Env, double);
-    };
-
-    // [Clamp]ed integers must convert values outside of the integer range by clamping them.
-    template <typename T>
-    class Converter<ClampedInteger<T>> {
-      public:
-        static Result FromJS(Napi::Env env, Napi::Value value, ClampedInteger<T>& out) {
-            double doubleValue;
-            Result res = Converter<double>::FromJS(env, value, doubleValue);
-            if (!res) {
-                return res;
-            }
-
-            // Check for clamping first.
-            constexpr T kMin = std::numeric_limits<T>::min();
-            constexpr T kMax = std::numeric_limits<T>::max();
-            if (doubleValue < kMin) {
-                out = kMin;
+    // clang-format on
+    static_assert(static_cast<int>(element_type) >= 0,
+                  "unsupported T type for Napi::TypedArrayOf<T>");
+    static inline Result FromJS(Napi::Env, Napi::Value value, Napi::TypedArrayOf<T>& out) {
+        if (value.IsTypedArray()) {
+            auto arr = value.As<Napi::TypedArrayOf<T>>();
+            if (arr.TypedArrayType() == element_type) {
+                out = arr;
                 return Success;
             }
-            if (doubleValue > kMax) {
-                out = kMax;
-                return Success;
-            }
-
-            // Yay, no clamping! We can convert the integer type as usual.
-            T correctValue;
-            res = Converter<T>::FromJS(env, value, correctValue);
-            if (!res) {
-                return res;
-            }
-            out = correctValue;
-            return Success;
+            return Error("value is not a TypedArray of the correct element type");
         }
-        static Napi::Value ToJS(Napi::Env env, const ClampedInteger<T>& value) {
-            return Converter<T>::ToJS(env, value.value);
-        }
-    };
-
-    // [EnforceRange] integers cause a TypeError when converted from out of range values
-    template <typename T>
-    class Converter<EnforceRangeInteger<T>> {
-      public:
-        static Result FromJS(Napi::Env env, Napi::Value value, EnforceRangeInteger<T>& out) {
-            double doubleValue;
-            Result res = Converter<double>::FromJS(env, value, doubleValue);
-            if (!res) {
-                return res;
-            }
-
-            // Check for out of range and throw a type error.
-            constexpr double kMin = static_cast<double>(std::numeric_limits<T>::min());
-            constexpr double kMax = static_cast<double>(std::numeric_limits<T>::max());
-            if (!(kMin <= doubleValue && doubleValue <= kMax)) {
-                return Error("Values are out of the range of that integer.");
-            }
-
-            // Yay, no error! We can convert the integer type as usual.
-            T correctValue;
-            res = Converter<T>::FromJS(env, value, correctValue);
-            if (!res) {
-                return res;
-            }
-            out = correctValue;
-            return Success;
-        }
-        static Napi::Value ToJS(Napi::Env env, const EnforceRangeInteger<T>& value) {
-            return Converter<T>::ToJS(env, value.value);
-        }
-    };
-
-    template <>
-    class Converter<UndefinedType> {
-      public:
-        static Result FromJS(Napi::Env, Napi::Value, UndefinedType&);
-        static Napi::Value ToJS(Napi::Env, UndefinedType);
-    };
-
-    template <typename T>
-    class Converter<Interface<T>> {
-      public:
-        static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
-            if (!value.IsObject()) {
-                return Error("value is not object");
-            }
-            auto obj = value.As<Napi::Object>();
-            if (!T::Unwrap(obj)) {
-                return Error("object is not of the correct interface type");
-            }
-            out = Interface<T>(obj);
-            return Success;
-        }
-        static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) {
-            return {env, value};
-        }
-    };
-
-    template <typename T>
-    class Converter<std::optional<T>> {
-      public:
-        static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
-            if (value.IsNull() || value.IsUndefined()) {
-                out.reset();
-                return Success;
-            }
-            T v{};
-            auto res = Converter<T>::FromJS(env, value, v);
-            if (!res) {
-                return res;
-            }
-            out = std::move(v);
-            return Success;
-        }
-        static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
-            if (value.has_value()) {
-                return Converter<T>::ToJS(env, value.value());
-            }
-            return env.Null();
-        }
-    };
-
-    template <typename T>
-    class Converter<std::vector<T>> {
-      public:
-        static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
-            if (!value.IsArray()) {
-                return Error("value is not an array");
-            }
-            auto arr = value.As<Napi::Array>();
-            std::vector<T> vec(arr.Length());
-            for (size_t i = 0; i < vec.size(); i++) {
-                auto res = Converter<T>::FromJS(env, arr[static_cast<uint32_t>(i)], vec[i]);
-                if (!res) {
-                    return res.Append("for array element ", i);
-                }
-            }
-            out = std::move(vec);
-            return Success;
-        }
-        static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
-            auto arr = Napi::Array::New(env, vec.size());
-            for (size_t i = 0; i < vec.size(); i++) {
-                arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
-            }
-            return arr;
-        }
-    };
-
-    template <typename K, typename V>
-    class Converter<std::unordered_map<K, V>> {
-      public:
-        static inline Result FromJS(Napi::Env env,
-                                    Napi::Value value,
-                                    std::unordered_map<K, V>& out) {
-            if (!value.IsObject()) {
-                return Error("value is not an object");
-            }
-            auto obj = value.ToObject();
-            auto keys = obj.GetPropertyNames();
-            std::unordered_map<K, V> map(keys.Length());
-            for (uint32_t i = 0; i < static_cast<uint32_t>(keys.Length()); i++) {
-                K key{};
-                V value{};
-                auto key_res = Converter<K>::FromJS(env, keys[i], key);
-                if (!key_res) {
-                    return key_res.Append("for object key");
-                }
-                auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), value);
-                if (!value_res) {
-                    return value_res.Append("for object value of key: ", key);
-                }
-                map[key] = value;
-            }
-            out = std::move(map);
-            return Success;
-        }
-        static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
-            auto obj = Napi::Object::New(env);
-            for (auto it : value) {
-                obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
-            }
-            return obj;
-        }
-    };
-
-    template <typename... TYPES>
-    class Converter<std::variant<TYPES...>> {
-        template <typename TY>
-        static inline Result TryFromJS(Napi::Env env,
-                                       Napi::Value value,
-                                       std::variant<TYPES...>& out) {
-            TY v{};
-            auto res = Converter<TY>::FromJS(env, value, v);
-            if (!res) {
-                return Error("no possible types matched");
-            }
-            out = std::move(v);
-            return Success;
-        }
-
-        template <typename T0, typename T1, typename... TN>
-        static inline Result TryFromJS(Napi::Env env,
-                                       Napi::Value value,
-                                       std::variant<TYPES...>& out) {
-            if (TryFromJS<T0>(env, value, out)) {
-                return Success;
-            }
-            return TryFromJS<T1, TN...>(env, value, out);
-        }
-
-      public:
-        static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
-            return TryFromJS<TYPES...>(env, value, out);
-        }
-        static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
-            return std::visit(
-                [&](auto&& v) {
-                    using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
-                    return Converter<T>::ToJS(env, v);
-                },
-                value);
-        }
-    };
-
-    template <typename T>
-    class Converter<Promise<T>> {
-      public:
-        static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) {
-            UNIMPLEMENTED();
-        }
-        static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) {
-            return promise;
-        }
-    };
-
-    ////////////////////////////////////////////////////////////////////////////////
-    // Helpers
-    ////////////////////////////////////////////////////////////////////////////////
-
-    // FromJS() is a helper function which delegates to
-    // Converter<T>::FromJS()
-    template <typename T>
-    inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
-        return Converter<T>::FromJS(env, value, out);
+        return Error("value is not a TypedArray");
     }
+    static inline Napi::Value ToJS(Napi::Env, ArrayBuffer value) { return value; }
+};
 
-    // FromJSOptional() is similar to FromJS(), but if 'value' is either null
-    // or undefined then 'out' is left unassigned.
-    template <typename T>
-    inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
+template <>
+class Converter<std::string> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, std::string&);
+    static Napi::Value ToJS(Napi::Env, std::string);
+};
+
+template <>
+class Converter<bool> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, bool&);
+    static Napi::Value ToJS(Napi::Env, bool);
+};
+
+template <>
+class Converter<int8_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, int8_t&);
+    static Napi::Value ToJS(Napi::Env, int8_t);
+};
+
+template <>
+class Converter<uint8_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, uint8_t&);
+    static Napi::Value ToJS(Napi::Env, uint8_t);
+};
+
+template <>
+class Converter<int16_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, int16_t&);
+    static Napi::Value ToJS(Napi::Env, int16_t);
+};
+
+template <>
+class Converter<uint16_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, uint16_t&);
+    static Napi::Value ToJS(Napi::Env, uint16_t);
+};
+
+template <>
+class Converter<int32_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, int32_t&);
+    static Napi::Value ToJS(Napi::Env, int32_t);
+};
+
+template <>
+class Converter<uint32_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, uint32_t&);
+    static Napi::Value ToJS(Napi::Env, uint32_t);
+};
+
+template <>
+class Converter<int64_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, int64_t&);
+    static Napi::Value ToJS(Napi::Env, int64_t);
+};
+
+template <>
+class Converter<uint64_t> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, uint64_t&);
+    static Napi::Value ToJS(Napi::Env, uint64_t);
+};
+
+template <>
+class Converter<float> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, float&);
+    static Napi::Value ToJS(Napi::Env, float);
+};
+
+template <>
+class Converter<double> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, double&);
+    static Napi::Value ToJS(Napi::Env, double);
+};
+
+// [Clamp]ed integers must convert values outside of the integer range by clamping them.
+template <typename T>
+class Converter<ClampedInteger<T>> {
+  public:
+    static Result FromJS(Napi::Env env, Napi::Value value, ClampedInteger<T>& out) {
+        double doubleValue;
+        Result res = Converter<double>::FromJS(env, value, doubleValue);
+        if (!res) {
+            return res;
+        }
+
+        // Check for clamping first.
+        constexpr T kMin = std::numeric_limits<T>::min();
+        constexpr T kMax = std::numeric_limits<T>::max();
+        if (doubleValue < kMin) {
+            out = kMin;
+            return Success;
+        }
+        if (doubleValue > kMax) {
+            out = kMax;
+            return Success;
+        }
+
+        // Yay, no clamping! We can convert the integer type as usual.
+        T correctValue;
+        res = Converter<T>::FromJS(env, value, correctValue);
+        if (!res) {
+            return res;
+        }
+        out = correctValue;
+        return Success;
+    }
+    static Napi::Value ToJS(Napi::Env env, const ClampedInteger<T>& value) {
+        return Converter<T>::ToJS(env, value.value);
+    }
+};
+
+// [EnforceRange] integers cause a TypeError when converted from out of range values
+template <typename T>
+class Converter<EnforceRangeInteger<T>> {
+  public:
+    static Result FromJS(Napi::Env env, Napi::Value value, EnforceRangeInteger<T>& out) {
+        double doubleValue;
+        Result res = Converter<double>::FromJS(env, value, doubleValue);
+        if (!res) {
+            return res;
+        }
+
+        // Check for out of range and throw a type error.
+        constexpr double kMin = static_cast<double>(std::numeric_limits<T>::min());
+        constexpr double kMax = static_cast<double>(std::numeric_limits<T>::max());
+        if (!(kMin <= doubleValue && doubleValue <= kMax)) {
+            return Error("Values are out of the range of that integer.");
+        }
+
+        // Yay, no error! We can convert the integer type as usual.
+        T correctValue;
+        res = Converter<T>::FromJS(env, value, correctValue);
+        if (!res) {
+            return res;
+        }
+        out = correctValue;
+        return Success;
+    }
+    static Napi::Value ToJS(Napi::Env env, const EnforceRangeInteger<T>& value) {
+        return Converter<T>::ToJS(env, value.value);
+    }
+};
+
+template <>
+class Converter<UndefinedType> {
+  public:
+    static Result FromJS(Napi::Env, Napi::Value, UndefinedType&);
+    static Napi::Value ToJS(Napi::Env, UndefinedType);
+};
+
+template <typename T>
+class Converter<Interface<T>> {
+  public:
+    static Result FromJS(Napi::Env env, Napi::Value value, Interface<T>& out) {
+        if (!value.IsObject()) {
+            return Error("value is not object");
+        }
+        auto obj = value.As<Napi::Object>();
+        if (!T::Unwrap(obj)) {
+            return Error("object is not of the correct interface type");
+        }
+        out = Interface<T>(obj);
+        return Success;
+    }
+    static Napi::Value ToJS(Napi::Env env, const Interface<T>& value) { return {env, value}; }
+};
+
+template <typename T>
+class Converter<std::optional<T>> {
+  public:
+    static Result FromJS(Napi::Env env, Napi::Value value, std::optional<T>& out) {
         if (value.IsNull() || value.IsUndefined()) {
+            out.reset();
             return Success;
         }
-        return Converter<T>::FromJS(env, value, out);
-    }
-
-    // ToJS() is a helper function which delegates to Converter<T>::ToJS()
-    template <typename T>
-    inline Napi::Value ToJS(Napi::Env env, T&& value) {
-        return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(
-            env, std::forward<T>(value));
-    }
-
-    // DefaultedParameter can be used in the tuple parameter types passed to
-    // FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
-    // that have a default value. If the argument is omitted in the call, then
-    // DefaultedParameter::default_value will be assigned to
-    // DefaultedParameter::value.
-    template <typename T>
-    struct DefaultedParameter {
-        T value;          // The argument value assigned by FromJS()
-        T default_value;  // The default value if no argument supplied
-
-        // Implicit conversion operator. Returns value.
-        inline operator const T&() const {
-            return value;
+        T v{};
+        auto res = Converter<T>::FromJS(env, value, v);
+        if (!res) {
+            return res;
         }
-    };
+        out = std::move(v);
+        return Success;
+    }
+    static Napi::Value ToJS(Napi::Env env, std::optional<T> value) {
+        if (value.has_value()) {
+            return Converter<T>::ToJS(env, value.value());
+        }
+        return env.Null();
+    }
+};
 
-    // IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
-    template <typename T>
-    struct IsDefaultedParameter {
-        static constexpr bool value = false;
-    };
-    template <typename T>
-    struct IsDefaultedParameter<DefaultedParameter<T>> {
-        static constexpr bool value = true;
-    };
+template <typename T>
+class Converter<std::vector<T>> {
+  public:
+    static inline Result FromJS(Napi::Env env, Napi::Value value, std::vector<T>& out) {
+        if (!value.IsArray()) {
+            return Error("value is not an array");
+        }
+        auto arr = value.As<Napi::Array>();
+        std::vector<T> vec(arr.Length());
+        for (size_t i = 0; i < vec.size(); i++) {
+            auto res = Converter<T>::FromJS(env, arr[static_cast<uint32_t>(i)], vec[i]);
+            if (!res) {
+                return res.Append("for array element ", i);
+            }
+        }
+        out = std::move(vec);
+        return Success;
+    }
+    static inline Napi::Value ToJS(Napi::Env env, const std::vector<T>& vec) {
+        auto arr = Napi::Array::New(env, vec.size());
+        for (size_t i = 0; i < vec.size(); i++) {
+            arr.Set(static_cast<uint32_t>(i), Converter<T>::ToJS(env, vec[i]));
+        }
+        return arr;
+    }
+};
 
-    // FromJS() is a helper function for bulk converting the arguments of 'info'.
-    // PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
-    // Parameters may be of the templated DefaultedParameter type, in which case
-    // the parameter will default to the default-value if omitted.
-    template <typename PARAM_TYPES, int BASE_INDEX = 0>
-    inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
-        if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
-            using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
-            auto& value = info[BASE_INDEX];
-            auto& out = std::get<BASE_INDEX>(args);
-            if constexpr (IsDefaultedParameter<T>::value) {
-                // Parameter has a default value.
-                // Check whether the argument was provided.
-                if (value.IsNull() || value.IsUndefined()) {
-                    // Use default value for this parameter
-                    out.value = out.default_value;
-                } else {
-                    // Argument was provided
-                    auto res = FromJS(info.Env(), value, out.value);
-                    if (!res) {
-                        return res;
-                    }
-                }
+template <typename K, typename V>
+class Converter<std::unordered_map<K, V>> {
+  public:
+    static inline Result FromJS(Napi::Env env, Napi::Value value, std::unordered_map<K, V>& out) {
+        if (!value.IsObject()) {
+            return Error("value is not an object");
+        }
+        auto obj = value.ToObject();
+        auto keys = obj.GetPropertyNames();
+        std::unordered_map<K, V> map(keys.Length());
+        for (uint32_t i = 0; i < static_cast<uint32_t>(keys.Length()); i++) {
+            K key{};
+            V value{};
+            auto key_res = Converter<K>::FromJS(env, keys[i], key);
+            if (!key_res) {
+                return key_res.Append("for object key");
+            }
+            auto value_res = Converter<V>::FromJS(env, obj.Get(keys[i]), value);
+            if (!value_res) {
+                return value_res.Append("for object value of key: ", key);
+            }
+            map[key] = value;
+        }
+        out = std::move(map);
+        return Success;
+    }
+    static inline Napi::Value ToJS(Napi::Env env, std::unordered_map<K, V> value) {
+        auto obj = Napi::Object::New(env);
+        for (auto it : value) {
+            obj.Set(Converter<K>::ToJS(env, it.first), Converter<V>::ToJS(env, it.second));
+        }
+        return obj;
+    }
+};
+
+template <typename... TYPES>
+class Converter<std::variant<TYPES...>> {
+    template <typename TY>
+    static inline Result TryFromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+        TY v{};
+        auto res = Converter<TY>::FromJS(env, value, v);
+        if (!res) {
+            return Error("no possible types matched");
+        }
+        out = std::move(v);
+        return Success;
+    }
+
+    template <typename T0, typename T1, typename... TN>
+    static inline Result TryFromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+        if (TryFromJS<T0>(env, value, out)) {
+            return Success;
+        }
+        return TryFromJS<T1, TN...>(env, value, out);
+    }
+
+  public:
+    static inline Result FromJS(Napi::Env env, Napi::Value value, std::variant<TYPES...>& out) {
+        return TryFromJS<TYPES...>(env, value, out);
+    }
+    static inline Napi::Value ToJS(Napi::Env env, std::variant<TYPES...> value) {
+        return std::visit(
+            [&](auto&& v) {
+                using T = std::remove_cv_t<std::remove_reference_t<decltype(v)>>;
+                return Converter<T>::ToJS(env, v);
+            },
+            value);
+    }
+};
+
+template <typename T>
+class Converter<Promise<T>> {
+  public:
+    static inline Result FromJS(Napi::Env, Napi::Value, Promise<T>&) { UNIMPLEMENTED(); }
+    static inline Napi::Value ToJS(Napi::Env, Promise<T> promise) { return promise; }
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////////////
+
+// FromJS() is a helper function which delegates to
+// Converter<T>::FromJS()
+template <typename T>
+inline Result FromJS(Napi::Env env, Napi::Value value, T& out) {
+    return Converter<T>::FromJS(env, value, out);
+}
+
+// FromJSOptional() is similar to FromJS(), but if 'value' is either null
+// or undefined then 'out' is left unassigned.
+template <typename T>
+inline Result FromJSOptional(Napi::Env env, Napi::Value value, T& out) {
+    if (value.IsNull() || value.IsUndefined()) {
+        return Success;
+    }
+    return Converter<T>::FromJS(env, value, out);
+}
+
+// ToJS() is a helper function which delegates to Converter<T>::ToJS()
+template <typename T>
+inline Napi::Value ToJS(Napi::Env env, T&& value) {
+    return Converter<std::remove_cv_t<std::remove_reference_t<T>>>::ToJS(env,
+                                                                         std::forward<T>(value));
+}
+
+// DefaultedParameter can be used in the tuple parameter types passed to
+// FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args), for parameters
+// that have a default value. If the argument is omitted in the call, then
+// DefaultedParameter::default_value will be assigned to
+// DefaultedParameter::value.
+template <typename T>
+struct DefaultedParameter {
+    T value;          // The argument value assigned by FromJS()
+    T default_value;  // The default value if no argument supplied
+
+    // Implicit conversion operator. Returns value.
+    inline operator const T&() const { return value; }
+};
+
+// IsDefaultedParameter<T>::value is true iff T is of type DefaultedParameter.
+template <typename T>
+struct IsDefaultedParameter {
+    static constexpr bool value = false;
+};
+template <typename T>
+struct IsDefaultedParameter<DefaultedParameter<T>> {
+    static constexpr bool value = true;
+};
+
+// FromJS() is a helper function for bulk converting the arguments of 'info'.
+// PARAM_TYPES is a std::tuple<> describing the C++ function parameter types.
+// Parameters may be of the templated DefaultedParameter type, in which case
+// the parameter will default to the default-value if omitted.
+template <typename PARAM_TYPES, int BASE_INDEX = 0>
+inline Result FromJS(const Napi::CallbackInfo& info, PARAM_TYPES& args) {
+    if constexpr (BASE_INDEX < std::tuple_size_v<PARAM_TYPES>) {
+        using T = std::tuple_element_t<BASE_INDEX, PARAM_TYPES>;
+        auto& value = info[BASE_INDEX];
+        auto& out = std::get<BASE_INDEX>(args);
+        if constexpr (IsDefaultedParameter<T>::value) {
+            // Parameter has a default value.
+            // Check whether the argument was provided.
+            if (value.IsNull() || value.IsUndefined()) {
+                // Use default value for this parameter
+                out.value = out.default_value;
             } else {
-                // Parameter does not have a default value.
-                auto res = FromJS(info.Env(), value, out);
+                // Argument was provided
+                auto res = FromJS(info.Env(), value, out.value);
                 if (!res) {
                     return res;
                 }
             }
-            // Convert the rest of the arguments
-            return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
         } else {
-            return Success;
+            // Parameter does not have a default value.
+            auto res = FromJS(info.Env(), value, out);
+            if (!res) {
+                return res;
+            }
         }
+        // Convert the rest of the arguments
+        return FromJS<PARAM_TYPES, BASE_INDEX + 1>(info, args);
+    } else {
+        return Success;
     }
+}
 
 }  // namespace wgpu::interop
 
diff --git a/src/dawn/node/utils/Debug.h b/src/dawn/node/utils/Debug.h
index df837be..c68b858c 100644
--- a/src/dawn/node/utils/Debug.h
+++ b/src/dawn/node/utils/Debug.h
@@ -29,99 +29,99 @@
 
 namespace wgpu::utils {
 
-    // Write() is a helper for printing container types to the std::ostream.
-    // Write() is used by the LOG() macro below.
+// Write() is a helper for printing container types to the std::ostream.
+// Write() is used by the LOG() macro below.
 
-    // Forward declarations
-    inline std::ostream& Write(std::ostream& out) {
-        return out;
+// Forward declarations
+inline std::ostream& Write(std::ostream& out) {
+    return out;
+}
+template <typename T>
+inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
+template <typename T>
+inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
+template <typename K, typename V>
+inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
+template <typename... TYS>
+inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
+template <typename VALUE>
+std::ostream& Write(std::ostream& out, VALUE&& value);
+
+// Write() implementations
+template <typename T>
+std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
+    if (value.has_value()) {
+        return Write(out, value.value());
     }
-    template <typename T>
-    inline std::ostream& Write(std::ostream& out, const std::optional<T>& value);
-    template <typename T>
-    inline std::ostream& Write(std::ostream& out, const std::vector<T>& value);
-    template <typename K, typename V>
-    inline std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value);
-    template <typename... TYS>
-    inline std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value);
-    template <typename VALUE>
-    std::ostream& Write(std::ostream& out, VALUE&& value);
+    return out << "<undefined>";
+}
 
-    // Write() implementations
-    template <typename T>
-    std::ostream& Write(std::ostream& out, const std::optional<T>& value) {
-        if (value.has_value()) {
-            return Write(out, value.value());
+template <typename T>
+std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
+    out << "[";
+    bool first = true;
+    for (const auto& el : value) {
+        if (!first) {
+            out << ", ";
         }
-        return out << "<undefined>";
+        first = false;
+        Write(out, el);
     }
+    return out << "]";
+}
 
-    template <typename T>
-    std::ostream& Write(std::ostream& out, const std::vector<T>& value) {
-        out << "[";
-        bool first = true;
-        for (const auto& el : value) {
-            if (!first) {
-                out << ", ";
-            }
-            first = false;
-            Write(out, el);
+template <typename K, typename V>
+std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
+    out << "{";
+    bool first = true;
+    for (auto& [key, value] : value) {
+        if (!first) {
+            out << ", ";
         }
-        return out << "]";
+        first = false;
+        Write(out, key);
+        out << ": ";
+        Write(out, value);
     }
+    return out << "}";
+}
 
-    template <typename K, typename V>
-    std::ostream& Write(std::ostream& out, const std::unordered_map<K, V>& value) {
-        out << "{";
-        bool first = true;
-        for (auto& [key, value] : value) {
-            if (!first) {
-                out << ", ";
-            }
-            first = false;
-            Write(out, key);
-            out << ": ";
-            Write(out, value);
-        }
-        return out << "}";
-    }
+template <typename... TYS>
+std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
+    std::visit([&](auto&& v) { Write(out, v); }, value);
+    return out;
+}
 
-    template <typename... TYS>
-    std::ostream& Write(std::ostream& out, const std::variant<TYS...>& value) {
-        std::visit([&](auto&& v) { Write(out, v); }, value);
-        return out;
-    }
+template <typename VALUE>
+std::ostream& Write(std::ostream& out, VALUE&& value) {
+    return out << std::forward<VALUE>(value);
+}
 
-    template <typename VALUE>
-    std::ostream& Write(std::ostream& out, VALUE&& value) {
-        return out << std::forward<VALUE>(value);
-    }
+template <typename FIRST, typename... REST>
+inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
+    Write(out, std::forward<FIRST>(first));
+    Write(out, std::forward<REST>(rest)...);
+    return out;
+}
 
-    template <typename FIRST, typename... REST>
-    inline std::ostream& Write(std::ostream& out, FIRST&& first, REST&&... rest) {
-        Write(out, std::forward<FIRST>(first));
-        Write(out, std::forward<REST>(rest)...);
-        return out;
+// Fatal() prints a message to stdout with the given file, line, function and optional message,
+// then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
+// UNIMPLEMENTED() macro below.
+template <typename... MSG_ARGS>
+[[noreturn]] inline void Fatal(const char* reason,
+                               const char* file,
+                               int line,
+                               const char* function,
+                               MSG_ARGS&&... msg_args) {
+    std::stringstream msg;
+    msg << file << ":" << line << ": " << reason << ": " << function << "()";
+    if constexpr (sizeof...(msg_args) > 0) {
+        msg << " ";
+        Write(msg, std::forward<MSG_ARGS>(msg_args)...);
     }
-
-    // Fatal() prints a message to stdout with the given file, line, function and optional message,
-    // then calls abort(). Fatal() is usually not called directly, but by the UNREACHABLE() and
-    // UNIMPLEMENTED() macro below.
-    template <typename... MSG_ARGS>
-    [[noreturn]] inline void Fatal(const char* reason,
-                                   const char* file,
-                                   int line,
-                                   const char* function,
-                                   MSG_ARGS&&... msg_args) {
-        std::stringstream msg;
-        msg << file << ":" << line << ": " << reason << ": " << function << "()";
-        if constexpr (sizeof...(msg_args) > 0) {
-            msg << " ";
-            Write(msg, std::forward<MSG_ARGS>(msg_args)...);
-        }
-        std::cout << msg.str() << std::endl;
-        abort();
-    }
+    std::cout << msg.str() << std::endl;
+    abort();
+}
 
 // LOG() prints the current file, line and function to stdout, followed by a
 // string representation of all the variadic arguments.
diff --git a/src/dawn/platform/DawnPlatform.cpp b/src/dawn/platform/DawnPlatform.cpp
index d30061b..0d52a33 100644
--- a/src/dawn/platform/DawnPlatform.cpp
+++ b/src/dawn/platform/DawnPlatform.cpp
@@ -21,45 +21,45 @@
 
 namespace dawn::platform {
 
-    CachingInterface::CachingInterface() = default;
+CachingInterface::CachingInterface() = default;
 
-    CachingInterface::~CachingInterface() = default;
+CachingInterface::~CachingInterface() = default;
 
-    Platform::Platform() = default;
+Platform::Platform() = default;
 
-    Platform::~Platform() = default;
+Platform::~Platform() = default;
 
-    const unsigned char* Platform::GetTraceCategoryEnabledFlag(TraceCategory category) {
-        static unsigned char disabled = 0;
-        return &disabled;
-    }
+const unsigned char* Platform::GetTraceCategoryEnabledFlag(TraceCategory category) {
+    static unsigned char disabled = 0;
+    return &disabled;
+}
 
-    double Platform::MonotonicallyIncreasingTime() {
-        return 0;
-    }
+double Platform::MonotonicallyIncreasingTime() {
+    return 0;
+}
 
-    uint64_t Platform::AddTraceEvent(char phase,
-                                     const unsigned char* categoryGroupEnabled,
-                                     const char* name,
-                                     uint64_t id,
-                                     double timestamp,
-                                     int numArgs,
-                                     const char** argNames,
-                                     const unsigned char* argTypes,
-                                     const uint64_t* argValues,
-                                     unsigned char flags) {
-        // AddTraceEvent cannot be called if events are disabled.
-        ASSERT(false);
-        return 0;
-    }
+uint64_t Platform::AddTraceEvent(char phase,
+                                 const unsigned char* categoryGroupEnabled,
+                                 const char* name,
+                                 uint64_t id,
+                                 double timestamp,
+                                 int numArgs,
+                                 const char** argNames,
+                                 const unsigned char* argTypes,
+                                 const uint64_t* argValues,
+                                 unsigned char flags) {
+    // AddTraceEvent cannot be called if events are disabled.
+    ASSERT(false);
+    return 0;
+}
 
-    dawn::platform::CachingInterface* Platform::GetCachingInterface(const void* fingerprint,
-                                                                    size_t fingerprintSize) {
-        return nullptr;
-    }
+dawn::platform::CachingInterface* Platform::GetCachingInterface(const void* fingerprint,
+                                                                size_t fingerprintSize) {
+    return nullptr;
+}
 
-    std::unique_ptr<dawn::platform::WorkerTaskPool> Platform::CreateWorkerTaskPool() {
-        return std::make_unique<AsyncWorkerThreadPool>();
-    }
+std::unique_ptr<dawn::platform::WorkerTaskPool> Platform::CreateWorkerTaskPool() {
+    return std::make_unique<AsyncWorkerThreadPool>();
+}
 
 }  // namespace dawn::platform
diff --git a/src/dawn/platform/WorkerThread.cpp b/src/dawn/platform/WorkerThread.cpp
index ea1c26e..bf71df7 100644
--- a/src/dawn/platform/WorkerThread.cpp
+++ b/src/dawn/platform/WorkerThread.cpp
@@ -22,75 +22,69 @@
 
 namespace {
 
-    class AsyncWaitableEventImpl {
-      public:
-        AsyncWaitableEventImpl() : mIsComplete(false) {
-        }
+class AsyncWaitableEventImpl {
+  public:
+    AsyncWaitableEventImpl() : mIsComplete(false) {}
 
-        void Wait() {
-            std::unique_lock<std::mutex> lock(mMutex);
-            mCondition.wait(lock, [this] { return mIsComplete; });
-        }
+    void Wait() {
+        std::unique_lock<std::mutex> lock(mMutex);
+        mCondition.wait(lock, [this] { return mIsComplete; });
+    }
 
-        bool IsComplete() {
+    bool IsComplete() {
+        std::lock_guard<std::mutex> lock(mMutex);
+        return mIsComplete;
+    }
+
+    void MarkAsComplete() {
+        {
             std::lock_guard<std::mutex> lock(mMutex);
-            return mIsComplete;
+            mIsComplete = true;
         }
+        mCondition.notify_all();
+    }
 
-        void MarkAsComplete() {
-            {
-                std::lock_guard<std::mutex> lock(mMutex);
-                mIsComplete = true;
-            }
-            mCondition.notify_all();
-        }
+  private:
+    std::mutex mMutex;
+    std::condition_variable mCondition;
+    bool mIsComplete;
+};
 
-      private:
-        std::mutex mMutex;
-        std::condition_variable mCondition;
-        bool mIsComplete;
-    };
+class AsyncWaitableEvent final : public dawn::platform::WaitableEvent {
+  public:
+    AsyncWaitableEvent() : mWaitableEventImpl(std::make_shared<AsyncWaitableEventImpl>()) {}
 
-    class AsyncWaitableEvent final : public dawn::platform::WaitableEvent {
-      public:
-        AsyncWaitableEvent() : mWaitableEventImpl(std::make_shared<AsyncWaitableEventImpl>()) {
-        }
+    void Wait() override { mWaitableEventImpl->Wait(); }
 
-        void Wait() override {
-            mWaitableEventImpl->Wait();
-        }
+    bool IsComplete() override { return mWaitableEventImpl->IsComplete(); }
 
-        bool IsComplete() override {
-            return mWaitableEventImpl->IsComplete();
-        }
+    std::shared_ptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
+        return mWaitableEventImpl;
+    }
 
-        std::shared_ptr<AsyncWaitableEventImpl> GetWaitableEventImpl() const {
-            return mWaitableEventImpl;
-        }
-
-      private:
-        std::shared_ptr<AsyncWaitableEventImpl> mWaitableEventImpl;
-    };
+  private:
+    std::shared_ptr<AsyncWaitableEventImpl> mWaitableEventImpl;
+};
 
 }  // anonymous namespace
 
 namespace dawn::platform {
 
-    std::unique_ptr<dawn::platform::WaitableEvent> AsyncWorkerThreadPool::PostWorkerTask(
-        dawn::platform::PostWorkerTaskCallback callback,
-        void* userdata) {
-        std::unique_ptr<AsyncWaitableEvent> waitableEvent = std::make_unique<AsyncWaitableEvent>();
+std::unique_ptr<dawn::platform::WaitableEvent> AsyncWorkerThreadPool::PostWorkerTask(
+    dawn::platform::PostWorkerTaskCallback callback,
+    void* userdata) {
+    std::unique_ptr<AsyncWaitableEvent> waitableEvent = std::make_unique<AsyncWaitableEvent>();
 
-        std::function<void()> doTask =
-            [callback, userdata, waitableEventImpl = waitableEvent->GetWaitableEventImpl()]() {
-                callback(userdata);
-                waitableEventImpl->MarkAsComplete();
-            };
+    std::function<void()> doTask = [callback, userdata,
+                                    waitableEventImpl = waitableEvent->GetWaitableEventImpl()]() {
+        callback(userdata);
+        waitableEventImpl->MarkAsComplete();
+    };
 
-        std::thread thread(doTask);
-        thread.detach();
+    std::thread thread(doTask);
+    thread.detach();
 
-        return waitableEvent;
-    }
+    return waitableEvent;
+}
 
 }  // namespace dawn::platform
diff --git a/src/dawn/platform/WorkerThread.h b/src/dawn/platform/WorkerThread.h
index 7a11d83..39932d5 100644
--- a/src/dawn/platform/WorkerThread.h
+++ b/src/dawn/platform/WorkerThread.h
@@ -22,12 +22,12 @@
 
 namespace dawn::platform {
 
-    class AsyncWorkerThreadPool : public dawn::platform::WorkerTaskPool, public NonCopyable {
-      public:
-        std::unique_ptr<dawn::platform::WaitableEvent> PostWorkerTask(
-            dawn::platform::PostWorkerTaskCallback callback,
-            void* userdata) override;
-    };
+class AsyncWorkerThreadPool : public dawn::platform::WorkerTaskPool, public NonCopyable {
+  public:
+    std::unique_ptr<dawn::platform::WaitableEvent> PostWorkerTask(
+        dawn::platform::PostWorkerTaskCallback callback,
+        void* userdata) override;
+};
 
 }  // namespace dawn::platform
 
diff --git a/src/dawn/platform/tracing/EventTracer.cpp b/src/dawn/platform/tracing/EventTracer.cpp
index 7445d98..03b266f 100644
--- a/src/dawn/platform/tracing/EventTracer.cpp
+++ b/src/dawn/platform/tracing/EventTracer.cpp
@@ -18,41 +18,41 @@
 
 namespace dawn::platform::tracing {
 
-    const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
-        static unsigned char disabled = 0;
-        if (platform == nullptr) {
-            return &disabled;
-        }
-
-        const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
-        if (categoryEnabledFlag != nullptr) {
-            return categoryEnabledFlag;
-        }
-
+const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform, TraceCategory category) {
+    static unsigned char disabled = 0;
+    if (platform == nullptr) {
         return &disabled;
     }
 
-    TraceEventHandle AddTraceEvent(Platform* platform,
-                                   char phase,
-                                   const unsigned char* categoryGroupEnabled,
-                                   const char* name,
-                                   uint64_t id,
-                                   int numArgs,
-                                   const char** argNames,
-                                   const unsigned char* argTypes,
-                                   const uint64_t* argValues,
-                                   unsigned char flags) {
-        ASSERT(platform != nullptr);
-
-        double timestamp = platform->MonotonicallyIncreasingTime();
-        if (timestamp != 0) {
-            TraceEventHandle handle =
-                platform->AddTraceEvent(phase, categoryGroupEnabled, name, id, timestamp, numArgs,
-                                        argNames, argTypes, argValues, flags);
-            return handle;
-        }
-
-        return static_cast<TraceEventHandle>(0);
+    const unsigned char* categoryEnabledFlag = platform->GetTraceCategoryEnabledFlag(category);
+    if (categoryEnabledFlag != nullptr) {
+        return categoryEnabledFlag;
     }
 
+    return &disabled;
+}
+
+TraceEventHandle AddTraceEvent(Platform* platform,
+                               char phase,
+                               const unsigned char* categoryGroupEnabled,
+                               const char* name,
+                               uint64_t id,
+                               int numArgs,
+                               const char** argNames,
+                               const unsigned char* argTypes,
+                               const uint64_t* argValues,
+                               unsigned char flags) {
+    ASSERT(platform != nullptr);
+
+    double timestamp = platform->MonotonicallyIncreasingTime();
+    if (timestamp != 0) {
+        TraceEventHandle handle =
+            platform->AddTraceEvent(phase, categoryGroupEnabled, name, id, timestamp, numArgs,
+                                    argNames, argTypes, argValues, flags);
+        return handle;
+    }
+
+    return static_cast<TraceEventHandle>(0);
+}
+
 }  // namespace dawn::platform::tracing
diff --git a/src/dawn/platform/tracing/EventTracer.h b/src/dawn/platform/tracing/EventTracer.h
index a7bf11f..d1f7fe8 100644
--- a/src/dawn/platform/tracing/EventTracer.h
+++ b/src/dawn/platform/tracing/EventTracer.h
@@ -21,31 +21,29 @@
 
 namespace dawn::platform {
 
-    class Platform;
-    enum class TraceCategory;
+class Platform;
+enum class TraceCategory;
 
-    namespace tracing {
+namespace tracing {
 
-        using TraceEventHandle = uint64_t;
+using TraceEventHandle = uint64_t;
 
-        DAWN_PLATFORM_EXPORT const unsigned char* GetTraceCategoryEnabledFlag(
-            Platform* platform,
-            TraceCategory category);
+DAWN_PLATFORM_EXPORT const unsigned char* GetTraceCategoryEnabledFlag(Platform* platform,
+                                                                      TraceCategory category);
 
-        // TODO(enga): Simplify this API.
-        DAWN_PLATFORM_EXPORT TraceEventHandle
-        AddTraceEvent(Platform* platform,
-                      char phase,
-                      const unsigned char* categoryGroupEnabled,
-                      const char* name,
-                      uint64_t id,
-                      int numArgs,
-                      const char** argNames,
-                      const unsigned char* argTypes,
-                      const uint64_t* argValues,
-                      unsigned char flags);
+// TODO(enga): Simplify this API.
+DAWN_PLATFORM_EXPORT TraceEventHandle AddTraceEvent(Platform* platform,
+                                                    char phase,
+                                                    const unsigned char* categoryGroupEnabled,
+                                                    const char* name,
+                                                    uint64_t id,
+                                                    int numArgs,
+                                                    const char** argNames,
+                                                    const unsigned char* argTypes,
+                                                    const uint64_t* argValues,
+                                                    unsigned char flags);
 
-    }  // namespace tracing
+}  // namespace tracing
 }  // namespace dawn::platform
 
 #endif  // SRC_DAWN_PLATFORM_TRACING_EVENTTRACER_H_
diff --git a/src/dawn/platform/tracing/TraceEvent.h b/src/dawn/platform/tracing/TraceEvent.h
index 9eac668..d50305d 100644
--- a/src/dawn/platform/tracing/TraceEvent.h
+++ b/src/dawn/platform/tracing/TraceEvent.h
@@ -758,75 +758,62 @@
 
 namespace dawn::platform::TraceEvent {
 
-    // Specify these values when the corresponding argument of addTraceEvent is not
-    // used.
-    const int zeroNumArgs = 0;
-    const uint64_t noEventId = 0;
+// Specify these values when the corresponding argument of addTraceEvent is not
+// used.
+const int zeroNumArgs = 0;
+const uint64_t noEventId = 0;
 
-    // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
-    // are mangled with the Process ID so that they are unlikely to collide when the
-    // same pointer is used on different processes.
-    class TraceID {
-      public:
-        explicit TraceID(const void* id, unsigned char* flags)
-            : m_data(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
-            *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
-        }
-        explicit TraceID(uint64_t id, unsigned char* flags) : m_data(id) {
-            (void)flags;
-        }
-        explicit TraceID(uint32_t id, unsigned char* flags) : m_data(id) {
-            (void)flags;
-        }
-        explicit TraceID(uint16_t id, unsigned char* flags) : m_data(id) {
-            (void)flags;
-        }
-        explicit TraceID(unsigned char id, unsigned char* flags) : m_data(id) {
-            (void)flags;
-        }
-        explicit TraceID(int64_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
-            (void)flags;
-        }
-        explicit TraceID(int32_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
-            (void)flags;
-        }
-        explicit TraceID(int16_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
-            (void)flags;
-        }
-        explicit TraceID(signed char id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
-            (void)flags;
-        }
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are mangled with the Process ID so that they are unlikely to collide when the
+// same pointer is used on different processes.
+class TraceID {
+  public:
+    explicit TraceID(const void* id, unsigned char* flags)
+        : m_data(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+        *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+    }
+    explicit TraceID(uint64_t id, unsigned char* flags) : m_data(id) { (void)flags; }
+    explicit TraceID(uint32_t id, unsigned char* flags) : m_data(id) { (void)flags; }
+    explicit TraceID(uint16_t id, unsigned char* flags) : m_data(id) { (void)flags; }
+    explicit TraceID(unsigned char id, unsigned char* flags) : m_data(id) { (void)flags; }
+    explicit TraceID(int64_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+        (void)flags;
+    }
+    explicit TraceID(int32_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+        (void)flags;
+    }
+    explicit TraceID(int16_t id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+        (void)flags;
+    }
+    explicit TraceID(signed char id, unsigned char* flags) : m_data(static_cast<uint64_t>(id)) {
+        (void)flags;
+    }
 
-        uint64_t data() const {
-            return m_data;
-        }
+    uint64_t data() const { return m_data; }
 
-      private:
-        uint64_t m_data;
-    };
+  private:
+    uint64_t m_data;
+};
 
-    // Simple union to store various types as uint64_t.
-    union TraceValueUnion {
-        bool m_bool;
-        uint64_t m_uint;
-        int64_t m_int;
-        double m_double;
-        const void* m_pointer;
-        const char* m_string;
-    };
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+    bool m_bool;
+    uint64_t m_uint;
+    int64_t m_int;
+    double m_double;
+    const void* m_pointer;
+    const char* m_string;
+};
 
-    // Simple container for const char* that should be copied instead of retained.
-    class TraceStringWithCopy {
-      public:
-        explicit TraceStringWithCopy(const char* str) : m_str(str) {
-        }
-        operator const char*() const {
-            return m_str;
-        }
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+  public:
+    explicit TraceStringWithCopy(const char* str) : m_str(str) {}
+    operator const char*() const { return m_str; }
 
-      private:
-        const char* m_str;
-    };
+  private:
+    const char* m_str;
+};
 
 // Define setTraceValue for each allowed type. It stores the type and
 // value in the return arguments. This allows this API to avoid declaring any
@@ -845,135 +832,132 @@
         *value = static_cast<uint64_t>(arg);                                                  \
     }
 
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint32_t, TRACE_VALUE_TYPE_UINT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int32_t, TRACE_VALUE_TYPE_INT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
-    INTERNAL_DECLARE_SET_TRACE_VALUE(bool, m_bool, TRACE_VALUE_TYPE_BOOL)
-    INTERNAL_DECLARE_SET_TRACE_VALUE(double, m_double, TRACE_VALUE_TYPE_DOUBLE)
-    INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, m_pointer, TRACE_VALUE_TYPE_POINTER)
-    INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, m_string, TRACE_VALUE_TYPE_STRING)
-    INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&,
-                                     m_string,
-                                     TRACE_VALUE_TYPE_COPY_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint32_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int32_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, m_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, m_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, m_pointer, TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, m_string, TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, m_string, TRACE_VALUE_TYPE_COPY_STRING)
 
 #undef INTERNAL_DECLARE_SET_TRACE_VALUE
 #undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
 
-    static inline void setTraceValue(const std::string& arg, unsigned char* type, uint64_t* value) {
-        TraceValueUnion typeValue;
-        typeValue.m_string = arg.data();
-        *type = TRACE_VALUE_TYPE_COPY_STRING;
-        *value = typeValue.m_uint;
+static inline void setTraceValue(const std::string& arg, unsigned char* type, uint64_t* value) {
+    TraceValueUnion typeValue;
+    typeValue.m_string = arg.data();
+    *type = TRACE_VALUE_TYPE_COPY_STRING;
+    *value = typeValue.m_uint;
+}
+
+// These addTraceEvent template functions are defined here instead of in the
+// macro, because the arg values could be temporary string objects. In order to
+// store pointers to the internal c_str and pass through to the tracing API, the
+// arg values must live throughout these procedures.
+
+static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+    dawn::platform::Platform* platform,
+    char phase,
+    const unsigned char* categoryEnabled,
+    const char* name,
+    uint64_t id,
+    unsigned char flags,
+    int /*unused, helps avoid empty __VA_ARGS__*/) {
+    return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, zeroNumArgs,
+                                           0, 0, 0, flags);
+}
+
+template <class ARG1_TYPE>
+static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+    dawn::platform::Platform* platform,
+    char phase,
+    const unsigned char* categoryEnabled,
+    const char* name,
+    uint64_t id,
+    unsigned char flags,
+    int /*unused, helps avoid empty __VA_ARGS__*/,
+    const char* arg1Name,
+    const ARG1_TYPE& arg1Val) {
+    const int numArgs = 1;
+    unsigned char argTypes[1];
+    uint64_t argValues[1];
+    setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+    return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+                                           &arg1Name, argTypes, argValues, flags);
+}
+
+template <class ARG1_TYPE, class ARG2_TYPE>
+static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
+    dawn::platform::Platform* platform,
+    char phase,
+    const unsigned char* categoryEnabled,
+    const char* name,
+    uint64_t id,
+    unsigned char flags,
+    int /*unused, helps avoid empty __VA_ARGS__*/,
+    const char* arg1Name,
+    const ARG1_TYPE& arg1Val,
+    const char* arg2Name,
+    const ARG2_TYPE& arg2Val) {
+    const int numArgs = 2;
+    const char* argNames[2] = {arg1Name, arg2Name};
+    unsigned char argTypes[2];
+    uint64_t argValues[2];
+    setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
+    setTraceValue(arg2Val, &argTypes[1], &argValues[1]);
+    return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
+                                           argNames, argTypes, argValues, flags);
+}
+
+// Used by TRACE_EVENTx macro. Do not use directly.
+class TraceEndOnScopeClose {
+  public:
+    // Note: members of m_data intentionally left uninitialized. See initialize.
+    TraceEndOnScopeClose() : m_pdata(0) {}
+    ~TraceEndOnScopeClose() {
+        if (m_pdata)
+            addEventIfEnabled();
     }
 
-    // These addTraceEvent template functions are defined here instead of in the
-    // macro, because the arg values could be temporary string objects. In order to
-    // store pointers to the internal c_str and pass through to the tracing API, the
-    // arg values must live throughout these procedures.
-
-    static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
-        dawn::platform::Platform* platform,
-        char phase,
-        const unsigned char* categoryEnabled,
-        const char* name,
-        uint64_t id,
-        unsigned char flags,
-        int /*unused, helps avoid empty __VA_ARGS__*/) {
-        return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id,
-                                               zeroNumArgs, 0, 0, 0, flags);
+    void initialize(dawn::platform::Platform* platform,
+                    const unsigned char* categoryEnabled,
+                    const char* name) {
+        m_data.platform = platform;
+        m_data.categoryEnabled = categoryEnabled;
+        m_data.name = name;
+        m_pdata = &m_data;
     }
 
-    template <class ARG1_TYPE>
-    static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
-        dawn::platform::Platform* platform,
-        char phase,
-        const unsigned char* categoryEnabled,
-        const char* name,
-        uint64_t id,
-        unsigned char flags,
-        int /*unused, helps avoid empty __VA_ARGS__*/,
-        const char* arg1Name,
-        const ARG1_TYPE& arg1Val) {
-        const int numArgs = 1;
-        unsigned char argTypes[1];
-        uint64_t argValues[1];
-        setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
-        return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
-                                               &arg1Name, argTypes, argValues, flags);
+  private:
+    // Add the end event if the category is still enabled.
+    void addEventIfEnabled() {
+        // Only called when m_pdata is non-null.
+        if (*m_pdata->categoryEnabled) {
+            TRACE_EVENT_API_ADD_TRACE_EVENT(m_pdata->platform, TRACE_EVENT_PHASE_END,
+                                            m_pdata->categoryEnabled, m_pdata->name, noEventId,
+                                            zeroNumArgs, 0, 0, 0, TRACE_EVENT_FLAG_NONE);
+        }
     }
 
-    template <class ARG1_TYPE, class ARG2_TYPE>
-    static inline dawn::platform::tracing::TraceEventHandle addTraceEvent(
-        dawn::platform::Platform* platform,
-        char phase,
-        const unsigned char* categoryEnabled,
-        const char* name,
-        uint64_t id,
-        unsigned char flags,
-        int /*unused, helps avoid empty __VA_ARGS__*/,
-        const char* arg1Name,
-        const ARG1_TYPE& arg1Val,
-        const char* arg2Name,
-        const ARG2_TYPE& arg2Val) {
-        const int numArgs = 2;
-        const char* argNames[2] = {arg1Name, arg2Name};
-        unsigned char argTypes[2];
-        uint64_t argValues[2];
-        setTraceValue(arg1Val, &argTypes[0], &argValues[0]);
-        setTraceValue(arg2Val, &argTypes[1], &argValues[1]);
-        return TRACE_EVENT_API_ADD_TRACE_EVENT(platform, phase, categoryEnabled, name, id, numArgs,
-                                               argNames, argTypes, argValues, flags);
-    }
-
-    // Used by TRACE_EVENTx macro. Do not use directly.
-    class TraceEndOnScopeClose {
-      public:
-        // Note: members of m_data intentionally left uninitialized. See initialize.
-        TraceEndOnScopeClose() : m_pdata(0) {
-        }
-        ~TraceEndOnScopeClose() {
-            if (m_pdata)
-                addEventIfEnabled();
-        }
-
-        void initialize(dawn::platform::Platform* platform,
-                        const unsigned char* categoryEnabled,
-                        const char* name) {
-            m_data.platform = platform;
-            m_data.categoryEnabled = categoryEnabled;
-            m_data.name = name;
-            m_pdata = &m_data;
-        }
-
-      private:
-        // Add the end event if the category is still enabled.
-        void addEventIfEnabled() {
-            // Only called when m_pdata is non-null.
-            if (*m_pdata->categoryEnabled) {
-                TRACE_EVENT_API_ADD_TRACE_EVENT(m_pdata->platform, TRACE_EVENT_PHASE_END,
-                                                m_pdata->categoryEnabled, m_pdata->name, noEventId,
-                                                zeroNumArgs, 0, 0, 0, TRACE_EVENT_FLAG_NONE);
-            }
-        }
-
-        // This Data struct workaround is to avoid initializing all the members
-        // in Data during construction of this object, since this object is always
-        // constructed, even when tracing is disabled. If the members of Data were
-        // members of this class instead, compiler warnings occur about potential
-        // uninitialized accesses.
-        struct Data {
-            dawn::platform::Platform* platform;
-            const unsigned char* categoryEnabled;
-            const char* name;
-        };
-        Data* m_pdata;
-        Data m_data;
+    // This Data struct workaround is to avoid initializing all the members
+    // in Data during construction of this object, since this object is always
+    // constructed, even when tracing is disabled. If the members of Data were
+    // members of this class instead, compiler warnings occur about potential
+    // uninitialized accesses.
+    struct Data {
+        dawn::platform::Platform* platform;
+        const unsigned char* categoryEnabled;
+        const char* name;
     };
+    Data* m_pdata;
+    Data m_data;
+};
 
 }  // namespace dawn::platform::TraceEvent
 
diff --git a/src/dawn/samples/SampleUtils.cpp b/src/dawn/samples/SampleUtils.cpp
index 3e6a502..ac102a6 100644
--- a/src/dawn/samples/SampleUtils.cpp
+++ b/src/dawn/samples/SampleUtils.cpp
@@ -78,7 +78,7 @@
 #elif defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
 static wgpu::BackendType backendType = wgpu::BackendType::OpenGL;
 #else
-#    error
+#error
 #endif
 
 static CmdBufType cmdBufType = CmdBufType::Terrible;
diff --git a/src/dawn/tests/DawnNativeTest.cpp b/src/dawn/tests/DawnNativeTest.cpp
index 02919ea..c07c5c2 100644
--- a/src/dawn/tests/DawnNativeTest.cpp
+++ b/src/dawn/tests/DawnNativeTest.cpp
@@ -23,13 +23,12 @@
 
 namespace dawn::native {
 
-    void AddFatalDawnFailure(const char* expression, const ErrorData* error) {
-        const auto& backtrace = error->GetBacktrace();
-        GTEST_MESSAGE_AT_(
-            backtrace.at(0).file, backtrace.at(0).line,
-            absl::StrCat(expression, " returned error: ", error->GetMessage()).c_str(),
-            ::testing::TestPartResult::kFatalFailure);
-    }
+void AddFatalDawnFailure(const char* expression, const ErrorData* error) {
+    const auto& backtrace = error->GetBacktrace();
+    GTEST_MESSAGE_AT_(backtrace.at(0).file, backtrace.at(0).line,
+                      absl::StrCat(expression, " returned error: ", error->GetMessage()).c_str(),
+                      ::testing::TestPartResult::kFatalFailure);
+}
 
 }  // namespace dawn::native
 
@@ -67,8 +66,7 @@
     device.SetUncapturedErrorCallback(DawnNativeTest::OnDeviceError, nullptr);
 }
 
-void DawnNativeTest::TearDown() {
-}
+void DawnNativeTest::TearDown() {}
 
 WGPUDevice DawnNativeTest::CreateTestDevice() {
     // Disabled disallowing unsafe APIs so we can test them.
diff --git a/src/dawn/tests/DawnNativeTest.h b/src/dawn/tests/DawnNativeTest.h
index 53b6ab8..e92bf67 100644
--- a/src/dawn/tests/DawnNativeTest.h
+++ b/src/dawn/tests/DawnNativeTest.h
@@ -24,11 +24,11 @@
 
 namespace dawn::native {
 
-    // This is similar to DAWN_TRY_ASSIGN but produces a fatal GTest error if EXPR is an error.
+// This is similar to DAWN_TRY_ASSIGN but produces a fatal GTest error if EXPR is an error.
 #define DAWN_ASSERT_AND_ASSIGN(VAR, EXPR) \
     DAWN_TRY_ASSIGN_WITH_CLEANUP(VAR, EXPR, {}, AddFatalDawnFailure(#EXPR, error.get()))
 
-    void AddFatalDawnFailure(const char* expression, const ErrorData* error);
+void AddFatalDawnFailure(const char* expression, const ErrorData* error);
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/DawnTest.cpp b/src/dawn/tests/DawnTest.cpp
index b4e0f4a..816dbee 100644
--- a/src/dawn/tests/DawnTest.cpp
+++ b/src/dawn/tests/DawnTest.cpp
@@ -44,67 +44,67 @@
 #include "dawn/wire/WireServer.h"
 
 #if defined(DAWN_ENABLE_BACKEND_OPENGL)
-#    include "GLFW/glfw3.h"
-#    include "dawn/native/OpenGLBackend.h"
+#include "GLFW/glfw3.h"
+#include "dawn/native/OpenGLBackend.h"
 #endif  // DAWN_ENABLE_BACKEND_OPENGL
 
 namespace {
 
-    std::string ParamName(wgpu::BackendType type) {
-        switch (type) {
-            case wgpu::BackendType::D3D12:
-                return "D3D12";
-            case wgpu::BackendType::Metal:
-                return "Metal";
-            case wgpu::BackendType::Null:
-                return "Null";
-            case wgpu::BackendType::OpenGL:
-                return "OpenGL";
-            case wgpu::BackendType::OpenGLES:
-                return "OpenGLES";
-            case wgpu::BackendType::Vulkan:
-                return "Vulkan";
-            default:
-                UNREACHABLE();
+std::string ParamName(wgpu::BackendType type) {
+    switch (type) {
+        case wgpu::BackendType::D3D12:
+            return "D3D12";
+        case wgpu::BackendType::Metal:
+            return "Metal";
+        case wgpu::BackendType::Null:
+            return "Null";
+        case wgpu::BackendType::OpenGL:
+            return "OpenGL";
+        case wgpu::BackendType::OpenGLES:
+            return "OpenGLES";
+        case wgpu::BackendType::Vulkan:
+            return "Vulkan";
+        default:
+            UNREACHABLE();
+    }
+}
+
+const char* AdapterTypeName(wgpu::AdapterType type) {
+    switch (type) {
+        case wgpu::AdapterType::DiscreteGPU:
+            return "Discrete GPU";
+        case wgpu::AdapterType::IntegratedGPU:
+            return "Integrated GPU";
+        case wgpu::AdapterType::CPU:
+            return "CPU";
+        case wgpu::AdapterType::Unknown:
+            return "Unknown";
+        default:
+            UNREACHABLE();
+    }
+}
+
+struct MapReadUserdata {
+    DawnTestBase* test;
+    size_t slot;
+};
+
+DawnTestEnvironment* gTestEnv = nullptr;
+
+template <typename T>
+void printBuffer(testing::AssertionResult& result, const T* buffer, const size_t count) {
+    static constexpr unsigned int kBytes = sizeof(T);
+
+    for (size_t index = 0; index < count; ++index) {
+        auto byteView = reinterpret_cast<const uint8_t*>(buffer + index);
+        for (unsigned int b = 0; b < kBytes; ++b) {
+            char buf[4];
+            snprintf(buf, sizeof(buf), "%02X ", byteView[b]);
+            result << buf;
         }
     }
-
-    const char* AdapterTypeName(wgpu::AdapterType type) {
-        switch (type) {
-            case wgpu::AdapterType::DiscreteGPU:
-                return "Discrete GPU";
-            case wgpu::AdapterType::IntegratedGPU:
-                return "Integrated GPU";
-            case wgpu::AdapterType::CPU:
-                return "CPU";
-            case wgpu::AdapterType::Unknown:
-                return "Unknown";
-            default:
-                UNREACHABLE();
-        }
-    }
-
-    struct MapReadUserdata {
-        DawnTestBase* test;
-        size_t slot;
-    };
-
-    DawnTestEnvironment* gTestEnv = nullptr;
-
-    template <typename T>
-    void printBuffer(testing::AssertionResult& result, const T* buffer, const size_t count) {
-        static constexpr unsigned int kBytes = sizeof(T);
-
-        for (size_t index = 0; index < count; ++index) {
-            auto byteView = reinterpret_cast<const uint8_t*>(buffer + index);
-            for (unsigned int b = 0; b < kBytes; ++b) {
-                char buf[4];
-                snprintf(buf, sizeof(buf), "%02X ", byteView[b]);
-                result << buf;
-            }
-        }
-        result << std::endl;
-    }
+    result << std::endl;
+}
 
 }  // anonymous namespace
 
@@ -121,8 +121,7 @@
                                      std::initializer_list<const char*> forceDisabledWorkarounds)
     : backendType(backendType),
       forceEnabledWorkarounds(forceEnabledWorkarounds),
-      forceDisabledWorkarounds(forceDisabledWorkarounds) {
-}
+      forceDisabledWorkarounds(forceDisabledWorkarounds) {}
 
 BackendTestConfig D3D12Backend(std::initializer_list<const char*> forceEnabledWorkarounds,
                                std::initializer_list<const char*> forceDisabledWorkarounds) {
@@ -162,15 +161,13 @@
 
 TestAdapterProperties::TestAdapterProperties(const wgpu::AdapterProperties& properties,
                                              bool selected)
-    : wgpu::AdapterProperties(properties), adapterName(properties.name), selected(selected) {
-}
+    : wgpu::AdapterProperties(properties), adapterName(properties.name), selected(selected) {}
 
 AdapterTestParam::AdapterTestParam(const BackendTestConfig& config,
                                    const TestAdapterProperties& adapterProperties)
     : adapterProperties(adapterProperties),
       forceEnabledWorkarounds(config.forceEnabledWorkarounds),
-      forceDisabledWorkarounds(config.forceDisabledWorkarounds) {
-}
+      forceDisabledWorkarounds(config.forceDisabledWorkarounds) {}
 
 std::ostream& operator<<(std::ostream& os, const AdapterTestParam& param) {
     os << ParamName(param.adapterProperties.backendType) << " "
@@ -193,8 +190,7 @@
     return os;
 }
 
-DawnTestBase::PrintToStringParamName::PrintToStringParamName(const char* test) : mTest(test) {
-}
+DawnTestBase::PrintToStringParamName::PrintToStringParamName(const char* test) : mTest(test) {}
 
 std::string DawnTestBase::PrintToStringParamName::SanitizeParamName(std::string paramName,
                                                                     size_t index) const {
@@ -700,8 +696,7 @@
 
 DawnTestBase::DawnTestBase(const AdapterTestParam& param)
     : mParam(param),
-      mWireHelper(utils::CreateWireHelper(gTestEnv->UsesWire(), gTestEnv->GetWireTraceDir())) {
-}
+      mWireHelper(utils::CreateWireHelper(gTestEnv->UsesWire(), gTestEnv->GetWireTraceDir())) {}
 
 DawnTestBase::~DawnTestBase() {
     // We need to destroy child objects before the Device
@@ -1587,154 +1582,153 @@
 }
 
 namespace detail {
-    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
-        const BackendTestConfig* params,
-        size_t numParams) {
-        ASSERT(gTestEnv != nullptr);
-        return gTestEnv->GetAvailableAdapterTestParamsForBackends(params, numParams);
+std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+    const BackendTestConfig* params,
+    size_t numParams) {
+    ASSERT(gTestEnv != nullptr);
+    return gTestEnv->GetAvailableAdapterTestParamsForBackends(params, numParams);
+}
+
+// Helper classes to set expectations
+
+template <typename T, typename U>
+ExpectEq<T, U>::ExpectEq(T singleValue, T tolerance) : mTolerance(tolerance) {
+    mExpected.push_back(singleValue);
+}
+
+template <typename T, typename U>
+ExpectEq<T, U>::ExpectEq(const T* values, const unsigned int count, T tolerance)
+    : mTolerance(tolerance) {
+    mExpected.assign(values, values + count);
+}
+
+namespace {
+
+template <typename T, typename U = T>
+testing::AssertionResult CheckImpl(const T& expected, const U& actual, const T& tolerance) {
+    ASSERT(tolerance == T{});
+    if (expected != actual) {
+        return testing::AssertionFailure() << expected << ", actual " << actual;
     }
+    return testing::AssertionSuccess();
+}
 
-    // Helper classes to set expectations
-
-    template <typename T, typename U>
-    ExpectEq<T, U>::ExpectEq(T singleValue, T tolerance) : mTolerance(tolerance) {
-        mExpected.push_back(singleValue);
+template <>
+testing::AssertionResult CheckImpl<float>(const float& expected,
+                                          const float& actual,
+                                          const float& tolerance) {
+    if (abs(expected - actual) > tolerance) {
+        return tolerance == 0.0 ? testing::AssertionFailure() << expected << ", actual " << actual
+                                : testing::AssertionFailure() << "within " << tolerance << " of "
+                                                              << expected << ", actual " << actual;
     }
+    return testing::AssertionSuccess();
+}
 
-    template <typename T, typename U>
-    ExpectEq<T, U>::ExpectEq(const T* values, const unsigned int count, T tolerance)
-        : mTolerance(tolerance) {
-        mExpected.assign(values, values + count);
+// Interpret uint16_t as float16
+// This is mostly for reading float16 output from textures
+template <>
+testing::AssertionResult CheckImpl<float, uint16_t>(const float& expected,
+                                                    const uint16_t& actual,
+                                                    const float& tolerance) {
+    float actualF32 = Float16ToFloat32(actual);
+    if (abs(expected - actualF32) > tolerance) {
+        return tolerance == 0.0
+                   ? testing::AssertionFailure() << expected << ", actual " << actualF32
+                   : testing::AssertionFailure() << "within " << tolerance << " of " << expected
+                                                 << ", actual " << actualF32;
     }
+    return testing::AssertionSuccess();
+}
 
-    namespace {
+}  // namespace
 
-        template <typename T, typename U = T>
-        testing::AssertionResult CheckImpl(const T& expected, const U& actual, const T& tolerance) {
-            ASSERT(tolerance == T{});
-            if (expected != actual) {
-                return testing::AssertionFailure() << expected << ", actual " << actual;
+template <typename T, typename U>
+testing::AssertionResult ExpectEq<T, U>::Check(const void* data, size_t size) {
+    DAWN_ASSERT(size == sizeof(U) * mExpected.size());
+    const U* actual = static_cast<const U*>(data);
+
+    for (size_t i = 0; i < mExpected.size(); ++i) {
+        testing::AssertionResult check = CheckImpl(mExpected[i], actual[i], mTolerance);
+        if (!check) {
+            testing::AssertionResult result = testing::AssertionFailure()
+                                              << "Expected data[" << i << "] to be "
+                                              << check.message() << std::endl;
+
+            if (mExpected.size() <= 1024) {
+                result << "Expected:" << std::endl;
+                printBuffer(result, mExpected.data(), mExpected.size());
+
+                result << "Actual:" << std::endl;
+                printBuffer(result, actual, mExpected.size());
             }
-            return testing::AssertionSuccess();
-        }
 
-        template <>
-        testing::AssertionResult CheckImpl<float>(const float& expected,
-                                                  const float& actual,
-                                                  const float& tolerance) {
-            if (abs(expected - actual) > tolerance) {
-                return tolerance == 0.0
-                           ? testing::AssertionFailure() << expected << ", actual " << actual
-                           : testing::AssertionFailure() << "within " << tolerance << " of "
-                                                         << expected << ", actual " << actual;
+            return result;
+        }
+    }
+    return testing::AssertionSuccess();
+}
+
+template class ExpectEq<uint8_t>;
+template class ExpectEq<uint16_t>;
+template class ExpectEq<uint32_t>;
+template class ExpectEq<uint64_t>;
+template class ExpectEq<RGBA8>;
+template class ExpectEq<float>;
+template class ExpectEq<float, uint16_t>;
+
+template <typename T>
+ExpectBetweenColors<T>::ExpectBetweenColors(T value0, T value1) {
+    T l, h;
+    l.r = std::min(value0.r, value1.r);
+    l.g = std::min(value0.g, value1.g);
+    l.b = std::min(value0.b, value1.b);
+    l.a = std::min(value0.a, value1.a);
+
+    h.r = std::max(value0.r, value1.r);
+    h.g = std::max(value0.g, value1.g);
+    h.b = std::max(value0.b, value1.b);
+    h.a = std::max(value0.a, value1.a);
+
+    mLowerColorChannels.push_back(l);
+    mHigherColorChannels.push_back(h);
+
+    mValues0.push_back(value0);
+    mValues1.push_back(value1);
+}
+
+template <typename T>
+testing::AssertionResult ExpectBetweenColors<T>::Check(const void* data, size_t size) {
+    DAWN_ASSERT(size == sizeof(T) * mLowerColorChannels.size());
+    DAWN_ASSERT(mHigherColorChannels.size() == mLowerColorChannels.size());
+    DAWN_ASSERT(mValues0.size() == mValues1.size());
+    DAWN_ASSERT(mValues0.size() == mLowerColorChannels.size());
+
+    const T* actual = static_cast<const T*>(data);
+
+    for (size_t i = 0; i < mLowerColorChannels.size(); ++i) {
+        if (!(actual[i] >= mLowerColorChannels[i] && actual[i] <= mHigherColorChannels[i])) {
+            testing::AssertionResult result = testing::AssertionFailure()
+                                              << "Expected data[" << i << "] to be between "
+                                              << mValues0[i] << " and " << mValues1[i]
+                                              << ", actual " << actual[i] << std::endl;
+
+            if (mLowerColorChannels.size() <= 1024) {
+                result << "Expected between:" << std::endl;
+                printBuffer(result, mValues0.data(), mLowerColorChannels.size());
+                result << "and" << std::endl;
+                printBuffer(result, mValues1.data(), mLowerColorChannels.size());
+
+                result << "Actual:" << std::endl;
+                printBuffer(result, actual, mLowerColorChannels.size());
             }
-            return testing::AssertionSuccess();
+
+            return result;
         }
-
-        // Interpret uint16_t as float16
-        // This is mostly for reading float16 output from textures
-        template <>
-        testing::AssertionResult CheckImpl<float, uint16_t>(const float& expected,
-                                                            const uint16_t& actual,
-                                                            const float& tolerance) {
-            float actualF32 = Float16ToFloat32(actual);
-            if (abs(expected - actualF32) > tolerance) {
-                return tolerance == 0.0
-                           ? testing::AssertionFailure() << expected << ", actual " << actualF32
-                           : testing::AssertionFailure() << "within " << tolerance << " of "
-                                                         << expected << ", actual " << actualF32;
-            }
-            return testing::AssertionSuccess();
-        }
-
-    }  // namespace
-
-    template <typename T, typename U>
-    testing::AssertionResult ExpectEq<T, U>::Check(const void* data, size_t size) {
-        DAWN_ASSERT(size == sizeof(U) * mExpected.size());
-        const U* actual = static_cast<const U*>(data);
-
-        for (size_t i = 0; i < mExpected.size(); ++i) {
-            testing::AssertionResult check = CheckImpl(mExpected[i], actual[i], mTolerance);
-            if (!check) {
-                testing::AssertionResult result = testing::AssertionFailure()
-                                                  << "Expected data[" << i << "] to be "
-                                                  << check.message() << std::endl;
-
-                if (mExpected.size() <= 1024) {
-                    result << "Expected:" << std::endl;
-                    printBuffer(result, mExpected.data(), mExpected.size());
-
-                    result << "Actual:" << std::endl;
-                    printBuffer(result, actual, mExpected.size());
-                }
-
-                return result;
-            }
-        }
-        return testing::AssertionSuccess();
     }
 
-    template class ExpectEq<uint8_t>;
-    template class ExpectEq<uint16_t>;
-    template class ExpectEq<uint32_t>;
-    template class ExpectEq<uint64_t>;
-    template class ExpectEq<RGBA8>;
-    template class ExpectEq<float>;
-    template class ExpectEq<float, uint16_t>;
+    return testing::AssertionSuccess();
+}
 
-    template <typename T>
-    ExpectBetweenColors<T>::ExpectBetweenColors(T value0, T value1) {
-        T l, h;
-        l.r = std::min(value0.r, value1.r);
-        l.g = std::min(value0.g, value1.g);
-        l.b = std::min(value0.b, value1.b);
-        l.a = std::min(value0.a, value1.a);
-
-        h.r = std::max(value0.r, value1.r);
-        h.g = std::max(value0.g, value1.g);
-        h.b = std::max(value0.b, value1.b);
-        h.a = std::max(value0.a, value1.a);
-
-        mLowerColorChannels.push_back(l);
-        mHigherColorChannels.push_back(h);
-
-        mValues0.push_back(value0);
-        mValues1.push_back(value1);
-    }
-
-    template <typename T>
-    testing::AssertionResult ExpectBetweenColors<T>::Check(const void* data, size_t size) {
-        DAWN_ASSERT(size == sizeof(T) * mLowerColorChannels.size());
-        DAWN_ASSERT(mHigherColorChannels.size() == mLowerColorChannels.size());
-        DAWN_ASSERT(mValues0.size() == mValues1.size());
-        DAWN_ASSERT(mValues0.size() == mLowerColorChannels.size());
-
-        const T* actual = static_cast<const T*>(data);
-
-        for (size_t i = 0; i < mLowerColorChannels.size(); ++i) {
-            if (!(actual[i] >= mLowerColorChannels[i] && actual[i] <= mHigherColorChannels[i])) {
-                testing::AssertionResult result = testing::AssertionFailure()
-                                                  << "Expected data[" << i << "] to be between "
-                                                  << mValues0[i] << " and " << mValues1[i]
-                                                  << ", actual " << actual[i] << std::endl;
-
-                if (mLowerColorChannels.size() <= 1024) {
-                    result << "Expected between:" << std::endl;
-                    printBuffer(result, mValues0.data(), mLowerColorChannels.size());
-                    result << "and" << std::endl;
-                    printBuffer(result, mValues1.data(), mLowerColorChannels.size());
-
-                    result << "Actual:" << std::endl;
-                    printBuffer(result, actual, mLowerColorChannels.size());
-                }
-
-                return result;
-            }
-        }
-
-        return testing::AssertionSuccess();
-    }
-
-    template class ExpectBetweenColors<RGBA8>;
+template class ExpectBetweenColors<RGBA8>;
 }  // namespace detail
diff --git a/src/dawn/tests/DawnTest.h b/src/dawn/tests/DawnTest.h
index d389b59..bcd83e4 100644
--- a/src/dawn/tests/DawnTest.h
+++ b/src/dawn/tests/DawnTest.h
@@ -120,10 +120,8 @@
 #define ASSERT_DEVICE_ERROR(statement) ASSERT_DEVICE_ERROR_MSG(statement, testing::_)
 
 struct RGBA8 {
-    constexpr RGBA8() : RGBA8(0, 0, 0, 0) {
-    }
-    constexpr RGBA8(uint8_t r, uint8_t g, uint8_t b, uint8_t a) : r(r), g(g), b(b), a(a) {
-    }
+    constexpr RGBA8() : RGBA8(0, 0, 0, 0) {}
+    constexpr RGBA8(uint8_t r, uint8_t g, uint8_t b, uint8_t a) : r(r), g(g), b(b), a(a) {}
     bool operator==(const RGBA8& other) const;
     bool operator!=(const RGBA8& other) const;
     bool operator<=(const RGBA8& other) const;
@@ -194,25 +192,25 @@
 struct GLFWwindow;
 
 namespace utils {
-    class PlatformDebugLogger;
-    class TerribleCommandBuffer;
-    class WireHelper;
+class PlatformDebugLogger;
+class TerribleCommandBuffer;
+class WireHelper;
 }  // namespace utils
 
 namespace detail {
-    class Expectation;
-    class CustomTextureExpectation;
+class Expectation;
+class CustomTextureExpectation;
 
-    template <typename T, typename U = T>
-    class ExpectEq;
-    template <typename T>
-    class ExpectBetweenColors;
+template <typename T, typename U = T>
+class ExpectEq;
+template <typename T>
+class ExpectBetweenColors;
 }  // namespace detail
 
 namespace dawn::wire {
-    class CommandHandler;
-    class WireClient;
-    class WireServer;
+class CommandHandler;
+class WireClient;
+class WireServer;
 }  // namespace dawn::wire
 
 void InitDawnEnd2EndTestEnvironment(int argc, char** argv);
@@ -655,18 +653,13 @@
     DawnTestWithParams();
     ~DawnTestWithParams() override = default;
 
-    void SetUp() override {
-        DawnTestBase::SetUp();
-    }
+    void SetUp() override { DawnTestBase::SetUp(); }
 
-    void TearDown() override {
-        DawnTestBase::TearDown();
-    }
+    void TearDown() override { DawnTestBase::TearDown(); }
 };
 
 template <typename Params>
-DawnTestWithParams<Params>::DawnTestWithParams() : DawnTestBase(this->GetParam()) {
-}
+DawnTestWithParams<Params>::DawnTestWithParams() : DawnTestBase(this->GetParam()) {}
 
 using DawnTest = DawnTestWithParams<>;
 
@@ -727,8 +720,7 @@
         template <typename... Args>                                                                \
         StructName(const AdapterTestParam& param, Args&&... args)                                  \
             : AdapterTestParam(param),                                                             \
-              DAWN_PP_CONCATENATE(_Dawn_, StructName){std::forward<Args>(args)...} {               \
-        }                                                                                          \
+              DAWN_PP_CONCATENATE(_Dawn_, StructName){std::forward<Args>(args)...} {}              \
     };                                                                                             \
     std::ostream& operator<<(std::ostream& o, const StructName& param) {                           \
         o << static_cast<const AdapterTestParam&>(param);                                          \
@@ -738,69 +730,69 @@
     static_assert(true, "require semicolon")
 
 namespace detail {
-    // Helper functions used for DAWN_INSTANTIATE_TEST
-    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
-        const BackendTestConfig* params,
-        size_t numParams);
+// Helper functions used for DAWN_INSTANTIATE_TEST
+std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+    const BackendTestConfig* params,
+    size_t numParams);
 
-    // All classes used to implement the deferred expectations should inherit from this.
-    class Expectation {
-      public:
-        virtual ~Expectation() = default;
+// All classes used to implement the deferred expectations should inherit from this.
+class Expectation {
+  public:
+    virtual ~Expectation() = default;
 
-        // Will be called with the buffer or texture data the expectation should check.
-        virtual testing::AssertionResult Check(const void* data, size_t size) = 0;
-    };
+    // Will be called with the buffer or texture data the expectation should check.
+    virtual testing::AssertionResult Check(const void* data, size_t size) = 0;
+};
 
-    // Expectation that checks the data is equal to some expected values.
-    // T - expected value Type
-    // U - actual value Type (defaults = T)
-    // This is expanded for float16 mostly where T=float, U=uint16_t
-    template <typename T, typename U>
-    class ExpectEq : public Expectation {
-      public:
-        explicit ExpectEq(T singleValue, T tolerance = {});
-        ExpectEq(const T* values, const unsigned int count, T tolerance = {});
+// Expectation that checks the data is equal to some expected values.
+// T - expected value Type
+// U - actual value Type (defaults = T)
+// This is expanded for float16 mostly where T=float, U=uint16_t
+template <typename T, typename U>
+class ExpectEq : public Expectation {
+  public:
+    explicit ExpectEq(T singleValue, T tolerance = {});
+    ExpectEq(const T* values, const unsigned int count, T tolerance = {});
 
-        testing::AssertionResult Check(const void* data, size_t size) override;
+    testing::AssertionResult Check(const void* data, size_t size) override;
 
-      private:
-        std::vector<T> mExpected;
-        T mTolerance;
-    };
-    extern template class ExpectEq<uint8_t>;
-    extern template class ExpectEq<int16_t>;
-    extern template class ExpectEq<uint32_t>;
-    extern template class ExpectEq<uint64_t>;
-    extern template class ExpectEq<RGBA8>;
-    extern template class ExpectEq<float>;
-    extern template class ExpectEq<float, uint16_t>;
+  private:
+    std::vector<T> mExpected;
+    T mTolerance;
+};
+extern template class ExpectEq<uint8_t>;
+extern template class ExpectEq<int16_t>;
+extern template class ExpectEq<uint32_t>;
+extern template class ExpectEq<uint64_t>;
+extern template class ExpectEq<RGBA8>;
+extern template class ExpectEq<float>;
+extern template class ExpectEq<float, uint16_t>;
 
-    template <typename T>
-    class ExpectBetweenColors : public Expectation {
-      public:
-        // Inclusive for now
-        ExpectBetweenColors(T value0, T value1);
-        testing::AssertionResult Check(const void* data, size_t size) override;
+template <typename T>
+class ExpectBetweenColors : public Expectation {
+  public:
+    // Inclusive for now
+    ExpectBetweenColors(T value0, T value1);
+    testing::AssertionResult Check(const void* data, size_t size) override;
 
-      private:
-        std::vector<T> mLowerColorChannels;
-        std::vector<T> mHigherColorChannels;
+  private:
+    std::vector<T> mLowerColorChannels;
+    std::vector<T> mHigherColorChannels;
 
-        // used for printing error
-        std::vector<T> mValues0;
-        std::vector<T> mValues1;
-    };
-    // A color is considered between color0 and color1 when all channel values are within range of
-    // each counterparts. It doesn't matter which value is higher or lower. Essentially color =
-    // lerp(color0, color1, t) where t is [0,1]. But I don't want to be too strict here.
-    extern template class ExpectBetweenColors<RGBA8>;
+    // used for printing error
+    std::vector<T> mValues0;
+    std::vector<T> mValues1;
+};
+// A color is considered between color0 and color1 when all channel values are within range of
+// each counterpart. It doesn't matter which value is higher or lower. Essentially color =
+// lerp(color0, color1, t) where t is [0,1]. But I don't want to be too strict here.
+extern template class ExpectBetweenColors<RGBA8>;
 
-    class CustomTextureExpectation : public Expectation {
-      public:
-        virtual ~CustomTextureExpectation() = default;
-        virtual uint32_t DataSize() = 0;
-    };
+class CustomTextureExpectation : public Expectation {
+  public:
+    virtual ~CustomTextureExpectation() = default;
+    virtual uint32_t DataSize() = 0;
+};
 
 }  // namespace detail
 
diff --git a/src/dawn/tests/MockCallback.h b/src/dawn/tests/MockCallback.h
index 49840a1..876abc2 100644
--- a/src/dawn/tests/MockCallback.h
+++ b/src/dawn/tests/MockCallback.h
@@ -20,88 +20,86 @@
 #include <tuple>
 #include <utility>
 
-#include "gmock/gmock.h"
 #include "dawn/common/Assert.h"
+#include "gmock/gmock.h"
 
 namespace testing {
 
-    template <typename F>
-    class MockCallback;
+template <typename F>
+class MockCallback;
 
-    // Helper class for mocking callbacks used for Dawn callbacks with |void* userdata|
-    // as the last callback argument.
-    //
-    // Example Usage:
-    //   MockCallback<WGPUDeviceLostCallback> mock;
-    //
-    //   void* foo = XYZ; // this is the callback userdata
-    //
-    //   wgpuDeviceSetDeviceLostCallback(device, mock.Callback(), mock.MakeUserdata(foo));
-    //   EXPECT_CALL(mock, Call(_, foo));
-    template <typename R, typename... Args>
-    class MockCallback<R (*)(Args...)> : public ::testing::MockFunction<R(Args...)> {
-        using CallbackType = R (*)(Args...);
+// Helper class for mocking callbacks used for Dawn callbacks with |void* userdata|
+// as the last callback argument.
+//
+// Example Usage:
+//   MockCallback<WGPUDeviceLostCallback> mock;
+//
+//   void* foo = XYZ; // this is the callback userdata
+//
+//   wgpuDeviceSetDeviceLostCallback(device, mock.Callback(), mock.MakeUserdata(foo));
+//   EXPECT_CALL(mock, Call(_, foo));
+template <typename R, typename... Args>
+class MockCallback<R (*)(Args...)> : public ::testing::MockFunction<R(Args...)> {
+    using CallbackType = R (*)(Args...);
 
-      public:
-        // Helper function makes it easier to get the callback using |foo.Callback()|
-        // unstead of MockCallback<CallbackType>::Callback.
-        static CallbackType Callback() {
-            return CallUnboundCallback;
-        }
+  public:
+    // Helper function makes it easier to get the callback using |foo.Callback()|
+    // unstead of MockCallback<CallbackType>::Callback.
+    static CallbackType Callback() { return CallUnboundCallback; }
 
-        void* MakeUserdata(void* userdata) {
-            auto mockAndUserdata =
-                std::unique_ptr<MockAndUserdata>(new MockAndUserdata{this, userdata});
+    void* MakeUserdata(void* userdata) {
+        auto mockAndUserdata =
+            std::unique_ptr<MockAndUserdata>(new MockAndUserdata{this, userdata});
 
-            // Add the userdata to a set of userdata for this mock. We never
-            // remove from this set even if a callback should only be called once so that
-            // repeated calls to the callback still forward the userdata correctly.
-            // Userdata will be destroyed when the mock is destroyed.
-            auto [result, inserted] = mUserdatas.insert(std::move(mockAndUserdata));
-            ASSERT(inserted);
-            return result->get();
-        }
+        // Add the userdata to a set of userdata for this mock. We never
+        // remove from this set even if a callback should only be called once so that
+        // repeated calls to the callback still forward the userdata correctly.
+        // Userdata will be destroyed when the mock is destroyed.
+        auto [result, inserted] = mUserdatas.insert(std::move(mockAndUserdata));
+        ASSERT(inserted);
+        return result->get();
+    }
 
-      private:
-        struct MockAndUserdata {
-            MockCallback* mock;
-            void* userdata;
-        };
-
-        static R CallUnboundCallback(Args... args) {
-            std::tuple<Args...> tuple = std::make_tuple(args...);
-
-            constexpr size_t ArgC = sizeof...(Args);
-            static_assert(ArgC >= 1, "Mock callback requires at least one argument (the userdata)");
-
-            // Get the userdata. It should be the last argument.
-            auto userdata = std::get<ArgC - 1>(tuple);
-            static_assert(std::is_same<decltype(userdata), void*>::value,
-                          "Last callback argument must be void* userdata");
-
-            // Extract the mock.
-            ASSERT(userdata != nullptr);
-            auto* mockAndUserdata = reinterpret_cast<MockAndUserdata*>(userdata);
-            MockCallback* mock = mockAndUserdata->mock;
-            ASSERT(mock != nullptr);
-
-            // Replace the userdata
-            std::get<ArgC - 1>(tuple) = mockAndUserdata->userdata;
-
-            // Forward the callback to the mock.
-            return mock->CallImpl(std::make_index_sequence<ArgC>{}, std::move(tuple));
-        }
-
-        // This helper cannot be inlined because we dependent on the templated index sequence
-        // to unpack the tuple arguments.
-        template <size_t... Is>
-        R CallImpl(const std::index_sequence<Is...>&, std::tuple<Args...> args) {
-            return this->Call(std::get<Is>(args)...);
-        }
-
-        std::set<std::unique_ptr<MockAndUserdata>> mUserdatas;
+  private:
+    struct MockAndUserdata {
+        MockCallback* mock;
+        void* userdata;
     };
 
+    static R CallUnboundCallback(Args... args) {
+        std::tuple<Args...> tuple = std::make_tuple(args...);
+
+        constexpr size_t ArgC = sizeof...(Args);
+        static_assert(ArgC >= 1, "Mock callback requires at least one argument (the userdata)");
+
+        // Get the userdata. It should be the last argument.
+        auto userdata = std::get<ArgC - 1>(tuple);
+        static_assert(std::is_same<decltype(userdata), void*>::value,
+                      "Last callback argument must be void* userdata");
+
+        // Extract the mock.
+        ASSERT(userdata != nullptr);
+        auto* mockAndUserdata = reinterpret_cast<MockAndUserdata*>(userdata);
+        MockCallback* mock = mockAndUserdata->mock;
+        ASSERT(mock != nullptr);
+
+        // Replace the userdata
+        std::get<ArgC - 1>(tuple) = mockAndUserdata->userdata;
+
+        // Forward the callback to the mock.
+        return mock->CallImpl(std::make_index_sequence<ArgC>{}, std::move(tuple));
+    }
+
+    // This helper cannot be inlined because we depend on the templated index sequence
+    // to unpack the tuple arguments.
+    template <size_t... Is>
+    R CallImpl(const std::index_sequence<Is...>&, std::tuple<Args...> args) {
+        return this->Call(std::get<Is>(args)...);
+    }
+
+    std::set<std::unique_ptr<MockAndUserdata>> mUserdatas;
+};
+
 }  // namespace testing
 
 #endif  // SRC_DAWN_TESTS_MOCKCALLBACK_H_
diff --git a/src/dawn/tests/ParamGenerator.h b/src/dawn/tests/ParamGenerator.h
index 8a3edba..fd06c71 100644
--- a/src/dawn/tests/ParamGenerator.h
+++ b/src/dawn/tests/ParamGenerator.h
@@ -76,20 +76,15 @@
             return mEnd == other.mEnd && mIndex == other.mIndex;
         }
 
-        bool operator!=(const Iterator& other) const {
-            return !(*this == other);
-        }
+        bool operator!=(const Iterator& other) const { return !(*this == other); }
 
-        ParamStruct operator*() const {
-            return GetParam(mParams, mIndex, s_indexSequence);
-        }
+        ParamStruct operator*() const { return GetParam(mParams, mIndex, s_indexSequence); }
 
       private:
         friend class ParamGenerator;
 
         Iterator(ParamTuple params, Index index)
-            : mParams(params), mIndex(index), mLastIndex{GetLastIndex(params, s_indexSequence)} {
-        }
+            : mParams(params), mIndex(index), mLastIndex{GetLastIndex(params, s_indexSequence)} {}
 
         ParamTuple mParams;
         Index mIndex;
@@ -119,9 +114,9 @@
 struct AdapterTestParam;
 
 namespace detail {
-    std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
-        const BackendTestConfig* params,
-        size_t numParams);
+std::vector<AdapterTestParam> GetAvailableAdapterTestParamsForBackends(
+    const BackendTestConfig* params,
+    size_t numParams);
 }
 
 template <typename Param, typename... Params>
diff --git a/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
index f283803..6e85b6a 100644
--- a/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
+++ b/src/dawn/tests/end2end/AdapterDiscoveryTests.cpp
@@ -25,76 +25,105 @@
 #include "dawn/webgpu_cpp.h"
 
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-#    include "dawn/native/VulkanBackend.h"
+#include "dawn/native/VulkanBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
 
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-#    include "dawn/native/D3D12Backend.h"
+#include "dawn/native/D3D12Backend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-#    include "dawn/native/MetalBackend.h"
+#include "dawn/native/MetalBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) || defined(DAWN_ENABLE_BACKEND_OPENGLES)
-#    include "GLFW/glfw3.h"
-#    include "dawn/native/OpenGLBackend.h"
+#include "GLFW/glfw3.h"
+#include "dawn/native/OpenGLBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL) || defined(DAWN_ENABLE_BACKEND_OPENGLES)
 
 #include <gtest/gtest.h>
 
 namespace {
 
-    using testing::_;
-    using testing::MockCallback;
-    using testing::SaveArg;
+using testing::_;
+using testing::MockCallback;
+using testing::SaveArg;
 
-    class AdapterDiscoveryTests : public ::testing::Test {};
+class AdapterDiscoveryTests : public ::testing::Test {};
 
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-    // Test only discovering the SwiftShader adapter
-    TEST(AdapterDiscoveryTests, OnlySwiftShader) {
-        dawn::native::Instance instance;
+// Test only discovering the SwiftShader adapter
+TEST(AdapterDiscoveryTests, OnlySwiftShader) {
+    dawn::native::Instance instance;
 
-        dawn::native::vulkan::AdapterDiscoveryOptions options;
-        options.forceSwiftShader = true;
-        instance.DiscoverAdapters(&options);
+    dawn::native::vulkan::AdapterDiscoveryOptions options;
+    options.forceSwiftShader = true;
+    instance.DiscoverAdapters(&options);
 
-        const auto& adapters = instance.GetAdapters();
-        EXPECT_LE(adapters.size(), 1u);  // 0 or 1 SwiftShader adapters.
-        for (const auto& adapter : adapters) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
+    const auto& adapters = instance.GetAdapters();
+    EXPECT_LE(adapters.size(), 1u);  // 0 or 1 SwiftShader adapters.
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
-            EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
-        }
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
+        EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
     }
+}
 
-    // Test discovering only Vulkan adapters
-    TEST(AdapterDiscoveryTests, OnlyVulkan) {
-        dawn::native::Instance instance;
+// Test discovering only Vulkan adapters
+TEST(AdapterDiscoveryTests, OnlyVulkan) {
+    dawn::native::Instance instance;
 
-        dawn::native::vulkan::AdapterDiscoveryOptions options;
-        instance.DiscoverAdapters(&options);
+    dawn::native::vulkan::AdapterDiscoveryOptions options;
+    instance.DiscoverAdapters(&options);
 
-        const auto& adapters = instance.GetAdapters();
-        for (const auto& adapter : adapters) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
-        }
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::Vulkan);
     }
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN)
 
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-    // Test discovering only D3D12 adapters
-    TEST(AdapterDiscoveryTests, OnlyD3D12) {
+// Test discovering only D3D12 adapters
+TEST(AdapterDiscoveryTests, OnlyD3D12) {
+    dawn::native::Instance instance;
+
+    dawn::native::d3d12::AdapterDiscoveryOptions options;
+    instance.DiscoverAdapters(&options);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
+    }
+}
+
+// Test discovering a D3D12 adapter from a preexisting DXGI adapter
+TEST(AdapterDiscoveryTests, MatchingDXGIAdapter) {
+    using Microsoft::WRL::ComPtr;
+
+    ComPtr<IDXGIFactory4> dxgiFactory;
+    HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
+    ASSERT_EQ(hr, S_OK);
+
+    for (uint32_t adapterIndex = 0;; ++adapterIndex) {
+        ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
+        if (dxgiFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
+            break;  // No more adapters to enumerate.
+        }
+
         dawn::native::Instance instance;
 
         dawn::native::d3d12::AdapterDiscoveryOptions options;
+        options.dxgiAdapter = std::move(dxgiAdapter);
         instance.DiscoverAdapters(&options);
 
         const auto& adapters = instance.GetAdapters();
@@ -105,312 +134,280 @@
             EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
         }
     }
-
-    // Test discovering a D3D12 adapter from a prexisting DXGI adapter
-    TEST(AdapterDiscoveryTests, MatchingDXGIAdapter) {
-        using Microsoft::WRL::ComPtr;
-
-        ComPtr<IDXGIFactory4> dxgiFactory;
-        HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
-        ASSERT_EQ(hr, S_OK);
-
-        for (uint32_t adapterIndex = 0;; ++adapterIndex) {
-            ComPtr<IDXGIAdapter1> dxgiAdapter = nullptr;
-            if (dxgiFactory->EnumAdapters1(adapterIndex, &dxgiAdapter) == DXGI_ERROR_NOT_FOUND) {
-                break;  // No more adapters to enumerate.
-            }
-
-            dawn::native::Instance instance;
-
-            dawn::native::d3d12::AdapterDiscoveryOptions options;
-            options.dxgiAdapter = std::move(dxgiAdapter);
-            instance.DiscoverAdapters(&options);
-
-            const auto& adapters = instance.GetAdapters();
-            for (const auto& adapter : adapters) {
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-
-                EXPECT_EQ(properties.backendType, wgpu::BackendType::D3D12);
-            }
-        }
-    }
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_D3D12)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-    // Test discovering only Metal adapters
-    TEST(AdapterDiscoveryTests, OnlyMetal) {
-        dawn::native::Instance instance;
+// Test discovering only Metal adapters
+TEST(AdapterDiscoveryTests, OnlyMetal) {
+    dawn::native::Instance instance;
 
+    dawn::native::metal::AdapterDiscoveryOptions options;
+    instance.DiscoverAdapters(&options);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::Metal);
+    }
+}
+#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
+
+#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+// Test discovering only desktop OpenGL adapters
+TEST(AdapterDiscoveryTests, OnlyDesktopGL) {
+    if (!glfwInit()) {
+        GTEST_SKIP() << "glfwInit() failed";
+    }
+    glfwDefaultWindowHints();
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+    GLFWwindow* window = glfwCreateWindow(400, 400, "Dawn OpenGL test window", nullptr, nullptr);
+    glfwMakeContextCurrent(window);
+
+    dawn::native::Instance instance;
+
+    dawn::native::opengl::AdapterDiscoveryOptions options;
+    options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    instance.DiscoverAdapters(&options);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGL);
+    }
+
+    glfwDestroyWindow(window);
+}
+#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
+
+#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
+// Test discovering only OpenGLES adapters
+TEST(AdapterDiscoveryTests, OnlyOpenGLES) {
+    ScopedEnvironmentVar angleDefaultPlatform;
+    if (GetEnvironmentVar("ANGLE_DEFAULT_PLATFORM").first.empty()) {
+        angleDefaultPlatform.Set("ANGLE_DEFAULT_PLATFORM", "swiftshader");
+    }
+
+    if (!glfwInit()) {
+        GTEST_SKIP() << "glfwInit() failed";
+    }
+    glfwDefaultWindowHints();
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+    glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+    glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
+
+    GLFWwindow* window = glfwCreateWindow(400, 400, "Dawn OpenGLES test window", nullptr, nullptr);
+    glfwMakeContextCurrent(window);
+
+    dawn::native::Instance instance;
+
+    dawn::native::opengl::AdapterDiscoveryOptionsES options;
+    options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    instance.DiscoverAdapters(&options);
+    glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
+
+    const auto& adapters = instance.GetAdapters();
+    for (const auto& adapter : adapters) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGLES);
+    }
+
+    glfwDestroyWindow(window);
+}
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
+
+#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(DAWN_ENABLE_BACKEND_VULKAN)
+// Test discovering the Metal backend, then the Vulkan backend
+// does not duplicate adapters.
+TEST(AdapterDiscoveryTests, OneBackendThenTheOther) {
+    dawn::native::Instance instance;
+    uint32_t metalAdapterCount = 0;
+    {
         dawn::native::metal::AdapterDiscoveryOptions options;
         instance.DiscoverAdapters(&options);
 
         const auto& adapters = instance.GetAdapters();
+        metalAdapterCount = adapters.size();
         for (const auto& adapter : adapters) {
             wgpu::AdapterProperties properties;
             adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::Metal);
+            ASSERT_EQ(properties.backendType, wgpu::BackendType::Metal);
         }
     }
-#endif  // defined(DAWN_ENABLE_BACKEND_METAL)
-
-#if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-    // Test discovering only desktop OpenGL adapters
-    TEST(AdapterDiscoveryTests, OnlyDesktopGL) {
-        if (!glfwInit()) {
-            GTEST_SKIP() << "glfwInit() failed";
-        }
-        glfwDefaultWindowHints();
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
-        glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
-        glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
-
-        GLFWwindow* window =
-            glfwCreateWindow(400, 400, "Dawn OpenGL test window", nullptr, nullptr);
-        glfwMakeContextCurrent(window);
-
-        dawn::native::Instance instance;
-
-        dawn::native::opengl::AdapterDiscoveryOptions options;
-        options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+    {
+        dawn::native::vulkan::AdapterDiscoveryOptions options;
         instance.DiscoverAdapters(&options);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
 
+        uint32_t metalAdapterCount2 = 0;
         const auto& adapters = instance.GetAdapters();
         for (const auto& adapter : adapters) {
             wgpu::AdapterProperties properties;
             adapter.GetProperties(&properties);
 
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGL);
-        }
-
-        glfwDestroyWindow(window);
-    }
-#endif  // defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-
-#if defined(DAWN_ENABLE_BACKEND_OPENGLES)
-    // Test discovering only OpenGLES adapters
-    TEST(AdapterDiscoveryTests, OnlyOpenGLES) {
-        ScopedEnvironmentVar angleDefaultPlatform;
-        if (GetEnvironmentVar("ANGLE_DEFAULT_PLATFORM").first.empty()) {
-            angleDefaultPlatform.Set("ANGLE_DEFAULT_PLATFORM", "swiftshader");
-        }
-
-        if (!glfwInit()) {
-            GTEST_SKIP() << "glfwInit() failed";
-        }
-        glfwDefaultWindowHints();
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
-        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
-        glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
-        glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
-
-        GLFWwindow* window =
-            glfwCreateWindow(400, 400, "Dawn OpenGLES test window", nullptr, nullptr);
-        glfwMakeContextCurrent(window);
-
-        dawn::native::Instance instance;
-
-        dawn::native::opengl::AdapterDiscoveryOptionsES options;
-        options.getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
-        instance.DiscoverAdapters(&options);
-        glfwWindowHint(GLFW_VISIBLE, GLFW_TRUE);
-
-        const auto& adapters = instance.GetAdapters();
-        for (const auto& adapter : adapters) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
-
-            EXPECT_EQ(properties.backendType, wgpu::BackendType::OpenGLES);
-        }
-
-        glfwDestroyWindow(window);
-    }
-#endif  // defined(DAWN_ENABLE_BACKEND_OPENGLES)
-
-#if defined(DAWN_ENABLE_BACKEND_METAL) && defined(DAWN_ENABLE_BACKEND_VULKAN)
-    // Test discovering the Metal backend, then the Vulkan backend
-    // does not duplicate adapters.
-    TEST(AdapterDiscoveryTests, OneBackendThenTheOther) {
-        dawn::native::Instance instance;
-        uint32_t metalAdapterCount = 0;
-        {
-            dawn::native::metal::AdapterDiscoveryOptions options;
-            instance.DiscoverAdapters(&options);
-
-            const auto& adapters = instance.GetAdapters();
-            metalAdapterCount = adapters.size();
-            for (const auto& adapter : adapters) {
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-
-                ASSERT_EQ(properties.backendType, wgpu::BackendType::Metal);
+            EXPECT_TRUE(properties.backendType == wgpu::BackendType::Metal ||
+                        properties.backendType == wgpu::BackendType::Vulkan);
+            if (properties.backendType == wgpu::BackendType::Metal) {
+                metalAdapterCount2++;
             }
         }
-        {
-            dawn::native::vulkan::AdapterDiscoveryOptions options;
-            instance.DiscoverAdapters(&options);
-
-            uint32_t metalAdapterCount2 = 0;
-            const auto& adapters = instance.GetAdapters();
-            for (const auto& adapter : adapters) {
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-
-                EXPECT_TRUE(properties.backendType == wgpu::BackendType::Metal ||
-                            properties.backendType == wgpu::BackendType::Vulkan);
-                if (properties.backendType == wgpu::BackendType::Metal) {
-                    metalAdapterCount2++;
-                }
-            }
-            EXPECT_EQ(metalAdapterCount, metalAdapterCount2);
-        }
+        EXPECT_EQ(metalAdapterCount, metalAdapterCount2);
     }
+}
 #endif  // defined(DAWN_ENABLE_BACKEND_VULKAN) && defined(DAWN_ENABLE_BACKEND_METAL)
 
-    class AdapterCreationTest : public ::testing::Test {
-      protected:
-        void SetUp() override {
-            dawnProcSetProcs(&dawn_native::GetProcs());
+class AdapterCreationTest : public ::testing::Test {
+  protected:
+    void SetUp() override {
+        dawnProcSetProcs(&dawn_native::GetProcs());
 
-            {
-                auto nativeInstance = std::make_unique<dawn_native::Instance>();
-                nativeInstance->DiscoverDefaultAdapters();
-                for (dawn_native::Adapter& nativeAdapter : nativeInstance->GetAdapters()) {
-                    anyAdapterAvailable = true;
+        {
+            auto nativeInstance = std::make_unique<dawn_native::Instance>();
+            nativeInstance->DiscoverDefaultAdapters();
+            for (dawn_native::Adapter& nativeAdapter : nativeInstance->GetAdapters()) {
+                anyAdapterAvailable = true;
 
-                    wgpu::AdapterProperties properties;
-                    nativeAdapter.GetProperties(&properties);
-                    swiftShaderAvailable =
-                        swiftShaderAvailable ||
-                        gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID);
-                    discreteGPUAvailable = discreteGPUAvailable ||
-                                           properties.adapterType == wgpu::AdapterType::DiscreteGPU;
-                    integratedGPUAvailable =
-                        integratedGPUAvailable ||
-                        properties.adapterType == wgpu::AdapterType::IntegratedGPU;
-                }
+                wgpu::AdapterProperties properties;
+                nativeAdapter.GetProperties(&properties);
+                swiftShaderAvailable =
+                    swiftShaderAvailable ||
+                    gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID);
+                discreteGPUAvailable = discreteGPUAvailable ||
+                                       properties.adapterType == wgpu::AdapterType::DiscreteGPU;
+                integratedGPUAvailable = integratedGPUAvailable ||
+                                         properties.adapterType == wgpu::AdapterType::IntegratedGPU;
             }
-
-            instance = wgpu::CreateInstance();
         }
 
-        void TearDown() override {
-            instance = nullptr;
-            dawnProcSetProcs(nullptr);
-        }
-
-        wgpu::Instance instance;
-        bool anyAdapterAvailable = false;
-        bool swiftShaderAvailable = false;
-        bool discreteGPUAvailable = false;
-        bool integratedGPUAvailable = false;
-    };
-
-    // Test that requesting the default adapter works
-    TEST_F(AdapterCreationTest, DefaultAdapter) {
-        wgpu::RequestAdapterOptions options = {};
-
-        MockCallback<WGPURequestAdapterCallback> cb;
-
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
-
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+        instance = wgpu::CreateInstance();
     }
 
-    // Test that passing nullptr for the options gets the default adapter
-    TEST_F(AdapterCreationTest, NullGivesDefaultAdapter) {
-        wgpu::RequestAdapterOptions options = {};
-
-        MockCallback<WGPURequestAdapterCallback> cb;
-
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
-
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
-
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this + 1))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(nullptr, cb.Callback(), cb.MakeUserdata(this + 1));
-
-        wgpu::Adapter adapter2 = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter.Get(), adapter2.Get());
+    void TearDown() override {
+        instance = nullptr;
+        dawnProcSetProcs(nullptr);
     }
 
-    // Test that requesting the fallback adapter returns SwiftShader.
-    TEST_F(AdapterCreationTest, FallbackAdapter) {
-        wgpu::RequestAdapterOptions options = {};
-        options.forceFallbackAdapter = true;
+    wgpu::Instance instance;
+    bool anyAdapterAvailable = false;
+    bool swiftShaderAvailable = false;
+    bool discreteGPUAvailable = false;
+    bool integratedGPUAvailable = false;
+};
 
-        MockCallback<WGPURequestAdapterCallback> cb;
+// Test that requesting the default adapter works
+TEST_F(AdapterCreationTest, DefaultAdapter) {
+    wgpu::RequestAdapterOptions options = {};
 
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+    MockCallback<WGPURequestAdapterCallback> cb;
 
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, swiftShaderAvailable);
-        if (adapter != nullptr) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
 
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
-            EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
-        }
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+}
+
+// Test that passing nullptr for the options gets the default adapter
+TEST_F(AdapterCreationTest, NullGivesDefaultAdapter) {
+    wgpu::RequestAdapterOptions options = {};
+
+    MockCallback<WGPURequestAdapterCallback> cb;
+
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this + 1))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(nullptr, cb.Callback(), cb.MakeUserdata(this + 1));
+
+    wgpu::Adapter adapter2 = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter.Get(), adapter2.Get());
+}
+
+// Test that requesting the fallback adapter returns SwiftShader.
+TEST_F(AdapterCreationTest, FallbackAdapter) {
+    wgpu::RequestAdapterOptions options = {};
+    options.forceFallbackAdapter = true;
+
+    MockCallback<WGPURequestAdapterCallback> cb;
+
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, swiftShaderAvailable);
+    if (adapter != nullptr) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::CPU);
+        EXPECT_TRUE(gpu_info::IsSwiftshader(properties.vendorID, properties.deviceID));
     }
+}
 
-    // Test that requesting a high performance GPU works
-    TEST_F(AdapterCreationTest, PreferHighPerformance) {
-        wgpu::RequestAdapterOptions options = {};
-        options.powerPreference = wgpu::PowerPreference::HighPerformance;
+// Test that requesting a high performance GPU works
+TEST_F(AdapterCreationTest, PreferHighPerformance) {
+    wgpu::RequestAdapterOptions options = {};
+    options.powerPreference = wgpu::PowerPreference::HighPerformance;
 
-        MockCallback<WGPURequestAdapterCallback> cb;
+    MockCallback<WGPURequestAdapterCallback> cb;
 
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
 
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
-        if (discreteGPUAvailable) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::DiscreteGPU);
-        }
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+    if (discreteGPUAvailable) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::DiscreteGPU);
     }
+}
 
-    // Test that requesting a low power GPU works
-    TEST_F(AdapterCreationTest, PreferLowPower) {
-        wgpu::RequestAdapterOptions options = {};
-        options.powerPreference = wgpu::PowerPreference::LowPower;
+// Test that requesting a low power GPU works
+TEST_F(AdapterCreationTest, PreferLowPower) {
+    wgpu::RequestAdapterOptions options = {};
+    options.powerPreference = wgpu::PowerPreference::LowPower;
 
-        MockCallback<WGPURequestAdapterCallback> cb;
+    MockCallback<WGPURequestAdapterCallback> cb;
 
-        WGPUAdapter cAdapter = nullptr;
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
-            .WillOnce(SaveArg<1>(&cAdapter));
-        instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
+    WGPUAdapter cAdapter = nullptr;
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, _, nullptr, this))
+        .WillOnce(SaveArg<1>(&cAdapter));
+    instance.RequestAdapter(&options, cb.Callback(), cb.MakeUserdata(this));
 
-        wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
-        EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
-        if (integratedGPUAvailable) {
-            wgpu::AdapterProperties properties;
-            adapter.GetProperties(&properties);
-            EXPECT_EQ(properties.adapterType, wgpu::AdapterType::IntegratedGPU);
-        }
+    wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_EQ(adapter != nullptr, anyAdapterAvailable);
+    if (integratedGPUAvailable) {
+        wgpu::AdapterProperties properties;
+        adapter.GetProperties(&properties);
+        EXPECT_EQ(properties.adapterType, wgpu::AdapterType::IntegratedGPU);
     }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/end2end/BufferTests.cpp b/src/dawn/tests/end2end/BufferTests.cpp
index 9853495..b8d7a26 100644
--- a/src/dawn/tests/end2end/BufferTests.cpp
+++ b/src/dawn/tests/end2end/BufferTests.cpp
@@ -556,9 +556,7 @@
         return buffer.GetConstMappedRange(0, size);
     }
 
-    void UnmapBuffer(const wgpu::Buffer& buffer) {
-        buffer.Unmap();
-    }
+    void UnmapBuffer(const wgpu::Buffer& buffer) { buffer.Unmap(); }
 
     wgpu::Buffer BufferMappedAtCreation(wgpu::BufferUsage usage, uint64_t size) {
         wgpu::BufferDescriptor descriptor;
diff --git a/src/dawn/tests/end2end/BufferZeroInitTests.cpp b/src/dawn/tests/end2end/BufferZeroInitTests.cpp
index 09dcc4a..71100b7 100644
--- a/src/dawn/tests/end2end/BufferZeroInitTests.cpp
+++ b/src/dawn/tests/end2end/BufferZeroInitTests.cpp
@@ -34,14 +34,14 @@
 
 namespace {
 
-    struct BufferZeroInitInCopyT2BSpec {
-        wgpu::Extent3D textureSize;
-        uint64_t bufferOffset;
-        uint64_t extraBytes;
-        uint32_t bytesPerRow;
-        uint32_t rowsPerImage;
-        uint32_t lazyClearCount;
-    };
+struct BufferZeroInitInCopyT2BSpec {
+    wgpu::Extent3D textureSize;
+    uint64_t bufferOffset;
+    uint64_t extraBytes;
+    uint32_t bytesPerRow;
+    uint32_t rowsPerImage;
+    uint32_t lazyClearCount;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/ColorStateTests.cpp b/src/dawn/tests/end2end/ColorStateTests.cpp
index 142ff47..1753c9a 100644
--- a/src/dawn/tests/end2end/ColorStateTests.cpp
+++ b/src/dawn/tests/end2end/ColorStateTests.cpp
@@ -18,9 +18,9 @@
 #include <utility>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -220,83 +220,83 @@
 };
 
 namespace {
-    // Add two colors and clamp
-    constexpr RGBA8 operator+(const RGBA8& col1, const RGBA8& col2) {
-        int r = static_cast<int>(col1.r) + static_cast<int>(col2.r);
-        int g = static_cast<int>(col1.g) + static_cast<int>(col2.g);
-        int b = static_cast<int>(col1.b) + static_cast<int>(col2.b);
-        int a = static_cast<int>(col1.a) + static_cast<int>(col2.a);
-        r = (r > 255 ? 255 : (r < 0 ? 0 : r));
-        g = (g > 255 ? 255 : (g < 0 ? 0 : g));
-        b = (b > 255 ? 255 : (b < 0 ? 0 : b));
-        a = (a > 255 ? 255 : (a < 0 ? 0 : a));
+// Add two colors and clamp
+constexpr RGBA8 operator+(const RGBA8& col1, const RGBA8& col2) {
+    int r = static_cast<int>(col1.r) + static_cast<int>(col2.r);
+    int g = static_cast<int>(col1.g) + static_cast<int>(col2.g);
+    int b = static_cast<int>(col1.b) + static_cast<int>(col2.b);
+    int a = static_cast<int>(col1.a) + static_cast<int>(col2.a);
+    r = (r > 255 ? 255 : (r < 0 ? 0 : r));
+    g = (g > 255 ? 255 : (g < 0 ? 0 : g));
+    b = (b > 255 ? 255 : (b < 0 ? 0 : b));
+    a = (a > 255 ? 255 : (a < 0 ? 0 : a));
 
-        return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
-                     static_cast<uint8_t>(a));
-    }
+    return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
+                 static_cast<uint8_t>(a));
+}
 
-    // Subtract two colors and clamp
-    constexpr RGBA8 operator-(const RGBA8& col1, const RGBA8& col2) {
-        int r = static_cast<int>(col1.r) - static_cast<int>(col2.r);
-        int g = static_cast<int>(col1.g) - static_cast<int>(col2.g);
-        int b = static_cast<int>(col1.b) - static_cast<int>(col2.b);
-        int a = static_cast<int>(col1.a) - static_cast<int>(col2.a);
-        r = (r > 255 ? 255 : (r < 0 ? 0 : r));
-        g = (g > 255 ? 255 : (g < 0 ? 0 : g));
-        b = (b > 255 ? 255 : (b < 0 ? 0 : b));
-        a = (a > 255 ? 255 : (a < 0 ? 0 : a));
+// Subtract two colors and clamp
+constexpr RGBA8 operator-(const RGBA8& col1, const RGBA8& col2) {
+    int r = static_cast<int>(col1.r) - static_cast<int>(col2.r);
+    int g = static_cast<int>(col1.g) - static_cast<int>(col2.g);
+    int b = static_cast<int>(col1.b) - static_cast<int>(col2.b);
+    int a = static_cast<int>(col1.a) - static_cast<int>(col2.a);
+    r = (r > 255 ? 255 : (r < 0 ? 0 : r));
+    g = (g > 255 ? 255 : (g < 0 ? 0 : g));
+    b = (b > 255 ? 255 : (b < 0 ? 0 : b));
+    a = (a > 255 ? 255 : (a < 0 ? 0 : a));
 
-        return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
-                     static_cast<uint8_t>(a));
-    }
+    return RGBA8(static_cast<uint8_t>(r), static_cast<uint8_t>(g), static_cast<uint8_t>(b),
+                 static_cast<uint8_t>(a));
+}
 
-    // Get the component-wise minimum of two colors
-    RGBA8 min(const RGBA8& col1, const RGBA8& col2) {
-        return RGBA8(std::min(col1.r, col2.r), std::min(col1.g, col2.g), std::min(col1.b, col2.b),
-                     std::min(col1.a, col2.a));
-    }
+// Get the component-wise minimum of two colors
+RGBA8 min(const RGBA8& col1, const RGBA8& col2) {
+    return RGBA8(std::min(col1.r, col2.r), std::min(col1.g, col2.g), std::min(col1.b, col2.b),
+                 std::min(col1.a, col2.a));
+}
 
-    // Get the component-wise maximum of two colors
-    RGBA8 max(const RGBA8& col1, const RGBA8& col2) {
-        return RGBA8(std::max(col1.r, col2.r), std::max(col1.g, col2.g), std::max(col1.b, col2.b),
-                     std::max(col1.a, col2.a));
-    }
+// Get the component-wise maximum of two colors
+RGBA8 max(const RGBA8& col1, const RGBA8& col2) {
+    return RGBA8(std::max(col1.r, col2.r), std::max(col1.g, col2.g), std::max(col1.b, col2.b),
+                 std::max(col1.a, col2.a));
+}
 
-    // Blend two RGBA8 color values parameterized by the provided factors in the range [0.f, 1.f]
-    RGBA8 mix(const RGBA8& col1, const RGBA8& col2, std::array<float, 4> fac) {
-        float r = static_cast<float>(col1.r) * (1.f - fac[0]) + static_cast<float>(col2.r) * fac[0];
-        float g = static_cast<float>(col1.g) * (1.f - fac[1]) + static_cast<float>(col2.g) * fac[1];
-        float b = static_cast<float>(col1.b) * (1.f - fac[2]) + static_cast<float>(col2.b) * fac[2];
-        float a = static_cast<float>(col1.a) * (1.f - fac[3]) + static_cast<float>(col2.a) * fac[3];
+// Blend two RGBA8 color values parameterized by the provided factors in the range [0.f, 1.f]
+RGBA8 mix(const RGBA8& col1, const RGBA8& col2, std::array<float, 4> fac) {
+    float r = static_cast<float>(col1.r) * (1.f - fac[0]) + static_cast<float>(col2.r) * fac[0];
+    float g = static_cast<float>(col1.g) * (1.f - fac[1]) + static_cast<float>(col2.g) * fac[1];
+    float b = static_cast<float>(col1.b) * (1.f - fac[2]) + static_cast<float>(col2.b) * fac[2];
+    float a = static_cast<float>(col1.a) * (1.f - fac[3]) + static_cast<float>(col2.a) * fac[3];
 
-        return RGBA8({static_cast<uint8_t>(std::round(r)), static_cast<uint8_t>(std::round(g)),
-                      static_cast<uint8_t>(std::round(b)), static_cast<uint8_t>(std::round(a))});
-    }
+    return RGBA8({static_cast<uint8_t>(std::round(r)), static_cast<uint8_t>(std::round(g)),
+                  static_cast<uint8_t>(std::round(b)), static_cast<uint8_t>(std::round(a))});
+}
 
-    // Blend two RGBA8 color values parameterized by the provided RGBA8 factor
-    RGBA8 mix(const RGBA8& col1, const RGBA8& col2, const RGBA8& fac) {
-        std::array<float, 4> f = {{
-            static_cast<float>(fac.r) / 255.f,
-            static_cast<float>(fac.g) / 255.f,
-            static_cast<float>(fac.b) / 255.f,
-            static_cast<float>(fac.a) / 255.f,
-        }};
-        return mix(col1, col2, f);
-    }
-
-    constexpr std::array<RGBA8, 8> kColors = {{
-        // check operations over multiple channels
-        RGBA8(64, 0, 0, 0),
-        RGBA8(0, 64, 0, 0),
-        RGBA8(64, 0, 32, 0),
-        RGBA8(0, 64, 32, 0),
-        RGBA8(128, 0, 128, 128),
-        RGBA8(0, 128, 128, 128),
-
-        // check cases that may cause overflow
-        RGBA8(0, 0, 0, 0),
-        RGBA8(255, 255, 255, 255),
+// Blend two RGBA8 color values parameterized by the provided RGBA8 factor
+RGBA8 mix(const RGBA8& col1, const RGBA8& col2, const RGBA8& fac) {
+    std::array<float, 4> f = {{
+        static_cast<float>(fac.r) / 255.f,
+        static_cast<float>(fac.g) / 255.f,
+        static_cast<float>(fac.b) / 255.f,
+        static_cast<float>(fac.a) / 255.f,
     }};
+    return mix(col1, col2, f);
+}
+
+constexpr std::array<RGBA8, 8> kColors = {{
+    // check operations over multiple channels
+    RGBA8(64, 0, 0, 0),
+    RGBA8(0, 64, 0, 0),
+    RGBA8(64, 0, 32, 0),
+    RGBA8(0, 64, 32, 0),
+    RGBA8(128, 0, 128, 128),
+    RGBA8(0, 128, 128, 128),
+
+    // check cases that may cause overflow
+    RGBA8(0, 0, 0, 0),
+    RGBA8(255, 255, 255, 255),
+}};
 }  // namespace
 
 // Test compilation and usage of the fixture
diff --git a/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp b/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
index 348ca98..310a534 100644
--- a/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
+++ b/src/dawn/tests/end2end/CompressedTextureFormatTests.cpp
@@ -35,8 +35,8 @@
 };
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(CompressedTextureFormatTestParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(CompressedTextureFormatTestParams, TextureFormat);
 }  // namespace
 
 class CompressedTextureFormatTest : public DawnTestWithParams<CompressedTextureFormatTestParams> {
@@ -61,9 +61,7 @@
         return {};
     }
 
-    bool IsFormatSupported() const {
-        return mIsFormatSupported;
-    }
+    bool IsFormatSupported() const { return mIsFormatSupported; }
 
     uint32_t BlockWidthInTexels() const {
         ASSERT(IsFormatSupported());
@@ -1163,9 +1161,7 @@
         return features;
     }
 
-    bool IsBCFormatSupported() const {
-        return mIsBCFormatSupported;
-    }
+    bool IsBCFormatSupported() const { return mIsBCFormatSupported; }
 
     bool mIsBCFormatSupported = false;
 };
diff --git a/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp b/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
index d9cde6c..41a88d0 100644
--- a/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
+++ b/src/dawn/tests/end2end/ComputeLayoutMemoryBufferTests.cpp
@@ -18,59 +18,57 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
 
-    // Helper for replacing all occurrences of substr in str with replacement
-    std::string ReplaceAll(std::string str,
-                           const std::string& substr,
-                           const std::string& replacement) {
-        size_t pos = 0;
-        while ((pos = str.find(substr, pos)) != std::string::npos) {
-            str.replace(pos, substr.length(), replacement);
-            pos += replacement.length();
-        }
-        return str;
+// Helper for replacing all occurrences of substr in str with replacement
+std::string ReplaceAll(std::string str, const std::string& substr, const std::string& replacement) {
+    size_t pos = 0;
+    while ((pos = str.find(substr, pos)) != std::string::npos) {
+        str.replace(pos, substr.length(), replacement);
+        pos += replacement.length();
     }
+    return str;
+}
 
-    // DataMatcherCallback is the callback function by DataMatcher.
-    // It is called for each contiguous sequence of bytes that should be checked
-    // for equality.
-    // offset and size are in units of bytes.
-    using DataMatcherCallback = std::function<void(uint32_t offset, uint32_t size)>;
+// DataMatcherCallback is the callback function by DataMatcher.
+// It is called for each contiguous sequence of bytes that should be checked
+// for equality.
+// offset and size are in units of bytes.
+using DataMatcherCallback = std::function<void(uint32_t offset, uint32_t size)>;
 
-    // DataMatcher is a function pointer to a data matching function.
-    // size is the total number of bytes being considered for matching.
-    // The callback may be called once or multiple times, and may only consider
-    // part of the interval [0, size)
-    using DataMatcher = void (*)(uint32_t size, DataMatcherCallback);
+// DataMatcher is a function pointer to a data matching function.
+// size is the total number of bytes being considered for matching.
+// The callback may be called once or multiple times, and may only consider
+// part of the interval [0, size)
+using DataMatcher = void (*)(uint32_t size, DataMatcherCallback);
 
-    // FullDataMatcher is a DataMatcher that calls callback with the interval
-    // [0, size)
-    void FullDataMatcher(uint32_t size, DataMatcherCallback callback) {
-        callback(0, size);
+// FullDataMatcher is a DataMatcher that calls callback with the interval
+// [0, size)
+void FullDataMatcher(uint32_t size, DataMatcherCallback callback) {
+    callback(0, size);
+}
+
+// StridedDataMatcher is a DataMatcher that calls callback with the strided
+// intervals of length BYTES_TO_MATCH, skipping BYTES_TO_SKIP.
+// For example: StridedDataMatcher<2, 4>(18, callback) will call callback
+// with the intervals: [0, 2), [6, 8), [12, 14)
+template <int BYTES_TO_MATCH, int BYTES_TO_SKIP>
+void StridedDataMatcher(uint32_t size, DataMatcherCallback callback) {
+    uint32_t offset = 0;
+    while (offset < size) {
+        callback(offset, BYTES_TO_MATCH);
+        offset += BYTES_TO_MATCH + BYTES_TO_SKIP;
     }
+}
 
-    // StridedDataMatcher is a DataMatcher that calls callback with the strided
-    // intervals of length BYTES_TO_MATCH, skipping BYTES_TO_SKIP.
-    // For example: StridedDataMatcher<2, 4>(18, callback) will call callback
-    // with the intervals: [0, 2), [6, 8), [12, 14)
-    template <int BYTES_TO_MATCH, int BYTES_TO_SKIP>
-    void StridedDataMatcher(uint32_t size, DataMatcherCallback callback) {
-        uint32_t offset = 0;
-        while (offset < size) {
-            callback(offset, BYTES_TO_MATCH);
-            offset += BYTES_TO_MATCH + BYTES_TO_SKIP;
-        }
-    }
-
-    // Align returns the WGSL decoration for an explicit structure field alignment
-    std::string AlignDeco(uint32_t value) {
-        return "@align(" + std::to_string(value) + ") ";
-    }
+// Align returns the WGSL decoration for an explicit structure field alignment
+std::string AlignDeco(uint32_t value) {
+    return "@align(" + std::to_string(value) + ") ";
+}
 
 }  // namespace
 
@@ -135,9 +133,7 @@
 
 class ComputeLayoutMemoryBufferTests
     : public DawnTestWithParams<ComputeLayoutMemoryBufferTestParams> {
-    void SetUp() override {
-        DawnTestBase::SetUp();
-    }
+    void SetUp() override { DawnTestBase::SetUp(); }
 };
 
 TEST_P(ComputeLayoutMemoryBufferTests, Fields) {
@@ -345,164 +341,153 @@
 
 namespace {
 
-    auto GenerateParams() {
-        auto params = MakeParamGenerator<ComputeLayoutMemoryBufferTestParams>(
-            {
-                D3D12Backend(), MetalBackend(), VulkanBackend(),
-                // TODO(crbug.com/dawn/942)
-                // There was a compiler error: Buffer block cannot be expressed as any of std430,
-                // std140, scalar, even with enhanced layouts. You can try flattening this block to
-                // support a more flexible layout.
-                // OpenGLBackend(),
-                // OpenGLESBackend(),
-            },
-            {StorageClass::Storage, StorageClass::Uniform},
-            {
-                // See https://www.w3.org/TR/WGSL/#alignment-and-size
-                // Scalar types with no custom alignment or size
-                Field{"i32", /* align */ 4, /* size */ 4},
-                Field{"u32", /* align */ 4, /* size */ 4},
-                Field{"f32", /* align */ 4, /* size */ 4},
+auto GenerateParams() {
+    auto params = MakeParamGenerator<ComputeLayoutMemoryBufferTestParams>(
+        {
+            D3D12Backend(), MetalBackend(), VulkanBackend(),
+            // TODO(crbug.com/dawn/942)
+            // There was a compiler error: Buffer block cannot be expressed as any of std430,
+            // std140, scalar, even with enhanced layouts. You can try flattening this block to
+            // support a more flexible layout.
+            // OpenGLBackend(),
+            // OpenGLESBackend(),
+        },
+        {StorageClass::Storage, StorageClass::Uniform},
+        {
+            // See https://www.w3.org/TR/WGSL/#alignment-and-size
+            // Scalar types with no custom alignment or size
+            Field{"i32", /* align */ 4, /* size */ 4},
+            Field{"u32", /* align */ 4, /* size */ 4},
+            Field{"f32", /* align */ 4, /* size */ 4},
 
-                // Scalar types with custom alignment
-                Field{"i32", /* align */ 16, /* size */ 4},
-                Field{"u32", /* align */ 16, /* size */ 4},
-                Field{"f32", /* align */ 16, /* size */ 4},
+            // Scalar types with custom alignment
+            Field{"i32", /* align */ 16, /* size */ 4},
+            Field{"u32", /* align */ 16, /* size */ 4},
+            Field{"f32", /* align */ 16, /* size */ 4},
 
-                // Scalar types with custom size
-                Field{"i32", /* align */ 4, /* size */ 4}.PaddedSize(24),
-                Field{"u32", /* align */ 4, /* size */ 4}.PaddedSize(24),
-                Field{"f32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+            // Scalar types with custom size
+            Field{"i32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+            Field{"u32", /* align */ 4, /* size */ 4}.PaddedSize(24),
+            Field{"f32", /* align */ 4, /* size */ 4}.PaddedSize(24),
 
-                // Vector types with no custom alignment or size
-                Field{"vec2<i32>", /* align */ 8, /* size */ 8},
-                Field{"vec2<u32>", /* align */ 8, /* size */ 8},
-                Field{"vec2<f32>", /* align */ 8, /* size */ 8},
-                Field{"vec3<i32>", /* align */ 16, /* size */ 12},
-                Field{"vec3<u32>", /* align */ 16, /* size */ 12},
-                Field{"vec3<f32>", /* align */ 16, /* size */ 12},
-                Field{"vec4<i32>", /* align */ 16, /* size */ 16},
-                Field{"vec4<u32>", /* align */ 16, /* size */ 16},
-                Field{"vec4<f32>", /* align */ 16, /* size */ 16},
+            // Vector types with no custom alignment or size
+            Field{"vec2<i32>", /* align */ 8, /* size */ 8},
+            Field{"vec2<u32>", /* align */ 8, /* size */ 8},
+            Field{"vec2<f32>", /* align */ 8, /* size */ 8},
+            Field{"vec3<i32>", /* align */ 16, /* size */ 12},
+            Field{"vec3<u32>", /* align */ 16, /* size */ 12},
+            Field{"vec3<f32>", /* align */ 16, /* size */ 12},
+            Field{"vec4<i32>", /* align */ 16, /* size */ 16},
+            Field{"vec4<u32>", /* align */ 16, /* size */ 16},
+            Field{"vec4<f32>", /* align */ 16, /* size */ 16},
 
-                // Vector types with custom alignment
-                Field{"vec2<i32>", /* align */ 32, /* size */ 8},
-                Field{"vec2<u32>", /* align */ 32, /* size */ 8},
-                Field{"vec2<f32>", /* align */ 32, /* size */ 8},
-                Field{"vec3<i32>", /* align */ 32, /* size */ 12},
-                Field{"vec3<u32>", /* align */ 32, /* size */ 12},
-                Field{"vec3<f32>", /* align */ 32, /* size */ 12},
-                Field{"vec4<i32>", /* align */ 32, /* size */ 16},
-                Field{"vec4<u32>", /* align */ 32, /* size */ 16},
-                Field{"vec4<f32>", /* align */ 32, /* size */ 16},
+            // Vector types with custom alignment
+            Field{"vec2<i32>", /* align */ 32, /* size */ 8},
+            Field{"vec2<u32>", /* align */ 32, /* size */ 8},
+            Field{"vec2<f32>", /* align */ 32, /* size */ 8},
+            Field{"vec3<i32>", /* align */ 32, /* size */ 12},
+            Field{"vec3<u32>", /* align */ 32, /* size */ 12},
+            Field{"vec3<f32>", /* align */ 32, /* size */ 12},
+            Field{"vec4<i32>", /* align */ 32, /* size */ 16},
+            Field{"vec4<u32>", /* align */ 32, /* size */ 16},
+            Field{"vec4<f32>", /* align */ 32, /* size */ 16},
 
-                // Vector types with custom size
-                Field{"vec2<i32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
-                Field{"vec2<u32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
-                Field{"vec2<f32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
-                Field{"vec3<i32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
-                Field{"vec3<u32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
-                Field{"vec3<f32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
-                Field{"vec4<i32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
-                Field{"vec4<u32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
-                Field{"vec4<f32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+            // Vector types with custom size
+            Field{"vec2<i32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+            Field{"vec2<u32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+            Field{"vec2<f32>", /* align */ 8, /* size */ 8}.PaddedSize(24),
+            Field{"vec3<i32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+            Field{"vec3<u32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+            Field{"vec3<f32>", /* align */ 16, /* size */ 12}.PaddedSize(24),
+            Field{"vec4<i32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+            Field{"vec4<u32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
+            Field{"vec4<f32>", /* align */ 16, /* size */ 16}.PaddedSize(24),
 
-                // Matrix types with no custom alignment or size
-                Field{"mat2x2<f32>", /* align */ 8, /* size */ 16},
-                Field{"mat3x2<f32>", /* align */ 8, /* size */ 24},
-                Field{"mat4x2<f32>", /* align */ 8, /* size */ 32},
-                Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}.Strided<12, 4>(),
-                Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}.Strided<12, 4>(),
-                Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
-                Field{"mat2x4<f32>", /* align */ 16, /* size */ 32},
-                Field{"mat3x4<f32>", /* align */ 16, /* size */ 48},
-                Field{"mat4x4<f32>", /* align */ 16, /* size */ 64},
+            // Matrix types with no custom alignment or size
+            Field{"mat2x2<f32>", /* align */ 8, /* size */ 16},
+            Field{"mat3x2<f32>", /* align */ 8, /* size */ 24},
+            Field{"mat4x2<f32>", /* align */ 8, /* size */ 32},
+            Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}.Strided<12, 4>(),
+            Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}.Strided<12, 4>(),
+            Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
+            Field{"mat2x4<f32>", /* align */ 16, /* size */ 32},
+            Field{"mat3x4<f32>", /* align */ 16, /* size */ 48},
+            Field{"mat4x4<f32>", /* align */ 16, /* size */ 64},
 
-                // Matrix types with custom alignment
-                Field{"mat2x2<f32>", /* align */ 32, /* size */ 16},
-                Field{"mat3x2<f32>", /* align */ 32, /* size */ 24},
-                Field{"mat4x2<f32>", /* align */ 32, /* size */ 32},
-                Field{"mat2x3<f32>", /* align */ 32, /* size */ 32}.Strided<12, 4>(),
-                Field{"mat3x3<f32>", /* align */ 32, /* size */ 48}.Strided<12, 4>(),
-                Field{"mat4x3<f32>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
-                Field{"mat2x4<f32>", /* align */ 32, /* size */ 32},
-                Field{"mat3x4<f32>", /* align */ 32, /* size */ 48},
-                Field{"mat4x4<f32>", /* align */ 32, /* size */ 64},
+            // Matrix types with custom alignment
+            Field{"mat2x2<f32>", /* align */ 32, /* size */ 16},
+            Field{"mat3x2<f32>", /* align */ 32, /* size */ 24},
+            Field{"mat4x2<f32>", /* align */ 32, /* size */ 32},
+            Field{"mat2x3<f32>", /* align */ 32, /* size */ 32}.Strided<12, 4>(),
+            Field{"mat3x3<f32>", /* align */ 32, /* size */ 48}.Strided<12, 4>(),
+            Field{"mat4x3<f32>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
+            Field{"mat2x4<f32>", /* align */ 32, /* size */ 32},
+            Field{"mat3x4<f32>", /* align */ 32, /* size */ 48},
+            Field{"mat4x4<f32>", /* align */ 32, /* size */ 64},
 
-                // Matrix types with custom size
-                Field{"mat2x2<f32>", /* align */ 8, /* size */ 16}.PaddedSize(128),
-                Field{"mat3x2<f32>", /* align */ 8, /* size */ 24}.PaddedSize(128),
-                Field{"mat4x2<f32>", /* align */ 8, /* size */ 32}.PaddedSize(128),
-                Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-                Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-                Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-                Field{"mat2x4<f32>", /* align */ 16, /* size */ 32}.PaddedSize(128),
-                Field{"mat3x4<f32>", /* align */ 16, /* size */ 48}.PaddedSize(128),
-                Field{"mat4x4<f32>", /* align */ 16, /* size */ 64}.PaddedSize(128),
+            // Matrix types with custom size
+            Field{"mat2x2<f32>", /* align */ 8, /* size */ 16}.PaddedSize(128),
+            Field{"mat3x2<f32>", /* align */ 8, /* size */ 24}.PaddedSize(128),
+            Field{"mat4x2<f32>", /* align */ 8, /* size */ 32}.PaddedSize(128),
+            Field{"mat2x3<f32>", /* align */ 16, /* size */ 32}.PaddedSize(128).Strided<12, 4>(),
+            Field{"mat3x3<f32>", /* align */ 16, /* size */ 48}.PaddedSize(128).Strided<12, 4>(),
+            Field{"mat4x3<f32>", /* align */ 16, /* size */ 64}.PaddedSize(128).Strided<12, 4>(),
+            Field{"mat2x4<f32>", /* align */ 16, /* size */ 32}.PaddedSize(128),
+            Field{"mat3x4<f32>", /* align */ 16, /* size */ 48}.PaddedSize(128),
+            Field{"mat4x4<f32>", /* align */ 16, /* size */ 64}.PaddedSize(128),
 
-                // Array types with no custom alignment or size.
-                // Note: The use of StorageBufferOnly() is due to UBOs requiring 16 byte alignment
-                // of array elements. See https://www.w3.org/TR/WGSL/#storage-class-constraints
-                Field{"array<u32, 1>", /* align */ 4, /* size */ 4}.StorageBufferOnly(),
-                Field{"array<u32, 2>", /* align */ 4, /* size */ 8}.StorageBufferOnly(),
-                Field{"array<u32, 3>", /* align */ 4, /* size */ 12}.StorageBufferOnly(),
-                Field{"array<u32, 4>", /* align */ 4, /* size */ 16}.StorageBufferOnly(),
-                Field{"array<vec4<u32>, 1>", /* align */ 16, /* size */ 16},
-                Field{"array<vec4<u32>, 2>", /* align */ 16, /* size */ 32},
-                Field{"array<vec4<u32>, 3>", /* align */ 16, /* size */ 48},
-                Field{"array<vec4<u32>, 4>", /* align */ 16, /* size */ 64},
-                Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
+            // Array types with no custom alignment or size.
+            // Note: The use of StorageBufferOnly() is due to UBOs requiring 16 byte alignment
+            // of array elements. See https://www.w3.org/TR/WGSL/#storage-class-constraints
+            Field{"array<u32, 1>", /* align */ 4, /* size */ 4}.StorageBufferOnly(),
+            Field{"array<u32, 2>", /* align */ 4, /* size */ 8}.StorageBufferOnly(),
+            Field{"array<u32, 3>", /* align */ 4, /* size */ 12}.StorageBufferOnly(),
+            Field{"array<u32, 4>", /* align */ 4, /* size */ 16}.StorageBufferOnly(),
+            Field{"array<vec4<u32>, 1>", /* align */ 16, /* size */ 16},
+            Field{"array<vec4<u32>, 2>", /* align */ 16, /* size */ 32},
+            Field{"array<vec4<u32>, 3>", /* align */ 16, /* size */ 48},
+            Field{"array<vec4<u32>, 4>", /* align */ 16, /* size */ 64},
+            Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}.Strided<12, 4>(),
 
-                // Array types with custom alignment
-                Field{"array<u32, 1>", /* align */ 32, /* size */ 4}.StorageBufferOnly(),
-                Field{"array<u32, 2>", /* align */ 32, /* size */ 8}.StorageBufferOnly(),
-                Field{"array<u32, 3>", /* align */ 32, /* size */ 12}.StorageBufferOnly(),
-                Field{"array<u32, 4>", /* align */ 32, /* size */ 16}.StorageBufferOnly(),
-                Field{"array<vec4<u32>, 1>", /* align */ 32, /* size */ 16},
-                Field{"array<vec4<u32>, 2>", /* align */ 32, /* size */ 32},
-                Field{"array<vec4<u32>, 3>", /* align */ 32, /* size */ 48},
-                Field{"array<vec4<u32>, 4>", /* align */ 32, /* size */ 64},
-                Field{"array<vec3<u32>, 4>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
+            // Array types with custom alignment
+            Field{"array<u32, 1>", /* align */ 32, /* size */ 4}.StorageBufferOnly(),
+            Field{"array<u32, 2>", /* align */ 32, /* size */ 8}.StorageBufferOnly(),
+            Field{"array<u32, 3>", /* align */ 32, /* size */ 12}.StorageBufferOnly(),
+            Field{"array<u32, 4>", /* align */ 32, /* size */ 16}.StorageBufferOnly(),
+            Field{"array<vec4<u32>, 1>", /* align */ 32, /* size */ 16},
+            Field{"array<vec4<u32>, 2>", /* align */ 32, /* size */ 32},
+            Field{"array<vec4<u32>, 3>", /* align */ 32, /* size */ 48},
+            Field{"array<vec4<u32>, 4>", /* align */ 32, /* size */ 64},
+            Field{"array<vec3<u32>, 4>", /* align */ 32, /* size */ 64}.Strided<12, 4>(),
 
-                // Array types with custom size
-                Field{"array<u32, 1>", /* align */ 4, /* size */ 4}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<u32, 2>", /* align */ 4, /* size */ 8}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<u32, 3>", /* align */ 4, /* size */ 12}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<u32, 4>", /* align */ 4, /* size */ 16}
-                    .PaddedSize(128)
-                    .StorageBufferOnly(),
-                Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}
-                    .PaddedSize(128)
-                    .Strided<12, 4>(),
-            });
+            // Array types with custom size
+            Field{"array<u32, 1>", /* align */ 4, /* size */ 4}.PaddedSize(128).StorageBufferOnly(),
+            Field{"array<u32, 2>", /* align */ 4, /* size */ 8}.PaddedSize(128).StorageBufferOnly(),
+            Field{"array<u32, 3>", /* align */ 4, /* size */ 12}
+                .PaddedSize(128)
+                .StorageBufferOnly(),
+            Field{"array<u32, 4>", /* align */ 4, /* size */ 16}
+                .PaddedSize(128)
+                .StorageBufferOnly(),
+            Field{"array<vec3<u32>, 4>", /* align */ 16, /* size */ 64}
+                .PaddedSize(128)
+                .Strided<12, 4>(),
+        });
 
-        std::vector<ComputeLayoutMemoryBufferTestParams> filtered;
-        for (auto param : params) {
-            if (param.mStorageClass != StorageClass::Storage && param.mField.storage_buffer_only) {
-                continue;
-            }
-            filtered.emplace_back(param);
+    std::vector<ComputeLayoutMemoryBufferTestParams> filtered;
+    for (auto param : params) {
+        if (param.mStorageClass != StorageClass::Storage && param.mField.storage_buffer_only) {
+            continue;
         }
-        return filtered;
+        filtered.emplace_back(param);
     }
+    return filtered;
+}
 
-    INSTANTIATE_TEST_SUITE_P(
-        ,
-        ComputeLayoutMemoryBufferTests,
-        ::testing::ValuesIn(GenerateParams()),
-        DawnTestBase::PrintToStringParamName("ComputeLayoutMemoryBufferTests"));
-    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(ComputeLayoutMemoryBufferTests);
+INSTANTIATE_TEST_SUITE_P(,
+                         ComputeLayoutMemoryBufferTests,
+                         ::testing::ValuesIn(GenerateParams()),
+                         DawnTestBase::PrintToStringParamName("ComputeLayoutMemoryBufferTests"));
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(ComputeLayoutMemoryBufferTests);
 
 }  // namespace
diff --git a/src/dawn/tests/end2end/CopyTests.cpp b/src/dawn/tests/end2end/CopyTests.cpp
index b789156..843baec 100644
--- a/src/dawn/tests/end2end/CopyTests.cpp
+++ b/src/dawn/tests/end2end/CopyTests.cpp
@@ -16,9 +16,9 @@
 #include <array>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Constants.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/TestUtils.h"
 #include "dawn/utils/TextureUtils.h"
 #include "dawn/utils/WGPUHelpers.h"
@@ -325,13 +325,13 @@
 };
 
 namespace {
-    // The CopyTests Texture to Texture in this class will validate both CopyTextureToTexture and
-    // CopyTextureToTextureInternal.
-    using UsageCopySrc = bool;
-    DAWN_TEST_PARAM_STRUCT(CopyTestsParams, UsageCopySrc);
+// The CopyTests Texture to Texture in this class will validate both CopyTextureToTexture and
+// CopyTextureToTextureInternal.
+using UsageCopySrc = bool;
+DAWN_TEST_PARAM_STRUCT(CopyTestsParams, UsageCopySrc);
 
-    using SrcColorFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(SrcColorFormatParams, SrcColorFormat);
+using SrcColorFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(SrcColorFormatParams, SrcColorFormat);
 }  // namespace
 
 template <typename Parent>
diff --git a/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp b/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
index d746bea..736b4bb 100644
--- a/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
+++ b/src/dawn/tests/end2end/CopyTextureForBrowserTests.cpp
@@ -23,131 +23,131 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    static constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+static constexpr wgpu::TextureFormat kTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
 
-    // Set default texture size to single line texture for color conversion tests.
-    static constexpr uint64_t kDefaultTextureWidth = 10;
-    static constexpr uint64_t kDefaultTextureHeight = 1;
+// Set default texture size to single line texture for color conversion tests.
+static constexpr uint64_t kDefaultTextureWidth = 10;
+static constexpr uint64_t kDefaultTextureHeight = 1;
 
-    enum class ColorSpace : uint32_t {
-        SRGB = 0x00,
-        DisplayP3 = 0x01,
-    };
+enum class ColorSpace : uint32_t {
+    SRGB = 0x00,
+    DisplayP3 = 0x01,
+};
 
-    using SrcFormat = wgpu::TextureFormat;
-    using DstFormat = wgpu::TextureFormat;
-    using SrcOrigin = wgpu::Origin3D;
-    using DstOrigin = wgpu::Origin3D;
-    using CopySize = wgpu::Extent3D;
-    using FlipY = bool;
-    using SrcColorSpace = ColorSpace;
-    using DstColorSpace = ColorSpace;
-    using SrcAlphaMode = wgpu::AlphaMode;
-    using DstAlphaMode = wgpu::AlphaMode;
+using SrcFormat = wgpu::TextureFormat;
+using DstFormat = wgpu::TextureFormat;
+using SrcOrigin = wgpu::Origin3D;
+using DstOrigin = wgpu::Origin3D;
+using CopySize = wgpu::Extent3D;
+using FlipY = bool;
+using SrcColorSpace = ColorSpace;
+using DstColorSpace = ColorSpace;
+using SrcAlphaMode = wgpu::AlphaMode;
+using DstAlphaMode = wgpu::AlphaMode;
 
-    std::ostream& operator<<(std::ostream& o, wgpu::Origin3D origin) {
-        o << origin.x << ", " << origin.y << ", " << origin.z;
-        return o;
-    }
+std::ostream& operator<<(std::ostream& o, wgpu::Origin3D origin) {
+    o << origin.x << ", " << origin.y << ", " << origin.z;
+    return o;
+}
 
-    std::ostream& operator<<(std::ostream& o, wgpu::Extent3D copySize) {
-        o << copySize.width << ", " << copySize.height << ", " << copySize.depthOrArrayLayers;
-        return o;
-    }
+std::ostream& operator<<(std::ostream& o, wgpu::Extent3D copySize) {
+    o << copySize.width << ", " << copySize.height << ", " << copySize.depthOrArrayLayers;
+    return o;
+}
 
-    std::ostream& operator<<(std::ostream& o, ColorSpace space) {
-        o << static_cast<uint32_t>(space);
-        return o;
-    }
+std::ostream& operator<<(std::ostream& o, ColorSpace space) {
+    o << static_cast<uint32_t>(space);
+    return o;
+}
 
-    DAWN_TEST_PARAM_STRUCT(AlphaTestParams, SrcAlphaMode, DstAlphaMode);
-    DAWN_TEST_PARAM_STRUCT(FormatTestParams, SrcFormat, DstFormat);
-    DAWN_TEST_PARAM_STRUCT(SubRectTestParams, SrcOrigin, DstOrigin, CopySize, FlipY);
-    DAWN_TEST_PARAM_STRUCT(ColorSpaceTestParams,
-                           DstFormat,
-                           SrcColorSpace,
-                           DstColorSpace,
-                           SrcAlphaMode,
-                           DstAlphaMode);
+DAWN_TEST_PARAM_STRUCT(AlphaTestParams, SrcAlphaMode, DstAlphaMode);
+DAWN_TEST_PARAM_STRUCT(FormatTestParams, SrcFormat, DstFormat);
+DAWN_TEST_PARAM_STRUCT(SubRectTestParams, SrcOrigin, DstOrigin, CopySize, FlipY);
+DAWN_TEST_PARAM_STRUCT(ColorSpaceTestParams,
+                       DstFormat,
+                       SrcColorSpace,
+                       DstColorSpace,
+                       SrcAlphaMode,
+                       DstAlphaMode);
 
-    // Color Space table
-    struct ColorSpaceInfo {
-        ColorSpace index;
-        std::array<float, 9> toXYZD50;    // 3x3 row major transform matrix
-        std::array<float, 9> fromXYZD50;  // inverse transform matrix of toXYZD50, precomputed
-        std::array<float, 7> gammaDecodingParams;  // Follow { A, B, G, E, epsilon, C, F } order
-        std::array<float, 7> gammaEncodingParams;  // inverse op of decoding, precomputed
-        bool isNonLinear;
-        bool isExtended;  // For extended color space.
-    };
-    static constexpr size_t kSupportedColorSpaceCount = 2;
-    static constexpr std::array<ColorSpaceInfo, kSupportedColorSpaceCount> ColorSpaceTable = {{
-        // sRGB,
-        // Got primary attributes from https://drafts.csswg.org/css-color/#predefined-sRGB
-        // Use matrices from
-        // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html#WSMatrices
-        // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
-        // mathematics.
-        {
-            //
-            ColorSpace::SRGB,
-            {{
-                //
-                0.4360747, 0.3850649, 0.1430804,  //
-                0.2225045, 0.7168786, 0.0606169,  //
-                0.0139322, 0.0971045, 0.7141733   //
-            }},
-
-            {{
-                //
-                3.1338561, -1.6168667, -0.4906146,  //
-                -0.9787684, 1.9161415, 0.0334540,   //
-                0.0719453, -0.2289914, 1.4052427    //
-            }},
-
-            // {G, A, B, C, D, E, F, }
-            {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
-
-            {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
-
-            true,
-            true  //
-        },
-
-        // Display P3, got primary attributes from
-        // https://www.w3.org/TR/css-color-4/#valdef-color-display-p3
-        // Use equations found in
-        // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html,
-        // Use Bradford method to do D65 to D50 transform.
-        // Get matrices with help of http://www.russellcottrell.com/photo/matrixCalculator.htm
-        // Gamma-linear conversion params is the same as Srgb.
-        {
-            //
-            ColorSpace::DisplayP3,
-            {{
-                //
-                0.5151114, 0.2919612, 0.1571274,  //
-                0.2411865, 0.6922440, 0.0665695,  //
-                -0.0010491, 0.0418832, 0.7842659  //
-            }},
-
-            {{
-                //
-                2.4039872, -0.9898498, -0.3976181,  //
-                -0.8422138, 1.7988188, 0.0160511,   //
-                0.0481937, -0.0973889, 1.2736887    //
-            }},
-
-            // {G, A, B, C, D, E, F, }
-            {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
-
-            {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
-
-            true,
-            false  //
-        }
+// Color Space table
+struct ColorSpaceInfo {
+    ColorSpace index;
+    std::array<float, 9> toXYZD50;             // 3x3 row major transform matrix
+    std::array<float, 9> fromXYZD50;           // inverse transform matrix of toXYZD50, precomputed
+    std::array<float, 7> gammaDecodingParams;  // Follow { A, B, G, E, epsilon, C, F } order
+    std::array<float, 7> gammaEncodingParams;  // inverse op of decoding, precomputed
+    bool isNonLinear;
+    bool isExtended;  // For extended color space.
+};
+static constexpr size_t kSupportedColorSpaceCount = 2;
+static constexpr std::array<ColorSpaceInfo, kSupportedColorSpaceCount> ColorSpaceTable = {{
+    // sRGB,
+    // Got primary attributes from https://drafts.csswg.org/css-color/#predefined-sRGB
+    // Use matrices from
+    // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html#WSMatrices
+    // Get gamma-linear conversion params from https://en.wikipedia.org/wiki/SRGB with some
+    // mathematics.
+    {
         //
-    }};
+        ColorSpace::SRGB,
+        {{
+            //
+            0.4360747, 0.3850649, 0.1430804,  //
+            0.2225045, 0.7168786, 0.0606169,  //
+            0.0139322, 0.0971045, 0.7141733   //
+        }},
+
+        {{
+            //
+            3.1338561, -1.6168667, -0.4906146,  //
+            -0.9787684, 1.9161415, 0.0334540,   //
+            0.0719453, -0.2289914, 1.4052427    //
+        }},
+
+        // {G, A, B, C, D, E, F, }
+        {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
+
+        {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
+
+        true,
+        true  //
+    },
+
+    // Display P3, got primary attributes from
+    // https://www.w3.org/TR/css-color-4/#valdef-color-display-p3
+    // Use equations found in
+    // http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html,
+    // Use Bradford method to do D65 to D50 transform.
+    // Get matrices with help of http://www.russellcottrell.com/photo/matrixCalculator.htm
+    // Gamma-linear conversion params is the same as Srgb.
+    {
+        //
+        ColorSpace::DisplayP3,
+        {{
+            //
+            0.5151114, 0.2919612, 0.1571274,  //
+            0.2411865, 0.6922440, 0.0665695,  //
+            -0.0010491, 0.0418832, 0.7842659  //
+        }},
+
+        {{
+            //
+            2.4039872, -0.9898498, -0.3976181,  //
+            -0.8422138, 1.7988188, 0.0160511,   //
+            0.0481937, -0.0973889, 1.2736887    //
+        }},
+
+        // {G, A, B, C, D, E, F, }
+        {{2.4, 1.0 / 1.055, 0.055 / 1.055, 1.0 / 12.92, 4.045e-02, 0.0, 0.0}},
+
+        {{1.0 / 2.4, 1.13711 /*pow(1.055, 2.4)*/, 0.0, 12.92f, 3.1308e-03, -0.055, 0.0}},
+
+        true,
+        false  //
+    }
+    //
+}};
 }  // anonymous namespace
 
 template <typename Parent>
diff --git a/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp b/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
index bfec892..6b95755 100644
--- a/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
+++ b/src/dawn/tests/end2end/CreatePipelineAsyncTests.cpp
@@ -19,12 +19,12 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    struct CreatePipelineAsyncTask {
-        wgpu::ComputePipeline computePipeline = nullptr;
-        wgpu::RenderPipeline renderPipeline = nullptr;
-        bool isCompleted = false;
-        std::string message;
-    };
+struct CreatePipelineAsyncTask {
+    wgpu::ComputePipeline computePipeline = nullptr;
+    wgpu::RenderPipeline renderPipeline = nullptr;
+    bool isCompleted = false;
+    std::string message;
+};
 }  // anonymous namespace
 
 class CreatePipelineAsyncTest : public DawnTest {
@@ -65,9 +65,7 @@
         EXPECT_BUFFER_U32_EQ(kExpected, ssbo, 0);
     }
 
-    void ValidateCreateComputePipelineAsync() {
-        ValidateCreateComputePipelineAsync(&task);
-    }
+    void ValidateCreateComputePipelineAsync() { ValidateCreateComputePipelineAsync(&task); }
 
     void ValidateCreateRenderPipelineAsync(CreatePipelineAsyncTask* currentTask) {
         constexpr wgpu::TextureFormat kRenderAttachmentFormat = wgpu::TextureFormat::RGBA8Unorm;
@@ -106,9 +104,7 @@
         EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 255, 0, 255), outputTexture, 0, 0);
     }
 
-    void ValidateCreateRenderPipelineAsync() {
-        ValidateCreateRenderPipelineAsync(&task);
-    }
+    void ValidateCreateRenderPipelineAsync() { ValidateCreateRenderPipelineAsync(&task); }
 
     void DoCreateRenderPipelineAsync(
         const utils::ComboRenderPipelineDescriptor& renderPipelineDescriptor) {
diff --git a/src/dawn/tests/end2end/D3D12CachingTests.cpp b/src/dawn/tests/end2end/D3D12CachingTests.cpp
index 7987879..c895a53 100644
--- a/src/dawn/tests/end2end/D3D12CachingTests.cpp
+++ b/src/dawn/tests/end2end/D3D12CachingTests.cpp
@@ -22,7 +22,7 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using ::testing::NiceMock;
+using ::testing::NiceMock;
 }  // namespace
 
 class D3D12CachingTests : public DawnTest {
diff --git a/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp b/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
index 880e34b..ce81ec2 100644
--- a/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
+++ b/src/dawn/tests/end2end/D3D12ResourceWrappingTests.cpp
@@ -30,127 +30,127 @@
 
 namespace {
 
-    using dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;
+using dawn::native::d3d12::kDXGIKeyedMutexAcquireReleaseKey;
 
-    class D3D12ResourceTestBase : public DawnTest {
-      protected:
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            return {wgpu::FeatureName::DawnInternalUsages};
+class D3D12ResourceTestBase : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        return {wgpu::FeatureName::DawnInternalUsages};
+    }
+
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        if (UsesWire()) {
+            return;
         }
 
-      public:
-        void SetUp() override {
-            DawnTest::SetUp();
-            if (UsesWire()) {
-                return;
-            }
+        // Create the D3D11 device/contexts that will be used in subsequent tests
+        ComPtr<ID3D12Device> d3d12Device = dawn::native::d3d12::GetD3D12Device(device.Get());
 
-            // Create the D3D11 device/contexts that will be used in subsequent tests
-            ComPtr<ID3D12Device> d3d12Device = dawn::native::d3d12::GetD3D12Device(device.Get());
+        const LUID adapterLuid = d3d12Device->GetAdapterLuid();
 
-            const LUID adapterLuid = d3d12Device->GetAdapterLuid();
+        ComPtr<IDXGIFactory4> dxgiFactory;
+        HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
+        ASSERT_EQ(hr, S_OK);
 
-            ComPtr<IDXGIFactory4> dxgiFactory;
-            HRESULT hr = ::CreateDXGIFactory2(0, IID_PPV_ARGS(&dxgiFactory));
-            ASSERT_EQ(hr, S_OK);
+        ComPtr<IDXGIAdapter> dxgiAdapter;
+        hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
+        ASSERT_EQ(hr, S_OK);
 
-            ComPtr<IDXGIAdapter> dxgiAdapter;
-            hr = dxgiFactory->EnumAdapterByLuid(adapterLuid, IID_PPV_ARGS(&dxgiAdapter));
-            ASSERT_EQ(hr, S_OK);
+        ComPtr<ID3D11Device> d3d11Device;
+        D3D_FEATURE_LEVEL d3dFeatureLevel;
+        ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
+        hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0, nullptr, 0,
+                                 D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
+                                 &d3d11DeviceContext);
+        ASSERT_EQ(hr, S_OK);
 
-            ComPtr<ID3D11Device> d3d11Device;
-            D3D_FEATURE_LEVEL d3dFeatureLevel;
-            ComPtr<ID3D11DeviceContext> d3d11DeviceContext;
-            hr = ::D3D11CreateDevice(dxgiAdapter.Get(), D3D_DRIVER_TYPE_UNKNOWN, nullptr, 0,
-                                     nullptr, 0, D3D11_SDK_VERSION, &d3d11Device, &d3dFeatureLevel,
-                                     &d3d11DeviceContext);
-            ASSERT_EQ(hr, S_OK);
+        mD3d11Device = std::move(d3d11Device);
+        mD3d11DeviceContext = std::move(d3d11DeviceContext);
 
-            mD3d11Device = std::move(d3d11Device);
-            mD3d11DeviceContext = std::move(d3d11DeviceContext);
+        baseDawnDescriptor.dimension = wgpu::TextureDimension::e2D;
+        baseDawnDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        baseDawnDescriptor.size = {kTestWidth, kTestHeight, 1};
+        baseDawnDescriptor.sampleCount = 1;
+        baseDawnDescriptor.mipLevelCount = 1;
+        baseDawnDescriptor.usage =
+            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst;
 
-            baseDawnDescriptor.dimension = wgpu::TextureDimension::e2D;
-            baseDawnDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            baseDawnDescriptor.size = {kTestWidth, kTestHeight, 1};
-            baseDawnDescriptor.sampleCount = 1;
-            baseDawnDescriptor.mipLevelCount = 1;
-            baseDawnDescriptor.usage =
-                wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopySrc |
-                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopyDst;
+        baseD3dDescriptor.Width = kTestWidth;
+        baseD3dDescriptor.Height = kTestHeight;
+        baseD3dDescriptor.MipLevels = 1;
+        baseD3dDescriptor.ArraySize = 1;
+        baseD3dDescriptor.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
+        baseD3dDescriptor.SampleDesc.Count = 1;
+        baseD3dDescriptor.SampleDesc.Quality = 0;
+        baseD3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
+        baseD3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
+        baseD3dDescriptor.CPUAccessFlags = 0;
+        baseD3dDescriptor.MiscFlags =
+            D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+    }
 
-            baseD3dDescriptor.Width = kTestWidth;
-            baseD3dDescriptor.Height = kTestHeight;
-            baseD3dDescriptor.MipLevels = 1;
-            baseD3dDescriptor.ArraySize = 1;
-            baseD3dDescriptor.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
-            baseD3dDescriptor.SampleDesc.Count = 1;
-            baseD3dDescriptor.SampleDesc.Quality = 0;
-            baseD3dDescriptor.Usage = D3D11_USAGE_DEFAULT;
-            baseD3dDescriptor.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
-            baseD3dDescriptor.CPUAccessFlags = 0;
-            baseD3dDescriptor.MiscFlags =
-                D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
+  protected:
+    void WrapSharedHandle(
+        const wgpu::TextureDescriptor* dawnDesc,
+        const D3D11_TEXTURE2D_DESC* baseD3dDescriptor,
+        wgpu::Texture* dawnTexture,
+        ID3D11Texture2D** d3d11TextureOut,
+        std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI>* externalImageOut = nullptr) const {
+        ComPtr<ID3D11Texture2D> d3d11Texture;
+        HRESULT hr = mD3d11Device->CreateTexture2D(baseD3dDescriptor, nullptr, &d3d11Texture);
+        ASSERT_EQ(hr, S_OK);
+
+        ComPtr<IDXGIResource1> dxgiResource;
+        hr = d3d11Texture.As(&dxgiResource);
+        ASSERT_EQ(hr, S_OK);
+
+        HANDLE sharedHandle;
+        hr = dxgiResource->CreateSharedHandle(
+            nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
+            &sharedHandle);
+        ASSERT_EQ(hr, S_OK);
+
+        dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
+        externalImageDesc.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(dawnDesc);
+        externalImageDesc.sharedHandle = sharedHandle;
+
+        std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage =
+            dawn::native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);
+
+        // Now that we've created all of our resources, we can close the handle
+        // since we no longer need it.
+        ::CloseHandle(sharedHandle);
+
+        // Cannot access a non-existent external image (ex. validation error).
+        if (externalImage == nullptr) {
+            return;
         }
 
-      protected:
-        void WrapSharedHandle(const wgpu::TextureDescriptor* dawnDesc,
-                              const D3D11_TEXTURE2D_DESC* baseD3dDescriptor,
-                              wgpu::Texture* dawnTexture,
-                              ID3D11Texture2D** d3d11TextureOut,
-                              std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI>*
-                                  externalImageOut = nullptr) const {
-            ComPtr<ID3D11Texture2D> d3d11Texture;
-            HRESULT hr = mD3d11Device->CreateTexture2D(baseD3dDescriptor, nullptr, &d3d11Texture);
-            ASSERT_EQ(hr, S_OK);
+        dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
+        externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(dawnDesc->usage);
 
-            ComPtr<IDXGIResource1> dxgiResource;
-            hr = d3d11Texture.As(&dxgiResource);
-            ASSERT_EQ(hr, S_OK);
+        *dawnTexture = wgpu::Texture::Acquire(
+            externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
+        *d3d11TextureOut = d3d11Texture.Detach();
 
-            HANDLE sharedHandle;
-            hr = dxgiResource->CreateSharedHandle(
-                nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
-                &sharedHandle);
-            ASSERT_EQ(hr, S_OK);
-
-            dawn::native::d3d12::ExternalImageDescriptorDXGISharedHandle externalImageDesc;
-            externalImageDesc.cTextureDescriptor =
-                reinterpret_cast<const WGPUTextureDescriptor*>(dawnDesc);
-            externalImageDesc.sharedHandle = sharedHandle;
-
-            std::unique_ptr<dawn::native::d3d12::ExternalImageDXGI> externalImage =
-                dawn::native::d3d12::ExternalImageDXGI::Create(device.Get(), &externalImageDesc);
-
-            // Now that we've created all of our resources, we can close the handle
-            // since we no longer need it.
-            ::CloseHandle(sharedHandle);
-
-            // Cannot access a non-existent external image (ex. validation error).
-            if (externalImage == nullptr) {
-                return;
-            }
-
-            dawn::native::d3d12::ExternalImageAccessDescriptorDXGIKeyedMutex externalAccessDesc;
-            externalAccessDesc.usage = static_cast<WGPUTextureUsageFlags>(dawnDesc->usage);
-
-            *dawnTexture = wgpu::Texture::Acquire(
-                externalImage->ProduceTexture(device.Get(), &externalAccessDesc));
-            *d3d11TextureOut = d3d11Texture.Detach();
-
-            if (externalImageOut != nullptr) {
-                *externalImageOut = std::move(externalImage);
-            }
+        if (externalImageOut != nullptr) {
+            *externalImageOut = std::move(externalImage);
         }
+    }
 
-        static constexpr size_t kTestWidth = 10;
-        static constexpr size_t kTestHeight = 10;
+    static constexpr size_t kTestWidth = 10;
+    static constexpr size_t kTestHeight = 10;
 
-        ComPtr<ID3D11Device> mD3d11Device;
-        ComPtr<ID3D11DeviceContext> mD3d11DeviceContext;
+    ComPtr<ID3D11Device> mD3d11Device;
+    ComPtr<ID3D11DeviceContext> mD3d11DeviceContext;
 
-        D3D11_TEXTURE2D_DESC baseD3dDescriptor;
-        wgpu::TextureDescriptor baseDawnDescriptor;
-    };
+    D3D11_TEXTURE2D_DESC baseD3dDescriptor;
+    wgpu::TextureDescriptor baseDawnDescriptor;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/DepthStencilCopyTests.cpp b/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
index 9dd62bd..aef6c3c 100644
--- a/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
+++ b/src/dawn/tests/end2end/DepthStencilCopyTests.cpp
@@ -16,27 +16,27 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Constants.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/TestUtils.h"
 #include "dawn/utils/TextureUtils.h"
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(DepthStencilCopyTestParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(DepthStencilCopyTestParams, TextureFormat);
 
-    constexpr std::array<wgpu::TextureFormat, 3> kValidDepthCopyTextureFormats = {
-        wgpu::TextureFormat::Depth16Unorm,
-        wgpu::TextureFormat::Depth32Float,
-        wgpu::TextureFormat::Depth32FloatStencil8,
-    };
+constexpr std::array<wgpu::TextureFormat, 3> kValidDepthCopyTextureFormats = {
+    wgpu::TextureFormat::Depth16Unorm,
+    wgpu::TextureFormat::Depth32Float,
+    wgpu::TextureFormat::Depth32FloatStencil8,
+};
 
-    constexpr std::array<wgpu::TextureFormat, 1> kValidDepthCopyFromBufferFormats = {
-        wgpu::TextureFormat::Depth16Unorm,
-    };
+constexpr std::array<wgpu::TextureFormat, 1> kValidDepthCopyFromBufferFormats = {
+    wgpu::TextureFormat::Depth16Unorm,
+};
 }  // namespace
 
 class DepthStencilCopyTests : public DawnTestWithParams<DepthStencilCopyTestParams> {
diff --git a/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp b/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
index e60102e..9f27263 100644
--- a/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
+++ b/src/dawn/tests/end2end/DepthStencilLoadOpTests.cpp
@@ -21,165 +21,164 @@
 
 namespace {
 
-    using Format = wgpu::TextureFormat;
-    enum class Check {
-        CopyStencil,
-        StencilTest,
-        CopyDepth,
-        DepthTest,
-        SampleDepth,
-    };
+using Format = wgpu::TextureFormat;
+enum class Check {
+    CopyStencil,
+    StencilTest,
+    CopyDepth,
+    DepthTest,
+    SampleDepth,
+};
 
-    std::ostream& operator<<(std::ostream& o, Check check) {
-        switch (check) {
-            case Check::CopyStencil:
-                o << "CopyStencil";
-                break;
-            case Check::StencilTest:
-                o << "StencilTest";
-                break;
-            case Check::CopyDepth:
-                o << "CopyDepth";
-                break;
-            case Check::DepthTest:
-                o << "DepthTest";
-                break;
-            case Check::SampleDepth:
-                o << "SampleDepth";
-                break;
+std::ostream& operator<<(std::ostream& o, Check check) {
+    switch (check) {
+        case Check::CopyStencil:
+            o << "CopyStencil";
+            break;
+        case Check::StencilTest:
+            o << "StencilTest";
+            break;
+        case Check::CopyDepth:
+            o << "CopyDepth";
+            break;
+        case Check::DepthTest:
+            o << "DepthTest";
+            break;
+        case Check::SampleDepth:
+            o << "SampleDepth";
+            break;
+    }
+    return o;
+}
+
+DAWN_TEST_PARAM_STRUCT(DepthStencilLoadOpTestParams, Format, Check);
+
+constexpr static uint32_t kRTSize = 16;
+constexpr uint32_t kMipLevelCount = 2u;
+constexpr std::array<float, kMipLevelCount> kDepthValues = {0.125f, 0.875f};
+constexpr std::array<uint16_t, kMipLevelCount> kU16DepthValues = {8192u, 57343u};
+constexpr std::array<uint8_t, kMipLevelCount> kStencilValues = {7u, 3u};
+
+class DepthStencilLoadOpTests : public DawnTestWithParams<DepthStencilLoadOpTestParams> {
+  protected:
+    void SetUp() override {
+        DawnTestWithParams<DepthStencilLoadOpTestParams>::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(!mIsFormatSupported);
+
+        // Readback of Depth/Stencil textures not fully supported on GL right now.
+        // Also depends on glTextureView which is not supported on ES.
+        DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
+
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size = {kRTSize, kRTSize};
+        descriptor.format = GetParam().mFormat;
+        descriptor.mipLevelCount = kMipLevelCount;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc |
+                           wgpu::TextureUsage::TextureBinding;
+
+        texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor textureViewDesc = {};
+        textureViewDesc.mipLevelCount = 1;
+
+        for (uint32_t mipLevel = 0; mipLevel < kMipLevelCount; ++mipLevel) {
+            textureViewDesc.baseMipLevel = mipLevel;
+            textureViews[mipLevel] = texture.CreateView(&textureViewDesc);
+
+            utils::ComboRenderPassDescriptor renderPassDescriptor({}, textureViews[mipLevel]);
+            renderPassDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(GetParam().mFormat);
+            renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue =
+                kDepthValues[mipLevel];
+            renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue =
+                kStencilValues[mipLevel];
+            renderPassDescriptors.push_back(renderPassDescriptor);
         }
-        return o;
     }
 
-    DAWN_TEST_PARAM_STRUCT(DepthStencilLoadOpTestParams, Format, Check);
-
-    constexpr static uint32_t kRTSize = 16;
-    constexpr uint32_t kMipLevelCount = 2u;
-    constexpr std::array<float, kMipLevelCount> kDepthValues = {0.125f, 0.875f};
-    constexpr std::array<uint16_t, kMipLevelCount> kU16DepthValues = {8192u, 57343u};
-    constexpr std::array<uint8_t, kMipLevelCount> kStencilValues = {7u, 3u};
-
-    class DepthStencilLoadOpTests : public DawnTestWithParams<DepthStencilLoadOpTestParams> {
-      protected:
-        void SetUp() override {
-            DawnTestWithParams<DepthStencilLoadOpTestParams>::SetUp();
-
-            DAWN_TEST_UNSUPPORTED_IF(!mIsFormatSupported);
-
-            // Readback of Depth/Stencil textures not fully supported on GL right now.
-            // Also depends on glTextureView which is not supported on ES.
-            DAWN_SUPPRESS_TEST_IF(IsOpenGL() || IsOpenGLES());
-
-            wgpu::TextureDescriptor descriptor;
-            descriptor.size = {kRTSize, kRTSize};
-            descriptor.format = GetParam().mFormat;
-            descriptor.mipLevelCount = kMipLevelCount;
-            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc |
-                               wgpu::TextureUsage::TextureBinding;
-
-            texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor textureViewDesc = {};
-            textureViewDesc.mipLevelCount = 1;
-
-            for (uint32_t mipLevel = 0; mipLevel < kMipLevelCount; ++mipLevel) {
-                textureViewDesc.baseMipLevel = mipLevel;
-                textureViews[mipLevel] = texture.CreateView(&textureViewDesc);
-
-                utils::ComboRenderPassDescriptor renderPassDescriptor({}, textureViews[mipLevel]);
-                renderPassDescriptor.UnsetDepthStencilLoadStoreOpsForFormat(GetParam().mFormat);
-                renderPassDescriptor.cDepthStencilAttachmentInfo.depthClearValue =
-                    kDepthValues[mipLevel];
-                renderPassDescriptor.cDepthStencilAttachmentInfo.stencilClearValue =
-                    kStencilValues[mipLevel];
-                renderPassDescriptors.push_back(renderPassDescriptor);
-            }
-        }
-
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            switch (GetParam().mFormat) {
-                case wgpu::TextureFormat::Depth24UnormStencil8:
-                    if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
-                        mIsFormatSupported = true;
-                        return {wgpu::FeatureName::Depth24UnormStencil8};
-                    }
-                    return {};
-                case wgpu::TextureFormat::Depth32FloatStencil8:
-                    if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
-                        mIsFormatSupported = true;
-                        return {wgpu::FeatureName::Depth32FloatStencil8};
-                    }
-                    return {};
-                default:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        switch (GetParam().mFormat) {
+            case wgpu::TextureFormat::Depth24UnormStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth24UnormStencil8})) {
                     mIsFormatSupported = true;
-                    return {};
+                    return {wgpu::FeatureName::Depth24UnormStencil8};
+                }
+                return {};
+            case wgpu::TextureFormat::Depth32FloatStencil8:
+                if (SupportsFeatures({wgpu::FeatureName::Depth32FloatStencil8})) {
+                    mIsFormatSupported = true;
+                    return {wgpu::FeatureName::Depth32FloatStencil8};
+                }
+                return {};
+            default:
+                mIsFormatSupported = true;
+                return {};
+        }
+    }
+
+    void CheckMipLevel(uint32_t mipLevel) {
+        uint32_t mipSize = std::max(kRTSize >> mipLevel, 1u);
+
+        switch (GetParam().mCheck) {
+            case Check::SampleDepth: {
+                std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                ExpectSampledDepthData(
+                    texture, mipSize, mipSize, 0, mipLevel,
+                    new detail::ExpectEq<float>(expectedDepth.data(), expectedDepth.size(), 0.0001))
+                    << "sample depth mip " << mipLevel;
+                break;
+            }
+
+            case Check::CopyDepth: {
+                if (GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm) {
+                    std::vector<uint16_t> expectedDepth(mipSize * mipSize,
+                                                        kU16DepthValues[mipLevel]);
+                    EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
+                                      mipLevel, wgpu::TextureAspect::DepthOnly)
+                        << "copy depth mip " << mipLevel;
+                } else {
+                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                    EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
+                                      mipLevel, wgpu::TextureAspect::DepthOnly)
+                        << "copy depth mip " << mipLevel;
+                }
+
+                break;
+            }
+
+            case Check::CopyStencil: {
+                std::vector<uint8_t> expectedStencil(mipSize * mipSize, kStencilValues[mipLevel]);
+                EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0}, {mipSize, mipSize},
+                                  mipLevel, wgpu::TextureAspect::StencilOnly)
+                    << "copy stencil mip " << mipLevel;
+                break;
+            }
+
+            case Check::DepthTest: {
+                std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
+                ExpectAttachmentDepthTestData(texture, GetParam().mFormat, mipSize, mipSize, 0,
+                                              mipLevel, expectedDepth)
+                    << "depth test mip " << mipLevel;
+                break;
+            }
+
+            case Check::StencilTest: {
+                ExpectAttachmentStencilTestData(texture, GetParam().mFormat, mipSize, mipSize, 0,
+                                                mipLevel, kStencilValues[mipLevel])
+                    << "stencil test mip " << mipLevel;
+                break;
             }
         }
+    }
 
-        void CheckMipLevel(uint32_t mipLevel) {
-            uint32_t mipSize = std::max(kRTSize >> mipLevel, 1u);
+    wgpu::Texture texture;
+    std::array<wgpu::TextureView, kMipLevelCount> textureViews;
+    // Vector instead of array because there is no default constructor.
+    std::vector<utils::ComboRenderPassDescriptor> renderPassDescriptors;
 
-            switch (GetParam().mCheck) {
-                case Check::SampleDepth: {
-                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
-                    ExpectSampledDepthData(texture, mipSize, mipSize, 0, mipLevel,
-                                           new detail::ExpectEq<float>(
-                                               expectedDepth.data(), expectedDepth.size(), 0.0001))
-                        << "sample depth mip " << mipLevel;
-                    break;
-                }
-
-                case Check::CopyDepth: {
-                    if (GetParam().mFormat == wgpu::TextureFormat::Depth16Unorm) {
-                        std::vector<uint16_t> expectedDepth(mipSize * mipSize,
-                                                            kU16DepthValues[mipLevel]);
-                        EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
-                                          mipLevel, wgpu::TextureAspect::DepthOnly)
-                            << "copy depth mip " << mipLevel;
-                    } else {
-                        std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
-                        EXPECT_TEXTURE_EQ(expectedDepth.data(), texture, {0, 0}, {mipSize, mipSize},
-                                          mipLevel, wgpu::TextureAspect::DepthOnly)
-                            << "copy depth mip " << mipLevel;
-                    }
-
-                    break;
-                }
-
-                case Check::CopyStencil: {
-                    std::vector<uint8_t> expectedStencil(mipSize * mipSize,
-                                                         kStencilValues[mipLevel]);
-                    EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0}, {mipSize, mipSize},
-                                      mipLevel, wgpu::TextureAspect::StencilOnly)
-                        << "copy stencil mip " << mipLevel;
-                    break;
-                }
-
-                case Check::DepthTest: {
-                    std::vector<float> expectedDepth(mipSize * mipSize, kDepthValues[mipLevel]);
-                    ExpectAttachmentDepthTestData(texture, GetParam().mFormat, mipSize, mipSize, 0,
-                                                  mipLevel, expectedDepth)
-                        << "depth test mip " << mipLevel;
-                    break;
-                }
-
-                case Check::StencilTest: {
-                    ExpectAttachmentStencilTestData(texture, GetParam().mFormat, mipSize, mipSize,
-                                                    0, mipLevel, kStencilValues[mipLevel])
-                        << "stencil test mip " << mipLevel;
-                    break;
-                }
-            }
-        }
-
-        wgpu::Texture texture;
-        std::array<wgpu::TextureView, kMipLevelCount> textureViews;
-        // Vector instead of array because there is no default constructor.
-        std::vector<utils::ComboRenderPassDescriptor> renderPassDescriptors;
-
-      private:
-        bool mIsFormatSupported = false;
-    };
+  private:
+    bool mIsFormatSupported = false;
+};
 
 }  // anonymous namespace
 
@@ -258,31 +257,31 @@
 
 namespace {
 
-    auto GenerateParams() {
-        auto params1 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
-            {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
-             OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
-            {wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Depth16Unorm},
-            {Check::CopyDepth, Check::DepthTest, Check::SampleDepth});
+auto GenerateParams() {
+    auto params1 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
+        {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
+         OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+        {wgpu::TextureFormat::Depth32Float, wgpu::TextureFormat::Depth16Unorm},
+        {Check::CopyDepth, Check::DepthTest, Check::SampleDepth});
 
-        auto params2 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
-            {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
-             OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
-            {wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureFormat::Depth24UnormStencil8,
-             wgpu::TextureFormat::Depth32FloatStencil8},
-            {Check::CopyStencil, Check::StencilTest, Check::DepthTest, Check::SampleDepth});
+    auto params2 = MakeParamGenerator<DepthStencilLoadOpTestParams>(
+        {D3D12Backend(), D3D12Backend({}, {"use_d3d12_render_pass"}), MetalBackend(),
+         OpenGLBackend(), OpenGLESBackend(), VulkanBackend()},
+        {wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureFormat::Depth24UnormStencil8,
+         wgpu::TextureFormat::Depth32FloatStencil8},
+        {Check::CopyStencil, Check::StencilTest, Check::DepthTest, Check::SampleDepth});
 
-        std::vector<DepthStencilLoadOpTestParams> allParams;
-        allParams.insert(allParams.end(), params1.begin(), params1.end());
-        allParams.insert(allParams.end(), params2.begin(), params2.end());
+    std::vector<DepthStencilLoadOpTestParams> allParams;
+    allParams.insert(allParams.end(), params1.begin(), params1.end());
+    allParams.insert(allParams.end(), params2.begin(), params2.end());
 
-        return allParams;
-    }
+    return allParams;
+}
 
-    INSTANTIATE_TEST_SUITE_P(,
-                             DepthStencilLoadOpTests,
-                             ::testing::ValuesIn(GenerateParams()),
-                             DawnTestBase::PrintToStringParamName("DepthStencilLoadOpTests"));
-    GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DepthStencilLoadOpTests);
+INSTANTIATE_TEST_SUITE_P(,
+                         DepthStencilLoadOpTests,
+                         ::testing::ValuesIn(GenerateParams()),
+                         DawnTestBase::PrintToStringParamName("DepthStencilLoadOpTests"));
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(DepthStencilLoadOpTests);
 
 }  // namespace
diff --git a/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp b/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
index 4da042d..574573c 100644
--- a/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
+++ b/src/dawn/tests/end2end/DepthStencilSamplingTests.cpp
@@ -21,24 +21,24 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(DepthStencilSamplingTestParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(DepthStencilSamplingTestParams, TextureFormat);
 
-    constexpr wgpu::CompareFunction kCompareFunctions[] = {
-        wgpu::CompareFunction::Never,        wgpu::CompareFunction::Less,
-        wgpu::CompareFunction::LessEqual,    wgpu::CompareFunction::Greater,
-        wgpu::CompareFunction::GreaterEqual, wgpu::CompareFunction::Equal,
-        wgpu::CompareFunction::NotEqual,     wgpu::CompareFunction::Always,
-    };
+constexpr wgpu::CompareFunction kCompareFunctions[] = {
+    wgpu::CompareFunction::Never,        wgpu::CompareFunction::Less,
+    wgpu::CompareFunction::LessEqual,    wgpu::CompareFunction::Greater,
+    wgpu::CompareFunction::GreaterEqual, wgpu::CompareFunction::Equal,
+    wgpu::CompareFunction::NotEqual,     wgpu::CompareFunction::Always,
+};
 
-    // Test a "normal" ref value between 0 and 1; as well as negative and > 1 refs.
-    constexpr float kCompareRefs[] = {-0.1, 0.4, 1.2};
+// Test a "normal" ref value between 0 and 1; as well as negative and > 1 refs.
+constexpr float kCompareRefs[] = {-0.1, 0.4, 1.2};
 
-    // Test 0, below the ref, equal to, above the ref, and 1.
-    const std::vector<float> kNormalizedTextureValues = {0.0, 0.3, 0.4, 0.5, 1.0};
+// Test 0, below the ref, equal to, above the ref, and 1.
+const std::vector<float> kNormalizedTextureValues = {0.0, 0.3, 0.4, 0.5, 1.0};
 
-    // Test the limits, and some values in between.
-    const std::vector<uint32_t> kStencilValues = {0, 1, 38, 255};
+// Test the limits, and some values in between.
+const std::vector<uint32_t> kStencilValues = {0, 1, 38, 255};
 
 }  // anonymous namespace
 
@@ -426,8 +426,7 @@
         using StencilData = std::array<uint32_t, 4>;
 
       public:
-        explicit ExtraStencilComponentsExpectation(uint32_t expected) : mExpected(expected) {
-        }
+        explicit ExtraStencilComponentsExpectation(uint32_t expected) : mExpected(expected) {}
 
         ~ExtraStencilComponentsExpectation() override = default;
 
diff --git a/src/dawn/tests/end2end/DeviceInitializationTests.cpp b/src/dawn/tests/end2end/DeviceInitializationTests.cpp
index 0bd35bb..0c7e621 100644
--- a/src/dawn/tests/end2end/DeviceInitializationTests.cpp
+++ b/src/dawn/tests/end2end/DeviceInitializationTests.cpp
@@ -21,13 +21,9 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 class DeviceInitializationTest : public testing::Test {
-    void SetUp() override {
-        dawnProcSetProcs(&dawn::native::GetProcs());
-    }
+    void SetUp() override { dawnProcSetProcs(&dawn::native::GetProcs()); }
 
-    void TearDown() override {
-        dawnProcSetProcs(nullptr);
-    }
+    void TearDown() override { dawnProcSetProcs(nullptr); }
 };
 
 // Test that device operations are still valid if the reference to the instance
diff --git a/src/dawn/tests/end2end/DeviceLostTests.cpp b/src/dawn/tests/end2end/DeviceLostTests.cpp
index 7dfff52..2ac47f6 100644
--- a/src/dawn/tests/end2end/DeviceLostTests.cpp
+++ b/src/dawn/tests/end2end/DeviceLostTests.cpp
@@ -16,11 +16,11 @@
 #include <memory>
 #include <string>
 
-#include "gmock/gmock.h"
 #include "dawn/tests/DawnTest.h"
 #include "dawn/tests/MockCallback.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
+#include "gmock/gmock.h"
 
 using testing::_;
 using testing::Exactly;
diff --git a/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp b/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
index 4e2c444..23dee82 100644
--- a/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
+++ b/src/dawn/tests/end2end/DynamicBufferOffsetTests.cpp
@@ -17,8 +17,8 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -404,11 +404,11 @@
 }
 
 namespace {
-    using ReadBufferUsage = wgpu::BufferUsage;
-    using OOBRead = bool;
-    using OOBWrite = bool;
+using ReadBufferUsage = wgpu::BufferUsage;
+using OOBRead = bool;
+using OOBWrite = bool;
 
-    DAWN_TEST_PARAM_STRUCT(ClampedOOBDynamicBufferOffsetParams, ReadBufferUsage, OOBRead, OOBWrite);
+DAWN_TEST_PARAM_STRUCT(ClampedOOBDynamicBufferOffsetParams, ReadBufferUsage, OOBRead, OOBWrite);
 }  // anonymous namespace
 
 class ClampedOOBDynamicBufferOffsetTests
diff --git a/src/dawn/tests/end2end/ExternalTextureTests.cpp b/src/dawn/tests/end2end/ExternalTextureTests.cpp
index afe28f8..4215c77 100644
--- a/src/dawn/tests/end2end/ExternalTextureTests.cpp
+++ b/src/dawn/tests/end2end/ExternalTextureTests.cpp
@@ -18,30 +18,30 @@
 
 namespace {
 
-    wgpu::Texture Create2DTexture(wgpu::Device device,
-                                  uint32_t width,
-                                  uint32_t height,
-                                  wgpu::TextureFormat format,
-                                  wgpu::TextureUsage usage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = format;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = usage;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create2DTexture(wgpu::Device device,
+                              uint32_t width,
+                              uint32_t height,
+                              wgpu::TextureFormat format,
+                              wgpu::TextureUsage usage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = format;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = usage;
+    return device.CreateTexture(&descriptor);
+}
 
-    class ExternalTextureTests : public DawnTest {
-      protected:
-        static constexpr uint32_t kWidth = 4;
-        static constexpr uint32_t kHeight = 4;
-        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
-        static constexpr wgpu::TextureUsage kSampledUsage = wgpu::TextureUsage::TextureBinding;
-    };
+class ExternalTextureTests : public DawnTest {
+  protected:
+    static constexpr uint32_t kWidth = 4;
+    static constexpr uint32_t kHeight = 4;
+    static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+    static constexpr wgpu::TextureUsage kSampledUsage = wgpu::TextureUsage::TextureBinding;
+};
 }  // anonymous namespace
 
 TEST_P(ExternalTextureTests, CreateExternalTextureSuccess) {
diff --git a/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp b/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
index 9dc9e19..768db35 100644
--- a/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
+++ b/src/dawn/tests/end2end/FirstIndexOffsetTests.cpp
@@ -40,10 +40,10 @@
 }
 
 namespace dawn {
-    template <>
-    struct IsDawnBitmask<CheckIndex> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<CheckIndex> {
+    static constexpr bool enable = true;
+};
 }  // namespace dawn
 
 class FirstIndexOffsetTests : public DawnTest {
diff --git a/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp b/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
index 0fd8c5b..79f862c 100644
--- a/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
+++ b/src/dawn/tests/end2end/IOSurfaceWrappingTests.cpp
@@ -24,88 +24,82 @@
 
 namespace {
 
-    void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
-        CFNumberRef number = CFNumberCreate(nullptr, kCFNumberSInt32Type, &value);
-        CFDictionaryAddValue(dictionary, key, number);
-        CFRelease(number);
+void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
+    CFNumberRef number = CFNumberCreate(nullptr, kCFNumberSInt32Type, &value);
+    CFDictionaryAddValue(dictionary, key, number);
+    CFRelease(number);
+}
+
+class ScopedIOSurfaceRef {
+  public:
+    ScopedIOSurfaceRef() : mSurface(nullptr) {}
+    explicit ScopedIOSurfaceRef(IOSurfaceRef surface) : mSurface(surface) {}
+
+    ~ScopedIOSurfaceRef() {
+        if (mSurface != nullptr) {
+            CFRelease(mSurface);
+            mSurface = nullptr;
+        }
     }
 
-    class ScopedIOSurfaceRef {
-      public:
-        ScopedIOSurfaceRef() : mSurface(nullptr) {
+    IOSurfaceRef get() const { return mSurface; }
+
+    ScopedIOSurfaceRef(ScopedIOSurfaceRef&& other) {
+        if (mSurface != nullptr) {
+            CFRelease(mSurface);
         }
-        explicit ScopedIOSurfaceRef(IOSurfaceRef surface) : mSurface(surface) {
-        }
-
-        ~ScopedIOSurfaceRef() {
-            if (mSurface != nullptr) {
-                CFRelease(mSurface);
-                mSurface = nullptr;
-            }
-        }
-
-        IOSurfaceRef get() const {
-            return mSurface;
-        }
-
-        ScopedIOSurfaceRef(ScopedIOSurfaceRef&& other) {
-            if (mSurface != nullptr) {
-                CFRelease(mSurface);
-            }
-            mSurface = other.mSurface;
-            other.mSurface = nullptr;
-        }
-
-        ScopedIOSurfaceRef& operator=(ScopedIOSurfaceRef&& other) {
-            if (mSurface != nullptr) {
-                CFRelease(mSurface);
-            }
-            mSurface = other.mSurface;
-            other.mSurface = nullptr;
-
-            return *this;
-        }
-
-        ScopedIOSurfaceRef(const ScopedIOSurfaceRef&) = delete;
-        ScopedIOSurfaceRef& operator=(const ScopedIOSurfaceRef&) = delete;
-
-      private:
-        IOSurfaceRef mSurface = nullptr;
-    };
-
-    ScopedIOSurfaceRef CreateSinglePlaneIOSurface(uint32_t width,
-                                                  uint32_t height,
-                                                  uint32_t format,
-                                                  uint32_t bytesPerElement) {
-        CFMutableDictionaryRef dict =
-            CFDictionaryCreateMutable(kCFAllocatorDefault, 0, &kCFTypeDictionaryKeyCallBacks,
-                                      &kCFTypeDictionaryValueCallBacks);
-        AddIntegerValue(dict, kIOSurfaceWidth, width);
-        AddIntegerValue(dict, kIOSurfaceHeight, height);
-        AddIntegerValue(dict, kIOSurfacePixelFormat, format);
-        AddIntegerValue(dict, kIOSurfaceBytesPerElement, bytesPerElement);
-
-        IOSurfaceRef ioSurface = IOSurfaceCreate(dict);
-        EXPECT_NE(nullptr, ioSurface);
-        CFRelease(dict);
-
-        return ScopedIOSurfaceRef(ioSurface);
+        mSurface = other.mSurface;
+        other.mSurface = nullptr;
     }
 
-    class IOSurfaceTestBase : public DawnTest {
-      public:
-        wgpu::Texture WrapIOSurface(const wgpu::TextureDescriptor* descriptor,
-                                    IOSurfaceRef ioSurface,
-                                    bool isInitialized = true) {
-            dawn::native::metal::ExternalImageDescriptorIOSurface externDesc;
-            externDesc.cTextureDescriptor =
-                reinterpret_cast<const WGPUTextureDescriptor*>(descriptor);
-            externDesc.ioSurface = ioSurface;
-            externDesc.isInitialized = isInitialized;
-            WGPUTexture texture = dawn::native::metal::WrapIOSurface(device.Get(), &externDesc);
-            return wgpu::Texture::Acquire(texture);
+    ScopedIOSurfaceRef& operator=(ScopedIOSurfaceRef&& other) {
+        if (mSurface != nullptr) {
+            CFRelease(mSurface);
         }
-    };
+        mSurface = other.mSurface;
+        other.mSurface = nullptr;
+
+        return *this;
+    }
+
+    ScopedIOSurfaceRef(const ScopedIOSurfaceRef&) = delete;
+    ScopedIOSurfaceRef& operator=(const ScopedIOSurfaceRef&) = delete;
+
+  private:
+    IOSurfaceRef mSurface = nullptr;
+};
+
+ScopedIOSurfaceRef CreateSinglePlaneIOSurface(uint32_t width,
+                                              uint32_t height,
+                                              uint32_t format,
+                                              uint32_t bytesPerElement) {
+    CFMutableDictionaryRef dict = CFDictionaryCreateMutable(
+        kCFAllocatorDefault, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
+    AddIntegerValue(dict, kIOSurfaceWidth, width);
+    AddIntegerValue(dict, kIOSurfaceHeight, height);
+    AddIntegerValue(dict, kIOSurfacePixelFormat, format);
+    AddIntegerValue(dict, kIOSurfaceBytesPerElement, bytesPerElement);
+
+    IOSurfaceRef ioSurface = IOSurfaceCreate(dict);
+    EXPECT_NE(nullptr, ioSurface);
+    CFRelease(dict);
+
+    return ScopedIOSurfaceRef(ioSurface);
+}
+
+class IOSurfaceTestBase : public DawnTest {
+  public:
+    wgpu::Texture WrapIOSurface(const wgpu::TextureDescriptor* descriptor,
+                                IOSurfaceRef ioSurface,
+                                bool isInitialized = true) {
+        dawn::native::metal::ExternalImageDescriptorIOSurface externDesc;
+        externDesc.cTextureDescriptor = reinterpret_cast<const WGPUTextureDescriptor*>(descriptor);
+        externDesc.ioSurface = ioSurface;
+        externDesc.isInitialized = isInitialized;
+        WGPUTexture texture = dawn::native::metal::WrapIOSurface(device.Get(), &externDesc);
+        return wgpu::Texture::Acquire(texture);
+    }
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/MultisampledSamplingTests.cpp b/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
index 8013f6c..9a8ca2a 100644
--- a/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
+++ b/src/dawn/tests/end2end/MultisampledSamplingTests.cpp
@@ -20,18 +20,18 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    // https://github.com/gpuweb/gpuweb/issues/108
-    // Vulkan, Metal, and D3D11 have the same standard multisample pattern. D3D12 is the same as
-    // D3D11 but it was left out of the documentation.
-    // {0.375, 0.125}, {0.875, 0.375}, {0.125 0.625}, {0.625, 0.875}
-    // In this test, we store them in -1 to 1 space because it makes it
-    // simpler to upload vertex data. Y is flipped because there is a flip between clip space and
-    // rasterization space.
-    static constexpr std::array<std::array<float, 2>, 4> kSamplePositions = {
-        {{0.375 * 2 - 1, 1 - 0.125 * 2},
-         {0.875 * 2 - 1, 1 - 0.375 * 2},
-         {0.125 * 2 - 1, 1 - 0.625 * 2},
-         {0.625 * 2 - 1, 1 - 0.875 * 2}}};
+// https://github.com/gpuweb/gpuweb/issues/108
+// Vulkan, Metal, and D3D11 have the same standard multisample pattern. D3D12 is the same as
+// D3D11 but it was left out of the documentation.
+// {0.375, 0.125}, {0.875, 0.375}, {0.125 0.625}, {0.625, 0.875}
+// In this test, we store them in -1 to 1 space because it makes it
+// simpler to upload vertex data. Y is flipped because there is a flip between clip space and
+// rasterization space.
+static constexpr std::array<std::array<float, 2>, 4> kSamplePositions = {
+    {{0.375 * 2 - 1, 1 - 0.125 * 2},
+     {0.875 * 2 - 1, 1 - 0.375 * 2},
+     {0.125 * 2 - 1, 1 - 0.625 * 2},
+     {0.625 * 2 - 1, 1 - 0.875 * 2}}};
 }  // anonymous namespace
 
 class MultisampledSamplingTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp b/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
index 2b98552..25ab718 100644
--- a/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
+++ b/src/dawn/tests/end2end/NonzeroTextureCreationTests.cpp
@@ -24,245 +24,240 @@
 
 namespace {
 
-    using Format = wgpu::TextureFormat;
-    using Aspect = wgpu::TextureAspect;
-    using Usage = wgpu::TextureUsage;
-    using Dimension = wgpu::TextureDimension;
-    using DepthOrArrayLayers = uint32_t;
-    using MipCount = uint32_t;
-    using Mip = uint32_t;
-    using SampleCount = uint32_t;
+using Format = wgpu::TextureFormat;
+using Aspect = wgpu::TextureAspect;
+using Usage = wgpu::TextureUsage;
+using Dimension = wgpu::TextureDimension;
+using DepthOrArrayLayers = uint32_t;
+using MipCount = uint32_t;
+using Mip = uint32_t;
+using SampleCount = uint32_t;
 
-    DAWN_TEST_PARAM_STRUCT(Params,
-                           Format,
-                           Aspect,
-                           Usage,
-                           Dimension,
-                           DepthOrArrayLayers,
-                           MipCount,
-                           Mip,
-                           SampleCount);
+DAWN_TEST_PARAM_STRUCT(Params,
+                       Format,
+                       Aspect,
+                       Usage,
+                       Dimension,
+                       DepthOrArrayLayers,
+                       MipCount,
+                       Mip,
+                       SampleCount);
 
-    template <typename T>
-    class ExpectNonZero : public detail::CustomTextureExpectation {
-      public:
-        uint32_t DataSize() override {
-            return sizeof(T);
+template <typename T>
+class ExpectNonZero : public detail::CustomTextureExpectation {
+  public:
+    uint32_t DataSize() override { return sizeof(T); }
+
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        ASSERT(size % DataSize() == 0 && size > 0);
+        const T* actual = static_cast<const T*>(data);
+        T value = *actual;
+        if (value == T(0)) {
+            return testing::AssertionFailure()
+                   << "Expected data to be non-zero, was " << value << std::endl;
         }
-
-        testing::AssertionResult Check(const void* data, size_t size) override {
-            ASSERT(size % DataSize() == 0 && size > 0);
-            const T* actual = static_cast<const T*>(data);
-            T value = *actual;
-            if (value == T(0)) {
+        for (size_t i = 0; i < size / DataSize(); ++i) {
+            if (actual[i] != value) {
                 return testing::AssertionFailure()
-                       << "Expected data to be non-zero, was " << value << std::endl;
+                       << "Expected data[" << i << "] to match non-zero value " << value
+                       << ", actual " << actual[i] << std::endl;
             }
-            for (size_t i = 0; i < size / DataSize(); ++i) {
-                if (actual[i] != value) {
-                    return testing::AssertionFailure()
-                           << "Expected data[" << i << "] to match non-zero value " << value
-                           << ", actual " << actual[i] << std::endl;
-                }
-            }
-
-            return testing::AssertionSuccess();
         }
-    };
+
+        return testing::AssertionSuccess();
+    }
+};
 
 #define EXPECT_TEXTURE_NONZERO(T, ...) \
     AddTextureExpectation(__FILE__, __LINE__, new ExpectNonZero<T>(), __VA_ARGS__)
 
-    class NonzeroTextureCreationTests : public DawnTestWithParams<Params> {
-      protected:
-        constexpr static uint32_t kSize = 128;
+class NonzeroTextureCreationTests : public DawnTestWithParams<Params> {
+  protected:
+    constexpr static uint32_t kSize = 128;
 
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            if (GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
-                SupportsFeatures({wgpu::FeatureName::TextureCompressionBC})) {
-                return {wgpu::FeatureName::TextureCompressionBC};
-            }
-            return {};
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        if (GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+            SupportsFeatures({wgpu::FeatureName::TextureCompressionBC})) {
+            return {wgpu::FeatureName::TextureCompressionBC};
         }
+        return {};
+    }
 
-        void Run() {
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
-                                     !SupportsFeatures({wgpu::FeatureName::TextureCompressionBC}));
+    void Run() {
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+                                 !SupportsFeatures({wgpu::FeatureName::TextureCompressionBC}));
 
-            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
-            // reading from Snorm textures.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::RGBA8Snorm &&
-                                     HasToggleEnabled("disable_snorm_read"));
+        // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+        // reading from Snorm textures.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::RGBA8Snorm &&
+                                 HasToggleEnabled("disable_snorm_read"));
 
-            // TODO(crbug.com/dawn/791): Determine Intel specific platforms this occurs on, and
-            // implement a workaround on all backends (happens on Windows too, but not on our test
-            // machines).
-            DAWN_SUPPRESS_TEST_IF(
-                (GetParam().mFormat == wgpu::TextureFormat::Depth32Float ||
-                 GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8) &&
-                IsMetal() && IsIntel() && GetParam().mMip != 0);
+        // TODO(crbug.com/dawn/791): Determine Intel specific platforms this occurs on, and
+        // implement a workaround on all backends (happens on Windows too, but not on our test
+        // machines).
+        DAWN_SUPPRESS_TEST_IF((GetParam().mFormat == wgpu::TextureFormat::Depth32Float ||
+                               GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8) &&
+                              IsMetal() && IsIntel() && GetParam().mMip != 0);
 
-            // TODO(crbug.com/dawn/1071): Implement a workaround on Intel/Metal backends.
-            DAWN_SUPPRESS_TEST_IF((GetParam().mFormat == wgpu::TextureFormat::R8Unorm ||
-                                   GetParam().mFormat == wgpu::TextureFormat::RG8Unorm) &&
-                                  GetParam().mMipCount > 1 &&
-                                  HasToggleEnabled("disable_r8_rg8_mipmaps"));
+        // TODO(crbug.com/dawn/1071): Implement a workaround on Intel/Metal backends.
+        DAWN_SUPPRESS_TEST_IF((GetParam().mFormat == wgpu::TextureFormat::R8Unorm ||
+                               GetParam().mFormat == wgpu::TextureFormat::RG8Unorm) &&
+                              GetParam().mMipCount > 1 &&
+                              HasToggleEnabled("disable_r8_rg8_mipmaps"));
 
-            // TODO(crbug.com/dawn/667): ANGLE claims to support NV_read_stencil, but won't read
-            // correctly from a DEPTH32F_STENCIL8 texture.
-            DAWN_SUPPRESS_TEST_IF(GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8 &&
-                                  GetParam().mAspect == wgpu::TextureAspect::StencilOnly &&
-                                  IsANGLE());
+        // TODO(crbug.com/dawn/667): ANGLE claims to support NV_read_stencil, but won't read
+        // correctly from a DEPTH32F_STENCIL8 texture.
+        DAWN_SUPPRESS_TEST_IF(GetParam().mFormat == wgpu::TextureFormat::Depth24PlusStencil8 &&
+                              GetParam().mAspect == wgpu::TextureAspect::StencilOnly && IsANGLE());
 
-            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
-            // reading depth.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::DepthOnly &&
-                                     HasToggleEnabled("disable_depth_read"));
+        // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+        // reading depth.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::DepthOnly &&
+                                 HasToggleEnabled("disable_depth_read"));
 
-            // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
-            // reading stencil.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::StencilOnly &&
-                                     HasToggleEnabled("disable_stencil_read"));
+        // TODO(crbug.com/dawn/667): Work around the fact that some platforms do not support
+        // reading stencil.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mAspect == wgpu::TextureAspect::StencilOnly &&
+                                 HasToggleEnabled("disable_stencil_read"));
 
-            // GL may support the feature, but reading data back is not implemented.
-            DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
-                                     (IsOpenGL() || IsOpenGLES()));
+        // GL may support the feature, but reading data back is not implemented.
+        DAWN_TEST_UNSUPPORTED_IF(GetParam().mFormat == wgpu::TextureFormat::BC1RGBAUnorm &&
+                                 (IsOpenGL() || IsOpenGLES()));
 
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = GetParam().mDimension;
-            descriptor.size.width = kSize;
-            descriptor.size.height = kSize;
-            descriptor.size.depthOrArrayLayers = GetParam().mDepthOrArrayLayers;
-            descriptor.sampleCount = GetParam().mSampleCount;
-            descriptor.format = GetParam().mFormat;
-            descriptor.usage = GetParam().mUsage;
-            descriptor.mipLevelCount = GetParam().mMipCount;
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = GetParam().mDimension;
+        descriptor.size.width = kSize;
+        descriptor.size.height = kSize;
+        descriptor.size.depthOrArrayLayers = GetParam().mDepthOrArrayLayers;
+        descriptor.sampleCount = GetParam().mSampleCount;
+        descriptor.format = GetParam().mFormat;
+        descriptor.usage = GetParam().mUsage;
+        descriptor.mipLevelCount = GetParam().mMipCount;
 
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
 
-            uint32_t mip = GetParam().mMip;
-            uint32_t mipSize = std::max(kSize >> mip, 1u);
-            uint32_t depthOrArrayLayers = GetParam().mDimension == wgpu::TextureDimension::e3D
-                                              ? std::max(GetParam().mDepthOrArrayLayers >> mip, 1u)
-                                              : GetParam().mDepthOrArrayLayers;
-            switch (GetParam().mFormat) {
-                case wgpu::TextureFormat::R8Unorm: {
-                    if (GetParam().mSampleCount > 1) {
-                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 1,
-                                                    GetParam().mSampleCount, 0, mip,
-                                                    new ExpectNonZero<float>());
-                    } else {
-                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint8_t>(), texture, {0, 0, 0},
-                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::RG8Unorm: {
-                    if (GetParam().mSampleCount > 1) {
-                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 2,
-                                                    GetParam().mSampleCount, 0, mip,
-                                                    new ExpectNonZero<float>());
-                    } else {
-                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint16_t>(), texture, {0, 0, 0},
-                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::RGBA8Unorm:
-                case wgpu::TextureFormat::RGBA8Snorm: {
-                    if (GetParam().mSampleCount > 1) {
-                        ExpectMultisampledFloatData(texture, mipSize, mipSize, 4,
-                                                    GetParam().mSampleCount, 0, mip,
-                                                    new ExpectNonZero<float>());
-                    } else {
-                        EXPECT_TEXTURE_EQ(new ExpectNonZero<uint32_t>(), texture, {0, 0, 0},
-                                          {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::Depth32Float: {
-                    EXPECT_TEXTURE_EQ(new ExpectNonZero<float>(), texture, {0, 0, 0},
+        uint32_t mip = GetParam().mMip;
+        uint32_t mipSize = std::max(kSize >> mip, 1u);
+        uint32_t depthOrArrayLayers = GetParam().mDimension == wgpu::TextureDimension::e3D
+                                          ? std::max(GetParam().mDepthOrArrayLayers >> mip, 1u)
+                                          : GetParam().mDepthOrArrayLayers;
+        switch (GetParam().mFormat) {
+            case wgpu::TextureFormat::R8Unorm: {
+                if (GetParam().mSampleCount > 1) {
+                    ExpectMultisampledFloatData(texture, mipSize, mipSize, 1,
+                                                GetParam().mSampleCount, 0, mip,
+                                                new ExpectNonZero<float>());
+                } else {
+                    EXPECT_TEXTURE_EQ(new ExpectNonZero<uint8_t>(), texture, {0, 0, 0},
                                       {mipSize, mipSize, depthOrArrayLayers}, mip);
-                    break;
                 }
-                case wgpu::TextureFormat::Depth24PlusStencil8: {
-                    switch (GetParam().mAspect) {
-                        case wgpu::TextureAspect::DepthOnly: {
-                            for (uint32_t arrayLayer = 0;
-                                 arrayLayer < GetParam().mDepthOrArrayLayers; ++arrayLayer) {
-                                ExpectSampledDepthData(texture, mipSize, mipSize, arrayLayer, mip,
-                                                       new ExpectNonZero<float>())
-                                    << "arrayLayer " << arrayLayer;
-                            }
-                            break;
-                        }
-                        case wgpu::TextureAspect::StencilOnly: {
-                            uint32_t texelCount = mipSize * mipSize * depthOrArrayLayers;
-                            std::vector<uint8_t> expectedStencil(texelCount, 1);
-                            EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0, 0},
-                                              {mipSize, mipSize, depthOrArrayLayers}, mip,
-                                              wgpu::TextureAspect::StencilOnly);
-
-                            break;
-                        }
-                        default:
-                            UNREACHABLE();
-                    }
-                    break;
-                }
-                case wgpu::TextureFormat::BC1RGBAUnorm: {
-                    // Set buffer with dirty data so we know it is cleared by the lazy cleared
-                    // texture copy
-                    uint32_t blockWidth = utils::GetTextureFormatBlockWidth(GetParam().mFormat);
-                    uint32_t blockHeight = utils::GetTextureFormatBlockHeight(GetParam().mFormat);
-                    wgpu::Extent3D copySize = {Align(mipSize, blockWidth),
-                                               Align(mipSize, blockHeight), depthOrArrayLayers};
-
-                    uint32_t bytesPerRow =
-                        utils::GetMinimumBytesPerRow(GetParam().mFormat, copySize.width);
-                    uint32_t rowsPerImage = copySize.height / blockHeight;
-
-                    uint64_t bufferSize = utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage,
-                                                                     copySize, GetParam().mFormat);
-
-                    std::vector<uint8_t> data(bufferSize, 100);
-                    wgpu::Buffer bufferDst = utils::CreateBufferFromData(
-                        device, data.data(), bufferSize, wgpu::BufferUsage::CopySrc);
-
-                    wgpu::ImageCopyBuffer imageCopyBuffer =
-                        utils::CreateImageCopyBuffer(bufferDst, 0, bytesPerRow, rowsPerImage);
-                    wgpu::ImageCopyTexture imageCopyTexture =
-                        utils::CreateImageCopyTexture(texture, mip, {0, 0, 0});
-
-                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                    encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
-                    wgpu::CommandBuffer commands = encoder.Finish();
-                    queue.Submit(1, &commands);
-
-                    uint32_t copiedWidthInBytes =
-                        utils::GetTexelBlockSizeInBytes(GetParam().mFormat) * copySize.width /
-                        blockWidth;
-                    uint8_t* d = data.data();
-                    for (uint32_t z = 0; z < depthOrArrayLayers; ++z) {
-                        for (uint32_t row = 0; row < copySize.height / blockHeight; ++row) {
-                            std::fill_n(d, copiedWidthInBytes, 1);
-                            d += bytesPerRow;
-                        }
-                    }
-                    EXPECT_BUFFER_U8_RANGE_EQ(data.data(), bufferDst, 0, bufferSize);
-                    break;
-                }
-                default:
-                    UNREACHABLE();
+                break;
             }
-        }
-    };
+            case wgpu::TextureFormat::RG8Unorm: {
+                if (GetParam().mSampleCount > 1) {
+                    ExpectMultisampledFloatData(texture, mipSize, mipSize, 2,
+                                                GetParam().mSampleCount, 0, mip,
+                                                new ExpectNonZero<float>());
+                } else {
+                    EXPECT_TEXTURE_EQ(new ExpectNonZero<uint16_t>(), texture, {0, 0, 0},
+                                      {mipSize, mipSize, depthOrArrayLayers}, mip);
+                }
+                break;
+            }
+            case wgpu::TextureFormat::RGBA8Unorm:
+            case wgpu::TextureFormat::RGBA8Snorm: {
+                if (GetParam().mSampleCount > 1) {
+                    ExpectMultisampledFloatData(texture, mipSize, mipSize, 4,
+                                                GetParam().mSampleCount, 0, mip,
+                                                new ExpectNonZero<float>());
+                } else {
+                    EXPECT_TEXTURE_EQ(new ExpectNonZero<uint32_t>(), texture, {0, 0, 0},
+                                      {mipSize, mipSize, depthOrArrayLayers}, mip);
+                }
+                break;
+            }
+            case wgpu::TextureFormat::Depth32Float: {
+                EXPECT_TEXTURE_EQ(new ExpectNonZero<float>(), texture, {0, 0, 0},
+                                  {mipSize, mipSize, depthOrArrayLayers}, mip);
+                break;
+            }
+            case wgpu::TextureFormat::Depth24PlusStencil8: {
+                switch (GetParam().mAspect) {
+                    case wgpu::TextureAspect::DepthOnly: {
+                        for (uint32_t arrayLayer = 0; arrayLayer < GetParam().mDepthOrArrayLayers;
+                             ++arrayLayer) {
+                            ExpectSampledDepthData(texture, mipSize, mipSize, arrayLayer, mip,
+                                                   new ExpectNonZero<float>())
+                                << "arrayLayer " << arrayLayer;
+                        }
+                        break;
+                    }
+                    case wgpu::TextureAspect::StencilOnly: {
+                        uint32_t texelCount = mipSize * mipSize * depthOrArrayLayers;
+                        std::vector<uint8_t> expectedStencil(texelCount, 1);
+                        EXPECT_TEXTURE_EQ(expectedStencil.data(), texture, {0, 0, 0},
+                                          {mipSize, mipSize, depthOrArrayLayers}, mip,
+                                          wgpu::TextureAspect::StencilOnly);
 
-    class NonzeroNonrenderableTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroCompressedTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroDepthTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroDepthStencilTextureCreationTests : public NonzeroTextureCreationTests {};
-    class NonzeroMultisampledTextureCreationTests : public NonzeroTextureCreationTests {};
+                        break;
+                    }
+                    default:
+                        UNREACHABLE();
+                }
+                break;
+            }
+            case wgpu::TextureFormat::BC1RGBAUnorm: {
+                // Set the buffer with dirty data so we know it is cleared by the
+                // lazy-cleared texture copy.
+                uint32_t blockWidth = utils::GetTextureFormatBlockWidth(GetParam().mFormat);
+                uint32_t blockHeight = utils::GetTextureFormatBlockHeight(GetParam().mFormat);
+                wgpu::Extent3D copySize = {Align(mipSize, blockWidth), Align(mipSize, blockHeight),
+                                           depthOrArrayLayers};
+
+                uint32_t bytesPerRow =
+                    utils::GetMinimumBytesPerRow(GetParam().mFormat, copySize.width);
+                uint32_t rowsPerImage = copySize.height / blockHeight;
+
+                uint64_t bufferSize = utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage,
+                                                                 copySize, GetParam().mFormat);
+
+                std::vector<uint8_t> data(bufferSize, 100);
+                wgpu::Buffer bufferDst = utils::CreateBufferFromData(
+                    device, data.data(), bufferSize, wgpu::BufferUsage::CopySrc);
+
+                wgpu::ImageCopyBuffer imageCopyBuffer =
+                    utils::CreateImageCopyBuffer(bufferDst, 0, bytesPerRow, rowsPerImage);
+                wgpu::ImageCopyTexture imageCopyTexture =
+                    utils::CreateImageCopyTexture(texture, mip, {0, 0, 0});
+
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                encoder.CopyTextureToBuffer(&imageCopyTexture, &imageCopyBuffer, &copySize);
+                wgpu::CommandBuffer commands = encoder.Finish();
+                queue.Submit(1, &commands);
+
+                uint32_t copiedWidthInBytes = utils::GetTexelBlockSizeInBytes(GetParam().mFormat) *
+                                              copySize.width / blockWidth;
+                uint8_t* d = data.data();
+                for (uint32_t z = 0; z < depthOrArrayLayers; ++z) {
+                    for (uint32_t row = 0; row < copySize.height / blockHeight; ++row) {
+                        std::fill_n(d, copiedWidthInBytes, 1);
+                        d += bytesPerRow;
+                    }
+                }
+                EXPECT_BUFFER_U8_RANGE_EQ(data.data(), bufferDst, 0, bufferSize);
+                break;
+            }
+            default:
+                UNREACHABLE();
+        }
+    }
+};
+
+class NonzeroNonrenderableTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroCompressedTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroDepthTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroDepthStencilTextureCreationTests : public NonzeroTextureCreationTests {};
+class NonzeroMultisampledTextureCreationTests : public NonzeroTextureCreationTests {};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/end2end/QueryTests.cpp b/src/dawn/tests/end2end/QueryTests.cpp
index 28f9761..3fa4860 100644
--- a/src/dawn/tests/end2end/QueryTests.cpp
+++ b/src/dawn/tests/end2end/QueryTests.cpp
@@ -41,9 +41,7 @@
 
     ~OcclusionExpectation() override = default;
 
-    explicit OcclusionExpectation(Result expected) {
-        mExpected = expected;
-    }
+    explicit OcclusionExpectation(Result expected) { mExpected = expected; }
 
     testing::AssertionResult Check(const void* data, size_t size) override {
         ASSERT(size % sizeof(uint64_t) == 0);
diff --git a/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp b/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
index a3a0979b..ad5c697 100644
--- a/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
+++ b/src/dawn/tests/end2end/ReadOnlyDepthStencilAttachmentTests.cpp
@@ -22,8 +22,8 @@
 constexpr static uint32_t kSize = 4;
 
 namespace {
-    using TextureFormat = wgpu::TextureFormat;
-    DAWN_TEST_PARAM_STRUCT(ReadOnlyDepthStencilAttachmentTestsParams, TextureFormat);
+using TextureFormat = wgpu::TextureFormat;
+DAWN_TEST_PARAM_STRUCT(ReadOnlyDepthStencilAttachmentTestsParams, TextureFormat);
 }  // namespace
 
 class ReadOnlyDepthStencilAttachmentTests
@@ -57,9 +57,7 @@
         }
     }
 
-    bool IsFormatSupported() const {
-        return mIsFormatSupported;
-    }
+    bool IsFormatSupported() const { return mIsFormatSupported; }
 
     wgpu::RenderPipeline CreateRenderPipeline(wgpu::TextureAspect aspect,
                                               wgpu::TextureFormat format,
diff --git a/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp b/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
index f61e7d8..6fb5ddf 100644
--- a/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
+++ b/src/dawn/tests/end2end/RenderPassLoadOpTests.cpp
@@ -23,8 +23,7 @@
 
 class DrawQuad {
   public:
-    DrawQuad() {
-    }
+    DrawQuad() {}
     DrawQuad(wgpu::Device device, const char* vsSource, const char* fsSource) : device(device) {
         vsModule = utils::CreateShaderModule(device, vsSource);
         fsModule = utils::CreateShaderModule(device, fsSource);
diff --git a/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp b/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
index ef0608c..58c45ce 100644
--- a/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
+++ b/src/dawn/tests/end2end/RequiredBufferSizeInCopyTests.cpp
@@ -26,28 +26,28 @@
 constexpr static uint32_t kBytesPerBlock = 4;
 
 namespace {
-    enum class Type { B2TCopy, T2BCopy };
+enum class Type { B2TCopy, T2BCopy };
 
-    std::ostream& operator<<(std::ostream& o, Type copyType) {
-        switch (copyType) {
-            case Type::B2TCopy:
-                o << "B2TCopy";
-                break;
-            case Type::T2BCopy:
-                o << "T2BCopy";
-                break;
-        }
-        return o;
+std::ostream& operator<<(std::ostream& o, Type copyType) {
+    switch (copyType) {
+        case Type::B2TCopy:
+            o << "B2TCopy";
+            break;
+        case Type::T2BCopy:
+            o << "T2BCopy";
+            break;
     }
+    return o;
+}
 
-    using TextureDimension = wgpu::TextureDimension;
-    using CopyDepth = uint32_t;
-    using ExtraRowsPerImage = uint64_t;
-    DAWN_TEST_PARAM_STRUCT(RequiredBufferSizeInCopyTestsParams,
-                           Type,
-                           TextureDimension,
-                           CopyDepth,
-                           ExtraRowsPerImage);
+using TextureDimension = wgpu::TextureDimension;
+using CopyDepth = uint32_t;
+using ExtraRowsPerImage = uint64_t;
+DAWN_TEST_PARAM_STRUCT(RequiredBufferSizeInCopyTestsParams,
+                       Type,
+                       TextureDimension,
+                       CopyDepth,
+                       ExtraRowsPerImage);
 }  // namespace
 
 // Tests in this file are used to expose an error on D3D12 about required minimum buffer size.
diff --git a/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp b/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
index fdec46e..1e5a91e 100644
--- a/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
+++ b/src/dawn/tests/end2end/SamplerFilterAnisotropicTests.cpp
@@ -15,20 +15,20 @@
 #include <cmath>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
 constexpr static unsigned int kRTSize = 16;
 
 namespace {
-    // MipLevel colors, ordering from base level to high level
-    // each mipmap of the texture is having a different color
-    // so we can check if the sampler anisotropic filtering is fetching
-    // from the correct miplevel
-    const std::array<RGBA8, 3> colors = {RGBA8::kRed, RGBA8::kGreen, RGBA8::kBlue};
+// MipLevel colors, ordered from the base level to the highest level.
+// Each mipmap of the texture has a different color
+// so we can check whether the sampler's anisotropic filtering is fetching
+// from the correct miplevel.
+const std::array<RGBA8, 3> colors = {RGBA8::kRed, RGBA8::kGreen, RGBA8::kBlue};
 }  // namespace
 
 class SamplerFilterAnisotropicTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/SamplerTests.cpp b/src/dawn/tests/end2end/SamplerTests.cpp
index c2c9830..3ad38ec 100644
--- a/src/dawn/tests/end2end/SamplerTests.cpp
+++ b/src/dawn/tests/end2end/SamplerTests.cpp
@@ -25,28 +25,28 @@
 constexpr static unsigned int kRTSize = 64;
 
 namespace {
-    struct AddressModeTestCase {
-        wgpu::AddressMode mMode;
-        uint8_t mExpected2;
-        uint8_t mExpected3;
-    };
-    AddressModeTestCase addressModes[] = {
-        {
-            wgpu::AddressMode::Repeat,
-            0,
-            255,
-        },
-        {
-            wgpu::AddressMode::MirrorRepeat,
-            255,
-            0,
-        },
-        {
-            wgpu::AddressMode::ClampToEdge,
-            255,
-            255,
-        },
-    };
+struct AddressModeTestCase {
+    wgpu::AddressMode mMode;
+    uint8_t mExpected2;
+    uint8_t mExpected3;
+};
+AddressModeTestCase addressModes[] = {
+    {
+        wgpu::AddressMode::Repeat,
+        0,
+        255,
+    },
+    {
+        wgpu::AddressMode::MirrorRepeat,
+        255,
+        0,
+    },
+    {
+        wgpu::AddressMode::ClampToEdge,
+        255,
+        255,
+    },
+};
 }  // namespace
 
 class SamplerTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/ShaderFloat16Tests.cpp b/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
index b1af453..81c7ed6 100644
--- a/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
+++ b/src/dawn/tests/end2end/ShaderFloat16Tests.cpp
@@ -29,9 +29,7 @@
         return {wgpu::FeatureName::DawnShaderFloat16};
     }
 
-    bool IsShaderFloat16Supported() const {
-        return mIsShaderFloat16Supported;
-    }
+    bool IsShaderFloat16Supported() const { return mIsShaderFloat16Supported; }
 
     bool mIsShaderFloat16Supported = false;
 };
diff --git a/src/dawn/tests/end2end/StorageTextureTests.cpp b/src/dawn/tests/end2end/StorageTextureTests.cpp
index 5cd0411..058e108 100644
--- a/src/dawn/tests/end2end/StorageTextureTests.cpp
+++ b/src/dawn/tests/end2end/StorageTextureTests.cpp
@@ -25,11 +25,11 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    bool OpenGLESSupportsStorageTexture(wgpu::TextureFormat format) {
-        // TODO(crbug.com/dawn/595): 32-bit RG* formats are unsupported on OpenGL ES.
-        return format != wgpu::TextureFormat::RG32Float &&
-               format != wgpu::TextureFormat::RG32Sint && format != wgpu::TextureFormat::RG32Uint;
-    }
+bool OpenGLESSupportsStorageTexture(wgpu::TextureFormat format) {
+    // TODO(crbug.com/dawn/595): 32-bit RG* formats are unsupported on OpenGL ES.
+    return format != wgpu::TextureFormat::RG32Float && format != wgpu::TextureFormat::RG32Sint &&
+           format != wgpu::TextureFormat::RG32Uint;
+}
 }  // namespace
 
 class StorageTextureTests : public DawnTest {
diff --git a/src/dawn/tests/end2end/SwapChainValidationTests.cpp b/src/dawn/tests/end2end/SwapChainValidationTests.cpp
index 0da7210..534e95c 100644
--- a/src/dawn/tests/end2end/SwapChainValidationTests.cpp
+++ b/src/dawn/tests/end2end/SwapChainValidationTests.cpp
@@ -70,9 +70,7 @@
     wgpu::SwapChainDescriptor badDescriptor;
 
     // Checks that a RenderAttachment view is an error by trying to create a render pass on it.
-    void CheckTextureViewIsError(wgpu::TextureView view) {
-        CheckTextureView(view, true, false);
-    }
+    void CheckTextureViewIsError(wgpu::TextureView view) { CheckTextureView(view, true, false); }
 
     // Checks that a RenderAttachment view is an error by trying to submit a render pass on it.
     void CheckTextureViewIsDestroyed(wgpu::TextureView view) {
@@ -80,9 +78,7 @@
     }
 
     // Checks that a RenderAttachment view is valid by submitting a render pass on it.
-    void CheckTextureViewIsValid(wgpu::TextureView view) {
-        CheckTextureView(view, false, false);
-    }
+    void CheckTextureViewIsValid(wgpu::TextureView view) { CheckTextureView(view, false, false); }
 
   private:
     void CheckTextureView(wgpu::TextureView view, bool errorAtFinish, bool errorAtSubmit) {
diff --git a/src/dawn/tests/end2end/TextureFormatTests.cpp b/src/dawn/tests/end2end/TextureFormatTests.cpp
index ca875bc..0c7de09 100644
--- a/src/dawn/tests/end2end/TextureFormatTests.cpp
+++ b/src/dawn/tests/end2end/TextureFormatTests.cpp
@@ -18,9 +18,9 @@
 #include <utility>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/TextureUtils.h"
 #include "dawn/utils/WGPUHelpers.h"
@@ -30,8 +30,7 @@
 class ExpectFloatWithTolerance : public detail::Expectation {
   public:
     ExpectFloatWithTolerance(std::vector<float> expected, float tolerance)
-        : mExpected(std::move(expected)), mTolerance(tolerance) {
-    }
+        : mExpected(std::move(expected)), mTolerance(tolerance) {}
 
     testing::AssertionResult Check(const void* data, size_t size) override {
         ASSERT(size == sizeof(float) * mExpected.size());
@@ -78,8 +77,7 @@
 // An expectation for float16 buffers that can correctly compare NaNs (all NaNs are equivalent).
 class ExpectFloat16 : public detail::Expectation {
   public:
-    explicit ExpectFloat16(std::vector<uint16_t> expected) : mExpected(std::move(expected)) {
-    }
+    explicit ExpectFloat16(std::vector<uint16_t> expected) : mExpected(std::move(expected)) {}
 
     testing::AssertionResult Check(const void* data, size_t size) override {
         ASSERT(size == sizeof(uint16_t) * mExpected.size());
diff --git a/src/dawn/tests/end2end/TextureViewTests.cpp b/src/dawn/tests/end2end/TextureViewTests.cpp
index 6c1fed9..2ad6cd0 100644
--- a/src/dawn/tests/end2end/TextureViewTests.cpp
+++ b/src/dawn/tests/end2end/TextureViewTests.cpp
@@ -17,10 +17,10 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/DawnTest.h"
 #include "dawn/common/Assert.h"
 #include "dawn/common/Constants.h"
 #include "dawn/common/Math.h"
+#include "dawn/tests/DawnTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -29,40 +29,40 @@
 constexpr uint32_t kBytesPerTexel = 4;
 
 namespace {
-    wgpu::Texture Create2DTexture(wgpu::Device device,
-                                  uint32_t width,
-                                  uint32_t height,
-                                  uint32_t arrayLayerCount,
-                                  uint32_t mipLevelCount,
-                                  wgpu::TextureUsage usage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = arrayLayerCount;
-        descriptor.sampleCount = 1;
-        descriptor.format = kDefaultFormat;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = usage;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create2DTexture(wgpu::Device device,
+                              uint32_t width,
+                              uint32_t height,
+                              uint32_t arrayLayerCount,
+                              uint32_t mipLevelCount,
+                              wgpu::TextureUsage usage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = arrayLayerCount;
+    descriptor.sampleCount = 1;
+    descriptor.format = kDefaultFormat;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = usage;
+    return device.CreateTexture(&descriptor);
+}
 
-    wgpu::Texture Create3DTexture(wgpu::Device device,
-                                  wgpu::Extent3D size,
-                                  uint32_t mipLevelCount,
-                                  wgpu::TextureUsage usage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e3D;
-        descriptor.size = size;
-        descriptor.sampleCount = 1;
-        descriptor.format = kDefaultFormat;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = usage;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create3DTexture(wgpu::Device device,
+                              wgpu::Extent3D size,
+                              uint32_t mipLevelCount,
+                              wgpu::TextureUsage usage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e3D;
+    descriptor.size = size;
+    descriptor.sampleCount = 1;
+    descriptor.format = kDefaultFormat;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = usage;
+    return device.CreateTexture(&descriptor);
+}
 
-    wgpu::ShaderModule CreateDefaultVertexShaderModule(wgpu::Device device) {
-        return utils::CreateShaderModule(device, R"(
+wgpu::ShaderModule CreateDefaultVertexShaderModule(wgpu::Device device) {
+    return utils::CreateShaderModule(device, R"(
             struct VertexOut {
                 @location(0) texCoord : vec2<f32>,
                 @builtin(position) position : vec4<f32>,
@@ -90,7 +90,7 @@
                 return output;
             }
         )");
-    }
+}
 }  // anonymous namespace
 
 class TextureViewSamplingTest : public DawnTest {
diff --git a/src/dawn/tests/end2end/TextureZeroInitTests.cpp b/src/dawn/tests/end2end/TextureZeroInitTests.cpp
index 8a19943..aa30357 100644
--- a/src/dawn/tests/end2end/TextureZeroInitTests.cpp
+++ b/src/dawn/tests/end2end/TextureZeroInitTests.cpp
@@ -1721,9 +1721,7 @@
         return {wgpu::FeatureName::TextureCompressionBC};
     }
 
-    bool IsBCFormatSupported() const {
-        return mIsBCFormatSupported;
-    }
+    bool IsBCFormatSupported() const { return mIsBCFormatSupported; }
 
     // Copy the compressed texture data into the destination texture.
     void InitializeDataInCompressedTextureAndExpectLazyClear(
diff --git a/src/dawn/tests/end2end/VideoViewsTests.cpp b/src/dawn/tests/end2end/VideoViewsTests.cpp
index c195996..32666db 100644
--- a/src/dawn/tests/end2end/VideoViewsTests.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests.cpp
@@ -19,8 +19,7 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 VideoViewsTestBackend::PlatformTexture::PlatformTexture(wgpu::Texture&& texture)
-    : wgpuTexture(texture) {
-}
+    : wgpuTexture(texture) {}
 VideoViewsTestBackend::PlatformTexture::~PlatformTexture() = default;
 
 VideoViewsTestBackend::~VideoViewsTestBackend() = default;
diff --git a/src/dawn/tests/end2end/VideoViewsTests.h b/src/dawn/tests/end2end/VideoViewsTests.h
index a97999a..e614fea 100644
--- a/src/dawn/tests/end2end/VideoViewsTests.h
+++ b/src/dawn/tests/end2end/VideoViewsTests.h
@@ -29,8 +29,7 @@
     virtual ~VideoViewsTestBackend();
 
     virtual void OnSetUp(WGPUDevice device) = 0;
-    virtual void OnTearDown() {
-    }
+    virtual void OnTearDown() {}
 
     class PlatformTexture {
       public:
diff --git a/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp b/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
index 9116de1..9a45b6c 100644
--- a/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests_gbm.cpp
@@ -27,16 +27,15 @@
 // "linux-chromeos-rel"'s gbm.h is too old to compile, missing this change at least:
 // https://chromium-review.googlesource.com/c/chromiumos/platform/minigbm/+/1963001/10/gbm.h#244
 #ifndef MINIGBM
-#    define GBM_BO_USE_TEXTURING (1 << 5)
-#    define GBM_BO_USE_SW_WRITE_RARELY (1 << 12)
-#    define GBM_BO_USE_HW_VIDEO_DECODER (1 << 13)
+#define GBM_BO_USE_TEXTURING (1 << 5)
+#define GBM_BO_USE_SW_WRITE_RARELY (1 << 12)
+#define GBM_BO_USE_HW_VIDEO_DECODER (1 << 13)
 #endif
 
 class PlatformTextureGbm : public VideoViewsTestBackend::PlatformTexture {
   public:
     PlatformTextureGbm(wgpu::Texture&& texture, gbm_bo* gbmBo)
-        : PlatformTexture(std::move(texture)), mGbmBo(gbmBo) {
-    }
+        : PlatformTexture(std::move(texture)), mGbmBo(gbmBo) {}
     ~PlatformTextureGbm() override = default;
 
     // TODO(chromium:1258986): Add DISJOINT vkImage support for multi-plannar formats.
@@ -52,9 +51,7 @@
         return true;
     }
 
-    gbm_bo* GetGbmBo() {
-        return mGbmBo;
-    }
+    gbm_bo* GetGbmBo() { return mGbmBo; }
 
   private:
     gbm_bo* mGbmBo = nullptr;
@@ -67,9 +64,7 @@
         mGbmDevice = CreateGbmDevice();
     }
 
-    void OnTearDown() override {
-        gbm_device_destroy(mGbmDevice);
-    }
+    void OnTearDown() override { gbm_device_destroy(mGbmDevice); }
 
   private:
     gbm_device* CreateGbmDevice() {
diff --git a/src/dawn/tests/end2end/VideoViewsTests_mac.cpp b/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
index 449eba0..c7f480e 100644
--- a/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests_mac.cpp
@@ -26,11 +26,11 @@
 #include "dawn/native/MetalBackend.h"
 
 namespace {
-    void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
-        CFNumberRef number(CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
-        CFDictionaryAddValue(dictionary, key, number);
-        CFRelease(number);
-    }
+void AddIntegerValue(CFMutableDictionaryRef dictionary, const CFStringRef key, int32_t value) {
+    CFNumberRef number(CFNumberCreate(nullptr, kCFNumberSInt32Type, &value));
+    CFDictionaryAddValue(dictionary, key, number);
+    CFRelease(number);
+}
 
 }  // anonymous namespace
 
@@ -40,13 +40,9 @@
         : PlatformTexture(std::move(texture)) {
         mIOSurface = AcquireCFRef<IOSurfaceRef>(iosurface);
     }
-    ~PlatformTextureIOSurface() override {
-        mIOSurface = nullptr;
-    }
+    ~PlatformTextureIOSurface() override { mIOSurface = nullptr; }
 
-    bool CanWrapAsWGPUTexture() override {
-        return true;
-    }
+    bool CanWrapAsWGPUTexture() override { return true; }
 
   private:
     CFRef<IOSurfaceRef> mIOSurface = nullptr;
@@ -54,9 +50,7 @@
 
 class VideoViewsTestBackendIOSurface : public VideoViewsTestBackend {
   public:
-    void OnSetUp(WGPUDevice device) override {
-        mWGPUDevice = device;
-    }
+    void OnSetUp(WGPUDevice device) override { mWGPUDevice = device; }
 
   private:
     OSType ToCVFormat(wgpu::TextureFormat format) {
@@ -173,8 +167,7 @@
     }
 
     void DestroyVideoTextureForTest(
-        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {
-    }
+        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& platformTexture) override {}
 
     WGPUDevice mWGPUDevice = nullptr;
 };
diff --git a/src/dawn/tests/end2end/VideoViewsTests_win.cpp b/src/dawn/tests/end2end/VideoViewsTests_win.cpp
index beba015..a402dee 100644
--- a/src/dawn/tests/end2end/VideoViewsTests_win.cpp
+++ b/src/dawn/tests/end2end/VideoViewsTests_win.cpp
@@ -29,13 +29,10 @@
 
 class PlatformTextureWin : public VideoViewsTestBackend::PlatformTexture {
   public:
-    explicit PlatformTextureWin(wgpu::Texture&& texture) : PlatformTexture(std::move(texture)) {
-    }
+    explicit PlatformTextureWin(wgpu::Texture&& texture) : PlatformTexture(std::move(texture)) {}
     ~PlatformTextureWin() override = default;
 
-    bool CanWrapAsWGPUTexture() override {
-        return true;
-    }
+    bool CanWrapAsWGPUTexture() override { return true; }
 };
 
 class VideoViewsTestBackendWin : public VideoViewsTestBackend {
@@ -173,8 +170,7 @@
     }
 
     void DestroyVideoTextureForTest(
-        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& PlatformTexture) override {
-    }
+        std::unique_ptr<VideoViewsTestBackend::PlatformTexture>&& PlatformTexture) override {}
 
     WGPUDevice mWGPUDevice = nullptr;
     ComPtr<ID3D11Device> mD3d11Device;
diff --git a/src/dawn/tests/end2end/WindowSurfaceTests.cpp b/src/dawn/tests/end2end/WindowSurfaceTests.cpp
index 6e45523..f916605 100644
--- a/src/dawn/tests/end2end/WindowSurfaceTests.cpp
+++ b/src/dawn/tests/end2end/WindowSurfaceTests.cpp
@@ -25,17 +25,17 @@
 
 // Include windows.h before GLFW so GLFW's APIENTRY macro doesn't conflict with windows.h's.
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include "dawn/common/windows_with_undefs.h"
+#include "dawn/common/windows_with_undefs.h"
 #endif  // defined(DAWN_PLATFORM_WINDOWS)
 
 #include "GLFW/glfw3.h"
 
 #if defined(DAWN_USE_X11)
-#    include "dawn/common/xlib_with_undefs.h"
+#include "dawn/common/xlib_with_undefs.h"
 #endif  // defined(DAWN_USE_X11)
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-#    include "dawn/utils/ObjCUtils.h"
+#include "dawn/utils/ObjCUtils.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_METAL)
 
 #include "GLFW/glfw3native.h"
diff --git a/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp b/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp
index d07b18d..8467437 100644
--- a/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp
+++ b/src/dawn/tests/end2end/mocks/CachingInterfaceMock.cpp
@@ -79,8 +79,7 @@
 }
 
 DawnCachingMockPlatform::DawnCachingMockPlatform(dawn::platform::CachingInterface* cachingInterface)
-    : mCachingInterface(cachingInterface) {
-}
+    : mCachingInterface(cachingInterface) {}
 
 dawn::platform::CachingInterface* DawnCachingMockPlatform::GetCachingInterface(
     const void* fingerprint,
diff --git a/src/dawn/tests/perf_tests/BufferUploadPerf.cpp b/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
index 0331e47..8381729 100644
--- a/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
+++ b/src/dawn/tests/perf_tests/BufferUploadPerf.cpp
@@ -19,68 +19,67 @@
 
 namespace {
 
-    constexpr unsigned int kNumIterations = 50;
+constexpr unsigned int kNumIterations = 50;
 
-    enum class UploadMethod {
-        WriteBuffer,
-        MappedAtCreation,
-    };
+enum class UploadMethod {
+    WriteBuffer,
+    MappedAtCreation,
+};
 
-    // Perf delta exists between ranges [0, 1MB] vs [1MB, MAX_SIZE).
-    // These are sample buffer sizes within each range.
-    enum class UploadSize {
-        BufferSize_1KB = 1 * 1024,
-        BufferSize_64KB = 64 * 1024,
-        BufferSize_1MB = 1 * 1024 * 1024,
+// Perf delta exists between ranges [0, 1MB] vs [1MB, MAX_SIZE).
+// These are sample buffer sizes within each range.
+enum class UploadSize {
+    BufferSize_1KB = 1 * 1024,
+    BufferSize_64KB = 64 * 1024,
+    BufferSize_1MB = 1 * 1024 * 1024,
 
-        BufferSize_4MB = 4 * 1024 * 1024,
-        BufferSize_16MB = 16 * 1024 * 1024,
-    };
+    BufferSize_4MB = 4 * 1024 * 1024,
+    BufferSize_16MB = 16 * 1024 * 1024,
+};
 
-    struct BufferUploadParams : AdapterTestParam {
-        BufferUploadParams(const AdapterTestParam& param,
-                           UploadMethod uploadMethod,
-                           UploadSize uploadSize)
-            : AdapterTestParam(param), uploadMethod(uploadMethod), uploadSize(uploadSize) {
-        }
+struct BufferUploadParams : AdapterTestParam {
+    BufferUploadParams(const AdapterTestParam& param,
+                       UploadMethod uploadMethod,
+                       UploadSize uploadSize)
+        : AdapterTestParam(param), uploadMethod(uploadMethod), uploadSize(uploadSize) {}
 
-        UploadMethod uploadMethod;
-        UploadSize uploadSize;
-    };
+    UploadMethod uploadMethod;
+    UploadSize uploadSize;
+};
 
-    std::ostream& operator<<(std::ostream& ostream, const BufferUploadParams& param) {
-        ostream << static_cast<const AdapterTestParam&>(param);
+std::ostream& operator<<(std::ostream& ostream, const BufferUploadParams& param) {
+    ostream << static_cast<const AdapterTestParam&>(param);
 
-        switch (param.uploadMethod) {
-            case UploadMethod::WriteBuffer:
-                ostream << "_WriteBuffer";
-                break;
-            case UploadMethod::MappedAtCreation:
-                ostream << "_MappedAtCreation";
-                break;
-        }
-
-        switch (param.uploadSize) {
-            case UploadSize::BufferSize_1KB:
-                ostream << "_BufferSize_1KB";
-                break;
-            case UploadSize::BufferSize_64KB:
-                ostream << "_BufferSize_64KB";
-                break;
-            case UploadSize::BufferSize_1MB:
-                ostream << "_BufferSize_1MB";
-                break;
-            case UploadSize::BufferSize_4MB:
-                ostream << "_BufferSize_4MB";
-                break;
-            case UploadSize::BufferSize_16MB:
-                ostream << "_BufferSize_16MB";
-                break;
-        }
-
-        return ostream;
+    switch (param.uploadMethod) {
+        case UploadMethod::WriteBuffer:
+            ostream << "_WriteBuffer";
+            break;
+        case UploadMethod::MappedAtCreation:
+            ostream << "_MappedAtCreation";
+            break;
     }
 
+    switch (param.uploadSize) {
+        case UploadSize::BufferSize_1KB:
+            ostream << "_BufferSize_1KB";
+            break;
+        case UploadSize::BufferSize_64KB:
+            ostream << "_BufferSize_64KB";
+            break;
+        case UploadSize::BufferSize_1MB:
+            ostream << "_BufferSize_1MB";
+            break;
+        case UploadSize::BufferSize_4MB:
+            ostream << "_BufferSize_4MB";
+            break;
+        case UploadSize::BufferSize_16MB:
+            ostream << "_BufferSize_16MB";
+            break;
+    }
+
+    return ostream;
+}
+
 }  // namespace
 
 // Test uploading |kBufferSize| bytes of data |kNumIterations| times.
@@ -88,8 +87,7 @@
   public:
     BufferUploadPerf()
         : DawnPerfTestWithParams(kNumIterations, 1),
-          data(static_cast<size_t>(GetParam().uploadSize)) {
-    }
+          data(static_cast<size_t>(GetParam().uploadSize)) {}
     ~BufferUploadPerf() override = default;
 
     void SetUp() override;
diff --git a/src/dawn/tests/perf_tests/DawnPerfTest.cpp b/src/dawn/tests/perf_tests/DawnPerfTest.cpp
index aece2b8..71d5eec 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTest.cpp
+++ b/src/dawn/tests/perf_tests/DawnPerfTest.cpp
@@ -27,47 +27,47 @@
 
 namespace {
 
-    DawnPerfTestEnvironment* gTestEnv = nullptr;
+DawnPerfTestEnvironment* gTestEnv = nullptr;
 
-    void DumpTraceEventsToJSONFile(
-        const std::vector<DawnPerfTestPlatform::TraceEvent>& traceEventBuffer,
-        const char* traceFile) {
-        std::ofstream outFile;
-        outFile.open(traceFile, std::ios_base::app);
+void DumpTraceEventsToJSONFile(
+    const std::vector<DawnPerfTestPlatform::TraceEvent>& traceEventBuffer,
+    const char* traceFile) {
+    std::ofstream outFile;
+    outFile.open(traceFile, std::ios_base::app);
 
-        for (const DawnPerfTestPlatform::TraceEvent& traceEvent : traceEventBuffer) {
-            const char* category = nullptr;
-            switch (traceEvent.category) {
-                case dawn::platform::TraceCategory::General:
-                    category = "general";
-                    break;
-                case dawn::platform::TraceCategory::Validation:
-                    category = "validation";
-                    break;
-                case dawn::platform::TraceCategory::Recording:
-                    category = "recording";
-                    break;
-                case dawn::platform::TraceCategory::GPUWork:
-                    category = "gpu";
-                    break;
-                default:
-                    UNREACHABLE();
-            }
-
-            uint64_t microseconds = static_cast<uint64_t>(traceEvent.timestamp * 1000.0 * 1000.0);
-
-            outFile << ", { "
-                    << "\"name\": \"" << traceEvent.name << "\", "
-                    << "\"cat\": \"" << category << "\", "
-                    << "\"ph\": \"" << traceEvent.phase << "\", "
-                    << "\"id\": " << traceEvent.id << ", "
-                    << "\"tid\": " << traceEvent.threadId << ", "
-                    << "\"ts\": " << microseconds << ", "
-                    << "\"pid\": \"Dawn\""
-                    << " }";
+    for (const DawnPerfTestPlatform::TraceEvent& traceEvent : traceEventBuffer) {
+        const char* category = nullptr;
+        switch (traceEvent.category) {
+            case dawn::platform::TraceCategory::General:
+                category = "general";
+                break;
+            case dawn::platform::TraceCategory::Validation:
+                category = "validation";
+                break;
+            case dawn::platform::TraceCategory::Recording:
+                category = "recording";
+                break;
+            case dawn::platform::TraceCategory::GPUWork:
+                category = "gpu";
+                break;
+            default:
+                UNREACHABLE();
         }
-        outFile.close();
+
+        uint64_t microseconds = static_cast<uint64_t>(traceEvent.timestamp * 1000.0 * 1000.0);
+
+        outFile << ", { "
+                << "\"name\": \"" << traceEvent.name << "\", "
+                << "\"cat\": \"" << category << "\", "
+                << "\"ph\": \"" << traceEvent.phase << "\", "
+                << "\"id\": " << traceEvent.id << ", "
+                << "\"tid\": " << traceEvent.threadId << ", "
+                << "\"ts\": " << microseconds << ", "
+                << "\"pid\": \"Dawn\""
+                << " }";
     }
+    outFile.close();
+}
 
 }  // namespace
 
@@ -179,8 +179,7 @@
     : mTest(test),
       mIterationsPerStep(iterationsPerStep),
       mMaxStepsInFlight(maxStepsInFlight),
-      mTimer(utils::CreateTimer()) {
-}
+      mTimer(utils::CreateTimer()) {}
 
 DawnPerfTestBase::~DawnPerfTestBase() = default;
 
diff --git a/src/dawn/tests/perf_tests/DawnPerfTest.h b/src/dawn/tests/perf_tests/DawnPerfTest.h
index 7b70c1b..20d2a2f 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTest.h
+++ b/src/dawn/tests/perf_tests/DawnPerfTest.h
@@ -21,7 +21,7 @@
 #include "dawn/tests/DawnTest.h"
 
 namespace utils {
-    class Timer;
+class Timer;
 }
 
 class DawnPerfTestPlatform;
@@ -116,8 +116,7 @@
   protected:
     DawnPerfTestWithParams(unsigned int iterationsPerStep, unsigned int maxStepsInFlight)
         : DawnTestWithParams<Params>(),
-          DawnPerfTestBase(this, iterationsPerStep, maxStepsInFlight) {
-    }
+          DawnPerfTestBase(this, iterationsPerStep, maxStepsInFlight) {}
     void SetUp() override {
         DawnTestWithParams<Params>::SetUp();
 
diff --git a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
index ae5307a..32e77fd 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
+++ b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.cpp
@@ -24,28 +24,27 @@
 #include "dawn/utils/Timer.h"
 namespace {
 
-    struct TraceCategoryInfo {
-        unsigned char enabled;
-        dawn::platform::TraceCategory category;
-    };
+struct TraceCategoryInfo {
+    unsigned char enabled;
+    dawn::platform::TraceCategory category;
+};
 
-    constexpr TraceCategoryInfo gTraceCategories[4] = {
-        {1, dawn::platform::TraceCategory::General},
-        {1, dawn::platform::TraceCategory::Validation},
-        {1, dawn::platform::TraceCategory::Recording},
-        {1, dawn::platform::TraceCategory::GPUWork},
-    };
+constexpr TraceCategoryInfo gTraceCategories[4] = {
+    {1, dawn::platform::TraceCategory::General},
+    {1, dawn::platform::TraceCategory::Validation},
+    {1, dawn::platform::TraceCategory::Recording},
+    {1, dawn::platform::TraceCategory::GPUWork},
+};
 
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::General) == 0);
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Validation) == 1);
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Recording) == 2);
-    static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::GPUWork) == 3);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::General) == 0);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Validation) == 1);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::Recording) == 2);
+static_assert(static_cast<uint32_t>(dawn::platform::TraceCategory::GPUWork) == 3);
 
 }  // anonymous namespace
 
 DawnPerfTestPlatform::DawnPerfTestPlatform()
-    : dawn::platform::Platform(), mTimer(utils::CreateTimer()) {
-}
+    : dawn::platform::Platform(), mTimer(utils::CreateTimer()) {}
 
 DawnPerfTestPlatform::~DawnPerfTestPlatform() = default;
 
diff --git a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
index 996d2bb..6c3a95e 100644
--- a/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
+++ b/src/dawn/tests/perf_tests/DawnPerfTestPlatform.h
@@ -25,7 +25,7 @@
 #include "dawn/platform/DawnPlatform.h"
 
 namespace utils {
-    class Timer;
+class Timer;
 }
 
 class DawnPerfTestPlatform : public dawn::platform::Platform {
@@ -34,15 +34,17 @@
     // See https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
     // Only a subset of the properties are implemented.
     struct TraceEvent final {
-        TraceEvent() {
-        }
+        TraceEvent() {}
         TraceEvent(char phaseIn,
                    dawn::platform::TraceCategory categoryIn,
                    const char* nameIn,
                    uint64_t idIn,
                    double timestampIn)
-            : phase(phaseIn), category(categoryIn), name(nameIn), id(idIn), timestamp(timestampIn) {
-        }
+            : phase(phaseIn),
+              category(categoryIn),
+              name(nameIn),
+              id(idIn),
+              timestamp(timestampIn) {}
 
         char phase = 0;
         dawn::platform::TraceCategory category;
diff --git a/src/dawn/tests/perf_tests/DrawCallPerf.cpp b/src/dawn/tests/perf_tests/DrawCallPerf.cpp
index 2dd0b93..7b2cc02 100644
--- a/src/dawn/tests/perf_tests/DrawCallPerf.cpp
+++ b/src/dawn/tests/perf_tests/DrawCallPerf.cpp
@@ -24,23 +24,23 @@
 
 namespace {
 
-    constexpr unsigned int kNumDraws = 2000;
+constexpr unsigned int kNumDraws = 2000;
 
-    constexpr uint32_t kTextureSize = 64;
-    constexpr size_t kUniformSize = 3 * sizeof(float);
+constexpr uint32_t kTextureSize = 64;
+constexpr size_t kUniformSize = 3 * sizeof(float);
 
-    constexpr float kVertexData[12] = {
-        0.0f, 0.5f, 0.0f, 1.0f, -0.5f, -0.5f, 0.0f, 1.0f, 0.5f, -0.5f, 0.0f, 1.0f,
-    };
+constexpr float kVertexData[12] = {
+    0.0f, 0.5f, 0.0f, 1.0f, -0.5f, -0.5f, 0.0f, 1.0f, 0.5f, -0.5f, 0.0f, 1.0f,
+};
 
-    constexpr char kVertexShader[] = R"(
+constexpr char kVertexShader[] = R"(
         @stage(vertex) fn main(
             @location(0) pos : vec4<f32>
         ) -> @builtin(position) vec4<f32> {
             return pos;
         })";
 
-    constexpr char kFragmentShaderA[] = R"(
+constexpr char kFragmentShaderA[] = R"(
         struct Uniforms {
             color : vec3<f32>
         }
@@ -49,7 +49,7 @@
             return vec4<f32>(uniforms.color * (1.0 / 5000.0), 1.0);
         })";
 
-    constexpr char kFragmentShaderB[] = R"(
+constexpr char kFragmentShaderB[] = R"(
         struct Constants {
             color : vec3<f32>
         }
@@ -63,149 +63,147 @@
             return vec4<f32>((constants.color + uniforms.color) * (1.0 / 5000.0), 1.0);
         })";
 
-    enum class Pipeline {
-        Static,     // Keep the same pipeline for all draws.
-        Redundant,  // Use the same pipeline, but redundantly set it.
-        Dynamic,    // Change the pipeline between draws.
+enum class Pipeline {
+    Static,     // Keep the same pipeline for all draws.
+    Redundant,  // Use the same pipeline, but redundantly set it.
+    Dynamic,    // Change the pipeline between draws.
+};
+
+enum class UniformData {
+    Static,   // Don't update per-draw uniform data.
+    Dynamic,  // Update the per-draw uniform data once per frame.
+};
+
+enum class BindGroup {
+    NoChange,   // Use one bind group for all draws.
+    Redundant,  // Use the same bind group, but redundantly set it.
+    NoReuse,    // Create a new bind group every time.
+    Multiple,   // Use multiple static bind groups.
+    Dynamic,    // Use bind groups with dynamic offsets.
+};
+
+enum class VertexBuffer {
+    NoChange,  // Use one vertex buffer for all draws.
+    Multiple,  // Use multiple static vertex buffers.
+    Dynamic,   // Switch vertex buffers between draws.
+};
+
+enum class RenderBundle {
+    No,   // Record commands in a render pass
+    Yes,  // Record commands in a render bundle
+};
+
+struct DrawCallParam {
+    Pipeline pipelineType;
+    VertexBuffer vertexBufferType;
+    BindGroup bindGroupType;
+    UniformData uniformDataType;
+    RenderBundle withRenderBundle;
+};
+
+using DrawCallParamTuple = std::tuple<Pipeline, VertexBuffer, BindGroup, UniformData, RenderBundle>;
+
+template <typename T>
+unsigned int AssignParam(T& lhs, T rhs) {
+    lhs = rhs;
+    return 0u;
+}
+
+// This helper function allows creating a DrawCallParam from a list of arguments
+// without specifying all of the members. Provided members can be passed once in an arbitrary
+// order. Unspecified members default to:
+//  - Pipeline::Static
+//  - VertexBuffer::NoChange
+//  - BindGroup::NoChange
+//  - UniformData::Static
+//  - RenderBundle::No
+template <typename... Ts>
+DrawCallParam MakeParam(Ts... args) {
+    // Baseline param
+    DrawCallParamTuple paramTuple{Pipeline::Static, VertexBuffer::NoChange, BindGroup::NoChange,
+                                  UniformData::Static, RenderBundle::No};
+
+    unsigned int unused[] = {
+        0,  // Avoid making a 0-sized array.
+        AssignParam(std::get<Ts>(paramTuple), args)...,
     };
+    DAWN_UNUSED(unused);
 
-    enum class UniformData {
-        Static,   // Don't update per-draw uniform data.
-        Dynamic,  // Update the per-draw uniform data once per frame.
+    return DrawCallParam{
+        std::get<Pipeline>(paramTuple),     std::get<VertexBuffer>(paramTuple),
+        std::get<BindGroup>(paramTuple),    std::get<UniformData>(paramTuple),
+        std::get<RenderBundle>(paramTuple),
     };
+}
 
-    enum class BindGroup {
-        NoChange,   // Use one bind group for all draws.
-        Redundant,  // Use the same bind group, but redundantly set it.
-        NoReuse,    // Create a new bind group every time.
-        Multiple,   // Use multiple static bind groups.
-        Dynamic,    // Use bind groups with dynamic offsets.
-    };
+struct DrawCallParamForTest : AdapterTestParam {
+    DrawCallParamForTest(const AdapterTestParam& backendParam, DrawCallParam param)
+        : AdapterTestParam(backendParam), param(param) {}
+    DrawCallParam param;
+};
 
-    enum class VertexBuffer {
-        NoChange,  // Use one vertex buffer for all draws.
-        Multiple,  // Use multiple static vertex buffers.
-        Dynamic,   // Switch vertex buffers between draws.
-    };
+std::ostream& operator<<(std::ostream& ostream, const DrawCallParamForTest& testParams) {
+    ostream << static_cast<const AdapterTestParam&>(testParams);
 
-    enum class RenderBundle {
-        No,   // Record commands in a render pass
-        Yes,  // Record commands in a render bundle
-    };
+    const DrawCallParam& param = testParams.param;
 
-    struct DrawCallParam {
-        Pipeline pipelineType;
-        VertexBuffer vertexBufferType;
-        BindGroup bindGroupType;
-        UniformData uniformDataType;
-        RenderBundle withRenderBundle;
-    };
-
-    using DrawCallParamTuple =
-        std::tuple<Pipeline, VertexBuffer, BindGroup, UniformData, RenderBundle>;
-
-    template <typename T>
-    unsigned int AssignParam(T& lhs, T rhs) {
-        lhs = rhs;
-        return 0u;
+    switch (param.pipelineType) {
+        case Pipeline::Static:
+            break;
+        case Pipeline::Redundant:
+            ostream << "_RedundantPipeline";
+            break;
+        case Pipeline::Dynamic:
+            ostream << "_DynamicPipeline";
+            break;
     }
 
-    // This helper function allows creating a DrawCallParam from a list of arguments
-    // without specifying all of the members. Provided members can be passed once in an arbitrary
-    // order. Unspecified members default to:
-    //  - Pipeline::Static
-    //  - VertexBuffer::NoChange
-    //  - BindGroup::NoChange
-    //  - UniformData::Static
-    //  - RenderBundle::No
-    template <typename... Ts>
-    DrawCallParam MakeParam(Ts... args) {
-        // Baseline param
-        DrawCallParamTuple paramTuple{Pipeline::Static, VertexBuffer::NoChange, BindGroup::NoChange,
-                                      UniformData::Static, RenderBundle::No};
-
-        unsigned int unused[] = {
-            0,  // Avoid making a 0-sized array.
-            AssignParam(std::get<Ts>(paramTuple), args)...,
-        };
-        DAWN_UNUSED(unused);
-
-        return DrawCallParam{
-            std::get<Pipeline>(paramTuple),     std::get<VertexBuffer>(paramTuple),
-            std::get<BindGroup>(paramTuple),    std::get<UniformData>(paramTuple),
-            std::get<RenderBundle>(paramTuple),
-        };
+    switch (param.vertexBufferType) {
+        case VertexBuffer::NoChange:
+            break;
+        case VertexBuffer::Multiple:
+            ostream << "_MultipleVertexBuffers";
+            break;
+        case VertexBuffer::Dynamic:
+            ostream << "_DynamicVertexBuffer";
     }
 
-    struct DrawCallParamForTest : AdapterTestParam {
-        DrawCallParamForTest(const AdapterTestParam& backendParam, DrawCallParam param)
-            : AdapterTestParam(backendParam), param(param) {
-        }
-        DrawCallParam param;
-    };
-
-    std::ostream& operator<<(std::ostream& ostream, const DrawCallParamForTest& testParams) {
-        ostream << static_cast<const AdapterTestParam&>(testParams);
-
-        const DrawCallParam& param = testParams.param;
-
-        switch (param.pipelineType) {
-            case Pipeline::Static:
-                break;
-            case Pipeline::Redundant:
-                ostream << "_RedundantPipeline";
-                break;
-            case Pipeline::Dynamic:
-                ostream << "_DynamicPipeline";
-                break;
-        }
-
-        switch (param.vertexBufferType) {
-            case VertexBuffer::NoChange:
-                break;
-            case VertexBuffer::Multiple:
-                ostream << "_MultipleVertexBuffers";
-                break;
-            case VertexBuffer::Dynamic:
-                ostream << "_DynamicVertexBuffer";
-        }
-
-        switch (param.bindGroupType) {
-            case BindGroup::NoChange:
-                break;
-            case BindGroup::Redundant:
-                ostream << "_RedundantBindGroups";
-                break;
-            case BindGroup::NoReuse:
-                ostream << "_NoReuseBindGroups";
-                break;
-            case BindGroup::Multiple:
-                ostream << "_MultipleBindGroups";
-                break;
-            case BindGroup::Dynamic:
-                ostream << "_DynamicBindGroup";
-                break;
-        }
-
-        switch (param.uniformDataType) {
-            case UniformData::Static:
-                break;
-            case UniformData::Dynamic:
-                ostream << "_DynamicData";
-                break;
-        }
-
-        switch (param.withRenderBundle) {
-            case RenderBundle::No:
-                break;
-            case RenderBundle::Yes:
-                ostream << "_RenderBundle";
-                break;
-        }
-
-        return ostream;
+    switch (param.bindGroupType) {
+        case BindGroup::NoChange:
+            break;
+        case BindGroup::Redundant:
+            ostream << "_RedundantBindGroups";
+            break;
+        case BindGroup::NoReuse:
+            ostream << "_NoReuseBindGroups";
+            break;
+        case BindGroup::Multiple:
+            ostream << "_MultipleBindGroups";
+            break;
+        case BindGroup::Dynamic:
+            ostream << "_DynamicBindGroup";
+            break;
     }
 
+    switch (param.uniformDataType) {
+        case UniformData::Static:
+            break;
+        case UniformData::Dynamic:
+            ostream << "_DynamicData";
+            break;
+    }
+
+    switch (param.withRenderBundle) {
+        case RenderBundle::No:
+            break;
+        case RenderBundle::Yes:
+            ostream << "_RenderBundle";
+            break;
+    }
+
+    return ostream;
+}
+
 }  // anonymous namespace
 
 // DrawCallPerf is an uber-benchmark with supports many parameterizations.
@@ -224,16 +222,13 @@
 //     the efficiency of resource transitions.
 class DrawCallPerf : public DawnPerfTestWithParams<DrawCallParamForTest> {
   public:
-    DrawCallPerf() : DawnPerfTestWithParams(kNumDraws, 3) {
-    }
+    DrawCallPerf() : DawnPerfTestWithParams(kNumDraws, 3) {}
     ~DrawCallPerf() override = default;
 
     void SetUp() override;
 
   protected:
-    DrawCallParam GetParam() const {
-        return DawnPerfTestWithParams::GetParam().param;
-    }
+    DrawCallParam GetParam() const { return DawnPerfTestWithParams::GetParam().param; }
 
     template <typename Encoder>
     void RecordRenderCommands(Encoder encoder);
diff --git a/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp b/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
index 4b99de1..b6cc974 100644
--- a/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
+++ b/src/dawn/tests/perf_tests/ShaderRobustnessPerf.cpp
@@ -19,9 +19,9 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    constexpr uint32_t kTileSize = 32u;
+constexpr uint32_t kTileSize = 32u;
 
-    const std::string& kMatMulFloatHeader = R"(
+const std::string& kMatMulFloatHeader = R"(
         struct Uniforms {
             dimAOuter : u32,
             dimInner : u32,
@@ -68,13 +68,13 @@
         let TileBOuter : u32 = 32u;
         let TileInner : u32 = 32u;)";
 
-    const std::string& kMatMulFloatSharedArray1D = R"(
+const std::string& kMatMulFloatSharedArray1D = R"(
         var<workgroup> mm_Asub : array<f32, 1024>;
         var<workgroup> mm_Bsub : array<f32, 1024>;)";
-    const std::string& kMatMulFloatSharedArray2D = R"(
+const std::string& kMatMulFloatSharedArray2D = R"(
         var<workgroup> mm_Asub : array<array<f32, 32>, 32>;
         var<workgroup> mm_Bsub : array<array<f32, 32>, 32>;)";
-    const std::string& kMatMulFloatBodyPart1 = R"(
+const std::string& kMatMulFloatBodyPart1 = R"(
         @stage(compute) @workgroup_size(8, 8, 1)
         fn main(@builtin(local_invocation_id) local_id : vec3<u32>,
                 @builtin(global_invocation_id) global_id  : vec3<u32>) {
@@ -109,7 +109,7 @@
                 for (var innerCol : u32 = 0u; innerCol < ColPerThreadA; innerCol = innerCol + 1u) {
                     let inputRow : u32 = tileRow + innerRow;
                     let inputCol : u32 = tileColA + innerCol;)";
-    const std::string& kMatMulFloatBodyPart2Array1D = R"(
+const std::string& kMatMulFloatBodyPart2Array1D = R"(
                     let index : u32 = inputRow * TileInner + inputCol;
                     mm_Asub[index] = mm_readA(globalRow + innerRow, t * TileInner + inputCol);
                 }
@@ -135,7 +135,7 @@
 
                     for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
                         ACached = mm_Asub[(tileRow + innerRow) * TileInner + k];)";
-    const std::string& kMatMulFloatBodyPart2Array2D = R"(
+const std::string& kMatMulFloatBodyPart2Array2D = R"(
                     mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, t * TileInner + inputCol);
                 }
                 }
@@ -159,7 +159,7 @@
 
                     for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
                         ACached = mm_Asub[tileRow + innerRow][k];)";
-    const std::string& kMatMulFloatBodyPart3 = R"(
+const std::string& kMatMulFloatBodyPart3 = R"(
                         for (var innerCol : u32 = 0u; innerCol < ColPerThread; innerCol = innerCol + 1u) {
                             let index : u32 = innerRow * ColPerThread + innerCol;
                             acc[index] = acc[index] + ACached * BCached[innerCol];
@@ -179,16 +179,16 @@
             }
             }
         })";
-    const std::string& kMatMulFloatOneDimensionalSharedArray =
-        kMatMulFloatHeader + kMatMulFloatSharedArray1D + kMatMulFloatBodyPart1 +
-        kMatMulFloatBodyPart2Array1D + kMatMulFloatBodyPart3;
+const std::string& kMatMulFloatOneDimensionalSharedArray =
+    kMatMulFloatHeader + kMatMulFloatSharedArray1D + kMatMulFloatBodyPart1 +
+    kMatMulFloatBodyPart2Array1D + kMatMulFloatBodyPart3;
 
-    const std::string& kMatMulFloatTwoDimensionalSharedArray =
-        kMatMulFloatHeader + kMatMulFloatSharedArray2D + kMatMulFloatBodyPart1 +
-        kMatMulFloatBodyPart2Array2D + kMatMulFloatBodyPart3;
+const std::string& kMatMulFloatTwoDimensionalSharedArray =
+    kMatMulFloatHeader + kMatMulFloatSharedArray2D + kMatMulFloatBodyPart1 +
+    kMatMulFloatBodyPart2Array2D + kMatMulFloatBodyPart3;
 
-    // The vec4 version requires that dimInner and dimBOuter are divisible by 4.
-    const std::string& kMatMulVec4Header = R"(
+// The vec4 version requires that dimInner and dimBOuter are divisible by 4.
+const std::string& kMatMulVec4Header = R"(
         struct Uniforms {
             dimAOuter : u32,
             dimInner : u32,
@@ -233,13 +233,13 @@
         let ColPerThread : u32 = 4u;
         let TileOuter : u32 = 32u;
         let TileInner : u32 = 32u;)";
-    const std::string& kMatMulVec4SharedArray1D = R"(
+const std::string& kMatMulVec4SharedArray1D = R"(
         var<workgroup> mm_Asub : array<vec4<f32>, 256>;
         var<workgroup> mm_Bsub : array<vec4<f32>, 256>;)";
-    const std::string& kMatMulVec4SharedArray2D = R"(
+const std::string& kMatMulVec4SharedArray2D = R"(
         var<workgroup> mm_Asub : array<array<vec4<f32>, 8>, 32>;
         var<workgroup> mm_Bsub : array<array<vec4<f32>, 8>, 32>;)";
-    const std::string& kMatMulVec4BodyPart1 = R"(
+const std::string& kMatMulVec4BodyPart1 = R"(
         @stage(compute) @workgroup_size(8, 8, 1)
         fn main(@builtin(local_invocation_id) local_id : vec3<u32>,
                 @builtin(global_invocation_id) global_id  : vec3<u32>) {
@@ -272,7 +272,7 @@
                 for (var innerRow : u32 = 0u; innerRow < RowPerThread; innerRow = innerRow + 1u) {
                     let inputRow : u32 = tileRow + innerRow;
                     let inputCol : u32 = tileCol;)";
-    const std::string& kMatMulVec4BodyPart2Array1D = R"(
+const std::string& kMatMulVec4BodyPart2Array1D = R"(
                     let index : u32 = inputRow * TileInner / ColPerThread + inputCol;
                     mm_Asub[index] = mm_readA(globalRow + innerRow, globalColA);
                 }
@@ -297,7 +297,7 @@
 
                     for (var i : u32 = 0u; i < RowPerThread; i = i + 1u) {
                         ACached = mm_Asub[(tileRow + i) * (TileInner / ColPerThread) + k];)";
-    const std::string& kMatMulVec4BodyPart2Array2D = R"(
+const std::string& kMatMulVec4BodyPart2Array2D = R"(
                     mm_Asub[inputRow][inputCol] = mm_readA(globalRow + innerRow, globalColA);
                 }
                 globalColA = globalColA + TileInner / ColPerThread;
@@ -320,7 +320,7 @@
 
                     for (var i : u32 = 0u; i < RowPerThread; i = i + 1u) {
                         ACached = mm_Asub[tileRow + i][k];)";
-    const std::string& kMatMulVec4BodyPart3 = R"(
+const std::string& kMatMulVec4BodyPart3 = R"(
                         acc[i] = BCached[0] * ACached.x + acc[i];
                         acc[i] = BCached[1] * ACached.y + acc[i];
                         acc[i] = BCached[2] * ACached.z + acc[i];
@@ -338,45 +338,45 @@
             }
         })";
 
-    const std::string& kMatMulVec4OneDimensionalSharedArray =
-        kMatMulVec4Header + kMatMulVec4SharedArray1D + kMatMulVec4BodyPart1 +
-        kMatMulVec4BodyPart2Array1D + kMatMulVec4BodyPart3;
+const std::string& kMatMulVec4OneDimensionalSharedArray =
+    kMatMulVec4Header + kMatMulVec4SharedArray1D + kMatMulVec4BodyPart1 +
+    kMatMulVec4BodyPart2Array1D + kMatMulVec4BodyPart3;
 
-    const std::string& kMatMulVec4TwoDimensionalSharedArray =
-        kMatMulVec4Header + kMatMulVec4SharedArray2D + kMatMulVec4BodyPart1 +
-        kMatMulVec4BodyPart2Array2D + kMatMulVec4BodyPart3;
+const std::string& kMatMulVec4TwoDimensionalSharedArray =
+    kMatMulVec4Header + kMatMulVec4SharedArray2D + kMatMulVec4BodyPart1 +
+    kMatMulVec4BodyPart2Array2D + kMatMulVec4BodyPart3;
 
-    constexpr unsigned int kNumIterations = 50;
+constexpr unsigned int kNumIterations = 50;
 
-    enum class MatMulMethod {
-        MatMulFloatOneDimSharedArray,
-        MatMulFloatTwoDimSharedArray,
-        MatMulVec4OneDimSharedArray,
-        MatMulVec4TwoDimSharedArray
-    };
+enum class MatMulMethod {
+    MatMulFloatOneDimSharedArray,
+    MatMulFloatTwoDimSharedArray,
+    MatMulVec4OneDimSharedArray,
+    MatMulVec4TwoDimSharedArray
+};
 
-    std::ostream& operator<<(std::ostream& ostream, const MatMulMethod& matMulMethod) {
-        switch (matMulMethod) {
-            case MatMulMethod::MatMulFloatOneDimSharedArray:
-                ostream << "MatMulFloatOneDimSharedArray";
-                break;
-            case MatMulMethod::MatMulFloatTwoDimSharedArray:
-                ostream << "MatMulFloatTwoDimSharedArray";
-                break;
-            case MatMulMethod::MatMulVec4OneDimSharedArray:
-                ostream << "MatMulVec4OneDimSharedArray";
-                break;
-            case MatMulMethod::MatMulVec4TwoDimSharedArray:
-                ostream << "MatMulVec4TwoDimSharedArray";
-                break;
-        }
-        return ostream;
+std::ostream& operator<<(std::ostream& ostream, const MatMulMethod& matMulMethod) {
+    switch (matMulMethod) {
+        case MatMulMethod::MatMulFloatOneDimSharedArray:
+            ostream << "MatMulFloatOneDimSharedArray";
+            break;
+        case MatMulMethod::MatMulFloatTwoDimSharedArray:
+            ostream << "MatMulFloatTwoDimSharedArray";
+            break;
+        case MatMulMethod::MatMulVec4OneDimSharedArray:
+            ostream << "MatMulVec4OneDimSharedArray";
+            break;
+        case MatMulMethod::MatMulVec4TwoDimSharedArray:
+            ostream << "MatMulVec4TwoDimSharedArray";
+            break;
     }
+    return ostream;
+}
 
-    using DimAOuter = uint32_t;
-    using DimInner = uint32_t;
-    using DimBOuter = uint32_t;
-    DAWN_TEST_PARAM_STRUCT(ShaderRobustnessParams, MatMulMethod, DimAOuter, DimInner, DimBOuter);
+using DimAOuter = uint32_t;
+using DimInner = uint32_t;
+using DimBOuter = uint32_t;
+DAWN_TEST_PARAM_STRUCT(ShaderRobustnessParams, MatMulMethod, DimAOuter, DimInner, DimBOuter);
 
 }  // namespace
 
@@ -388,8 +388,7 @@
         : DawnPerfTestWithParams(kNumIterations, 1),
           mDimAOuter(GetParam().mDimAOuter),
           mDimInner(GetParam().mDimInner),
-          mDimBOuter(GetParam().mDimBOuter) {
-    }
+          mDimBOuter(GetParam().mDimBOuter) {}
     ~ShaderRobustnessPerf() override = default;
 
     void SetUp() override;
diff --git a/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp b/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
index b70c68b..33ae07e 100644
--- a/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
+++ b/src/dawn/tests/perf_tests/SubresourceTrackingPerf.cpp
@@ -23,8 +23,7 @@
                               uint32_t mipLevelCountIn)
         : AdapterTestParam(param),
           arrayLayerCount(arrayLayerCountIn),
-          mipLevelCount(mipLevelCountIn) {
-    }
+          mipLevelCount(mipLevelCountIn) {}
     uint32_t arrayLayerCount;
     uint32_t mipLevelCount;
 };
@@ -44,8 +43,7 @@
   public:
     static constexpr unsigned int kNumIterations = 50;
 
-    SubresourceTrackingPerf() : DawnPerfTestWithParams(kNumIterations, 1) {
-    }
+    SubresourceTrackingPerf() : DawnPerfTestWithParams(kNumIterations, 1) {}
     ~SubresourceTrackingPerf() override = default;
 
     void SetUp() override {
diff --git a/src/dawn/tests/unittests/AsyncTaskTests.cpp b/src/dawn/tests/unittests/AsyncTaskTests.cpp
index 7ebde3e..a61baa9 100644
--- a/src/dawn/tests/unittests/AsyncTaskTests.cpp
+++ b/src/dawn/tests/unittests/AsyncTaskTests.cpp
@@ -29,38 +29,38 @@
 
 namespace {
 
-    struct SimpleTaskResult {
-        uint32_t id;
-    };
+struct SimpleTaskResult {
+    uint32_t id;
+};
 
-    // A thread-safe queue that stores the task results.
-    class ConcurrentTaskResultQueue : public NonCopyable {
-      public:
-        void AddResult(std::unique_ptr<SimpleTaskResult> result) {
-            std::lock_guard<std::mutex> lock(mMutex);
-            mTaskResults.push_back(std::move(result));
-        }
-
-        std::vector<std::unique_ptr<SimpleTaskResult>> GetAllResults() {
-            std::vector<std::unique_ptr<SimpleTaskResult>> outputResults;
-            {
-                std::lock_guard<std::mutex> lock(mMutex);
-                outputResults.swap(mTaskResults);
-            }
-            return outputResults;
-        }
-
-      private:
-        std::mutex mMutex;
-        std::vector<std::unique_ptr<SimpleTaskResult>> mTaskResults;
-    };
-
-    void DoTask(ConcurrentTaskResultQueue* resultQueue, uint32_t id) {
-        std::unique_ptr<SimpleTaskResult> result = std::make_unique<SimpleTaskResult>();
-        result->id = id;
-        resultQueue->AddResult(std::move(result));
+// A thread-safe queue that stores the task results.
+class ConcurrentTaskResultQueue : public NonCopyable {
+  public:
+    void AddResult(std::unique_ptr<SimpleTaskResult> result) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        mTaskResults.push_back(std::move(result));
     }
 
+    std::vector<std::unique_ptr<SimpleTaskResult>> GetAllResults() {
+        std::vector<std::unique_ptr<SimpleTaskResult>> outputResults;
+        {
+            std::lock_guard<std::mutex> lock(mMutex);
+            outputResults.swap(mTaskResults);
+        }
+        return outputResults;
+    }
+
+  private:
+    std::mutex mMutex;
+    std::vector<std::unique_ptr<SimpleTaskResult>> mTaskResults;
+};
+
+void DoTask(ConcurrentTaskResultQueue* resultQueue, uint32_t id) {
+    std::unique_ptr<SimpleTaskResult> result = std::make_unique<SimpleTaskResult>();
+    result->id = id;
+    resultQueue->AddResult(std::move(result));
+}
+
 }  // anonymous namespace
 
 class AsyncTaskTest : public testing::Test {};
diff --git a/src/dawn/tests/unittests/BuddyAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
index 2c76322..ad716c3 100644
--- a/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/BuddyAllocatorTests.cpp
@@ -19,313 +19,313 @@
 
 namespace dawn::native {
 
-    constexpr uint64_t BuddyAllocator::kInvalidOffset;
+constexpr uint64_t BuddyAllocator::kInvalidOffset;
 
-    // Verify the buddy allocator with a basic test.
-    TEST(BuddyAllocatorTests, SingleBlock) {
-        // After one 32 byte allocation:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               A              |
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
+// Verify the buddy allocator with a basic test.
+TEST(BuddyAllocatorTests, SingleBlock) {
+    // After one 32 byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               A              |
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Check that we cannot allocate an oversized block.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
+
+    // Check that we cannot allocate a zero sized block.
+    ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset);
+
+    // Allocate the block.
+    uint64_t blockOffset = allocator.Allocate(maxBlockSize);
+    ASSERT_EQ(blockOffset, 0u);
+
+    // Check that we are full.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Deallocate the block.
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify multiple allocations succeed using a buddy allocator.
+TEST(BuddyAllocatorTests, MultipleBlocks) {
+    // Fill every level in the allocator (order-n = 2^n)
+    const uint64_t maxBlockSize = (1ull << 16);
+    for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) {
         BuddyAllocator allocator(maxBlockSize);
 
-        // Check that we cannot allocate a oversized block.
-        ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
-
-        // Check that we cannot allocate a zero sized block.
-        ASSERT_EQ(allocator.Allocate(0u), BuddyAllocator::kInvalidOffset);
-
-        // Allocate the block.
-        uint64_t blockOffset = allocator.Allocate(maxBlockSize);
-        ASSERT_EQ(blockOffset, 0u);
-
-        // Check that we are full.
-        ASSERT_EQ(allocator.Allocate(maxBlockSize), BuddyAllocator::kInvalidOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
-
-        // Deallocate the block.
-        allocator.Deallocate(blockOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-    }
-
-    // Verify multiple allocations succeeds using a buddy allocator.
-    TEST(BuddyAllocatorTests, MultipleBlocks) {
-        // Fill every level in the allocator (order-n = 2^n)
-        const uint64_t maxBlockSize = (1ull << 16);
-        for (uint64_t order = 1; (1ull << order) <= maxBlockSize; order++) {
-            BuddyAllocator allocator(maxBlockSize);
-
-            uint64_t blockSize = (1ull << order);
-            for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) {
-                ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki);
-            }
+        uint64_t blockSize = (1ull << order);
+        for (uint32_t blocki = 0; blocki < (maxBlockSize / blockSize); blocki++) {
+            ASSERT_EQ(allocator.Allocate(blockSize), blockSize * blocki);
         }
     }
+}
 
-    // Verify that a single allocation succeeds using a buddy allocator.
-    TEST(BuddyAllocatorTests, SingleSplitBlock) {
-        //  After one 8 byte allocation:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       F      |        S - split
-        //                 --------------------------------        F - free
-        //      2       8  |   A   |   F   |       |      |        A - allocated
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
-        BuddyAllocator allocator(maxBlockSize);
+// Verify that a single allocation succeeds using a buddy allocator.
+TEST(BuddyAllocatorTests, SingleSplitBlock) {
+    //  After one 8 byte allocation:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       F      |        S - split
+    //                 --------------------------------        F - free
+    //      2       8  |   A   |   F   |       |      |        A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
 
-        // Allocate block (splits two blocks).
-        uint64_t blockOffset = allocator.Allocate(8);
-        ASSERT_EQ(blockOffset, 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+    // Allocate block (splits two blocks).
+    uint64_t blockOffset = allocator.Allocate(8);
+    ASSERT_EQ(blockOffset, 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        // Deallocate block (merges two blocks).
-        allocator.Deallocate(blockOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    // Deallocate block (merges two blocks).
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 
-        // Check that we cannot allocate a block that is oversized.
-        ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
+    // Check that we cannot allocate a block that is oversized.
+    ASSERT_EQ(allocator.Allocate(maxBlockSize * 2), BuddyAllocator::kInvalidOffset);
 
-        // Re-allocate the largest block allowed after merging.
-        blockOffset = allocator.Allocate(maxBlockSize);
-        ASSERT_EQ(blockOffset, 0u);
+    // Re-allocate the largest block allowed after merging.
+    blockOffset = allocator.Allocate(maxBlockSize);
+    ASSERT_EQ(blockOffset, 0u);
 
-        allocator.Deallocate(blockOffset);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    allocator.Deallocate(blockOffset);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify that multiple allocated blocks can be removed from the free-list.
+TEST(BuddyAllocatorTests, MultipleSplitBlocks) {
+    //  After four 16 byte allocations:
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       S      |        S - split
+    //                 --------------------------------        F - free
+    //      2       8  |   Aa  |   Ab  |  Ac  |   Ad  |        A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Populates the free-list with four blocks at Level2.
+
+    // Allocate "a" block (two splits).
+    constexpr uint64_t blockSizeInBytes = 8;
+    uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetA, 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Allocate "b" block.
+    uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetB, blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Allocate "c" block (three splits).
+    uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Allocate "d" block.
+    uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes);
+    ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Deallocate "d" block.
+    // FreeList[Level2] = [BlockD] -> x
+    allocator.Deallocate(blockOffsetD);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Deallocate "b" block.
+    // FreeList[Level2] = [BlockB] -> [BlockD] -> x
+    allocator.Deallocate(blockOffsetB);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Deallocate "c" block (one merges).
+    // FreeList[Level1] = [BlockCD] -> x
+    // FreeList[Level2] = [BlockB] -> x
+    allocator.Deallocate(blockOffsetC);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
+
+    // Deallocate "a" block (two merges).
+    // FreeList[Level0] = [BlockABCD] -> x
+    allocator.Deallocate(blockOffsetA);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify the buddy allocator can handle allocations of various sizes.
+TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) {
+    //  After four Level4-to-Level1 byte then one L4 block allocations:
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               A               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       A       |               |               |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   S   |   A   |       |       |       |       |       |       |
+    //                 -----------------------------------------------------------------
+    //      4       32 | A | F |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    ASSERT_EQ(allocator.Allocate(32), 0ull);
+    ASSERT_EQ(allocator.Allocate(64), 64ull);
+    ASSERT_EQ(allocator.Allocate(128), 128ull);
+    ASSERT_EQ(allocator.Allocate(256), 256ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+
+    // Fill in the last free block.
+    ASSERT_EQ(allocator.Allocate(32), 32ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+
+    // Check if we're full.
+    ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset);
+}
+
+// Verify that very small allocations using a larger allocator work correctly.
+TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) {
+    //  After allocating four pairs of one 64 byte block and one 32 byte block.
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               S               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       S       |       S       |       F       |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   A   |   S   |   A   |   A   |   S   |   A   |       |       |
+    //                 -----------------------------------------------------------------
+    //      4       32 |   |   | A | A |   |   |   |   | A | A |   |   |   |   |   |   |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    ASSERT_EQ(allocator.Allocate(64), 0ull);
+    ASSERT_EQ(allocator.Allocate(32), 64ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 128ull);
+    ASSERT_EQ(allocator.Allocate(32), 96ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 192ull);
+    ASSERT_EQ(allocator.Allocate(32), 256ull);
+
+    ASSERT_EQ(allocator.Allocate(64), 320ull);
+    ASSERT_EQ(allocator.Allocate(32), 288ull);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
+
+// Verify the buddy allocator can deal with bad fragmentation.
+TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) {
+    //  Allocate every leaf then de-allocate every other of those allocations.
+    //
+    //  Level          -----------------------------------------------------------------
+    //      0      512 |                               S                               |
+    //                 -----------------------------------------------------------------
+    //      1      256 |               S               |               S               |
+    //                 -----------------------------------------------------------------
+    //      2      128 |       S       |       S       |        S       |        S     |
+    //                 -----------------------------------------------------------------
+    //      3       64 |   S   |   S   |   S   |   S   |   S   |   S   |   S   |   S   |
+    //                 -----------------------------------------------------------------
+    //      4       32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F |
+    //                 -----------------------------------------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 512;
+    BuddyAllocator allocator(maxBlockSize);
+
+    // Allocate leaf blocks
+    constexpr uint64_t minBlockSizeInBytes = 32;
+    std::vector<uint64_t> blockOffsets;
+    for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) {
+        blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes));
     }
 
-    // Verify that a multiple allocated blocks can be removed in the free-list.
-    TEST(BuddyAllocatorTests, MultipleSplitBlocks) {
-        //  After four 16 byte allocations:
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       S      |        S - split
-        //                 --------------------------------        F - free
-        //      2       8  |   Aa  |   Ab  |  Ac  |   Ad  |        A - allocated
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
-        BuddyAllocator allocator(maxBlockSize);
-
-        // Populates the free-list with four blocks at Level2.
-
-        // Allocate "a" block (two splits).
-        constexpr uint64_t blockSizeInBytes = 8;
-        uint64_t blockOffsetA = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetA, 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Allocate "b" block.
-        uint64_t blockOffsetB = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetB, blockSizeInBytes);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Allocate "c" block (three splits).
-        uint64_t blockOffsetC = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetC, blockOffsetB + blockSizeInBytes);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Allocate "d" block.
-        uint64_t blockOffsetD = allocator.Allocate(blockSizeInBytes);
-        ASSERT_EQ(blockOffsetD, blockOffsetC + blockSizeInBytes);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
-
-        // Deallocate "d" block.
-        // FreeList[Level2] = [BlockD] -> x
-        allocator.Deallocate(blockOffsetD);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Deallocate "b" block.
-        // FreeList[Level2] = [BlockB] -> [BlockD] -> x
-        allocator.Deallocate(blockOffsetB);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Deallocate "c" block (one merges).
-        // FreeList[Level1] = [BlockCD] -> x
-        // FreeList[Level2] = [BlockB] -> x
-        allocator.Deallocate(blockOffsetC);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Deallocate "a" block (two merges).
-        // FreeList[Level0] = [BlockABCD] -> x
-        allocator.Deallocate(blockOffsetA);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    // Free every other leaf block.
+    for (size_t count = 1; count < blockOffsets.size(); count += 2) {
+        allocator.Deallocate(blockOffsets[count]);
     }
 
-    // Verify the buddy allocator can handle allocations of various sizes.
-    TEST(BuddyAllocatorTests, MultipleSplitBlockIncreasingSize) {
-        //  After four Level4-to-Level1 byte then one L4 block allocations:
-        //
-        //  Level          -----------------------------------------------------------------
-        //      0      512 |                               S                               |
-        //                 -----------------------------------------------------------------
-        //      1      256 |               S               |               A               |
-        //                 -----------------------------------------------------------------
-        //      2      128 |       S       |       A       |               |               |
-        //                 -----------------------------------------------------------------
-        //      3       64 |   S   |   A   |       |       |       |       |       |       |
-        //                 -----------------------------------------------------------------
-        //      4       32 | A | F |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
-        //                 -----------------------------------------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 512;
-        BuddyAllocator allocator(maxBlockSize);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u);
+}
 
-        ASSERT_EQ(allocator.Allocate(32), 0ull);
-        ASSERT_EQ(allocator.Allocate(64), 64ull);
-        ASSERT_EQ(allocator.Allocate(128), 128ull);
-        ASSERT_EQ(allocator.Allocate(256), 256ull);
+// Verify the buddy allocator can deal with multiple allocations with mixed alignments.
+TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
+    //  After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8
+    //  byte alignment.
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       S      |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   F   |  Ab   |  Ac  |       A - allocated
+    //                 --------------------------------
+    //
+    BuddyAllocator allocator(32);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+    // Allocate Aa (two splits).
+    ASSERT_EQ(allocator.Allocate(8, 16), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        // Fill in the last free block.
-        ASSERT_EQ(allocator.Allocate(32), 32ull);
+    // Allocate Ab (skip Aa buddy due to alignment and perform another split).
+    ASSERT_EQ(allocator.Allocate(8, 16), 16u);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        // Check if we're full.
-        ASSERT_EQ(allocator.Allocate(32), BuddyAllocator::kInvalidOffset);
-    }
+    // Check that we cannot fit another.
+    ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset);
 
-    // Verify very small allocations using a larger allocator works correctly.
-    TEST(BuddyAllocatorTests, MultipleSplitBlocksVariableSizes) {
-        //  After allocating four pairs of one 64 byte block and one 32 byte block.
-        //
-        //  Level          -----------------------------------------------------------------
-        //      0      512 |                               S                               |
-        //                 -----------------------------------------------------------------
-        //      1      256 |               S               |               S               |
-        //                 -----------------------------------------------------------------
-        //      2      128 |       S       |       S       |       S       |       F       |
-        //                 -----------------------------------------------------------------
-        //      3       64 |   A   |   S   |   A   |   A   |   S   |   A   |       |       |
-        //                 -----------------------------------------------------------------
-        //      4       32 |   |   | A | A |   |   |   |   | A | A |   |   |   |   |   |   |
-        //                 -----------------------------------------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 512;
-        BuddyAllocator allocator(maxBlockSize);
+    // Allocate Ac (zero splits and Ab's buddy is now the first free block).
+    ASSERT_EQ(allocator.Allocate(8, 8), 24u);
 
-        ASSERT_EQ(allocator.Allocate(64), 0ull);
-        ASSERT_EQ(allocator.Allocate(32), 64ull);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
+}
 
-        ASSERT_EQ(allocator.Allocate(64), 128ull);
-        ASSERT_EQ(allocator.Allocate(32), 96ull);
+// Verify the buddy allocator can deal with multiple allocations with equal alignments.
+TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
+    //  After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4
+    //  byte alignment.
+    //
+    //  Level          --------------------------------
+    //      0       32 |               S              |
+    //                 --------------------------------
+    //      1       16 |       S       |       Ac     |       S - split
+    //                 --------------------------------       F - free
+    //      2       8  |   Aa  |   Ab  |              |       A - allocated
+    //                 --------------------------------
+    //
+    constexpr uint64_t maxBlockSize = 32;
+    constexpr uint64_t alignment = 4;
+    BuddyAllocator allocator(maxBlockSize);
 
-        ASSERT_EQ(allocator.Allocate(64), 192ull);
-        ASSERT_EQ(allocator.Allocate(32), 256ull);
+    // Allocate block Aa (two splits)
+    ASSERT_EQ(allocator.Allocate(8, alignment), 0u);
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
 
-        ASSERT_EQ(allocator.Allocate(64), 320ull);
-        ASSERT_EQ(allocator.Allocate(32), 288ull);
+    // Allocate block Ab (Aa's buddy)
+    ASSERT_EQ(allocator.Allocate(8, alignment), 8u);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-    }
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
 
-    // Verify the buddy allocator can deal with bad fragmentation.
-    TEST(BuddyAllocatorTests, MultipleSplitBlocksInterleaved) {
-        //  Allocate every leaf then de-allocate every other of those allocations.
-        //
-        //  Level          -----------------------------------------------------------------
-        //      0      512 |                               S                               |
-        //                 -----------------------------------------------------------------
-        //      1      256 |               S               |               S               |
-        //                 -----------------------------------------------------------------
-        //      2      128 |       S       |       S       |        S       |        S     |
-        //                 -----------------------------------------------------------------
-        //      3       64 |   S   |   S   |   S   |   S   |   S   |   S   |   S   |   S   |
-        //                 -----------------------------------------------------------------
-        //      4       32 | A | F | A | F | A | F | A | F | A | F | A | F | A | F | A | F |
-        //                 -----------------------------------------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 512;
-        BuddyAllocator allocator(maxBlockSize);
+    // Check that we can still allocate Ac.
+    ASSERT_EQ(allocator.Allocate(16, alignment), 16ull);
 
-        // Allocate leaf blocks
-        constexpr uint64_t minBlockSizeInBytes = 32;
-        std::vector<uint64_t> blockOffsets;
-        for (uint64_t i = 0; i < maxBlockSize / minBlockSizeInBytes; i++) {
-            blockOffsets.push_back(allocator.Allocate(minBlockSizeInBytes));
-        }
-
-        // Free every other leaf block.
-        for (size_t count = 1; count < blockOffsets.size(); count += 2) {
-            allocator.Deallocate(blockOffsets[count]);
-        }
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 8u);
-    }
-
-    // Verify the buddy allocator can deal with multiple allocations with mixed alignments.
-    TEST(BuddyAllocatorTests, SameSizeVariousAlignment) {
-        //  After two 8 byte allocations with 16 byte alignment then one 8 byte allocation with 8
-        //  byte alignment.
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       S      |       S - split
-        //                 --------------------------------       F - free
-        //      2       8  |   Aa  |   F   |  Ab   |  Ac  |       A - allocated
-        //                 --------------------------------
-        //
-        BuddyAllocator allocator(32);
-
-        // Allocate Aa (two splits).
-        ASSERT_EQ(allocator.Allocate(8, 16), 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Allocate Ab (skip Aa buddy due to alignment and perform another split).
-        ASSERT_EQ(allocator.Allocate(8, 16), 16u);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Check that we cannot fit another.
-        ASSERT_EQ(allocator.Allocate(8, 16), BuddyAllocator::kInvalidOffset);
-
-        // Allocate Ac (zero splits and Ab's buddy is now the first free block).
-        ASSERT_EQ(allocator.Allocate(8, 8), 24u);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-    }
-
-    // Verify the buddy allocator can deal with multiple allocations with equal alignments.
-    TEST(BuddyAllocatorTests, VariousSizeSameAlignment) {
-        //  After two 8 byte allocations with 4 byte alignment then one 16 byte allocation with 4
-        //  byte alignment.
-        //
-        //  Level          --------------------------------
-        //      0       32 |               S              |
-        //                 --------------------------------
-        //      1       16 |       S       |       Ac     |       S - split
-        //                 --------------------------------       F - free
-        //      2       8  |   Aa  |   Ab  |              |       A - allocated
-        //                 --------------------------------
-        //
-        constexpr uint64_t maxBlockSize = 32;
-        constexpr uint64_t alignment = 4;
-        BuddyAllocator allocator(maxBlockSize);
-
-        // Allocate block Aa (two splits)
-        ASSERT_EQ(allocator.Allocate(8, alignment), 0u);
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 2u);
-
-        // Allocate block Ab (Aa's buddy)
-        ASSERT_EQ(allocator.Allocate(8, alignment), 8u);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 1u);
-
-        // Check that we can still allocate Ac.
-        ASSERT_EQ(allocator.Allocate(16, alignment), 16ull);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
-    }
+    ASSERT_EQ(allocator.ComputeTotalNumOfFreeBlocksForTesting(), 0u);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
index c70af25..86ed75b 100644
--- a/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/BuddyMemoryAllocatorTests.cpp
@@ -17,448 +17,442 @@
 #include <utility>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/native/BuddyMemoryAllocator.h"
 #include "dawn/native/PooledResourceMemoryAllocator.h"
 #include "dawn/native/ResourceHeapAllocator.h"
+#include "gtest/gtest.h"
 
 namespace dawn::native {
 
-    class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
-      public:
-        ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(
-            uint64_t size) override {
-            return std::make_unique<ResourceHeapBase>();
-        }
-        void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {
-        }
-    };
+class PlaceholderResourceHeapAllocator : public ResourceHeapAllocator {
+  public:
+    ResultOrError<std::unique_ptr<ResourceHeapBase>> AllocateResourceHeap(uint64_t size) override {
+        return std::make_unique<ResourceHeapBase>();
+    }
+    void DeallocateResourceHeap(std::unique_ptr<ResourceHeapBase> allocation) override {}
+};
 
-    class PlaceholderBuddyResourceAllocator {
-      public:
-        PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
-            : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {
-        }
+class PlaceholderBuddyResourceAllocator {
+  public:
+    PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize, uint64_t memorySize)
+        : mAllocator(maxBlockSize, memorySize, &mHeapAllocator) {}
 
-        PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize,
-                                          uint64_t memorySize,
-                                          ResourceHeapAllocator* heapAllocator)
-            : mAllocator(maxBlockSize, memorySize, heapAllocator) {
-        }
+    PlaceholderBuddyResourceAllocator(uint64_t maxBlockSize,
+                                      uint64_t memorySize,
+                                      ResourceHeapAllocator* heapAllocator)
+        : mAllocator(maxBlockSize, memorySize, heapAllocator) {}
 
-        ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) {
-            ResultOrError<ResourceMemoryAllocation> result =
-                mAllocator.Allocate(allocationSize, alignment);
-            return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{};
-        }
-
-        void Deallocate(ResourceMemoryAllocation& allocation) {
-            mAllocator.Deallocate(allocation);
-        }
-
-        uint64_t ComputeTotalNumOfHeapsForTesting() const {
-            return mAllocator.ComputeTotalNumOfHeapsForTesting();
-        }
-
-      private:
-        PlaceholderResourceHeapAllocator mHeapAllocator;
-        BuddyMemoryAllocator mAllocator;
-    };
-
-    // Verify a single resource allocation in a single heap.
-    TEST(BuddyMemoryAllocatorTests, SingleHeap) {
-        // After one 128 byte resource allocation:
-        //
-        // max block size -> ---------------------------
-        //                   |          A1/H0          |       Hi - Heap at index i
-        // max heap size  -> ---------------------------       An - Resource allocation n
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = heapSize;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
-
-        // Cannot allocate greater than heap size.
-        ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        // Allocate one 128 byte allocation (same size as heap).
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(128);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-
-        // Cannot allocate when allocator is full.
-        invalidAllocation = allocator.Allocate(128);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);
+    ResourceMemoryAllocation Allocate(uint64_t allocationSize, uint64_t alignment = 1) {
+        ResultOrError<ResourceMemoryAllocation> result =
+            mAllocator.Allocate(allocationSize, alignment);
+        return (result.IsSuccess()) ? result.AcquireSuccess() : ResourceMemoryAllocation{};
     }
 
-    // Verify that multiple allocation are created in separate heaps.
-    TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
-        // After two 128 byte resource allocations:
-        //
-        // max block size -> ---------------------------
-        //                   |                         |       Hi - Heap at index i
-        // max heap size  -> ---------------------------       An - Resource allocation n
-        //                   |   A1/H0    |    A2/H1   |
-        //                   ---------------------------
-        //
-        constexpr uint64_t maxBlockSize = 256;
-        constexpr uint64_t heapSize = 128;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    void Deallocate(ResourceMemoryAllocation& allocation) { mAllocator.Deallocate(allocation); }
 
-        // Cannot allocate greater than heap size.
-        ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        // Cannot allocate greater than max block size.
-        invalidAllocation = allocator.Allocate(maxBlockSize * 2);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
-
-        // Allocate two 128 byte allocations.
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // First allocation creates first heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // Second allocation creates second heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        // Deallocate both allocations
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
-
-        allocator.Deallocate(allocation2);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+    uint64_t ComputeTotalNumOfHeapsForTesting() const {
+        return mAllocator.ComputeTotalNumOfHeapsForTesting();
     }
 
-    // Verify multiple sub-allocations can re-use heaps.
-    TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
-        // After two 64 byte allocations with 128 byte heaps.
-        //
-        // max block size -> ---------------------------
-        //                   |                         |       Hi - Heap at index i
-        // max heap size  -> ---------------------------       An - Resource allocation n
-        //                   |     H0     |     H1     |
-        //                   ---------------------------
-        //                   |  A1 |  A2  |  A3 |      |
-        //                   ---------------------------
-        //
-        constexpr uint64_t maxBlockSize = 256;
-        constexpr uint64_t heapSize = 128;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+  private:
+    PlaceholderResourceHeapAllocator mHeapAllocator;
+    BuddyMemoryAllocator mAllocator;
+};
 
-        // Allocate two 64 byte sub-allocations.
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+// Verify a single resource allocation in a single heap.
+TEST(BuddyMemoryAllocatorTests, SingleHeap) {
+    // After one 128 byte resource allocation:
+    //
+    // max block size -> ---------------------------
+    //                   |          A1/H0          |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = heapSize;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
 
-        // First sub-allocation creates first heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    // Cannot allocate greater than heap size.
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
 
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    // Allocate one 128 byte allocation (same size as heap).
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(128);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
 
-        // Second allocation re-uses first heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-        ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
 
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    // Cannot allocate when allocator is full.
+    invalidAllocation = allocator.Allocate(128);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
 
-        // Third allocation creates second heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap());
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);
+}
 
-        // Deallocate all allocations in reverse order.
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(),
-                  2u);  // A2 pins H0.
+// Verify that multiple allocation are created in separate heaps.
+TEST(BuddyMemoryAllocatorTests, MultipleHeaps) {
+    // After two 128 byte resource allocations:
+    //
+    // max block size -> ---------------------------
+    //                   |                         |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //                   |   A1/H0    |    A2/H1   |
+    //                   ---------------------------
+    //
+    constexpr uint64_t maxBlockSize = 256;
+    constexpr uint64_t heapSize = 128;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
 
-        allocator.Deallocate(allocation2);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+    // Cannot allocate greater than heap size.
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(heapSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
 
-        allocator.Deallocate(allocation3);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+    // Cannot allocate greater than max block size.
+    invalidAllocation = allocator.Allocate(maxBlockSize * 2);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+
+    // Allocate two 128 byte allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // First allocation creates first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Second allocation creates second heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    // Deallocate both allocations
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify multiple sub-allocations can re-use heaps.
+TEST(BuddyMemoryAllocatorTests, MultipleSplitHeaps) {
+    // After two 64 byte allocations with 128 byte heaps.
+    //
+    // max block size -> ---------------------------
+    //                   |                         |       Hi - Heap at index i
+    // max heap size  -> ---------------------------       An - Resource allocation n
+    //                   |     H0     |     H1     |
+    //                   ---------------------------
+    //                   |  A1 |  A2  |  A3 |      |
+    //                   ---------------------------
+    //
+    constexpr uint64_t maxBlockSize = 256;
+    constexpr uint64_t heapSize = 128;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Allocate two 64 byte sub-allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // First sub-allocation creates first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, heapSize / 2);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Second allocation re-uses first heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(heapSize / 2);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, heapSize);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // Third allocation creates second heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    // Deallocate all allocations in reverse order.
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(),
+              2u);  // A2 pins H0.
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H0
+
+    allocator.Deallocate(allocation3);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify resource sub-allocation of various sizes over multiple heaps.
+TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
+    // After three 64 byte allocations and two 128 byte allocations.
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |    A3/H1   |      H2     |    A5/H3    |
+    //                   -------------------------------------------------------
+    //                   |  A1 |  A2  |            |   A4  |     |             |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    // Allocate two 64-byte allocations.
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetOffset(), 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
+    ASSERT_EQ(allocation2.GetOffset(), 64u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // A1 and A2 share H0
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(128);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    // A3 creates and fully occupies a new heap.
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(64);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation4.GetOffset(), 0u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+
+    // R5 size forms 64 byte hole after R4.
+    ResourceMemoryAllocation allocation5 = allocator.Allocate(128);
+    ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u);
+    ASSERT_EQ(allocation5.GetOffset(), 0u);
+    ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);
+    ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap());
+
+    // Deallocate allocations in staggered order.
+    allocator.Deallocate(allocation1);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);  // A2 pins H0
+
+    allocator.Deallocate(allocation5);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);  // Released H3
+
+    allocator.Deallocate(allocation2);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);  // Released H0
+
+    allocator.Deallocate(allocation4);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H2
+
+    allocator.Deallocate(allocation3);
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+}
+
+// Verify resource sub-allocation of same sizes with various alignments.
+TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
+    // After three 64 byte and one 128 byte resource allocations.
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |     H1     |     H2     |              |
+    //                   -------------------------------------------------------
+    //                   |  A1  |     |  A2  |     |  A3  |  A4 |              |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetOffset(), 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation2.GetOffset(), 0u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u);
+    ASSERT_EQ(allocation4.GetOffset(), 64u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+}
+
+// Verify resource sub-allocation of various sizes with same alignments.
+TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
+    // After two 64 byte and two 128 byte resource allocations:
+    //
+    // max block size -> -------------------------------------------------------
+    //                   |                                                     |
+    //                   -------------------------------------------------------
+    //                   |                         |                           |
+    // max heap size  -> -------------------------------------------------------
+    //                   |     H0     |    A3/H1   |    A4/H2   |              |
+    //                   -------------------------------------------------------
+    //                   |  A1 |  A2  |            |            |              |
+    //                   -------------------------------------------------------
+    //
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    constexpr uint64_t alignment = 64;
+
+    ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment);
+    ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
+    ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+
+    ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment);
+    ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
+    ASSERT_EQ(allocation2.GetOffset(), 64u);
+    ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Reuses H0
+    ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment);
+    ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
+    ASSERT_EQ(allocation3.GetOffset(), 0u);
+    ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
+    ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
+
+    ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment);
+    ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
+    ASSERT_EQ(allocation4.GetOffset(), 0u);
+    ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+
+    ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
+    ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+}
+
+// Verify allocating a very large resource does not overflow.
+TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
+    constexpr uint64_t heapSize = 128;
+    constexpr uint64_t maxBlockSize = 512;
+    PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+
+    constexpr uint64_t largeBlock = (1ull << 63) + 1;
+    ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
+    ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+}
+
+// Verify resource heaps will be reused from a pool.
+TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
+    constexpr uint64_t kHeapSize = 128;
+    constexpr uint64_t kMaxBlockSize = 4096;
+
+    PlaceholderResourceHeapAllocator heapAllocator;
+    PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
+    PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
+
+    std::set<ResourceHeapBase*> heaps = {};
+    std::vector<ResourceMemoryAllocation> allocations = {};
+
+    constexpr uint32_t kNumOfAllocations = 100;
+
+    // Allocate |kNumOfAllocations|.
+    for (uint32_t i = 0; i < kNumOfAllocations; i++) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        heaps.insert(allocation.GetResourceHeap());
+        allocations.push_back(std::move(allocation));
     }
 
-    // Verify resource sub-allocation of various sizes over multiple heaps.
-    TEST(BuddyMemoryAllocatorTests, MultiplSplitHeapsVariableSizes) {
-        // After three 64 byte allocations and two 128 byte allocations.
-        //
-        // max block size -> -------------------------------------------------------
-        //                   |                                                     |
-        //                   -------------------------------------------------------
-        //                   |                         |                           |
-        // max heap size  -> -------------------------------------------------------
-        //                   |     H0     |    A3/H1   |      H2     |    A5/H3    |
-        //                   -------------------------------------------------------
-        //                   |  A1 |  A2  |            |   A4  |     |             |
-        //                   -------------------------------------------------------
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
 
-        // Allocate two 64-byte allocations.
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(64);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetOffset(), 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(64);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
-        ASSERT_EQ(allocation2.GetOffset(), 64u);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // A1 and A2 share H0
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-        ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(128);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
-        ASSERT_EQ(allocation3.GetOffset(), 0u);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        // A3 creates and fully occupies a new heap.
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation4 = allocator.Allocate(64);
-        ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
-        ASSERT_EQ(allocation4.GetOffset(), 0u);
-        ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
-
-        // R5 size forms 64 byte hole after R4.
-        ResourceMemoryAllocation allocation5 = allocator.Allocate(128);
-        ASSERT_EQ(allocation5.GetInfo().mBlockOffset, 384u);
-        ASSERT_EQ(allocation5.GetOffset(), 0u);
-        ASSERT_EQ(allocation5.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);
-        ASSERT_NE(allocation4.GetResourceHeap(), allocation5.GetResourceHeap());
-
-        // Deallocate allocations in staggered order.
-        allocator.Deallocate(allocation1);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 4u);  // A2 pins H0
-
-        allocator.Deallocate(allocation5);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);  // Released H3
-
-        allocator.Deallocate(allocation2);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);  // Released H0
-
-        allocator.Deallocate(allocation4);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Released H2
-
-        allocator.Deallocate(allocation3);
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 0u);  // Released H1
+    // Return the allocations to the pool.
+    for (ResourceMemoryAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
     }
 
-    // Verify resource sub-allocation of same sizes with various alignments.
-    TEST(BuddyMemoryAllocatorTests, SameSizeVariousAlignment) {
-        // After three 64 byte and one 128 byte resource allocations.
-        //
-        // max block size -> -------------------------------------------------------
-        //                   |                                                     |
-        //                   -------------------------------------------------------
-        //                   |                         |                           |
-        // max heap size  -> -------------------------------------------------------
-        //                   |     H0     |     H1     |     H2     |              |
-        //                   -------------------------------------------------------
-        //                   |  A1  |     |  A2  |     |  A3  |  A4 |              |
-        //                   -------------------------------------------------------
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size());
 
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(64, 128);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetOffset(), 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
-
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(64, 128);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 128u);
-        ASSERT_EQ(allocation2.GetOffset(), 0u);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(64, 128);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 256u);
-        ASSERT_EQ(allocation3.GetOffset(), 0u);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation4 = allocator.Allocate(64, 64);
-        ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 320u);
-        ASSERT_EQ(allocation4.GetOffset(), 64u);
-        ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_EQ(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+    // Allocate again reusing the same heaps.
+    for (uint32_t i = 0; i < kNumOfAllocations; i++) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second);
     }
 
-    // Verify resource sub-allocation of various sizes with same alignments.
-    TEST(BuddyMemoryAllocatorTests, VariousSizeSameAlignment) {
-        // After two 64 byte and two 128 byte resource allocations:
-        //
-        // max block size -> -------------------------------------------------------
-        //                   |                                                     |
-        //                   -------------------------------------------------------
-        //                   |                         |                           |
-        // max heap size  -> -------------------------------------------------------
-        //                   |     H0     |    A3/H1   |    A4/H2   |              |
-        //                   -------------------------------------------------------
-        //                   |  A1 |  A2  |            |            |              |
-        //                   -------------------------------------------------------
-        //
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+}
 
-        constexpr uint64_t alignment = 64;
+// Verify resource heaps that were reused from a pool can be destroyed.
+TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
+    constexpr uint64_t kHeapSize = 128;
+    constexpr uint64_t kMaxBlockSize = 4096;
 
-        ResourceMemoryAllocation allocation1 = allocator.Allocate(64, alignment);
-        ASSERT_EQ(allocation1.GetInfo().mBlockOffset, 0u);
-        ASSERT_EQ(allocation1.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    PlaceholderResourceHeapAllocator heapAllocator;
+    PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
+    PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);
+    std::set<ResourceHeapBase*> heaps = {};
+    std::vector<ResourceMemoryAllocation> allocations = {};
 
-        ResourceMemoryAllocation allocation2 = allocator.Allocate(64, alignment);
-        ASSERT_EQ(allocation2.GetInfo().mBlockOffset, 64u);
-        ASSERT_EQ(allocation2.GetOffset(), 64u);
-        ASSERT_EQ(allocation2.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+    // Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth
+    // of buffers. Otherwise, the heap may be reused if not full.
+    constexpr uint32_t kNumOfHeaps = 10;
 
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 1u);  // Reuses H0
-        ASSERT_EQ(allocation1.GetResourceHeap(), allocation2.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation3 = allocator.Allocate(128, alignment);
-        ASSERT_EQ(allocation3.GetInfo().mBlockOffset, 128u);
-        ASSERT_EQ(allocation3.GetOffset(), 0u);
-        ASSERT_EQ(allocation3.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 2u);
-        ASSERT_NE(allocation2.GetResourceHeap(), allocation3.GetResourceHeap());
-
-        ResourceMemoryAllocation allocation4 = allocator.Allocate(128, alignment);
-        ASSERT_EQ(allocation4.GetInfo().mBlockOffset, 256u);
-        ASSERT_EQ(allocation4.GetOffset(), 0u);
-        ASSERT_EQ(allocation4.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-
-        ASSERT_EQ(allocator.ComputeTotalNumOfHeapsForTesting(), 3u);
-        ASSERT_NE(allocation3.GetResourceHeap(), allocation4.GetResourceHeap());
+    // Allocate |kNumOfHeaps| worth.
+    while (heaps.size() < kNumOfHeaps) {
+        ResourceMemoryAllocation allocation = allocator.Allocate(4);
+        ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
+        heaps.insert(allocation.GetResourceHeap());
+        allocations.push_back(std::move(allocation));
     }
 
-    // Verify allocating a very large resource does not overflow.
-    TEST(BuddyMemoryAllocatorTests, AllocationOverflow) {
-        constexpr uint64_t heapSize = 128;
-        constexpr uint64_t maxBlockSize = 512;
-        PlaceholderBuddyResourceAllocator allocator(maxBlockSize, heapSize);
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
 
-        constexpr uint64_t largeBlock = (1ull << 63) + 1;
-        ResourceMemoryAllocation invalidAllocation = allocator.Allocate(largeBlock);
-        ASSERT_EQ(invalidAllocation.GetInfo().mMethod, AllocationMethod::kInvalid);
+    // Return the allocations to the pool.
+    for (ResourceMemoryAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
     }
 
-    // Verify resource heaps will be reused from a pool.
-    TEST(BuddyMemoryAllocatorTests, ReuseFreedHeaps) {
-        constexpr uint64_t kHeapSize = 128;
-        constexpr uint64_t kMaxBlockSize = 4096;
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps);
 
-        PlaceholderResourceHeapAllocator heapAllocator;
-        PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
-        PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
-
-        std::set<ResourceHeapBase*> heaps = {};
-        std::vector<ResourceMemoryAllocation> allocations = {};
-
-        constexpr uint32_t kNumOfAllocations = 100;
-
-        // Allocate |kNumOfAllocations|.
-        for (uint32_t i = 0; i < kNumOfAllocations; i++) {
-            ResourceMemoryAllocation allocation = allocator.Allocate(4);
-            ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-            heaps.insert(allocation.GetResourceHeap());
-            allocations.push_back(std::move(allocation));
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-
-        // Return the allocations to the pool.
-        for (ResourceMemoryAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), heaps.size());
-
-        // Allocate again reusing the same heaps.
-        for (uint32_t i = 0; i < kNumOfAllocations; i++) {
-            ResourceMemoryAllocation allocation = allocator.Allocate(4);
-            ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-            ASSERT_FALSE(heaps.insert(allocation.GetResourceHeap()).second);
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-    }
-
-    // Verify resource heaps that were reused from a pool can be destroyed.
-    TEST(BuddyMemoryAllocatorTests, DestroyHeaps) {
-        constexpr uint64_t kHeapSize = 128;
-        constexpr uint64_t kMaxBlockSize = 4096;
-
-        PlaceholderResourceHeapAllocator heapAllocator;
-        PooledResourceMemoryAllocator poolAllocator(&heapAllocator);
-        PlaceholderBuddyResourceAllocator allocator(kMaxBlockSize, kHeapSize, &poolAllocator);
-
-        std::set<ResourceHeapBase*> heaps = {};
-        std::vector<ResourceMemoryAllocation> allocations = {};
-
-        // Count by heap (vs number of allocations) to ensure there are exactly |kNumOfHeaps| worth
-        // of buffers. Otherwise, the heap may be reused if not full.
-        constexpr uint32_t kNumOfHeaps = 10;
-
-        // Allocate |kNumOfHeaps| worth.
-        while (heaps.size() < kNumOfHeaps) {
-            ResourceMemoryAllocation allocation = allocator.Allocate(4);
-            ASSERT_EQ(allocation.GetInfo().mMethod, AllocationMethod::kSubAllocated);
-            heaps.insert(allocation.GetResourceHeap());
-            allocations.push_back(std::move(allocation));
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-
-        // Return the allocations to the pool.
-        for (ResourceMemoryAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-        }
-
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), kNumOfHeaps);
-
-        // Make sure we can destroy the remaining heaps.
-        poolAllocator.DestroyPool();
-        ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
-    }
+    // Make sure we can destroy the remaining heaps.
+    poolAllocator.DestroyPool();
+    ASSERT_EQ(poolAllocator.GetPoolSizeForTesting(), 0u);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/CommandAllocatorTests.cpp b/src/dawn/tests/unittests/CommandAllocatorTests.cpp
index cea1922..2c63327 100644
--- a/src/dawn/tests/unittests/CommandAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/CommandAllocatorTests.cpp
@@ -16,493 +16,490 @@
 #include <utility>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/native/CommandAllocator.h"
+#include "gtest/gtest.h"
 
 namespace dawn::native {
 
-    // Definition of the command types used in the tests
-    enum class CommandType {
-        Draw,
-        Pipeline,
-        PushConstants,
-        Big,
-        Small,
-    };
+// Definition of the command types used in the tests
+enum class CommandType {
+    Draw,
+    Pipeline,
+    PushConstants,
+    Big,
+    Small,
+};
 
-    struct CommandDraw {
-        uint32_t first;
-        uint32_t count;
-    };
+struct CommandDraw {
+    uint32_t first;
+    uint32_t count;
+};
 
-    struct CommandPipeline {
-        uint64_t pipeline;
-        uint32_t attachmentPoint;
-    };
+struct CommandPipeline {
+    uint64_t pipeline;
+    uint32_t attachmentPoint;
+};
 
-    struct CommandPushConstants {
-        uint8_t size;
-        uint8_t offset;
-    };
+struct CommandPushConstants {
+    uint8_t size;
+    uint8_t offset;
+};
 
-    constexpr int kBigBufferSize = 65536;
+constexpr int kBigBufferSize = 65536;
 
-    struct CommandBig {
-        uint32_t buffer[kBigBufferSize];
-    };
+struct CommandBig {
+    uint32_t buffer[kBigBufferSize];
+};
 
-    struct CommandSmall {
-        uint16_t data;
-    };
+struct CommandSmall {
+    uint16_t data;
+};
 
-    // Test allocating nothing works
-    TEST(CommandAllocator, DoNothingAllocator) {
-        CommandAllocator allocator;
+// Test allocating nothing works
+TEST(CommandAllocator, DoNothingAllocator) {
+    CommandAllocator allocator;
+}
+
+// Test iterating over nothing works
+TEST(CommandAllocator, DoNothingAllocatorWithIterator) {
+    CommandAllocator allocator;
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test basic usage of allocator + iterator
+TEST(CommandAllocator, Basic) {
+    CommandAllocator allocator;
+
+    uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
+    uint32_t myAttachmentPoint = 2;
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
+        pipeline->pipeline = myPipeline;
+        pipeline->attachmentPoint = myAttachmentPoint;
+
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
     }
 
-    // Test iterating over nothing works
-    TEST(CommandAllocator, DoNothingAllocatorWithIterator) {
-        CommandAllocator allocator;
-        CommandIterator iterator(std::move(allocator));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
-
-    // Test basic usage of allocator + iterator
-    TEST(CommandAllocator, Basic) {
-        CommandAllocator allocator;
-
-        uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
-        uint32_t myAttachmentPoint = 2;
-        uint32_t myFirst = 42;
-        uint32_t myCount = 16;
-
-        {
-            CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
-            pipeline->pipeline = myPipeline;
-            pipeline->attachmentPoint = myAttachmentPoint;
-
-            CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-            draw->first = myFirst;
-            draw->count = myCount;
-        }
-
-        {
-            CommandIterator iterator(std::move(allocator));
-            CommandType type;
-
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Pipeline);
-
-            CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
-            ASSERT_EQ(pipeline->pipeline, myPipeline);
-            ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Draw);
-
-            CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-
-    // Test basic usage of allocator + iterator with data
-    TEST(CommandAllocator, BasicWithData) {
-        CommandAllocator allocator;
-
-        uint8_t mySize = 8;
-        uint8_t myOffset = 3;
-        uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54};
-
-        {
-            CommandPushConstants* pushConstants =
-                allocator.Allocate<CommandPushConstants>(CommandType::PushConstants);
-            pushConstants->size = mySize;
-            pushConstants->offset = myOffset;
-
-            uint32_t* values = allocator.AllocateData<uint32_t>(5);
-            for (size_t i = 0; i < 5; i++) {
-                values[i] = myValues[i];
-            }
-        }
-
-        {
-            CommandIterator iterator(std::move(allocator));
-            CommandType type;
-
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::PushConstants);
-
-            CommandPushConstants* pushConstants = iterator.NextCommand<CommandPushConstants>();
-            ASSERT_EQ(pushConstants->size, mySize);
-            ASSERT_EQ(pushConstants->offset, myOffset);
-
-            uint32_t* values = iterator.NextData<uint32_t>(5);
-            for (size_t i = 0; i < 5; i++) {
-                ASSERT_EQ(values[i], myValues[i]);
-            }
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-
-    // Test basic iterating several times
-    TEST(CommandAllocator, MultipleIterations) {
-        CommandAllocator allocator;
-
-        uint32_t myFirst = 42;
-        uint32_t myCount = 16;
-
-        {
-            CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-            draw->first = myFirst;
-            draw->count = myCount;
-        }
-
-        {
-            CommandIterator iterator(std::move(allocator));
-            CommandType type;
-
-            // First iteration
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Draw);
-
-            CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            // Second iteration
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Draw);
-
-            draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-    // Test large commands work
-    TEST(CommandAllocator, LargeCommands) {
-        CommandAllocator allocator;
-
-        const int kCommandCount = 5;
-
-        uint32_t count = 0;
-        for (int i = 0; i < kCommandCount; i++) {
-            CommandBig* big = allocator.Allocate<CommandBig>(CommandType::Big);
-            for (int j = 0; j < kBigBufferSize; j++) {
-                big->buffer[j] = count++;
-            }
-        }
-
+    {
         CommandIterator iterator(std::move(allocator));
         CommandType type;
-        count = 0;
-        int numCommands = 0;
-        while (iterator.NextCommandId(&type)) {
-            ASSERT_EQ(type, CommandType::Big);
 
-            CommandBig* big = iterator.NextCommand<CommandBig>();
-            for (int i = 0; i < kBigBufferSize; i++) {
-                ASSERT_EQ(big->buffer[i], count);
-                count++;
-            }
-            numCommands++;
-        }
-        ASSERT_EQ(numCommands, kCommandCount);
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
 
         iterator.MakeEmptyAsDataWasDestroyed();
     }
+}
 
-    // Test many small commands work
-    TEST(CommandAllocator, ManySmallCommands) {
-        CommandAllocator allocator;
+// Test basic usage of allocator + iterator with data
+TEST(CommandAllocator, BasicWithData) {
+    CommandAllocator allocator;
 
-        // Stay under max representable uint16_t
-        const int kCommandCount = 50000;
+    uint8_t mySize = 8;
+    uint8_t myOffset = 3;
+    uint32_t myValues[5] = {6, 42, 0xFFFFFFFF, 0, 54};
 
-        uint16_t count = 0;
-        for (int i = 0; i < kCommandCount; i++) {
-            CommandSmall* small = allocator.Allocate<CommandSmall>(CommandType::Small);
-            small->data = count++;
+    {
+        CommandPushConstants* pushConstants =
+            allocator.Allocate<CommandPushConstants>(CommandType::PushConstants);
+        pushConstants->size = mySize;
+        pushConstants->offset = myOffset;
+
+        uint32_t* values = allocator.AllocateData<uint32_t>(5);
+        for (size_t i = 0; i < 5; i++) {
+            values[i] = myValues[i];
         }
+    }
 
+    {
         CommandIterator iterator(std::move(allocator));
         CommandType type;
-        count = 0;
-        int numCommands = 0;
-        while (iterator.NextCommandId(&type)) {
-            ASSERT_EQ(type, CommandType::Small);
 
-            CommandSmall* small = iterator.NextCommand<CommandSmall>();
-            ASSERT_EQ(small->data, count);
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::PushConstants);
+
+        CommandPushConstants* pushConstants = iterator.NextCommand<CommandPushConstants>();
+        ASSERT_EQ(pushConstants->size, mySize);
+        ASSERT_EQ(pushConstants->offset, myOffset);
+
+        uint32_t* values = iterator.NextData<uint32_t>(5);
+        for (size_t i = 0; i < 5; i++) {
+            ASSERT_EQ(values[i], myValues[i]);
+        }
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+// Test basic iterating several times
+TEST(CommandAllocator, MultipleIterations) {
+    CommandAllocator allocator;
+
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        // First iteration
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        // Second iteration
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+}
+// Test large commands work
+TEST(CommandAllocator, LargeCommands) {
+    CommandAllocator allocator;
+
+    const int kCommandCount = 5;
+
+    uint32_t count = 0;
+    for (int i = 0; i < kCommandCount; i++) {
+        CommandBig* big = allocator.Allocate<CommandBig>(CommandType::Big);
+        for (int j = 0; j < kBigBufferSize; j++) {
+            big->buffer[j] = count++;
+        }
+    }
+
+    CommandIterator iterator(std::move(allocator));
+    CommandType type;
+    count = 0;
+    int numCommands = 0;
+    while (iterator.NextCommandId(&type)) {
+        ASSERT_EQ(type, CommandType::Big);
+
+        CommandBig* big = iterator.NextCommand<CommandBig>();
+        for (int i = 0; i < kBigBufferSize; i++) {
+            ASSERT_EQ(big->buffer[i], count);
             count++;
-            numCommands++;
         }
-        ASSERT_EQ(numCommands, kCommandCount);
+        numCommands++;
+    }
+    ASSERT_EQ(numCommands, kCommandCount);
+
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test many small commands work
+TEST(CommandAllocator, ManySmallCommands) {
+    CommandAllocator allocator;
+
+    // Stay under max representable uint16_t
+    const int kCommandCount = 50000;
+
+    uint16_t count = 0;
+    for (int i = 0; i < kCommandCount; i++) {
+        CommandSmall* small = allocator.Allocate<CommandSmall>(CommandType::Small);
+        small->data = count++;
+    }
+
+    CommandIterator iterator(std::move(allocator));
+    CommandType type;
+    count = 0;
+    int numCommands = 0;
+    while (iterator.NextCommandId(&type)) {
+        ASSERT_EQ(type, CommandType::Small);
+
+        CommandSmall* small = iterator.NextCommand<CommandSmall>();
+        ASSERT_EQ(small->data, count);
+        count++;
+        numCommands++;
+    }
+    ASSERT_EQ(numCommands, kCommandCount);
+
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+/*        ________
+ *       /        \
+ *       | POUIC! |
+ *       \_ ______/
+ *         v
+ *    ()_()
+ *    (O.o)
+ *    (> <)o
+ */
+
+// Test usage of iterator.Reset
+TEST(CommandAllocator, IteratorReset) {
+    CommandAllocator allocator;
+
+    uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
+    uint32_t myAttachmentPoint = 2;
+    uint32_t myFirst = 42;
+    uint32_t myCount = 16;
+
+    {
+        CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
+        pipeline->pipeline = myPipeline;
+        pipeline->attachmentPoint = myAttachmentPoint;
+
+        CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
+        draw->first = myFirst;
+        draw->count = myCount;
+    }
+
+    {
+        CommandIterator iterator(std::move(allocator));
+        CommandType type;
+
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        iterator.Reset();
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Pipeline);
+
+        pipeline = iterator.NextCommand<CommandPipeline>();
+        ASSERT_EQ(pipeline->pipeline, myPipeline);
+        ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_TRUE(hasNext);
+        ASSERT_EQ(type, CommandType::Draw);
+
+        CommandDraw* draw = iterator.NextCommand<CommandDraw>();
+        ASSERT_EQ(draw->first, myFirst);
+        ASSERT_EQ(draw->count, myCount);
+
+        hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
 
         iterator.MakeEmptyAsDataWasDestroyed();
     }
+}
 
-    /*        ________
-     *       /        \
-     *       | POUIC! |
-     *       \_ ______/
-     *         v
-     *    ()_()
-     *    (O.o)
-     *    (> <)o
-     */
-
-    // Test usage of iterator.Reset
-    TEST(CommandAllocator, IteratorReset) {
+// Test iterating empty iterators
+TEST(CommandAllocator, EmptyIterator) {
+    {
         CommandAllocator allocator;
+        CommandIterator iterator(std::move(allocator));
 
-        uint64_t myPipeline = 0xDEADBEEFBEEFDEAD;
-        uint32_t myAttachmentPoint = 2;
-        uint32_t myFirst = 42;
-        uint32_t myCount = 16;
+        CommandType type;
+        bool hasNext = iterator.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
 
-        {
+        iterator.MakeEmptyAsDataWasDestroyed();
+    }
+    {
+        CommandAllocator allocator;
+        CommandIterator iterator1(std::move(allocator));
+        CommandIterator iterator2(std::move(iterator1));
+
+        CommandType type;
+        bool hasNext = iterator2.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator1.MakeEmptyAsDataWasDestroyed();
+        iterator2.MakeEmptyAsDataWasDestroyed();
+    }
+    {
+        CommandIterator iterator1;
+        CommandIterator iterator2(std::move(iterator1));
+
+        CommandType type;
+        bool hasNext = iterator2.NextCommandId(&type);
+        ASSERT_FALSE(hasNext);
+
+        iterator1.MakeEmptyAsDataWasDestroyed();
+        iterator2.MakeEmptyAsDataWasDestroyed();
+    }
+}
+
+template <size_t A>
+struct alignas(A) AlignedStruct {
+    char placeholder;
+};
+
+// Test for overflows in Allocate's computations, size 1 variant
+TEST(CommandAllocator, AllocationOverflow_1) {
+    CommandAllocator allocator;
+    AlignedStruct<1>* data =
+        allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 2 variant
+TEST(CommandAllocator, AllocationOverflow_2) {
+    CommandAllocator allocator;
+    AlignedStruct<2>* data =
+        allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 4 variant
+TEST(CommandAllocator, AllocationOverflow_4) {
+    CommandAllocator allocator;
+    AlignedStruct<4>* data =
+        allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
+    ASSERT_EQ(data, nullptr);
+}
+
+// Test for overflows in Allocate's computations, size 8 variant
+TEST(CommandAllocator, AllocationOverflow_8) {
+    CommandAllocator allocator;
+    AlignedStruct<8>* data =
+        allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
+    ASSERT_EQ(data, nullptr);
+}
+
+template <int DefaultValue>
+struct IntWithDefault {
+    IntWithDefault() : value(DefaultValue) {}
+
+    int value;
+};
+
+// Test that the allocator correctly default-initializes data for Allocate
+TEST(CommandAllocator, AllocateDefaultInitializes) {
+    CommandAllocator allocator;
+
+    IntWithDefault<42>* int42 = allocator.Allocate<IntWithDefault<42>>(CommandType::Draw);
+    ASSERT_EQ(int42->value, 42);
+
+    IntWithDefault<43>* int43 = allocator.Allocate<IntWithDefault<43>>(CommandType::Draw);
+    ASSERT_EQ(int43->value, 43);
+
+    IntWithDefault<44>* int44 = allocator.Allocate<IntWithDefault<44>>(CommandType::Draw);
+    ASSERT_EQ(int44->value, 44);
+
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Test that the allocator correctly default-initializes data for AllocateData
+TEST(CommandAllocator, AllocateDataDefaultInitializes) {
+    CommandAllocator allocator;
+
+    IntWithDefault<33>* int33 = allocator.AllocateData<IntWithDefault<33>>(1);
+    ASSERT_EQ(int33[0].value, 33);
+
+    IntWithDefault<34>* int34 = allocator.AllocateData<IntWithDefault<34>>(2);
+    ASSERT_EQ(int34[0].value, 34);
+    ASSERT_EQ(int34[1].value, 34);
+
+    IntWithDefault<35>* int35 = allocator.AllocateData<IntWithDefault<35>>(3);
+    ASSERT_EQ(int35[0].value, 35);
+    ASSERT_EQ(int35[1].value, 35);
+    ASSERT_EQ(int35[2].value, 35);
+
+    CommandIterator iterator(std::move(allocator));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
+
+// Tests flattening of multiple CommandAllocators into a single CommandIterator using
+// AcquireCommandBlocks.
+TEST(CommandAllocator, AcquireCommandBlocks) {
+    constexpr size_t kNumAllocators = 2;
+    constexpr size_t kNumCommandsPerAllocator = 2;
+    const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = {
+        {0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE},
+        {0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE},
+    };
+    const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2}, {3, 4}};
+    const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}};
+    const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}};
+
+    std::vector<CommandAllocator> allocators(kNumAllocators);
+    for (size_t j = 0; j < kNumAllocators; ++j) {
+        CommandAllocator& allocator = allocators[j];
+        for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
             CommandPipeline* pipeline = allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
-            pipeline->pipeline = myPipeline;
-            pipeline->attachmentPoint = myAttachmentPoint;
+            pipeline->pipeline = pipelines[j][i];
+            pipeline->attachmentPoint = attachmentPoints[j][i];
 
             CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-            draw->first = myFirst;
-            draw->count = myCount;
+            draw->first = firsts[j][i];
+            draw->count = counts[j][i];
         }
+    }
 
-        {
-            CommandIterator iterator(std::move(allocator));
+    CommandIterator iterator;
+    iterator.AcquireCommandBlocks(std::move(allocators));
+    for (size_t j = 0; j < kNumAllocators; ++j) {
+        for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
             CommandType type;
-
             bool hasNext = iterator.NextCommandId(&type);
             ASSERT_TRUE(hasNext);
             ASSERT_EQ(type, CommandType::Pipeline);
 
             CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
-            ASSERT_EQ(pipeline->pipeline, myPipeline);
-            ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
-
-            iterator.Reset();
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_TRUE(hasNext);
-            ASSERT_EQ(type, CommandType::Pipeline);
-
-            pipeline = iterator.NextCommand<CommandPipeline>();
-            ASSERT_EQ(pipeline->pipeline, myPipeline);
-            ASSERT_EQ(pipeline->attachmentPoint, myAttachmentPoint);
+            ASSERT_EQ(pipeline->pipeline, pipelines[j][i]);
+            ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]);
 
             hasNext = iterator.NextCommandId(&type);
             ASSERT_TRUE(hasNext);
             ASSERT_EQ(type, CommandType::Draw);
 
             CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-            ASSERT_EQ(draw->first, myFirst);
-            ASSERT_EQ(draw->count, myCount);
-
-            hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
+            ASSERT_EQ(draw->first, firsts[j][i]);
+            ASSERT_EQ(draw->count, counts[j][i]);
         }
     }
-
-    // Test iterating empty iterators
-    TEST(CommandAllocator, EmptyIterator) {
-        {
-            CommandAllocator allocator;
-            CommandIterator iterator(std::move(allocator));
-
-            CommandType type;
-            bool hasNext = iterator.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator.MakeEmptyAsDataWasDestroyed();
-        }
-        {
-            CommandAllocator allocator;
-            CommandIterator iterator1(std::move(allocator));
-            CommandIterator iterator2(std::move(iterator1));
-
-            CommandType type;
-            bool hasNext = iterator2.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator1.MakeEmptyAsDataWasDestroyed();
-            iterator2.MakeEmptyAsDataWasDestroyed();
-        }
-        {
-            CommandIterator iterator1;
-            CommandIterator iterator2(std::move(iterator1));
-
-            CommandType type;
-            bool hasNext = iterator2.NextCommandId(&type);
-            ASSERT_FALSE(hasNext);
-
-            iterator1.MakeEmptyAsDataWasDestroyed();
-            iterator2.MakeEmptyAsDataWasDestroyed();
-        }
-    }
-
-    template <size_t A>
-    struct alignas(A) AlignedStruct {
-        char placeholder;
-    };
-
-    // Test for overflows in Allocate's computations, size 1 variant
-    TEST(CommandAllocator, AllocationOverflow_1) {
-        CommandAllocator allocator;
-        AlignedStruct<1>* data =
-            allocator.AllocateData<AlignedStruct<1>>(std::numeric_limits<size_t>::max() / 1);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    // Test for overflows in Allocate's computations, size 2 variant
-    TEST(CommandAllocator, AllocationOverflow_2) {
-        CommandAllocator allocator;
-        AlignedStruct<2>* data =
-            allocator.AllocateData<AlignedStruct<2>>(std::numeric_limits<size_t>::max() / 2);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    // Test for overflows in Allocate's computations, size 4 variant
-    TEST(CommandAllocator, AllocationOverflow_4) {
-        CommandAllocator allocator;
-        AlignedStruct<4>* data =
-            allocator.AllocateData<AlignedStruct<4>>(std::numeric_limits<size_t>::max() / 4);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    // Test for overflows in Allocate's computations, size 8 variant
-    TEST(CommandAllocator, AllocationOverflow_8) {
-        CommandAllocator allocator;
-        AlignedStruct<8>* data =
-            allocator.AllocateData<AlignedStruct<8>>(std::numeric_limits<size_t>::max() / 8);
-        ASSERT_EQ(data, nullptr);
-    }
-
-    template <int DefaultValue>
-    struct IntWithDefault {
-        IntWithDefault() : value(DefaultValue) {
-        }
-
-        int value;
-    };
-
-    // Test that the allcator correctly defaults initalizes data for Allocate
-    TEST(CommandAllocator, AllocateDefaultInitializes) {
-        CommandAllocator allocator;
-
-        IntWithDefault<42>* int42 = allocator.Allocate<IntWithDefault<42>>(CommandType::Draw);
-        ASSERT_EQ(int42->value, 42);
-
-        IntWithDefault<43>* int43 = allocator.Allocate<IntWithDefault<43>>(CommandType::Draw);
-        ASSERT_EQ(int43->value, 43);
-
-        IntWithDefault<44>* int44 = allocator.Allocate<IntWithDefault<44>>(CommandType::Draw);
-        ASSERT_EQ(int44->value, 44);
-
-        CommandIterator iterator(std::move(allocator));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
-
-    // Test that the allocator correctly default-initalizes data for AllocateData
-    TEST(CommandAllocator, AllocateDataDefaultInitializes) {
-        CommandAllocator allocator;
-
-        IntWithDefault<33>* int33 = allocator.AllocateData<IntWithDefault<33>>(1);
-        ASSERT_EQ(int33[0].value, 33);
-
-        IntWithDefault<34>* int34 = allocator.AllocateData<IntWithDefault<34>>(2);
-        ASSERT_EQ(int34[0].value, 34);
-        ASSERT_EQ(int34[0].value, 34);
-
-        IntWithDefault<35>* int35 = allocator.AllocateData<IntWithDefault<35>>(3);
-        ASSERT_EQ(int35[0].value, 35);
-        ASSERT_EQ(int35[1].value, 35);
-        ASSERT_EQ(int35[2].value, 35);
-
-        CommandIterator iterator(std::move(allocator));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
-
-    // Tests flattening of multiple CommandAllocators into a single CommandIterator using
-    // AcquireCommandBlocks.
-    TEST(CommandAllocator, AcquireCommandBlocks) {
-        constexpr size_t kNumAllocators = 2;
-        constexpr size_t kNumCommandsPerAllocator = 2;
-        const uint64_t pipelines[kNumAllocators][kNumCommandsPerAllocator] = {
-            {0xDEADBEEFBEEFDEAD, 0xC0FFEEF00DC0FFEE},
-            {0x1337C0DE1337C0DE, 0xCAFEFACEFACECAFE},
-        };
-        const uint32_t attachmentPoints[kNumAllocators][kNumCommandsPerAllocator] = {{1, 2},
-                                                                                     {3, 4}};
-        const uint32_t firsts[kNumAllocators][kNumCommandsPerAllocator] = {{42, 43}, {5, 6}};
-        const uint32_t counts[kNumAllocators][kNumCommandsPerAllocator] = {{16, 32}, {4, 8}};
-
-        std::vector<CommandAllocator> allocators(kNumAllocators);
-        for (size_t j = 0; j < kNumAllocators; ++j) {
-            CommandAllocator& allocator = allocators[j];
-            for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
-                CommandPipeline* pipeline =
-                    allocator.Allocate<CommandPipeline>(CommandType::Pipeline);
-                pipeline->pipeline = pipelines[j][i];
-                pipeline->attachmentPoint = attachmentPoints[j][i];
-
-                CommandDraw* draw = allocator.Allocate<CommandDraw>(CommandType::Draw);
-                draw->first = firsts[j][i];
-                draw->count = counts[j][i];
-            }
-        }
-
-        CommandIterator iterator;
-        iterator.AcquireCommandBlocks(std::move(allocators));
-        for (size_t j = 0; j < kNumAllocators; ++j) {
-            for (size_t i = 0; i < kNumCommandsPerAllocator; ++i) {
-                CommandType type;
-                bool hasNext = iterator.NextCommandId(&type);
-                ASSERT_TRUE(hasNext);
-                ASSERT_EQ(type, CommandType::Pipeline);
-
-                CommandPipeline* pipeline = iterator.NextCommand<CommandPipeline>();
-                ASSERT_EQ(pipeline->pipeline, pipelines[j][i]);
-                ASSERT_EQ(pipeline->attachmentPoint, attachmentPoints[j][i]);
-
-                hasNext = iterator.NextCommandId(&type);
-                ASSERT_TRUE(hasNext);
-                ASSERT_EQ(type, CommandType::Draw);
-
-                CommandDraw* draw = iterator.NextCommand<CommandDraw>();
-                ASSERT_EQ(draw->first, firsts[j][i]);
-                ASSERT_EQ(draw->count, counts[j][i]);
-            }
-        }
-        CommandType type;
-        ASSERT_FALSE(iterator.NextCommandId(&type));
-        iterator.MakeEmptyAsDataWasDestroyed();
-    }
+    CommandType type;
+    ASSERT_FALSE(iterator.NextCommandId(&type));
+    iterator.MakeEmptyAsDataWasDestroyed();
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/ConcurrentCacheTests.cpp b/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
index 9687a3c..1d4f91d 100644
--- a/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
+++ b/src/dawn/tests/unittests/ConcurrentCacheTests.cpp
@@ -22,37 +22,31 @@
 #include "gtest/gtest.h"
 
 namespace {
-    class SimpleCachedObject {
-      public:
-        explicit SimpleCachedObject(size_t value) : mValue(value) {
+class SimpleCachedObject {
+  public:
+    explicit SimpleCachedObject(size_t value) : mValue(value) {}
+
+    size_t GetValue() const { return mValue; }
+
+    struct EqualityFunc {
+        bool operator()(const SimpleCachedObject* a, const SimpleCachedObject* b) const {
+            return a->mValue == b->mValue;
         }
-
-        size_t GetValue() const {
-            return mValue;
-        }
-
-        struct EqualityFunc {
-            bool operator()(const SimpleCachedObject* a, const SimpleCachedObject* b) const {
-                return a->mValue == b->mValue;
-            }
-        };
-
-        struct HashFunc {
-            size_t operator()(const SimpleCachedObject* obj) const {
-                return obj->mValue;
-            }
-        };
-
-      private:
-        size_t mValue;
     };
 
+    struct HashFunc {
+        size_t operator()(const SimpleCachedObject* obj) const { return obj->mValue; }
+    };
+
+  private:
+    size_t mValue;
+};
+
 }  // anonymous namespace
 
 class ConcurrentCacheTest : public testing::Test {
   public:
-    ConcurrentCacheTest() : mPool(mPlatform.CreateWorkerTaskPool()), mTaskManager(mPool.get()) {
-    }
+    ConcurrentCacheTest() : mPool(mPlatform.CreateWorkerTaskPool()), mTaskManager(mPool.get()) {}
 
   protected:
     dawn::platform::Platform mPlatform;
diff --git a/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp b/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
index 26849bd..bb2f916 100644
--- a/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
+++ b/src/dawn/tests/unittests/EnumClassBitmasksTests.cpp
@@ -18,76 +18,76 @@
 
 namespace dawn {
 
-    enum class Color : uint32_t {
-        R = 1,
-        G = 2,
-        B = 4,
-        A = 8,
-    };
+enum class Color : uint32_t {
+    R = 1,
+    G = 2,
+    B = 4,
+    A = 8,
+};
 
-    template <>
-    struct IsDawnBitmask<Color> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<Color> {
+    static constexpr bool enable = true;
+};
 
-    TEST(BitmaskTests, BasicOperations) {
-        Color test1 = Color::R | Color::G;
-        ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+TEST(BitmaskTests, BasicOperations) {
+    Color test1 = Color::R | Color::G;
+    ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
 
-        Color test2 = test1 ^ (Color::R | Color::A);
-        ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
+    Color test2 = test1 ^ (Color::R | Color::A);
+    ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
 
-        Color test3 = test2 & Color::A;
-        ASSERT_EQ(8u, static_cast<uint32_t>(test3));
+    Color test3 = test2 & Color::A;
+    ASSERT_EQ(8u, static_cast<uint32_t>(test3));
 
-        Color test4 = ~test3;
-        ASSERT_EQ(~uint32_t(8), static_cast<uint32_t>(test4));
+    Color test4 = ~test3;
+    ASSERT_EQ(~uint32_t(8), static_cast<uint32_t>(test4));
+}
+
+TEST(BitmaskTests, AssignOperations) {
+    Color test1 = Color::R;
+    test1 |= Color::G;
+    ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+
+    Color test2 = test1;
+    test2 ^= (Color::R | Color::A);
+    ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
+
+    Color test3 = test2;
+    test3 &= Color::A;
+    ASSERT_EQ(8u, static_cast<uint32_t>(test3));
+}
+
+TEST(BitmaskTests, BoolConversion) {
+    bool test1 = Color::R | Color::G;
+    ASSERT_TRUE(test1);
+
+    bool test2 = Color::R & Color::G;
+    ASSERT_FALSE(test2);
+
+    bool test3 = Color::R ^ Color::G;
+    ASSERT_TRUE(test3);
+
+    if (Color::R & ~Color::R) {
+        ASSERT_TRUE(false);
     }
+}
 
-    TEST(BitmaskTests, AssignOperations) {
-        Color test1 = Color::R;
-        test1 |= Color::G;
-        ASSERT_EQ(1u | 2u, static_cast<uint32_t>(test1));
+TEST(BitmaskTests, ThreeOrs) {
+    Color c = Color::R | Color::G | Color::B;
+    ASSERT_EQ(7u, static_cast<uint32_t>(c));
+}
 
-        Color test2 = test1;
-        test2 ^= (Color::R | Color::A);
-        ASSERT_EQ(2u | 8u, static_cast<uint32_t>(test2));
-
-        Color test3 = test2;
-        test3 &= Color::A;
-        ASSERT_EQ(8u, static_cast<uint32_t>(test3));
-    }
-
-    TEST(BitmaskTests, BoolConversion) {
-        bool test1 = Color::R | Color::G;
-        ASSERT_TRUE(test1);
-
-        bool test2 = Color::R & Color::G;
-        ASSERT_FALSE(test2);
-
-        bool test3 = Color::R ^ Color::G;
-        ASSERT_TRUE(test3);
-
-        if (Color::R & ~Color::R) {
-            ASSERT_TRUE(false);
-        }
-    }
-
-    TEST(BitmaskTests, ThreeOrs) {
-        Color c = Color::R | Color::G | Color::B;
-        ASSERT_EQ(7u, static_cast<uint32_t>(c));
-    }
-
-    TEST(BitmaskTests, ZeroOrOneBits) {
-        Color zero = static_cast<Color>(0);
-        ASSERT_TRUE(HasZeroOrOneBits(zero));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::R));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::G));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::B));
-        ASSERT_TRUE(HasZeroOrOneBits(Color::A));
-        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::R | Color::G)));
-        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::G | Color::B)));
-        ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::B | Color::A)));
-    }
+TEST(BitmaskTests, ZeroOrOneBits) {
+    Color zero = static_cast<Color>(0);
+    ASSERT_TRUE(HasZeroOrOneBits(zero));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::R));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::G));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::B));
+    ASSERT_TRUE(HasZeroOrOneBits(Color::A));
+    ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::R | Color::G)));
+    ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::G | Color::B)));
+    ASSERT_FALSE(HasZeroOrOneBits(static_cast<Color>(Color::B | Color::A)));
+}
 
 }  // namespace dawn
diff --git a/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp b/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
index b6c6727..fec0376 100644
--- a/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
+++ b/src/dawn/tests/unittests/EnumMaskIteratorTests.cpp
@@ -18,55 +18,55 @@
 
 namespace dawn::native {
 
-    enum class TestAspect : uint8_t {
-        Color = 1,
-        Depth = 2,
-        Stencil = 4,
-    };
+enum class TestAspect : uint8_t {
+    Color = 1,
+    Depth = 2,
+    Stencil = 4,
+};
 
-    template <>
-    struct EnumBitmaskSize<TestAspect> {
-        static constexpr unsigned value = 3;
-    };
+template <>
+struct EnumBitmaskSize<TestAspect> {
+    static constexpr unsigned value = 3;
+};
 
 }  // namespace dawn::native
 
 namespace dawn {
 
-    template <>
-    struct IsDawnBitmask<dawn::native::TestAspect> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<dawn::native::TestAspect> {
+    static constexpr bool enable = true;
+};
 
 }  // namespace dawn
 
 namespace dawn::native {
 
-    static_assert(EnumBitmaskSize<TestAspect>::value == 3);
+static_assert(EnumBitmaskSize<TestAspect>::value == 3);
 
-    TEST(EnumMaskIteratorTests, None) {
-        for (TestAspect aspect : IterateEnumMask(static_cast<TestAspect>(0))) {
-            FAIL();
-            DAWN_UNUSED(aspect);
-        }
+TEST(EnumMaskIteratorTests, None) {
+    for (TestAspect aspect : IterateEnumMask(static_cast<TestAspect>(0))) {
+        FAIL();
+        DAWN_UNUSED(aspect);
     }
+}
 
-    TEST(EnumMaskIteratorTests, All) {
-        TestAspect expected[] = {TestAspect::Color, TestAspect::Depth, TestAspect::Stencil};
-        uint32_t i = 0;
-        TestAspect aspects = TestAspect::Color | TestAspect::Depth | TestAspect::Stencil;
-        for (TestAspect aspect : IterateEnumMask(aspects)) {
-            EXPECT_EQ(aspect, expected[i++]);
-        }
+TEST(EnumMaskIteratorTests, All) {
+    TestAspect expected[] = {TestAspect::Color, TestAspect::Depth, TestAspect::Stencil};
+    uint32_t i = 0;
+    TestAspect aspects = TestAspect::Color | TestAspect::Depth | TestAspect::Stencil;
+    for (TestAspect aspect : IterateEnumMask(aspects)) {
+        EXPECT_EQ(aspect, expected[i++]);
     }
+}
 
-    TEST(EnumMaskIteratorTests, Partial) {
-        TestAspect expected[] = {TestAspect::Color, TestAspect::Stencil};
-        uint32_t i = 0;
-        TestAspect aspects = TestAspect::Stencil | TestAspect::Color;
-        for (TestAspect aspect : IterateEnumMask(aspects)) {
-            EXPECT_EQ(aspect, expected[i++]);
-        }
+TEST(EnumMaskIteratorTests, Partial) {
+    TestAspect expected[] = {TestAspect::Color, TestAspect::Stencil};
+    uint32_t i = 0;
+    TestAspect aspects = TestAspect::Stencil | TestAspect::Color;
+    for (TestAspect aspect : IterateEnumMask(aspects)) {
+        EXPECT_EQ(aspect, expected[i++]);
     }
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/ErrorTests.cpp b/src/dawn/tests/unittests/ErrorTests.cpp
index f856613..8a3f568 100644
--- a/src/dawn/tests/unittests/ErrorTests.cpp
+++ b/src/dawn/tests/unittests/ErrorTests.cpp
@@ -18,346 +18,348 @@
 #include "dawn/native/ErrorData.h"
 #include "gtest/gtest.h"
 
-namespace dawn::native { namespace {
+namespace dawn::native {
+namespace {
 
-    int placeholderSuccess = 0xbeef;
-    const char* placeholderErrorMessage = "I am an error message :3";
+int placeholderSuccess = 0xbeef;
+const char* placeholderErrorMessage = "I am an error message :3";
 
-    // Check returning a success MaybeError with {};
-    TEST(ErrorTests, Error_Success) {
-        auto ReturnSuccess = []() -> MaybeError { return {}; };
+// Check returning a success MaybeError with {};
+TEST(ErrorTests, Error_Success) {
+    auto ReturnSuccess = []() -> MaybeError { return {}; };
 
-        MaybeError result = ReturnSuccess();
-        ASSERT_TRUE(result.IsSuccess());
-    }
+    MaybeError result = ReturnSuccess();
+    ASSERT_TRUE(result.IsSuccess());
+}
 
-    // Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR"
-    TEST(ErrorTests, Error_Error) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check returning an error MaybeError with "return DAWN_VALIDATION_ERROR"
+TEST(ErrorTests, Error_Error) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        MaybeError result = ReturnError();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = ReturnError();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check returning a success ResultOrError with an implicit conversion
-    TEST(ErrorTests, ResultOrError_Success) {
-        auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
+// Check returning a success ResultOrError with an implicit conversion
+TEST(ErrorTests, ResultOrError_Success) {
+    auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
 
-        ResultOrError<int*> result = ReturnSuccess();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
-    }
+    ResultOrError<int*> result = ReturnSuccess();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
+}
 
-    // Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR"
-    TEST(ErrorTests, ResultOrError_Error) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check returning an error ResultOrError with "return DAWN_VALIDATION_ERROR"
+TEST(ErrorTests, ResultOrError_Error) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        ResultOrError<int*> result = ReturnError();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = ReturnError();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY handles successes correctly.
-    TEST(ErrorTests, TRY_Success) {
-        auto ReturnSuccess = []() -> MaybeError { return {}; };
+// Check DAWN_TRY handles successes correctly.
+TEST(ErrorTests, TRY_Success) {
+    auto ReturnSuccess = []() -> MaybeError { return {}; };
 
-        // We need to check that DAWN_TRY doesn't return on successes
-        bool tryReturned = true;
+    // We need to check that DAWN_TRY doesn't return on successes
+    bool tryReturned = true;
 
-        auto Try = [ReturnSuccess, &tryReturned]() -> MaybeError {
-            DAWN_TRY(ReturnSuccess());
-            tryReturned = false;
-            return {};
-        };
+    auto Try = [ReturnSuccess, &tryReturned]() -> MaybeError {
+        DAWN_TRY(ReturnSuccess());
+        tryReturned = false;
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_FALSE(tryReturned);
-    }
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_FALSE(tryReturned);
+}
 
-    // Check DAWN_TRY handles errors correctly.
-    TEST(ErrorTests, TRY_Error) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY handles errors correctly.
+TEST(ErrorTests, TRY_Error) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> MaybeError {
-            DAWN_TRY(ReturnError());
-            // DAWN_TRY should return before this point
-            EXPECT_FALSE(true);
-            return {};
-        };
+    auto Try = [ReturnError]() -> MaybeError {
+        DAWN_TRY(ReturnError());
+        // DAWN_TRY should return before this point
+        EXPECT_FALSE(true);
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY adds to the backtrace.
-    TEST(ErrorTests, TRY_AddsToBacktrace) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY adds to the backtrace.
+TEST(ErrorTests, TRY_AddsToBacktrace) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto SingleTry = [ReturnError]() -> MaybeError {
-            DAWN_TRY(ReturnError());
-            return {};
-        };
+    auto SingleTry = [ReturnError]() -> MaybeError {
+        DAWN_TRY(ReturnError());
+        return {};
+    };
 
-        auto DoubleTry = [SingleTry]() -> MaybeError {
-            DAWN_TRY(SingleTry());
-            return {};
-        };
+    auto DoubleTry = [SingleTry]() -> MaybeError {
+        DAWN_TRY(SingleTry());
+        return {};
+    };
 
-        MaybeError singleResult = SingleTry();
-        ASSERT_TRUE(singleResult.IsError());
+    MaybeError singleResult = SingleTry();
+    ASSERT_TRUE(singleResult.IsError());
 
-        MaybeError doubleResult = DoubleTry();
-        ASSERT_TRUE(doubleResult.IsError());
+    MaybeError doubleResult = DoubleTry();
+    ASSERT_TRUE(doubleResult.IsError());
 
-        std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
-        std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
+    std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
+    std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
 
-        ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
-    }
+    ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
+}
 
-    // Check DAWN_TRY_ASSIGN handles successes correctly.
-    TEST(ErrorTests, TRY_RESULT_Success) {
-        auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
+// Check DAWN_TRY_ASSIGN handles successes correctly.
+TEST(ErrorTests, TRY_RESULT_Success) {
+    auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
 
-        // We need to check that DAWN_TRY doesn't return on successes
-        bool tryReturned = true;
+    // We need to check that DAWN_TRY doesn't return on successes
+    bool tryReturned = true;
 
-        auto Try = [ReturnSuccess, &tryReturned]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN(result, ReturnSuccess());
-            tryReturned = false;
+    auto Try = [ReturnSuccess, &tryReturned]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN(result, ReturnSuccess());
+        tryReturned = false;
 
-            EXPECT_EQ(result, &placeholderSuccess);
-            return result;
-        };
+        EXPECT_EQ(result, &placeholderSuccess);
+        return result;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_FALSE(tryReturned);
-        ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
-    }
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_FALSE(tryReturned);
+    ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
+}
 
-    // Check DAWN_TRY_ASSIGN handles errors correctly.
-    TEST(ErrorTests, TRY_RESULT_Error) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN handles errors correctly.
+TEST(ErrorTests, TRY_RESULT_Error) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN(result, ReturnError());
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN(result, ReturnError());
+        DAWN_UNUSED(result);
 
-            // DAWN_TRY should return before this point
-            EXPECT_FALSE(true);
-            return &placeholderSuccess;
-        };
+        // DAWN_TRY should return before this point
+        EXPECT_FALSE(true);
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY_ASSIGN adds to the backtrace.
-    TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN adds to the backtrace.
+TEST(ErrorTests, TRY_RESULT_AddsToBacktrace) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto SingleTry = [ReturnError]() -> ResultOrError<int*> {
-            DAWN_TRY(ReturnError());
-            return &placeholderSuccess;
-        };
+    auto SingleTry = [ReturnError]() -> ResultOrError<int*> {
+        DAWN_TRY(ReturnError());
+        return &placeholderSuccess;
+    };
 
-        auto DoubleTry = [SingleTry]() -> ResultOrError<int*> {
-            DAWN_TRY(SingleTry());
-            return &placeholderSuccess;
-        };
+    auto DoubleTry = [SingleTry]() -> ResultOrError<int*> {
+        DAWN_TRY(SingleTry());
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> singleResult = SingleTry();
-        ASSERT_TRUE(singleResult.IsError());
+    ResultOrError<int*> singleResult = SingleTry();
+    ASSERT_TRUE(singleResult.IsError());
 
-        ResultOrError<int*> doubleResult = DoubleTry();
-        ASSERT_TRUE(doubleResult.IsError());
+    ResultOrError<int*> doubleResult = DoubleTry();
+    ASSERT_TRUE(doubleResult.IsError());
 
-        std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
-        std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
+    std::unique_ptr<ErrorData> singleData = singleResult.AcquireError();
+    std::unique_ptr<ErrorData> doubleData = doubleResult.AcquireError();
 
-        ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
-    }
+    ASSERT_EQ(singleData->GetBacktrace().size() + 1, doubleData->GetBacktrace().size());
+}
 
-    // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
-    TEST(ErrorTests, TRY_RESULT_ConversionToError) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
+TEST(ErrorTests, TRY_RESULT_ConversionToError) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> MaybeError {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN(result, ReturnError());
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> MaybeError {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN(result, ReturnError());
+        DAWN_UNUSED(result);
 
-            return {};
-        };
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
-    // Version without Result<E*, T*>
-    TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) {
-        auto ReturnError = []() -> ResultOrError<int> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a ResultOrError can be DAWN_TRY_ASSIGNED in a function that returns an Error
+// Version without Result<E*, T*>
+TEST(ErrorTests, TRY_RESULT_ConversionToErrorNonPointer) {
+    auto ReturnError = []() -> ResultOrError<int> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> MaybeError {
-            int result = 0;
-            DAWN_TRY_ASSIGN(result, ReturnError());
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> MaybeError {
+        int result = 0;
+        DAWN_TRY_ASSIGN(result, ReturnError());
+        DAWN_UNUSED(result);
 
-            return {};
-        };
+        return {};
+    };
 
-        MaybeError result = Try();
-        ASSERT_TRUE(result.IsError());
+    MaybeError result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check DAWN_TRY_ASSIGN handles successes correctly.
-    TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) {
-        auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
+// Check DAWN_TRY_ASSIGN handles successes correctly.
+TEST(ErrorTests, TRY_RESULT_CLEANUP_Success) {
+    auto ReturnSuccess = []() -> ResultOrError<int*> { return &placeholderSuccess; };
 
-        // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the
-        // cleanup is not called.
-        bool tryReturned = true;
-        bool tryCleanup = false;
+    // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP doesn't return on successes and the
+    // cleanup is not called.
+    bool tryReturned = true;
+    bool tryCleanup = false;
 
-        auto Try = [ReturnSuccess, &tryReturned, &tryCleanup]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; });
-            tryReturned = false;
+    auto Try = [ReturnSuccess, &tryReturned, &tryCleanup]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnSuccess(), { tryCleanup = true; });
+        tryReturned = false;
 
-            EXPECT_EQ(result, &placeholderSuccess);
-            return result;
-        };
+        EXPECT_EQ(result, &placeholderSuccess);
+        return result;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsSuccess());
-        ASSERT_FALSE(tryReturned);
-        ASSERT_FALSE(tryCleanup);
-        ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
-    }
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsSuccess());
+    ASSERT_FALSE(tryReturned);
+    ASSERT_FALSE(tryCleanup);
+    ASSERT_EQ(result.AcquireSuccess(), &placeholderSuccess);
+}
 
-    // Check DAWN_TRY_ASSIGN handles cleanups.
-    TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN handles cleanups.
+TEST(ErrorTests, TRY_RESULT_CLEANUP_Cleanup) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error.
-        bool tryCleanup = false;
+    // We need to check that DAWN_TRY_ASSIGN_WITH_CLEANUP calls cleanup when error.
+    bool tryCleanup = false;
 
-        auto Try = [ReturnError, &tryCleanup]() -> ResultOrError<int*> {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), { tryCleanup = true; });
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError, &tryCleanup]() -> ResultOrError<int*> {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), { tryCleanup = true; });
+        DAWN_UNUSED(result);
 
-            // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
-            EXPECT_FALSE(true);
-            return &placeholderSuccess;
-        };
+        // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
+        EXPECT_FALSE(true);
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-        ASSERT_TRUE(tryCleanup);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+    ASSERT_TRUE(tryCleanup);
+}
 
-    // Check DAWN_TRY_ASSIGN can override return value when needed.
-    TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) {
-        auto ReturnError = []() -> ResultOrError<int*> {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check DAWN_TRY_ASSIGN can override return value when needed.
+TEST(ErrorTests, TRY_RESULT_CLEANUP_OverrideReturn) {
+    auto ReturnError = []() -> ResultOrError<int*> {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> bool {
-            int* result = nullptr;
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), {}, true);
-            DAWN_UNUSED(result);
+    auto Try = [ReturnError]() -> bool {
+        int* result = nullptr;
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(result, ReturnError(), {}, true);
+        DAWN_UNUSED(result);
 
-            // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
-            EXPECT_FALSE(true);
-            return false;
-        };
+        // DAWN_TRY_ASSIGN_WITH_CLEANUP should return before this point
+        EXPECT_FALSE(true);
+        return false;
+    };
 
-        bool result = Try();
-        ASSERT_TRUE(result);
-    }
+    bool result = Try();
+    ASSERT_TRUE(result);
+}
 
-    // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
-    // Check DAWN_TRY handles errors correctly.
-    TEST(ErrorTests, TRY_ConversionToErrorOrResult) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
+// Check DAWN_TRY handles errors correctly.
+TEST(ErrorTests, TRY_ConversionToErrorOrResult) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> ResultOrError<int*> {
-            DAWN_TRY(ReturnError());
-            return &placeholderSuccess;
-        };
+    auto Try = [ReturnError]() -> ResultOrError<int*> {
+        DAWN_TRY(ReturnError());
+        return &placeholderSuccess;
+    };
 
-        ResultOrError<int*> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int*> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
-    // Check DAWN_TRY handles errors correctly. Version without Result<E*, T*>
-    TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) {
-        auto ReturnError = []() -> MaybeError {
-            return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
-        };
+// Check a MaybeError can be DAWN_TRIED in a function that returns an ResultOrError
+// Check DAWN_TRY handles errors correctly. Version without Result<E*, T*>
+TEST(ErrorTests, TRY_ConversionToErrorOrResultNonPointer) {
+    auto ReturnError = []() -> MaybeError {
+        return DAWN_VALIDATION_ERROR(placeholderErrorMessage);
+    };
 
-        auto Try = [ReturnError]() -> ResultOrError<int> {
-            DAWN_TRY(ReturnError());
-            return 42;
-        };
+    auto Try = [ReturnError]() -> ResultOrError<int> {
+        DAWN_TRY(ReturnError());
+        return 42;
+    };
 
-        ResultOrError<int> result = Try();
-        ASSERT_TRUE(result.IsError());
+    ResultOrError<int> result = Try();
+    ASSERT_TRUE(result.IsError());
 
-        std::unique_ptr<ErrorData> errorData = result.AcquireError();
-        ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
-    }
+    std::unique_ptr<ErrorData> errorData = result.AcquireError();
+    ASSERT_EQ(errorData->GetMessage(), placeholderErrorMessage);
+}
 
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::native::
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/FeatureTests.cpp b/src/dawn/tests/unittests/FeatureTests.cpp
index e263485..9fb6248 100644
--- a/src/dawn/tests/unittests/FeatureTests.cpp
+++ b/src/dawn/tests/unittests/FeatureTests.cpp
@@ -24,8 +24,7 @@
     FeatureTests()
         : testing::Test(),
           mInstanceBase(dawn::native::InstanceBase::Create()),
-          mAdapterBase(mInstanceBase.Get()) {
-    }
+          mAdapterBase(mInstanceBase.Get()) {}
 
     std::vector<wgpu::FeatureName> GetAllFeatureNames() {
         std::vector<wgpu::FeatureName> allFeatureNames(kTotalFeaturesCount);
diff --git a/src/dawn/tests/unittests/GPUInfoTests.cpp b/src/dawn/tests/unittests/GPUInfoTests.cpp
index 60e2190..42f12cb 100644
--- a/src/dawn/tests/unittests/GPUInfoTests.cpp
+++ b/src/dawn/tests/unittests/GPUInfoTests.cpp
@@ -17,11 +17,11 @@
 #include "dawn/common/GPUInfo.h"
 
 namespace {
-    const PCIVendorID vendorID = 0x8086;
-    const gpu_info::D3DDriverVersion version1 = {20, 19, 15, 5107};
-    const gpu_info::D3DDriverVersion version2 = {21, 20, 16, 5077};
-    const gpu_info::D3DDriverVersion version3 = {27, 20, 100, 9946};
-    const gpu_info::D3DDriverVersion version4 = {27, 20, 101, 2003};
+const PCIVendorID vendorID = 0x8086;
+const gpu_info::D3DDriverVersion version1 = {20, 19, 15, 5107};
+const gpu_info::D3DDriverVersion version2 = {21, 20, 16, 5077};
+const gpu_info::D3DDriverVersion version3 = {27, 20, 100, 9946};
+const gpu_info::D3DDriverVersion version4 = {27, 20, 101, 2003};
 }  // anonymous namespace
 
 TEST(GPUInfo, CompareD3DDriverVersion) {
diff --git a/src/dawn/tests/unittests/GetProcAddressTests.cpp b/src/dawn/tests/unittests/GetProcAddressTests.cpp
index 50840c2..a49c042 100644
--- a/src/dawn/tests/unittests/GetProcAddressTests.cpp
+++ b/src/dawn/tests/unittests/GetProcAddressTests.cpp
@@ -26,149 +26,148 @@
 
 namespace {
 
-    // dawn_wire and dawn_native contain duplicated code for the handling of GetProcAddress
-    // so we run the tests against both implementations. This enum is used as a test parameters to
-    // know which implementation to test.
-    enum class DawnFlavor {
-        Native,
-        Wire,
-    };
+// dawn_wire and dawn_native contain duplicated code for the handling of GetProcAddress
+// so we run the tests against both implementations. This enum is used as a test parameters to
+// know which implementation to test.
+enum class DawnFlavor {
+    Native,
+    Wire,
+};
 
-    std::ostream& operator<<(std::ostream& stream, DawnFlavor flavor) {
-        switch (flavor) {
-            case DawnFlavor::Native:
-                stream << "dawn_native";
-                break;
+std::ostream& operator<<(std::ostream& stream, DawnFlavor flavor) {
+    switch (flavor) {
+        case DawnFlavor::Native:
+            stream << "dawn_native";
+            break;
 
-            case DawnFlavor::Wire:
-                stream << "dawn_wire";
+        case DawnFlavor::Wire:
+            stream << "dawn_wire";
+            break;
+
+        default:
+            UNREACHABLE();
+            break;
+    }
+    return stream;
+}
+
+class GetProcAddressTests : public testing::TestWithParam<DawnFlavor> {
+  public:
+    GetProcAddressTests()
+        : testing::TestWithParam<DawnFlavor>(),
+          mNativeInstance(dawn::native::InstanceBase::Create()),
+          mNativeAdapter(mNativeInstance.Get()) {}
+
+    void SetUp() override {
+        switch (GetParam()) {
+            case DawnFlavor::Native: {
+                mDevice = wgpu::Device::Acquire(
+                    reinterpret_cast<WGPUDevice>(mNativeAdapter.APICreateDevice()));
+                mProcs = dawn::native::GetProcs();
                 break;
+            }
+
+            case DawnFlavor::Wire: {
+                mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
+
+                dawn::wire::WireClientDescriptor clientDesc = {};
+                clientDesc.serializer = mC2sBuf.get();
+                mWireClient = std::make_unique<dawn::wire::WireClient>(clientDesc);
+
+                mDevice = wgpu::Device::Acquire(mWireClient->ReserveDevice().device);
+                mProcs = dawn::wire::client::GetProcs();
+                break;
+            }
 
             default:
                 UNREACHABLE();
                 break;
         }
-        return stream;
+
+        dawnProcSetProcs(&mProcs);
     }
 
-    class GetProcAddressTests : public testing::TestWithParam<DawnFlavor> {
-      public:
-        GetProcAddressTests()
-            : testing::TestWithParam<DawnFlavor>(),
-              mNativeInstance(dawn::native::InstanceBase::Create()),
-              mNativeAdapter(mNativeInstance.Get()) {
-        }
-
-        void SetUp() override {
-            switch (GetParam()) {
-                case DawnFlavor::Native: {
-                    mDevice = wgpu::Device::Acquire(
-                        reinterpret_cast<WGPUDevice>(mNativeAdapter.APICreateDevice()));
-                    mProcs = dawn::native::GetProcs();
-                    break;
-                }
-
-                case DawnFlavor::Wire: {
-                    mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
-
-                    dawn::wire::WireClientDescriptor clientDesc = {};
-                    clientDesc.serializer = mC2sBuf.get();
-                    mWireClient = std::make_unique<dawn::wire::WireClient>(clientDesc);
-
-                    mDevice = wgpu::Device::Acquire(mWireClient->ReserveDevice().device);
-                    mProcs = dawn::wire::client::GetProcs();
-                    break;
-                }
-
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            dawnProcSetProcs(&mProcs);
-        }
-
-        void TearDown() override {
-            // Destroy the device before freeing the instance or the wire client in the destructor
-            mDevice = wgpu::Device();
-        }
-
-      protected:
-        Ref<dawn::native::InstanceBase> mNativeInstance;
-        dawn::native::null::Adapter mNativeAdapter;
-
-        std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
-        std::unique_ptr<dawn::wire::WireClient> mWireClient;
-
-        wgpu::Device mDevice;
-        DawnProcTable mProcs;
-    };
-
-    // Test GetProcAddress with and without devices on some valid examples
-    TEST_P(GetProcAddressTests, ValidExamples) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceCreateBuffer"),
-                  reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceCreateBuffer"),
-                  reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuQueueSubmit"),
-                  reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuQueueSubmit"),
-                  reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+    void TearDown() override {
+        // Destroy the device before freeing the instance or the wire client in the destructor
+        mDevice = wgpu::Device();
     }
 
-    // Test GetProcAddress with and without devices on nullptr procName
-    TEST_P(GetProcAddressTests, Nullptr) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, nullptr), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), nullptr), nullptr);
+  protected:
+    Ref<dawn::native::InstanceBase> mNativeInstance;
+    dawn::native::null::Adapter mNativeAdapter;
+
+    std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+    std::unique_ptr<dawn::wire::WireClient> mWireClient;
+
+    wgpu::Device mDevice;
+    DawnProcTable mProcs;
+};
+
+// Test GetProcAddress with and without devices on some valid examples
+TEST_P(GetProcAddressTests, ValidExamples) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceCreateBuffer"),
+              reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceCreateBuffer"),
+              reinterpret_cast<WGPUProc>(mProcs.deviceCreateBuffer));
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuQueueSubmit"),
+              reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuQueueSubmit"),
+              reinterpret_cast<WGPUProc>(mProcs.queueSubmit));
+}
+
+// Test GetProcAddress with and without devices on nullptr procName
+TEST_P(GetProcAddressTests, Nullptr) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, nullptr), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), nullptr), nullptr);
+}
+
+// Test GetProcAddress with and without devices on some invalid
+TEST_P(GetProcAddressTests, InvalidExamples) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceDoSomething"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceDoSomething"), nullptr);
+
+    // Trigger the condition where lower_bound will return the end of the procMap.
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "zzzzzzz"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "zzzzzzz"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "ZZ"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "ZZ"), nullptr);
+
+    // Some more potential corner cases.
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, ""), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), ""), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "0"), nullptr);
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "0"), nullptr);
+}
+
+// Test that GetProcAddress supports freestanding function that are handled specially
+TEST_P(GetProcAddressTests, FreeStandingFunctions) {
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuGetProcAddress"),
+              reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuGetProcAddress"),
+              reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
+
+    ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuCreateInstance"),
+              reinterpret_cast<WGPUProc>(mProcs.createInstance));
+    ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuCreateInstance"),
+              reinterpret_cast<WGPUProc>(mProcs.createInstance));
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+                         GetProcAddressTests,
+                         testing::Values(DawnFlavor::Native, DawnFlavor::Wire),
+                         testing::PrintToStringParamName());
+
+TEST(GetProcAddressInternalTests, CheckDawnNativeProcMapOrder) {
+    std::vector<const char*> names = dawn::native::GetProcMapNamesForTesting();
+    for (size_t i = 1; i < names.size(); i++) {
+        ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
     }
+}
 
-    // Test GetProcAddress with and without devices on some invalid
-    TEST_P(GetProcAddressTests, InvalidExamples) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuDeviceDoSomething"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuDeviceDoSomething"), nullptr);
-
-        // Trigger the condition where lower_bound will return the end of the procMap.
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "zzzzzzz"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "zzzzzzz"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "ZZ"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "ZZ"), nullptr);
-
-        // Some more potential corner cases.
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, ""), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), ""), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "0"), nullptr);
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "0"), nullptr);
+TEST(GetProcAddressInternalTests, CheckDawnWireClientProcMapOrder) {
+    std::vector<const char*> names = dawn::wire::client::GetProcMapNamesForTesting();
+    for (size_t i = 1; i < names.size(); i++) {
+        ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
     }
-
-    // Test that GetProcAddress supports freestanding function that are handled specially
-    TEST_P(GetProcAddressTests, FreeStandingFunctions) {
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuGetProcAddress"),
-                  reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuGetProcAddress"),
-                  reinterpret_cast<WGPUProc>(mProcs.getProcAddress));
-
-        ASSERT_EQ(mProcs.getProcAddress(nullptr, "wgpuCreateInstance"),
-                  reinterpret_cast<WGPUProc>(mProcs.createInstance));
-        ASSERT_EQ(mProcs.getProcAddress(mDevice.Get(), "wgpuCreateInstance"),
-                  reinterpret_cast<WGPUProc>(mProcs.createInstance));
-    }
-
-    INSTANTIATE_TEST_SUITE_P(,
-                             GetProcAddressTests,
-                             testing::Values(DawnFlavor::Native, DawnFlavor::Wire),
-                             testing::PrintToStringParamName());
-
-    TEST(GetProcAddressInternalTests, CheckDawnNativeProcMapOrder) {
-        std::vector<const char*> names = dawn::native::GetProcMapNamesForTesting();
-        for (size_t i = 1; i < names.size(); i++) {
-            ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
-        }
-    }
-
-    TEST(GetProcAddressInternalTests, CheckDawnWireClientProcMapOrder) {
-        std::vector<const char*> names = dawn::wire::client::GetProcMapNamesForTesting();
-        for (size_t i = 1; i < names.size(); i++) {
-            ASSERT_LT(std::string(names[i - 1]), std::string(names[i]));
-        }
-    }
+}
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/LinkedListTests.cpp b/src/dawn/tests/unittests/LinkedListTests.cpp
index e63ef0a..1832159 100644
--- a/src/dawn/tests/unittests/LinkedListTests.cpp
+++ b/src/dawn/tests/unittests/LinkedListTests.cpp
@@ -12,16 +12,11 @@
 
 class Node : public LinkNode<Node> {
   public:
-    explicit Node(int id) : id_(id) {
-    }
+    explicit Node(int id) : id_(id) {}
 
-    int id() const {
-        return id_;
-    }
+    int id() const { return id_; }
 
-    void set_id(int id) {
-        id_ = id;
-    }
+    void set_id(int id) { id_ = id; }
 
   private:
     int id_;
@@ -29,8 +24,7 @@
 
 class MultipleInheritanceNodeBase {
   public:
-    MultipleInheritanceNodeBase() : field_taking_up_space_(0) {
-    }
+    MultipleInheritanceNodeBase() : field_taking_up_space_(0) {}
     int field_taking_up_space_;
 };
 
@@ -42,14 +36,11 @@
 
 class MovableNode : public LinkNode<MovableNode> {
   public:
-    explicit MovableNode(int id) : id_(id) {
-    }
+    explicit MovableNode(int id) : id_(id) {}
 
     MovableNode(MovableNode&&) = default;
 
-    int id() const {
-        return id_;
-    }
+    int id() const { return id_; }
 
   private:
     int id_;
diff --git a/src/dawn/tests/unittests/MathTests.cpp b/src/dawn/tests/unittests/MathTests.cpp
index a5ce0f4..d88e858 100644
--- a/src/dawn/tests/unittests/MathTests.cpp
+++ b/src/dawn/tests/unittests/MathTests.cpp
@@ -15,24 +15,24 @@
 #include <cmath>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/EnumClassBitmasks.h"
 #include "dawn/common/Math.h"
 #include "dawn/webgpu_cpp.h"
+#include "gtest/gtest.h"
 
 namespace wgpu {
-    enum class TestEnum {
-        A = 0x1,
-        B = 0x2,
-        C = 0x4,
-    };
+enum class TestEnum {
+    A = 0x1,
+    B = 0x2,
+    C = 0x4,
+};
 }  // namespace wgpu
 
 namespace dawn {
-    template <>
-    struct IsDawnBitmask<wgpu::TestEnum> {
-        static constexpr bool enable = true;
-    };
+template <>
+struct IsDawnBitmask<wgpu::TestEnum> {
+    static constexpr bool enable = true;
+};
 }  // namespace dawn
 
 // Tests for ScanForward
diff --git a/src/dawn/tests/unittests/PerStageTests.cpp b/src/dawn/tests/unittests/PerStageTests.cpp
index ccfdee4..4c39618 100644
--- a/src/dawn/tests/unittests/PerStageTests.cpp
+++ b/src/dawn/tests/unittests/PerStageTests.cpp
@@ -18,74 +18,74 @@
 
 namespace dawn::native {
 
-    // Tests for StageBit
-    TEST(PerStage, StageBit) {
-        ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex);
-        ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment);
-        ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute);
+// Tests for StageBit
+TEST(PerStage, StageBit) {
+    ASSERT_EQ(StageBit(SingleShaderStage::Vertex), wgpu::ShaderStage::Vertex);
+    ASSERT_EQ(StageBit(SingleShaderStage::Fragment), wgpu::ShaderStage::Fragment);
+    ASSERT_EQ(StageBit(SingleShaderStage::Compute), wgpu::ShaderStage::Compute);
+}
+
+// Basic test for the PerStage container
+TEST(PerStage, PerStage) {
+    PerStage<int> data;
+
+    // Store data using wgpu::ShaderStage
+    data[SingleShaderStage::Vertex] = 42;
+    data[SingleShaderStage::Fragment] = 3;
+    data[SingleShaderStage::Compute] = -1;
+
+    // Load it using wgpu::ShaderStage
+    ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42);
+    ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3);
+    ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1);
+}
+
+// Test IterateStages with kAllStages
+TEST(PerStage, IterateAllStages) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
+
+    for (auto stage : IterateStages(kAllStages)) {
+        counts[stage]++;
     }
 
-    // Basic test for the PerStage container
-    TEST(PerStage, PerStage) {
-        PerStage<int> data;
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1);
+}
 
-        // Store data using wgpu::ShaderStage
-        data[SingleShaderStage::Vertex] = 42;
-        data[SingleShaderStage::Fragment] = 3;
-        data[SingleShaderStage::Compute] = -1;
+// Test IterateStages with one stage
+TEST(PerStage, IterateOneStage) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
 
-        // Load it using wgpu::ShaderStage
-        ASSERT_EQ(data[wgpu::ShaderStage::Vertex], 42);
-        ASSERT_EQ(data[wgpu::ShaderStage::Fragment], 3);
-        ASSERT_EQ(data[wgpu::ShaderStage::Compute], -1);
+    for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) {
+        counts[stage]++;
     }
 
-    // Test IterateStages with kAllStages
-    TEST(PerStage, IterateAllStages) {
-        PerStage<int> counts;
-        counts[SingleShaderStage::Vertex] = 0;
-        counts[SingleShaderStage::Fragment] = 0;
-        counts[SingleShaderStage::Compute] = 0;
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
+}
 
-        for (auto stage : IterateStages(kAllStages)) {
-            counts[stage]++;
-        }
+// Test IterateStages with no stage
+TEST(PerStage, IterateNoStages) {
+    PerStage<int> counts;
+    counts[SingleShaderStage::Vertex] = 0;
+    counts[SingleShaderStage::Fragment] = 0;
+    counts[SingleShaderStage::Compute] = 0;
 
-        ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 1);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 1);
+    for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) {
+        counts[stage]++;
     }
 
-    // Test IterateStages with one stage
-    TEST(PerStage, IterateOneStage) {
-        PerStage<int> counts;
-        counts[SingleShaderStage::Vertex] = 0;
-        counts[SingleShaderStage::Fragment] = 0;
-        counts[SingleShaderStage::Compute] = 0;
-
-        for (auto stage : IterateStages(wgpu::ShaderStage::Fragment)) {
-            counts[stage]++;
-        }
-
-        ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 1);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
-    }
-
-    // Test IterateStages with no stage
-    TEST(PerStage, IterateNoStages) {
-        PerStage<int> counts;
-        counts[SingleShaderStage::Vertex] = 0;
-        counts[SingleShaderStage::Fragment] = 0;
-        counts[SingleShaderStage::Compute] = 0;
-
-        for (auto stage : IterateStages(wgpu::ShaderStage::Fragment & wgpu::ShaderStage::Vertex)) {
-            counts[stage]++;
-        }
-
-        ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0);
-        ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
-    }
+    ASSERT_EQ(counts[wgpu::ShaderStage::Vertex], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Fragment], 0);
+    ASSERT_EQ(counts[wgpu::ShaderStage::Compute], 0);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/PerThreadProcTests.cpp b/src/dawn/tests/unittests/PerThreadProcTests.cpp
index f275456..65d64d6 100644
--- a/src/dawn/tests/unittests/PerThreadProcTests.cpp
+++ b/src/dawn/tests/unittests/PerThreadProcTests.cpp
@@ -27,8 +27,7 @@
   public:
     PerThreadProcTests()
         : mNativeInstance(dawn::native::InstanceBase::Create()),
-          mNativeAdapter(mNativeInstance.Get()) {
-    }
+          mNativeAdapter(mNativeInstance.Get()) {}
     ~PerThreadProcTests() override = default;
 
   protected:
diff --git a/src/dawn/tests/unittests/PlacementAllocatedTests.cpp b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
index a031778..b800500 100644
--- a/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
+++ b/src/dawn/tests/unittests/PlacementAllocatedTests.cpp
@@ -23,39 +23,31 @@
 
 namespace {
 
-    enum class DestructedClass {
-        Foo,
-        Bar,
-    };
+enum class DestructedClass {
+    Foo,
+    Bar,
+};
 
-    class MockDestructor {
-      public:
-        MOCK_METHOD(void, Call, (void*, DestructedClass));
-    };
+class MockDestructor {
+  public:
+    MOCK_METHOD(void, Call, (void*, DestructedClass));
+};
 
-    std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
+std::unique_ptr<StrictMock<MockDestructor>> mockDestructor;
 
-    class PlacementAllocatedTests : public testing::Test {
-        void SetUp() override {
-            mockDestructor = std::make_unique<StrictMock<MockDestructor>>();
-        }
+class PlacementAllocatedTests : public testing::Test {
+    void SetUp() override { mockDestructor = std::make_unique<StrictMock<MockDestructor>>(); }
 
-        void TearDown() override {
-            mockDestructor = nullptr;
-        }
-    };
+    void TearDown() override { mockDestructor = nullptr; }
+};
 
-    struct Foo : PlacementAllocated {
-        virtual ~Foo() {
-            mockDestructor->Call(this, DestructedClass::Foo);
-        }
-    };
+struct Foo : PlacementAllocated {
+    virtual ~Foo() { mockDestructor->Call(this, DestructedClass::Foo); }
+};
 
-    struct Bar : Foo {
-        ~Bar() override {
-            mockDestructor->Call(this, DestructedClass::Bar);
-        }
-    };
+struct Bar : Foo {
+    ~Bar() override { mockDestructor->Call(this, DestructedClass::Bar); }
+};
 }  // namespace
 
 // Test that deletion calls the destructor and does not free memory.
diff --git a/src/dawn/tests/unittests/RefBaseTests.cpp b/src/dawn/tests/unittests/RefBaseTests.cpp
index 0a34aca..dafdcf7 100644
--- a/src/dawn/tests/unittests/RefBaseTests.cpp
+++ b/src/dawn/tests/unittests/RefBaseTests.cpp
@@ -19,99 +19,85 @@
 #include "gmock/gmock.h"
 
 namespace {
-    using Id = uint32_t;
+using Id = uint32_t;
 
-    enum class Action {
-        kReference,
-        kRelease,
-        kAssign,
-        kMarker,
-    };
+enum class Action {
+    kReference,
+    kRelease,
+    kAssign,
+    kMarker,
+};
 
-    struct Event {
-        Action action;
-        Id thisId = 0;
-        Id otherId = 0;
-    };
+struct Event {
+    Action action;
+    Id thisId = 0;
+    Id otherId = 0;
+};
 
-    std::ostream& operator<<(std::ostream& os, const Event& event) {
-        switch (event.action) {
-            case Action::kReference:
-                os << "Reference " << event.thisId;
-                break;
-            case Action::kRelease:
-                os << "Release " << event.thisId;
-                break;
-            case Action::kAssign:
-                os << "Assign " << event.thisId << " <- " << event.otherId;
-                break;
-            case Action::kMarker:
-                os << "Marker " << event.thisId;
-                break;
+std::ostream& operator<<(std::ostream& os, const Event& event) {
+    switch (event.action) {
+        case Action::kReference:
+            os << "Reference " << event.thisId;
+            break;
+        case Action::kRelease:
+            os << "Release " << event.thisId;
+            break;
+        case Action::kAssign:
+            os << "Assign " << event.thisId << " <- " << event.otherId;
+            break;
+        case Action::kMarker:
+            os << "Marker " << event.thisId;
+            break;
+    }
+    return os;
+}
+
+bool operator==(const Event& a, const Event& b) {
+    return a.action == b.action && a.thisId == b.thisId && a.otherId == b.otherId;
+}
+
+using Events = std::vector<Event>;
+
+struct RefTracker {
+    explicit constexpr RefTracker(nullptr_t) : mId(0), mEvents(nullptr) {}
+
+    constexpr RefTracker(const RefTracker& other) = default;
+
+    RefTracker(Id id, Events* events) : mId(id), mEvents(events) {}
+
+    void Reference() const { mEvents->emplace_back(Event{Action::kReference, mId}); }
+
+    void Release() const { mEvents->emplace_back(Event{Action::kRelease, mId}); }
+
+    RefTracker& operator=(const RefTracker& other) {
+        if (mEvents || other.mEvents) {
+            Events* events = mEvents ? mEvents : other.mEvents;
+            events->emplace_back(Event{Action::kAssign, mId, other.mId});
         }
-        return os;
+        mId = other.mId;
+        mEvents = other.mEvents;
+        return *this;
     }
 
-    bool operator==(const Event& a, const Event& b) {
-        return a.action == b.action && a.thisId == b.thisId && a.otherId == b.otherId;
-    }
+    bool operator==(const RefTracker& other) const { return mId == other.mId; }
 
-    using Events = std::vector<Event>;
+    bool operator!=(const RefTracker& other) const { return mId != other.mId; }
 
-    struct RefTracker {
-        explicit constexpr RefTracker(nullptr_t) : mId(0), mEvents(nullptr) {
-        }
+    Id mId;
+    Events* mEvents;
+};
 
-        constexpr RefTracker(const RefTracker& other) = default;
+struct RefTrackerTraits {
+    static constexpr RefTracker kNullValue{nullptr};
 
-        RefTracker(Id id, Events* events) : mId(id), mEvents(events) {
-        }
+    static void Reference(const RefTracker& handle) { handle.Reference(); }
 
-        void Reference() const {
-            mEvents->emplace_back(Event{Action::kReference, mId});
-        }
+    static void Release(const RefTracker& handle) { handle.Release(); }
+};
 
-        void Release() const {
-            mEvents->emplace_back(Event{Action::kRelease, mId});
-        }
+constexpr RefTracker RefTrackerTraits::kNullValue;
 
-        RefTracker& operator=(const RefTracker& other) {
-            if (mEvents || other.mEvents) {
-                Events* events = mEvents ? mEvents : other.mEvents;
-                events->emplace_back(Event{Action::kAssign, mId, other.mId});
-            }
-            mId = other.mId;
-            mEvents = other.mEvents;
-            return *this;
-        }
-
-        bool operator==(const RefTracker& other) const {
-            return mId == other.mId;
-        }
-
-        bool operator!=(const RefTracker& other) const {
-            return mId != other.mId;
-        }
-
-        Id mId;
-        Events* mEvents;
-    };
-
-    struct RefTrackerTraits {
-        static constexpr RefTracker kNullValue{nullptr};
-
-        static void Reference(const RefTracker& handle) {
-            handle.Reference();
-        }
-
-        static void Release(const RefTracker& handle) {
-            handle.Release();
-        }
-    };
-
-    constexpr RefTracker RefTrackerTraits::kNullValue;
-
-    using Ref = RefBase<RefTracker, RefTrackerTraits>;
+using Ref = RefBase<RefTracker, RefTrackerTraits>;
 }  // namespace
 
 TEST(RefBase, Acquire) {
diff --git a/src/dawn/tests/unittests/RefCountedTests.cpp b/src/dawn/tests/unittests/RefCountedTests.cpp
index 503b701..a1c5fca 100644
--- a/src/dawn/tests/unittests/RefCountedTests.cpp
+++ b/src/dawn/tests/unittests/RefCountedTests.cpp
@@ -20,14 +20,11 @@
 
 class RCTest : public RefCounted {
   public:
-    RCTest() : RefCounted() {
-    }
+    RCTest() : RefCounted() {}
 
-    explicit RCTest(uint64_t payload) : RefCounted(payload) {
-    }
+    explicit RCTest(uint64_t payload) : RefCounted(payload) {}
 
-    explicit RCTest(bool* deleted) : mDeleted(deleted) {
-    }
+    explicit RCTest(bool* deleted) : mDeleted(deleted) {}
 
     ~RCTest() override {
         if (mDeleted != nullptr) {
@@ -35,9 +32,7 @@
         }
     }
 
-    RCTest* GetThis() {
-        return this;
-    }
+    RCTest* GetThis() { return this; }
 
   private:
     bool* mDeleted = nullptr;
diff --git a/src/dawn/tests/unittests/ResultTests.cpp b/src/dawn/tests/unittests/ResultTests.cpp
index ded13bf..a588631 100644
--- a/src/dawn/tests/unittests/ResultTests.cpp
+++ b/src/dawn/tests/unittests/ResultTests.cpp
@@ -22,369 +22,365 @@
 
 namespace {
 
-    template <typename T, typename E>
-    void TestError(Result<T, E>* result, E expectedError) {
-        EXPECT_TRUE(result->IsError());
-        EXPECT_FALSE(result->IsSuccess());
+template <typename T, typename E>
+void TestError(Result<T, E>* result, E expectedError) {
+    EXPECT_TRUE(result->IsError());
+    EXPECT_FALSE(result->IsSuccess());
 
-        std::unique_ptr<E> storedError = result->AcquireError();
-        EXPECT_EQ(*storedError, expectedError);
-    }
+    std::unique_ptr<E> storedError = result->AcquireError();
+    EXPECT_EQ(*storedError, expectedError);
+}
 
-    template <typename T, typename E>
-    void TestSuccess(Result<T, E>* result, T expectedSuccess) {
-        EXPECT_FALSE(result->IsError());
-        EXPECT_TRUE(result->IsSuccess());
+template <typename T, typename E>
+void TestSuccess(Result<T, E>* result, T expectedSuccess) {
+    EXPECT_FALSE(result->IsError());
+    EXPECT_TRUE(result->IsSuccess());
 
-        const T storedSuccess = result->AcquireSuccess();
-        EXPECT_EQ(storedSuccess, expectedSuccess);
+    const T storedSuccess = result->AcquireSuccess();
+    EXPECT_EQ(storedSuccess, expectedSuccess);
 
-        // Once the success is acquired, result has an empty
-        // payload and is neither in the success nor error state.
-        EXPECT_FALSE(result->IsError());
-        EXPECT_FALSE(result->IsSuccess());
-    }
+    // Once the success is acquired, result has an empty
+    // payload and is neither in the success nor error state.
+    EXPECT_FALSE(result->IsError());
+    EXPECT_FALSE(result->IsSuccess());
+}
 
-    static int placeholderError = 0xbeef;
-    static float placeholderSuccess = 42.0f;
-    static const float placeholderConstSuccess = 42.0f;
+static int placeholderError = 0xbeef;
+static float placeholderSuccess = 42.0f;
+static const float placeholderConstSuccess = 42.0f;
 
-    class AClass : public RefCounted {
-      public:
-        int a = 0;
+class AClass : public RefCounted {
+  public:
+    int a = 0;
+};
+
+// Tests using the following overload of TestSuccess make
+// local Ref instances to placeholderSuccessObj. Tests should
+// ensure any local Ref objects made along the way continue
+// to point to placeholderSuccessObj.
+template <typename T, typename E>
+void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) {
+    EXPECT_FALSE(result->IsError());
+    EXPECT_TRUE(result->IsSuccess());
+
+    // AClass starts with a reference count of 1 and stored
+    // on the stack in the caller. The result parameter should
+    // hold the only other reference to the object.
+    EXPECT_EQ(expectedSuccess->GetRefCountForTesting(), 2u);
+
+    const Ref<T> storedSuccess = result->AcquireSuccess();
+    EXPECT_EQ(storedSuccess.Get(), expectedSuccess);
+
+    // Once the success is acquired, result has an empty
+    // payload and is neither in the success nor error state.
+    EXPECT_FALSE(result->IsError());
+    EXPECT_FALSE(result->IsSuccess());
+
+    // Once we call AcquireSuccess, result no longer stores
+    // the object. storedSuccess should contain the only other
+    // reference to the object.
+    EXPECT_EQ(storedSuccess->GetRefCountForTesting(), 2u);
+}
+
+// Result<void, E*>
+
+// Test constructing an error Result<void, E>
+TEST(ResultOnlyPointerError, ConstructingError) {
+    Result<void, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
+
+// Test moving an error Result<void, E>
+TEST(ResultOnlyPointerError, MovingError) {
+    Result<void, int> result(std::make_unique<int>(placeholderError));
+    Result<void, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
+
+// Test returning an error Result<void, E>
+TEST(ResultOnlyPointerError, ReturningError) {
+    auto CreateError = []() -> Result<void, int> {
+        return {std::make_unique<int>(placeholderError)};
     };
 
-    // Tests using the following overload of TestSuccess make
-    // local Ref instances to placeholderSuccessObj. Tests should
-    // ensure any local Ref objects made along the way continue
-    // to point to placeholderSuccessObj.
-    template <typename T, typename E>
-    void TestSuccess(Result<Ref<T>, E>* result, T* expectedSuccess) {
-        EXPECT_FALSE(result->IsError());
-        EXPECT_TRUE(result->IsSuccess());
+    Result<void, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
 
-        // AClass starts with a reference count of 1 and stored
-        // on the stack in the caller. The result parameter should
-        // hold the only other reference to the object.
-        EXPECT_EQ(expectedSuccess->GetRefCountForTesting(), 2u);
+// Test constructing a success Result<void, E>
+TEST(ResultOnlyPointerError, ConstructingSuccess) {
+    Result<void, int> result;
+    EXPECT_TRUE(result.IsSuccess());
+    EXPECT_FALSE(result.IsError());
+}
 
-        const Ref<T> storedSuccess = result->AcquireSuccess();
-        EXPECT_EQ(storedSuccess.Get(), expectedSuccess);
+// Test moving a success Result<void, E>
+TEST(ResultOnlyPointerError, MovingSuccess) {
+    Result<void, int> result;
+    Result<void, int> movedResult(std::move(result));
+    EXPECT_TRUE(movedResult.IsSuccess());
+    EXPECT_FALSE(movedResult.IsError());
+}
 
-        // Once the success is acquired, result has an empty
-        // payload and is neither in the success nor error state.
-        EXPECT_FALSE(result->IsError());
-        EXPECT_FALSE(result->IsSuccess());
+// Test returning a success Result<void, E>
+TEST(ResultOnlyPointerError, ReturningSuccess) {
+    auto CreateError = []() -> Result<void, int> { return {}; };
 
-        // Once we call AcquireSuccess, result no longer stores
-        // the object. storedSuccess should contain the only other
-        // reference to the object.
-        EXPECT_EQ(storedSuccess->GetRefCountForTesting(), 2u);
-    }
+    Result<void, int> result = CreateError();
+    EXPECT_TRUE(result.IsSuccess());
+    EXPECT_FALSE(result.IsError());
+}
 
-    // Result<void, E*>
+// Result<T*, E*>
 
-    // Test constructing an error Result<void, E>
-    TEST(ResultOnlyPointerError, ConstructingError) {
-        Result<void, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
+// Test constructing an error Result<T*, E>
+TEST(ResultBothPointer, ConstructingError) {
+    Result<float*, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
 
-    // Test moving an error Result<void, E>
-    TEST(ResultOnlyPointerError, MovingError) {
-        Result<void, int> result(std::make_unique<int>(placeholderError));
-        Result<void, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
+// Test moving an error Result<T*, E>
+TEST(ResultBothPointer, MovingError) {
+    Result<float*, int> result(std::make_unique<int>(placeholderError));
+    Result<float*, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
 
-    // Test returning an error Result<void, E>
-    TEST(ResultOnlyPointerError, ReturningError) {
-        auto CreateError = []() -> Result<void, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<void, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<void, E>
-    TEST(ResultOnlyPointerError, ConstructingSuccess) {
-        Result<void, int> result;
-        EXPECT_TRUE(result.IsSuccess());
-        EXPECT_FALSE(result.IsError());
-    }
-
-    // Test moving a success Result<void, E>
-    TEST(ResultOnlyPointerError, MovingSuccess) {
-        Result<void, int> result;
-        Result<void, int> movedResult(std::move(result));
-        EXPECT_TRUE(movedResult.IsSuccess());
-        EXPECT_FALSE(movedResult.IsError());
-    }
-
-    // Test returning a success Result<void, E>
-    TEST(ResultOnlyPointerError, ReturningSuccess) {
-        auto CreateError = []() -> Result<void, int> { return {}; };
-
-        Result<void, int> result = CreateError();
-        EXPECT_TRUE(result.IsSuccess());
-        EXPECT_FALSE(result.IsError());
-    }
-
-    // Result<T*, E*>
-
-    // Test constructing an error Result<T*, E>
-    TEST(ResultBothPointer, ConstructingError) {
-        Result<float*, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
-
-    // Test moving an error Result<T*, E>
-    TEST(ResultBothPointer, MovingError) {
-        Result<float*, int> result(std::make_unique<int>(placeholderError));
-        Result<float*, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
-
-    // Test returning an error Result<T*, E>
-    TEST(ResultBothPointer, ReturningError) {
-        auto CreateError = []() -> Result<float*, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<float*, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<T*, E>
-    TEST(ResultBothPointer, ConstructingSuccess) {
-        Result<float*, int> result(&placeholderSuccess);
-        TestSuccess(&result, &placeholderSuccess);
-    }
-
-    // Test moving a success Result<T*, E>
-    TEST(ResultBothPointer, MovingSuccess) {
-        Result<float*, int> result(&placeholderSuccess);
-        Result<float*, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, &placeholderSuccess);
-    }
-
-    // Test returning a success Result<T*, E>
-    TEST(ResultBothPointer, ReturningSuccess) {
-        auto CreateSuccess = []() -> Result<float*, int*> { return {&placeholderSuccess}; };
-
-        Result<float*, int*> result = CreateSuccess();
-        TestSuccess(&result, &placeholderSuccess);
-    }
-
-    // Tests converting from a Result<TChild*, E>
-    TEST(ResultBothPointer, ConversionFromChildClass) {
-        struct T {
-            int a;
-        };
-        struct TChild : T {};
-
-        TChild child;
-        T* childAsT = &child;
-        {
-            Result<T*, int> result(&child);
-            TestSuccess(&result, childAsT);
-        }
-        {
-            Result<TChild*, int> resultChild(&child);
-            Result<T*, int> result(std::move(resultChild));
-            TestSuccess(&result, childAsT);
-        }
-        {
-            Result<TChild*, int> resultChild(&child);
-            Result<T*, int> result = std::move(resultChild);
-            TestSuccess(&result, childAsT);
-        }
-    }
-
-    // Result<const T*, E>
-
-    // Test constructing an error Result<const T*, E>
-    TEST(ResultBothPointerWithConstResult, ConstructingError) {
-        Result<const float*, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
-
-    // Test moving an error Result<const T*, E>
-    TEST(ResultBothPointerWithConstResult, MovingError) {
-        Result<const float*, int> result(std::make_unique<int>(placeholderError));
-        Result<const float*, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
-
-    // Test returning an error Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, ReturningError) {
-        auto CreateError = []() -> Result<const float*, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<const float*, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, ConstructingSuccess) {
-        Result<const float*, int> result(&placeholderConstSuccess);
-        TestSuccess(&result, &placeholderConstSuccess);
-    }
-
-    // Test moving a success Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, MovingSuccess) {
-        Result<const float*, int> result(&placeholderConstSuccess);
-        Result<const float*, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, &placeholderConstSuccess);
-    }
-
-    // Test returning a success Result<const T*, E*>
-    TEST(ResultBothPointerWithConstResult, ReturningSuccess) {
-        auto CreateSuccess = []() -> Result<const float*, int> {
-            return {&placeholderConstSuccess};
-        };
-
-        Result<const float*, int> result = CreateSuccess();
-        TestSuccess(&result, &placeholderConstSuccess);
-    }
-
-    // Result<Ref<T>, E>
-
-    // Test constructing an error Result<Ref<T>, E>
-    TEST(ResultRefT, ConstructingError) {
-        Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
-
-    // Test moving an error Result<Ref<T>, E>
-    TEST(ResultRefT, MovingError) {
-        Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
-        Result<Ref<AClass>, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
-
-    // Test returning an error Result<Ref<T>, E>
-    TEST(ResultRefT, ReturningError) {
-        auto CreateError = []() -> Result<Ref<AClass>, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
-
-        Result<Ref<AClass>, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
-
-    // Test constructing a success Result<Ref<T>, E>
-    TEST(ResultRefT, ConstructingSuccess) {
-        AClass success;
-
-        Ref<AClass> refObj(&success);
-        Result<Ref<AClass>, int> result(std::move(refObj));
-        TestSuccess(&result, &success);
-    }
-
-    // Test moving a success Result<Ref<T>, E>
-    TEST(ResultRefT, MovingSuccess) {
-        AClass success;
-
-        Ref<AClass> refObj(&success);
-        Result<Ref<AClass>, int> result(std::move(refObj));
-        Result<Ref<AClass>, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, &success);
-    }
-
-    // Test returning a success Result<Ref<T>, E>
-    TEST(ResultRefT, ReturningSuccess) {
-        AClass success;
-        auto CreateSuccess = [&success]() -> Result<Ref<AClass>, int> {
-            return Ref<AClass>(&success);
-        };
-
-        Result<Ref<AClass>, int> result = CreateSuccess();
-        TestSuccess(&result, &success);
-    }
-
-    class OtherClass {
-      public:
-        int a = 0;
+// Test returning an error Result<T*, E>
+TEST(ResultBothPointer, ReturningError) {
+    auto CreateError = []() -> Result<float*, int> {
+        return {std::make_unique<int>(placeholderError)};
     };
-    class Base : public RefCounted {};
-    class Child : public OtherClass, public Base {};
 
-    // Test constructing a Result<Ref<TChild>, E>
-    TEST(ResultRefT, ConversionFromChildConstructor) {
-        Child child;
-        Ref<Child> refChild(&child);
+    Result<float*, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
 
-        Result<Ref<Base>, int> result(std::move(refChild));
-        TestSuccess<Base>(&result, &child);
+// Test constructing a success Result<T*, E>
+TEST(ResultBothPointer, ConstructingSuccess) {
+    Result<float*, int> result(&placeholderSuccess);
+    TestSuccess(&result, &placeholderSuccess);
+}
+
+// Test moving a success Result<T*, E>
+TEST(ResultBothPointer, MovingSuccess) {
+    Result<float*, int> result(&placeholderSuccess);
+    Result<float*, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, &placeholderSuccess);
+}
+
+// Test returning a success Result<T*, E>
+TEST(ResultBothPointer, ReturningSuccess) {
+    auto CreateSuccess = []() -> Result<float*, int*> { return {&placeholderSuccess}; };
+
+    Result<float*, int*> result = CreateSuccess();
+    TestSuccess(&result, &placeholderSuccess);
+}
+
+// Tests converting from a Result<TChild*, E>
+TEST(ResultBothPointer, ConversionFromChildClass) {
+    struct T {
+        int a;
+    };
+    struct TChild : T {};
+
+    TChild child;
+    T* childAsT = &child;
+    {
+        Result<T*, int> result(&child);
+        TestSuccess(&result, childAsT);
     }
-
-    // Test copy constructing Result<Ref<TChild>, E>
-    TEST(ResultRefT, ConversionFromChildCopyConstructor) {
-        Child child;
-        Ref<Child> refChild(&child);
-
-        Result<Ref<Child>, int> resultChild(std::move(refChild));
-        Result<Ref<Base>, int> result(std::move(resultChild));
-        TestSuccess<Base>(&result, &child);
+    {
+        Result<TChild*, int> resultChild(&child);
+        Result<T*, int> result(std::move(resultChild));
+        TestSuccess(&result, childAsT);
     }
-
-    // Test assignment operator for Result<Ref<TChild>, E>
-    TEST(ResultRefT, ConversionFromChildAssignmentOperator) {
-        Child child;
-        Ref<Child> refChild(&child);
-
-        Result<Ref<Child>, int> resultChild(std::move(refChild));
-        Result<Ref<Base>, int> result = std::move(resultChild);
-        TestSuccess<Base>(&result, &child);
+    {
+        Result<TChild*, int> resultChild(&child);
+        Result<T*, int> result = std::move(resultChild);
+        TestSuccess(&result, childAsT);
     }
+}
 
-    // Result<T, E>
+// Result<const T*, E>
 
-    // Test constructing an error Result<T, E>
-    TEST(ResultGeneric, ConstructingError) {
-        Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
-        TestError(&result, placeholderError);
-    }
+// Test constructing an error Result<const T*, E>
+TEST(ResultBothPointerWithConstResult, ConstructingError) {
+    Result<const float*, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
 
-    // Test moving an error Result<T, E>
-    TEST(ResultGeneric, MovingError) {
-        Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
-        Result<std::vector<float>, int> movedResult(std::move(result));
-        TestError(&movedResult, placeholderError);
-    }
+// Test moving an error Result<const T*, E>
+TEST(ResultBothPointerWithConstResult, MovingError) {
+    Result<const float*, int> result(std::make_unique<int>(placeholderError));
+    Result<const float*, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
 
-    // Test returning an error Result<T, E>
-    TEST(ResultGeneric, ReturningError) {
-        auto CreateError = []() -> Result<std::vector<float>, int> {
-            return {std::make_unique<int>(placeholderError)};
-        };
+// Test returning an error Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, ReturningError) {
+    auto CreateError = []() -> Result<const float*, int> {
+        return {std::make_unique<int>(placeholderError)};
+    };
 
-        Result<std::vector<float>, int> result = CreateError();
-        TestError(&result, placeholderError);
-    }
+    Result<const float*, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
 
-    // Test constructing a success Result<T, E>
-    TEST(ResultGeneric, ConstructingSuccess) {
-        Result<std::vector<float>, int> result({1.0f});
-        TestSuccess(&result, {1.0f});
-    }
+// Test constructing a success Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, ConstructingSuccess) {
+    Result<const float*, int> result(&placeholderConstSuccess);
+    TestSuccess(&result, &placeholderConstSuccess);
+}
 
-    // Test moving a success Result<T, E>
-    TEST(ResultGeneric, MovingSuccess) {
-        Result<std::vector<float>, int> result({1.0f});
-        Result<std::vector<float>, int> movedResult(std::move(result));
-        TestSuccess(&movedResult, {1.0f});
-    }
+// Test moving a success Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, MovingSuccess) {
+    Result<const float*, int> result(&placeholderConstSuccess);
+    Result<const float*, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, &placeholderConstSuccess);
+}
 
-    // Test returning a success Result<T, E>
-    TEST(ResultGeneric, ReturningSuccess) {
-        auto CreateSuccess = []() -> Result<std::vector<float>, int> { return {{1.0f}}; };
+// Test returning a success Result<const T*, E*>
+TEST(ResultBothPointerWithConstResult, ReturningSuccess) {
+    auto CreateSuccess = []() -> Result<const float*, int> { return {&placeholderConstSuccess}; };
 
-        Result<std::vector<float>, int> result = CreateSuccess();
-        TestSuccess(&result, {1.0f});
-    }
+    Result<const float*, int> result = CreateSuccess();
+    TestSuccess(&result, &placeholderConstSuccess);
+}
+
+// Result<Ref<T>, E>
+
+// Test constructing an error Result<Ref<T>, E>
+TEST(ResultRefT, ConstructingError) {
+    Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
+
+// Test moving an error Result<Ref<T>, E>
+TEST(ResultRefT, MovingError) {
+    Result<Ref<AClass>, int> result(std::make_unique<int>(placeholderError));
+    Result<Ref<AClass>, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
+
+// Test returning an error Result<Ref<T>, E>
+TEST(ResultRefT, ReturningError) {
+    auto CreateError = []() -> Result<Ref<AClass>, int> {
+        return {std::make_unique<int>(placeholderError)};
+    };
+
+    Result<Ref<AClass>, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
+
+// Test constructing a success Result<Ref<T>, E>
+TEST(ResultRefT, ConstructingSuccess) {
+    AClass success;
+
+    Ref<AClass> refObj(&success);
+    Result<Ref<AClass>, int> result(std::move(refObj));
+    TestSuccess(&result, &success);
+}
+
+// Test moving a success Result<Ref<T>, E>
+TEST(ResultRefT, MovingSuccess) {
+    AClass success;
+
+    Ref<AClass> refObj(&success);
+    Result<Ref<AClass>, int> result(std::move(refObj));
+    Result<Ref<AClass>, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, &success);
+}
+
+// Test returning a success Result<Ref<T>, E>
+TEST(ResultRefT, ReturningSuccess) {
+    AClass success;
+    auto CreateSuccess = [&success]() -> Result<Ref<AClass>, int> { return Ref<AClass>(&success); };
+
+    Result<Ref<AClass>, int> result = CreateSuccess();
+    TestSuccess(&result, &success);
+}
+
+class OtherClass {
+  public:
+    int a = 0;
+};
+class Base : public RefCounted {};
+class Child : public OtherClass, public Base {};
+
+// Test constructing a Result<Ref<TChild>, E>
+TEST(ResultRefT, ConversionFromChildConstructor) {
+    Child child;
+    Ref<Child> refChild(&child);
+
+    Result<Ref<Base>, int> result(std::move(refChild));
+    TestSuccess<Base>(&result, &child);
+}
+
+// Test copy constructing Result<Ref<TChild>, E>
+TEST(ResultRefT, ConversionFromChildCopyConstructor) {
+    Child child;
+    Ref<Child> refChild(&child);
+
+    Result<Ref<Child>, int> resultChild(std::move(refChild));
+    Result<Ref<Base>, int> result(std::move(resultChild));
+    TestSuccess<Base>(&result, &child);
+}
+
+// Test assignment operator for Result<Ref<TChild>, E>
+TEST(ResultRefT, ConversionFromChildAssignmentOperator) {
+    Child child;
+    Ref<Child> refChild(&child);
+
+    Result<Ref<Child>, int> resultChild(std::move(refChild));
+    Result<Ref<Base>, int> result = std::move(resultChild);
+    TestSuccess<Base>(&result, &child);
+}
+
+// Result<T, E>
+
+// Test constructing an error Result<T, E>
+TEST(ResultGeneric, ConstructingError) {
+    Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
+    TestError(&result, placeholderError);
+}
+
+// Test moving an error Result<T, E>
+TEST(ResultGeneric, MovingError) {
+    Result<std::vector<float>, int> result(std::make_unique<int>(placeholderError));
+    Result<std::vector<float>, int> movedResult(std::move(result));
+    TestError(&movedResult, placeholderError);
+}
+
+// Test returning an error Result<T, E>
+TEST(ResultGeneric, ReturningError) {
+    auto CreateError = []() -> Result<std::vector<float>, int> {
+        return {std::make_unique<int>(placeholderError)};
+    };
+
+    Result<std::vector<float>, int> result = CreateError();
+    TestError(&result, placeholderError);
+}
+
+// Test constructing a success Result<T, E>
+TEST(ResultGeneric, ConstructingSuccess) {
+    Result<std::vector<float>, int> result({1.0f});
+    TestSuccess(&result, {1.0f});
+}
+
+// Test moving a success Result<T, E>
+TEST(ResultGeneric, MovingSuccess) {
+    Result<std::vector<float>, int> result({1.0f});
+    Result<std::vector<float>, int> movedResult(std::move(result));
+    TestSuccess(&movedResult, {1.0f});
+}
+
+// Test returning a success Result<T, E>
+TEST(ResultGeneric, ReturningSuccess) {
+    auto CreateSuccess = []() -> Result<std::vector<float>, int> { return {{1.0f}}; };
+
+    Result<std::vector<float>, int> result = CreateSuccess();
+    TestSuccess(&result, {1.0f});
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
index b2a4f10..da6a738 100644
--- a/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/RingBufferAllocatorTests.cpp
@@ -19,161 +19,159 @@
 
 namespace dawn::native {
 
-    constexpr uint64_t RingBufferAllocator::kInvalidOffset;
+constexpr uint64_t RingBufferAllocator::kInvalidOffset;
 
-    // Number of basic tests for Ringbuffer
-    TEST(RingBufferAllocatorTests, BasicTest) {
-        constexpr uint64_t sizeInBytes = 64000;
-        RingBufferAllocator allocator(sizeInBytes);
+// Number of basic tests for Ringbuffer
+TEST(RingBufferAllocatorTests, BasicTest) {
+    constexpr uint64_t sizeInBytes = 64000;
+    RingBufferAllocator allocator(sizeInBytes);
 
-        // Ensure no requests exist on empty buffer.
-        EXPECT_TRUE(allocator.Empty());
+    // Ensure no requests exist on empty buffer.
+    EXPECT_TRUE(allocator.Empty());
 
-        ASSERT_EQ(allocator.GetSize(), sizeInBytes);
+    ASSERT_EQ(allocator.GetSize(), sizeInBytes);
 
-        // Ensure failure upon sub-allocating an oversized request.
-        ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)),
-                  RingBufferAllocator::kInvalidOffset);
+    // Ensure failure upon sub-allocating an oversized request.
+    ASSERT_EQ(allocator.Allocate(sizeInBytes + 1, ExecutionSerial(0)),
+              RingBufferAllocator::kInvalidOffset);
 
-        // Fill the entire buffer with two requests of equal size.
-        ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u);
-        ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u);
+    // Fill the entire buffer with two requests of equal size.
+    ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(1)), 0u);
+    ASSERT_EQ(allocator.Allocate(sizeInBytes / 2, ExecutionSerial(2)), 32000u);
 
-        // Ensure the buffer is full.
-        ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset);
+    // Ensure the buffer is full.
+    ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(3)), RingBufferAllocator::kInvalidOffset);
+}
+
+// Tests that several ringbuffer allocations do not fail.
+TEST(RingBufferAllocatorTests, RingBufferManyAlloc) {
+    constexpr uint64_t maxNumOfFrames = 64000;
+    constexpr uint64_t frameSizeInBytes = 4;
+
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+
+    size_t offset = 0;
+    for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) {
+        offset = allocator.Allocate(frameSizeInBytes, i);
+        ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes);
     }
+}
 
-    // Tests that several ringbuffer allocations do not fail.
-    TEST(RingBufferAllocatorTests, RingBufferManyAlloc) {
-        constexpr uint64_t maxNumOfFrames = 64000;
-        constexpr uint64_t frameSizeInBytes = 4;
+// Tests ringbuffer sub-allocations of the same serial are correctly tracked.
+TEST(RingBufferAllocatorTests, AllocInSameFrame) {
+    constexpr uint64_t maxNumOfFrames = 3;
+    constexpr uint64_t frameSizeInBytes = 4;
 
-        RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
 
-        size_t offset = 0;
-        for (ExecutionSerial i(0); i < ExecutionSerial(maxNumOfFrames); ++i) {
-            offset = allocator.Allocate(frameSizeInBytes, i);
-            ASSERT_EQ(offset, uint64_t(i) * frameSizeInBytes);
-        }
-    }
+    //    F1
+    //  [xxxx|--------]
+    size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1));
 
-    // Tests ringbuffer sub-allocations of the same serial are correctly tracked.
-    TEST(RingBufferAllocatorTests, AllocInSameFrame) {
-        constexpr uint64_t maxNumOfFrames = 3;
-        constexpr uint64_t frameSizeInBytes = 4;
+    //    F1   F2
+    //  [xxxx|xxxx|----]
 
-        RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
+    offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
 
-        //    F1
-        //  [xxxx|--------]
-        size_t offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(1));
+    //    F1     F2
+    //  [xxxx|xxxxxxxx]
 
-        //    F1   F2
-        //  [xxxx|xxxx|----]
+    offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
 
-        offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
+    ASSERT_EQ(offset, 8u);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3);
 
-        //    F1     F2
-        //  [xxxx|xxxxxxxx]
+    allocator.Deallocate(ExecutionSerial(2));
 
-        offset = allocator.Allocate(frameSizeInBytes, ExecutionSerial(2));
+    ASSERT_EQ(allocator.GetUsedSize(), 0u);
+    EXPECT_TRUE(allocator.Empty());
+}
 
-        ASSERT_EQ(offset, 8u);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 3);
+// Tests ringbuffer sub-allocation at various offsets.
+TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
+    constexpr uint64_t maxNumOfFrames = 10;
+    constexpr uint64_t frameSizeInBytes = 4;
 
-        allocator.Deallocate(ExecutionSerial(2));
+    RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
 
-        ASSERT_EQ(allocator.GetUsedSize(), 0u);
-        EXPECT_TRUE(allocator.Empty());
-    }
-
-    // Tests ringbuffer sub-allocation at various offsets.
-    TEST(RingBufferAllocatorTests, RingBufferSubAlloc) {
-        constexpr uint64_t maxNumOfFrames = 10;
-        constexpr uint64_t frameSizeInBytes = 4;
-
-        RingBufferAllocator allocator(maxNumOfFrames * frameSizeInBytes);
-
-        // Sub-alloc the first eight frames.
-        ExecutionSerial serial(0);
-        while (serial < ExecutionSerial(8)) {
-            allocator.Allocate(frameSizeInBytes, serial);
-            serial++;
-        }
-
-        // Each frame corrresponds to the serial number (for simplicity).
-        //
-        //    F1   F2   F3   F4   F5   F6   F7   F8
-        //  [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
-        //
-
-        // Ensure an oversized allocation fails (only 8 bytes left)
-        ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial),
-                  RingBufferAllocator::kInvalidOffset);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
-
-        // Reclaim the first 3 frames.
-        allocator.Deallocate(ExecutionSerial(2));
-
-        //                 F4   F5   F6   F7   F8
-        //  [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
-        //
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5);
-
-        // Re-try the over-sized allocation.
-        size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial));
-
-        //        F9       F4   F5   F6   F7   F8
-        //  [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx]
-        //                                         ^^^^^^^^ wasted
-
-        // In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes
-        // were added to F9's sub-allocation.
-        // TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes.
-
-        ASSERT_EQ(offset, 0u);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
-
-        // Ensure we are full.
-        ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
-                  RingBufferAllocator::kInvalidOffset);
-
-        // Reclaim the next two frames.
-        allocator.Deallocate(ExecutionSerial(4));
-
-        //        F9       F4   F5   F6   F7   F8
-        //  [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx]
-        //
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
-
-        // Sub-alloc the chunk in the middle.
+    // Sub-alloc the first eight frames.
+    ExecutionSerial serial(0);
+    while (serial < ExecutionSerial(8)) {
+        allocator.Allocate(frameSizeInBytes, serial);
         serial++;
-        offset = allocator.Allocate(frameSizeInBytes * 2, serial);
-
-        ASSERT_EQ(offset, frameSizeInBytes * 3);
-        ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
-
-        //        F9         F10      F6   F7   F8
-        //  [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx]
-        //
-
-        // Ensure we are full.
-        ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial),
-                  RingBufferAllocator::kInvalidOffset);
-
-        // Reclaim all.
-        allocator.Deallocate(kMaxExecutionSerial);
-
-        EXPECT_TRUE(allocator.Empty());
     }
 
-    // Checks if ringbuffer sub-allocation does not overflow.
-    TEST(RingBufferAllocatorTests, RingBufferOverflow) {
-        RingBufferAllocator allocator(std::numeric_limits<uint64_t>::max());
+    // Each frame corresponds to the serial number (for simplicity).
+    //
+    //    F1   F2   F3   F4   F5   F6   F7   F8
+    //  [xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
+    //
 
-        ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u);
-        ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
-                  RingBufferAllocator::kInvalidOffset);
-    }
+    // Ensure an oversized allocation fails (only 8 bytes left)
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes * 3, serial),
+              RingBufferAllocator::kInvalidOffset);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
+
+    // Reclaim the first 3 frames.
+    allocator.Deallocate(ExecutionSerial(2));
+
+    //                 F4   F5   F6   F7   F8
+    //  [------------|xxxx|xxxx|xxxx|xxxx|xxxx|--------]
+    //
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 5);
+
+    // Re-try the over-sized allocation.
+    size_t offset = allocator.Allocate(frameSizeInBytes * 3, ExecutionSerial(serial));
+
+    //        F9       F4   F5   F6   F7   F8
+    //  [xxxxxxxxxxxx|xxxx|xxxx|xxxx|xxxx|xxxx|xxxxxxxx]
+    //                                         ^^^^^^^^ wasted
+
+    // In this example, Deallocate(8) could not reclaim the wasted bytes. The wasted bytes
+    // were added to F9's sub-allocation.
+    // TODO(bryan.bernhart@intel.com): Decide if Deallocate(8) should free these wasted bytes.
+
+    ASSERT_EQ(offset, 0u);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
+
+    // Ensure we are full.
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
+
+    // Reclaim the next two frames.
+    allocator.Deallocate(ExecutionSerial(4));
+
+    //        F9       F4   F5   F6   F7   F8
+    //  [xxxxxxxxxxxx|----|----|xxxx|xxxx|xxxx|xxxxxxxx]
+    //
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * 8);
+
+    // Sub-alloc the chunk in the middle.
+    serial++;
+    offset = allocator.Allocate(frameSizeInBytes * 2, serial);
+
+    ASSERT_EQ(offset, frameSizeInBytes * 3);
+    ASSERT_EQ(allocator.GetUsedSize(), frameSizeInBytes * maxNumOfFrames);
+
+    //        F9         F10      F6   F7   F8
+    //  [xxxxxxxxxxxx|xxxxxxxxx|xxxx|xxxx|xxxx|xxxxxxxx]
+    //
+
+    // Ensure we are full.
+    ASSERT_EQ(allocator.Allocate(frameSizeInBytes, serial), RingBufferAllocator::kInvalidOffset);
+
+    // Reclaim all.
+    allocator.Deallocate(kMaxExecutionSerial);
+
+    EXPECT_TRUE(allocator.Empty());
+}
+
+// Checks if ringbuffer sub-allocation does not overflow.
+TEST(RingBufferAllocatorTests, RingBufferOverflow) {
+    RingBufferAllocator allocator(std::numeric_limits<uint64_t>::max());
+
+    ASSERT_EQ(allocator.Allocate(1, ExecutionSerial(1)), 0u);
+    ASSERT_EQ(allocator.Allocate(std::numeric_limits<uint64_t>::max(), ExecutionSerial(1)),
+              RingBufferAllocator::kInvalidOffset);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/SlabAllocatorTests.cpp b/src/dawn/tests/unittests/SlabAllocatorTests.cpp
index fa16348..33f2b16 100644
--- a/src/dawn/tests/unittests/SlabAllocatorTests.cpp
+++ b/src/dawn/tests/unittests/SlabAllocatorTests.cpp
@@ -21,16 +21,15 @@
 
 namespace {
 
-    struct Foo : public PlacementAllocated {
-        explicit Foo(int value) : value(value) {
-        }
+struct Foo : public PlacementAllocated {
+    explicit Foo(int value) : value(value) {}
 
-        int value;
-    };
+    int value;
+};
 
-    struct alignas(256) AlignedFoo : public Foo {
-        using Foo::Foo;
-    };
+struct alignas(256) AlignedFoo : public Foo {
+    using Foo::Foo;
+};
 
 }  // namespace
 
diff --git a/src/dawn/tests/unittests/StackContainerTests.cpp b/src/dawn/tests/unittests/StackContainerTests.cpp
index 1143ca8..4325332 100644
--- a/src/dawn/tests/unittests/StackContainerTests.cpp
+++ b/src/dawn/tests/unittests/StackContainerTests.cpp
@@ -8,25 +8,21 @@
 #include <cstddef>
 #include <vector>
 
-#include "gtest/gtest.h"
 #include "dawn/common/RefCounted.h"
 #include "dawn/common/StackContainer.h"
+#include "gtest/gtest.h"
 
 namespace {
 
-    class Placeholder : public RefCounted {
-      public:
-        explicit Placeholder(int* alive) : mAlive(alive) {
-            ++*mAlive;
-        }
+class Placeholder : public RefCounted {
+  public:
+    explicit Placeholder(int* alive) : mAlive(alive) { ++*mAlive; }
 
-      private:
-        ~Placeholder() {
-            --*mAlive;
-        }
+  private:
+    ~Placeholder() { --*mAlive; }
 
-        int* const mAlive;
-    };
+    int* const mAlive;
+};
 
 }  // namespace
 
@@ -98,17 +94,15 @@
 
 namespace {
 
-    template <size_t alignment>
-    class AlignedData {
-      public:
-        AlignedData() {
-            memset(data_, 0, alignment);
-        }
-        ~AlignedData() = default;
-        AlignedData(const AlignedData&) = default;
-        AlignedData& operator=(const AlignedData&) = default;
-        alignas(alignment) char data_[alignment];
-    };
+template <size_t alignment>
+class AlignedData {
+  public:
+    AlignedData() { memset(data_, 0, alignment); }
+    ~AlignedData() = default;
+    AlignedData(const AlignedData&) = default;
+    AlignedData& operator=(const AlignedData&) = default;
+    alignas(alignment) char data_[alignment];
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/unittests/SubresourceStorageTests.cpp b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
index a4e49c0..fb2759a 100644
--- a/src/dawn/tests/unittests/SubresourceStorageTests.cpp
+++ b/src/dawn/tests/unittests/SubresourceStorageTests.cpp
@@ -20,670 +20,659 @@
 
 namespace dawn::native {
 
-    // A fake class that replicates the behavior of SubresourceStorage but without any compression
-    // and is used to compare the results of operations on SubresourceStorage against the "ground
-    // truth" of FakeStorage.
-    template <typename T>
-    struct FakeStorage {
-        FakeStorage(Aspect aspects,
-                    uint32_t arrayLayerCount,
-                    uint32_t mipLevelCount,
-                    T initialValue = {})
-            : mAspects(aspects),
-              mArrayLayerCount(arrayLayerCount),
-              mMipLevelCount(mipLevelCount),
-              mData(GetAspectCount(aspects) * arrayLayerCount * mipLevelCount, initialValue) {
-        }
+// A fake class that replicates the behavior of SubresourceStorage but without any compression
+// and is used to compare the results of operations on SubresourceStorage against the "ground
+// truth" of FakeStorage.
+template <typename T>
+struct FakeStorage {
+    FakeStorage(Aspect aspects,
+                uint32_t arrayLayerCount,
+                uint32_t mipLevelCount,
+                T initialValue = {})
+        : mAspects(aspects),
+          mArrayLayerCount(arrayLayerCount),
+          mMipLevelCount(mipLevelCount),
+          mData(GetAspectCount(aspects) * arrayLayerCount * mipLevelCount, initialValue) {}
 
-        template <typename F>
-        void Update(const SubresourceRange& range, F&& updateFunc) {
-            for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; layer++) {
-                    for (uint32_t level = range.baseMipLevel;
-                         level < range.baseMipLevel + range.levelCount; level++) {
-                        SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
-                        updateFunc(range, &mData[GetDataIndex(aspect, layer, level)]);
-                    }
+    template <typename F>
+    void Update(const SubresourceRange& range, F&& updateFunc) {
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; layer++) {
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; level++) {
+                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                    updateFunc(range, &mData[GetDataIndex(aspect, layer, level)]);
+                }
+            }
+        }
+    }
+
+    template <typename U, typename F>
+    void Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
+        for (Aspect aspect : IterateEnumMask(mAspects)) {
+            for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
+                for (uint32_t level = 0; level < mMipLevelCount; level++) {
+                    SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
+                    mergeFunc(range, &mData[GetDataIndex(aspect, layer, level)],
+                              other.Get(aspect, layer, level));
+                }
+            }
+        }
+    }
+
+    const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const {
+        return mData[GetDataIndex(aspect, arrayLayer, mipLevel)];
+    }
+
+    size_t GetDataIndex(Aspect aspect, uint32_t layer, uint32_t level) const {
+        uint32_t aspectIndex = GetAspectIndex(aspect);
+        return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex);
+    }
+
+    // Method that checks that this and real have exactly the same content. It does so via
+    // looping on all subresources and calling Get() (hence testing Get()). It also calls
+    // Iterate() checking that every subresource is mentioned exactly once and that its content
+    // is correct (hence testing Iterate()). Its implementation requires the RangeTracker below
+    // that itself needs FakeStorage<int> so it cannot be defined inline with the other methods.
+    void CheckSameAs(const SubresourceStorage<T>& real);
+
+    Aspect mAspects;
+    uint32_t mArrayLayerCount;
+    uint32_t mMipLevelCount;
+
+    std::vector<T> mData;
+};
+
+// Track a set of ranges that have been seen and can assert that in aggregate they make exactly
+// a single range (and that each subresource was seen only once).
+struct RangeTracker {
+    template <typename T>
+    explicit RangeTracker(const SubresourceStorage<T>& s)
+        : mTracked(s.GetAspectsForTesting(),
+                   s.GetArrayLayerCountForTesting(),
+                   s.GetMipLevelCountForTesting(),
+                   0) {}
+
+    void Track(const SubresourceRange& range) {
+        // Add +1 to the subresources tracked.
+        mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
+            ASSERT_EQ(*counter, 0u);
+            *counter += 1;
+        });
+    }
+
+    void CheckTrackedExactly(const SubresourceRange& range) {
+        // Check that all subresources in the range were tracked once and set the counter back
+        // to 0.
+        mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
+            ASSERT_EQ(*counter, 1u);
+            *counter = 0;
+        });
+
+        // Now all subresources should be at 0.
+        for (int counter : mTracked.mData) {
+            ASSERT_EQ(counter, 0);
+        }
+    }
+
+    FakeStorage<uint32_t> mTracked;
+};
+
+template <typename T>
+void FakeStorage<T>::CheckSameAs(const SubresourceStorage<T>& real) {
+    EXPECT_EQ(real.GetAspectsForTesting(), mAspects);
+    EXPECT_EQ(real.GetArrayLayerCountForTesting(), mArrayLayerCount);
+    EXPECT_EQ(real.GetMipLevelCountForTesting(), mMipLevelCount);
+
+    RangeTracker tracker(real);
+    real.Iterate([&](const SubresourceRange& range, const T& data) {
+        // Check that the range is sensical.
+        EXPECT_TRUE(IsSubset(range.aspects, mAspects));
+
+        EXPECT_LT(range.baseArrayLayer, mArrayLayerCount);
+        EXPECT_LE(range.baseArrayLayer + range.layerCount, mArrayLayerCount);
+
+        EXPECT_LT(range.baseMipLevel, mMipLevelCount);
+        EXPECT_LE(range.baseMipLevel + range.levelCount, mMipLevelCount);
+
+        for (Aspect aspect : IterateEnumMask(range.aspects)) {
+            for (uint32_t layer = range.baseArrayLayer;
+                 layer < range.baseArrayLayer + range.layerCount; layer++) {
+                for (uint32_t level = range.baseMipLevel;
+                     level < range.baseMipLevel + range.levelCount; level++) {
+                    EXPECT_EQ(data, Get(aspect, layer, level));
+                    EXPECT_EQ(data, real.Get(aspect, layer, level));
                 }
             }
         }
 
-        template <typename U, typename F>
-        void Merge(const SubresourceStorage<U>& other, F&& mergeFunc) {
-            for (Aspect aspect : IterateEnumMask(mAspects)) {
-                for (uint32_t layer = 0; layer < mArrayLayerCount; layer++) {
-                    for (uint32_t level = 0; level < mMipLevelCount; level++) {
-                        SubresourceRange range = SubresourceRange::MakeSingle(aspect, layer, level);
-                        mergeFunc(range, &mData[GetDataIndex(aspect, layer, level)],
-                                  other.Get(aspect, layer, level));
-                    }
-                }
-            }
+        tracker.Track(range);
+    });
+
+    tracker.CheckTrackedExactly(
+        SubresourceRange::MakeFull(mAspects, mArrayLayerCount, mMipLevelCount));
+}
+
+template <typename T>
+void CheckAspectCompressed(const SubresourceStorage<T>& s, Aspect aspect, bool expected) {
+    ASSERT(HasOneBit(aspect));
+
+    uint32_t levelCount = s.GetMipLevelCountForTesting();
+    uint32_t layerCount = s.GetArrayLayerCountForTesting();
+
+    bool seen = false;
+    s.Iterate([&](const SubresourceRange& range, const T&) {
+        if (range.aspects == aspect && range.layerCount == layerCount &&
+            range.levelCount == levelCount && range.baseArrayLayer == 0 &&
+            range.baseMipLevel == 0) {
+            seen = true;
         }
+    });
 
-        const T& Get(Aspect aspect, uint32_t arrayLayer, uint32_t mipLevel) const {
-            return mData[GetDataIndex(aspect, arrayLayer, mipLevel)];
+    ASSERT_EQ(seen, expected);
+
+    // Check that the internal state of SubresourceStorage matches what we expect.
+    // If an aspect is compressed, all its layers should be internally tagged as compressed.
+    ASSERT_EQ(s.IsAspectCompressedForTesting(aspect), expected);
+    if (expected) {
+        for (uint32_t layer = 0; layer < s.GetArrayLayerCountForTesting(); layer++) {
+            ASSERT_TRUE(s.IsLayerCompressedForTesting(aspect, layer));
         }
+    }
+}
 
-        size_t GetDataIndex(Aspect aspect, uint32_t layer, uint32_t level) const {
-            uint32_t aspectIndex = GetAspectIndex(aspect);
-            return level + mMipLevelCount * (layer + mArrayLayerCount * aspectIndex);
+template <typename T>
+void CheckLayerCompressed(const SubresourceStorage<T>& s,
+                          Aspect aspect,
+                          uint32_t layer,
+                          bool expected) {
+    ASSERT(HasOneBit(aspect));
+
+    uint32_t levelCount = s.GetMipLevelCountForTesting();
+
+    bool seen = false;
+    s.Iterate([&](const SubresourceRange& range, const T&) {
+        if (range.aspects == aspect && range.layerCount == 1 && range.levelCount == levelCount &&
+            range.baseArrayLayer == layer && range.baseMipLevel == 0) {
+            seen = true;
         }
+    });
 
-        // Method that checks that this and real have exactly the same content. It does so via
-        // looping on all subresources and calling Get() (hence testing Get()). It also calls
-        // Iterate() checking that every subresource is mentioned exactly once and that its content
-        // is correct (hence testing Iterate()). Its implementation requires the RangeTracker below
-        // that itself needs FakeStorage<int> so it cannot be define inline with the other methods.
-        void CheckSameAs(const SubresourceStorage<T>& real);
+    ASSERT_EQ(seen, expected);
+    ASSERT_EQ(s.IsLayerCompressedForTesting(aspect, layer), expected);
+}
 
-        Aspect mAspects;
-        uint32_t mArrayLayerCount;
-        uint32_t mMipLevelCount;
+struct SmallData {
+    uint32_t value = 0xF00;
+};
 
-        std::vector<T> mData;
-    };
+bool operator==(const SmallData& a, const SmallData& b) {
+    return a.value == b.value;
+}
 
-    // Track a set of ranges that have been seen and can assert that in aggregate they make exactly
-    // a single range (and that each subresource was seen only once).
-    struct RangeTracker {
-        template <typename T>
-        explicit RangeTracker(const SubresourceStorage<T>& s)
-            : mTracked(s.GetAspectsForTesting(),
-                       s.GetArrayLayerCountForTesting(),
-                       s.GetMipLevelCountForTesting(),
-                       0) {
-        }
+// Test that the default value is correctly set.
+TEST(SubresourceStorageTest, DefaultValue) {
+    // Test setting no default value for a primitive type.
+    {
+        SubresourceStorage<int> s(Aspect::Color, 3, 5);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 0);
 
-        void Track(const SubresourceRange& range) {
-            // Add +1 to the subresources tracked.
-            mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
-                ASSERT_EQ(*counter, 0u);
-                *counter += 1;
-            });
-        }
-
-        void CheckTrackedExactly(const SubresourceRange& range) {
-            // Check that all subresources in the range were tracked once and set the counter back
-            // to 0.
-            mTracked.Update(range, [](const SubresourceRange&, uint32_t* counter) {
-                ASSERT_EQ(*counter, 1u);
-                *counter = 0;
-            });
-
-            // Now all subresources should be at 0.
-            for (int counter : mTracked.mData) {
-                ASSERT_EQ(counter, 0);
-            }
-        }
-
-        FakeStorage<uint32_t> mTracked;
-    };
-
-    template <typename T>
-    void FakeStorage<T>::CheckSameAs(const SubresourceStorage<T>& real) {
-        EXPECT_EQ(real.GetAspectsForTesting(), mAspects);
-        EXPECT_EQ(real.GetArrayLayerCountForTesting(), mArrayLayerCount);
-        EXPECT_EQ(real.GetMipLevelCountForTesting(), mMipLevelCount);
-
-        RangeTracker tracker(real);
-        real.Iterate([&](const SubresourceRange& range, const T& data) {
-            // Check that the range is sensical.
-            EXPECT_TRUE(IsSubset(range.aspects, mAspects));
-
-            EXPECT_LT(range.baseArrayLayer, mArrayLayerCount);
-            EXPECT_LE(range.baseArrayLayer + range.layerCount, mArrayLayerCount);
-
-            EXPECT_LT(range.baseMipLevel, mMipLevelCount);
-            EXPECT_LE(range.baseMipLevel + range.levelCount, mMipLevelCount);
-
-            for (Aspect aspect : IterateEnumMask(range.aspects)) {
-                for (uint32_t layer = range.baseArrayLayer;
-                     layer < range.baseArrayLayer + range.layerCount; layer++) {
-                    for (uint32_t level = range.baseMipLevel;
-                         level < range.baseMipLevel + range.levelCount; level++) {
-                        EXPECT_EQ(data, Get(aspect, layer, level));
-                        EXPECT_EQ(data, real.Get(aspect, layer, level));
-                    }
-                }
-            }
-
-            tracker.Track(range);
-        });
-
-        tracker.CheckTrackedExactly(
-            SubresourceRange::MakeFull(mAspects, mArrayLayerCount, mMipLevelCount));
+        FakeStorage<int> f(Aspect::Color, 3, 5);
+        f.CheckSameAs(s);
     }
 
-    template <typename T>
-    void CheckAspectCompressed(const SubresourceStorage<T>& s, Aspect aspect, bool expected) {
-        ASSERT(HasOneBit(aspect));
+    // Test setting a default value for a primitive type.
+    {
+        SubresourceStorage<int> s(Aspect::Color, 3, 5, 42);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 42);
 
-        uint32_t levelCount = s.GetMipLevelCountForTesting();
-        uint32_t layerCount = s.GetArrayLayerCountForTesting();
+        FakeStorage<int> f(Aspect::Color, 3, 5, 42);
+        f.CheckSameAs(s);
+    }
 
-        bool seen = false;
-        s.Iterate([&](const SubresourceRange& range, const T&) {
-            if (range.aspects == aspect && range.layerCount == layerCount &&
-                range.levelCount == levelCount && range.baseArrayLayer == 0 &&
-                range.baseMipLevel == 0) {
-                seen = true;
-            }
-        });
+    // Test setting no default value for a type with a default constructor.
+    {
+        SubresourceStorage<SmallData> s(Aspect::Color, 3, 5);
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 0xF00u);
 
-        ASSERT_EQ(seen, expected);
+        FakeStorage<SmallData> f(Aspect::Color, 3, 5);
+        f.CheckSameAs(s);
+    }
+    // Test setting a default value for a type with a default constructor.
+    {
+        SubresourceStorage<SmallData> s(Aspect::Color, 3, 5, {007u});
+        EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 007u);
 
-        // Check that the internal state of SubresourceStorage matches what we expect.
-        // If an aspect is compressed, all its layers should be internally tagged as compressed.
-        ASSERT_EQ(s.IsAspectCompressedForTesting(aspect), expected);
-        if (expected) {
-            for (uint32_t layer = 0; layer < s.GetArrayLayerCountForTesting(); layer++) {
-                ASSERT_TRUE(s.IsLayerCompressedForTesting(aspect, layer));
+        FakeStorage<SmallData> f(Aspect::Color, 3, 5, {007u});
+        f.CheckSameAs(s);
+    }
+}
+
+// The tests for Update() all follow the same pattern of setting up a real and a fake storage
+// then performing one or multiple Update()s on them and checking:
+//  - They have the same content.
+//  - The Update() range was correct.
+//  - The aspects and layers have the expected "compressed" status.
+
+// Calls Update both on the read storage and the fake storage but intercepts the call to
+// updateFunc done by the real storage to check their ranges argument aggregate to exactly the
+// update range.
+template <typename T, typename F>
+void CallUpdateOnBoth(SubresourceStorage<T>* s,
+                      FakeStorage<T>* f,
+                      const SubresourceRange& range,
+                      F&& updateFunc) {
+    RangeTracker tracker(*s);
+
+    s->Update(range, [&](const SubresourceRange& range, T* data) {
+        tracker.Track(range);
+        updateFunc(range, data);
+    });
+    f->Update(range, updateFunc);
+
+    tracker.CheckTrackedExactly(range);
+    f->CheckSameAs(*s);
+}
+
+// Test updating a single subresource on a single-aspect storage.
+TEST(SubresourceStorageTest, SingleSubresourceUpdateSingleAspect) {
+    SubresourceStorage<int> s(Aspect::Color, 5, 7);
+    FakeStorage<int> f(Aspect::Color, 5, 7);
+
+    // Update a single subresource.
+    SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 3, 2);
+    CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 2, true);
+    CheckLayerCompressed(s, Aspect::Color, 3, false);
+    CheckLayerCompressed(s, Aspect::Color, 4, true);
+}
+
+// Test updating a single subresource on a multi-aspect storage.
+TEST(SubresourceStorageTest, SingleSubresourceUpdateMultiAspect) {
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 5, 3);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 5, 3);
+
+    SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Stencil, 1, 2);
+    CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 0, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+}
+
+// Test updating as a stipple pattern on one of two aspects then updating it completely.
+TEST(SubresourceStorageTest, UpdateStipple) {
+    const uint32_t kLayers = 10;
+    const uint32_t kLevels = 7;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+
+    // Update with a stipple.
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        for (uint32_t level = 0; level < kLevels; level++) {
+            if ((layer + level) % 2 == 0) {
+                SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Depth, layer, level);
+                CallUpdateOnBoth(&s, &f, range,
+                                 [](const SubresourceRange&, int* data) { *data += 17; });
             }
         }
     }
 
-    template <typename T>
-    void CheckLayerCompressed(const SubresourceStorage<T>& s,
-                              Aspect aspect,
-                              uint32_t layer,
-                              bool expected) {
-        ASSERT(HasOneBit(aspect));
-
-        uint32_t levelCount = s.GetMipLevelCountForTesting();
-
-        bool seen = false;
-        s.Iterate([&](const SubresourceRange& range, const T&) {
-            if (range.aspects == aspect && range.layerCount == 1 &&
-                range.levelCount == levelCount && range.baseArrayLayer == layer &&
-                range.baseMipLevel == 0) {
-                seen = true;
-            }
-        });
-
-        ASSERT_EQ(seen, expected);
-        ASSERT_EQ(s.IsLayerCompressedForTesting(aspect, layer), expected);
+    // The depth should be fully uncompressed while the stencil stayed compressed.
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+    CheckAspectCompressed(s, Aspect::Depth, false);
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        CheckLayerCompressed(s, Aspect::Depth, layer, false);
     }
 
-    struct SmallData {
-        uint32_t value = 0xF00;
-    };
-
-    bool operator==(const SmallData& a, const SmallData& b) {
-        return a.value == b.value;
+    // Update completely with a single value. Recompression should happen!
+    {
+        SubresourceRange fullRange =
+            SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 31; });
     }
 
-    // Test that the default value is correctly set.
-    TEST(SubresourceStorageTest, DefaultValue) {
-        // Test setting no default value for a primitive type.
-        {
-            SubresourceStorage<int> s(Aspect::Color, 3, 5);
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 0);
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+}
 
-            FakeStorage<int> f(Aspect::Color, 3, 5);
-            f.CheckSameAs(s);
-        }
+// Test updating as a crossing band pattern:
+//  - The first band is full layers [2, 3] on both aspects
+//  - The second band is full mips [5, 6] on one aspect.
+// Then updating completely.
+TEST(SubresourceStorageTest, UpdateTwoBand) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
 
-        // Test setting a default value for a primitive type.
-        {
-            SubresourceStorage<int> s(Aspect::Color, 3, 5, 42);
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2), 42);
-
-            FakeStorage<int> f(Aspect::Color, 3, 5, 42);
-            f.CheckSameAs(s);
-        }
-
-        // Test setting no default value for a type with a default constructor.
-        {
-            SubresourceStorage<SmallData> s(Aspect::Color, 3, 5);
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 0xF00u);
-
-            FakeStorage<SmallData> f(Aspect::Color, 3, 5);
-            f.CheckSameAs(s);
-        }
-        // Test setting a default value for a type with a default constructor.
-        {
-            SubresourceStorage<SmallData> s(Aspect::Color, 3, 5, {007u});
-            EXPECT_EQ(s.Get(Aspect::Color, 1, 2).value, 007u);
-
-            FakeStorage<SmallData> f(Aspect::Color, 3, 5, {007u});
-            f.CheckSameAs(s);
-        }
+    // Update the two bands
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
     }
 
-    // The tests for Update() all follow the same pattern of setting up a real and a fake storage
-    // then performing one or multiple Update()s on them and checking:
-    //  - They have the same content.
-    //  - The Update() range was correct.
-    //  - The aspects and layers have the expected "compressed" status.
+    // The layers were fully updated so they should stay compressed.
+    CheckLayerCompressed(s, Aspect::Depth, 2, true);
+    CheckLayerCompressed(s, Aspect::Depth, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
 
-    // Calls Update both on the read storage and the fake storage but intercepts the call to
-    // updateFunc done by the real storage to check their ranges argument aggregate to exactly the
-    // update range.
-    template <typename T, typename F>
-    void CallUpdateOnBoth(SubresourceStorage<T>* s,
-                          FakeStorage<T>* f,
-                          const SubresourceRange& range,
-                          F&& updateFunc) {
-        RangeTracker tracker(*s);
-
-        s->Update(range, [&](const SubresourceRange& range, T* data) {
-            tracker.Track(range);
-            updateFunc(range, data);
-        });
-        f->Update(range, updateFunc);
-
-        tracker.CheckTrackedExactly(range);
-        f->CheckSameAs(*s);
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
     }
 
-    // Test updating a single subresource on a single-aspect storage.
-    TEST(SubresourceStorageTest, SingleSubresourceUpdateSingleAspect) {
-        SubresourceStorage<int> s(Aspect::Color, 5, 7);
-        FakeStorage<int> f(Aspect::Color, 5, 7);
+    // The layers had to be decompressed in depth
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
 
-        // Update a single subresource.
-        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 3, 2);
+    // Update completely. Without a single value recompression shouldn't happen.
+    {
+        SubresourceRange fullRange =
+            SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange,
+                         [](const SubresourceRange&, int* data) { *data += 12; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Depth, false);
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+}
+
+// Test updating with extremal subresources:
+//    - First the two extremal subresources, then half of the array layers in full.
+//    - Then updating completely.
+TEST(SubresourceStorageTest, UpdateExtremas) {
+    const uint32_t kLayers = 6;
+    const uint32_t kLevels = 4;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+
+    // Update the two extrema
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, kLevels - 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, kLayers - 1, 0);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
+    }
+
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 2, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+
+    // Update half of the layers in full with constant values. Some recompression should happen.
+    {
+        SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 123; });
+    }
+
+    CheckLayerCompressed(s, Aspect::Color, 0, true);
+    CheckLayerCompressed(s, Aspect::Color, 1, true);
+    CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+
+    // Update completely. Recompression should happen!
+    {
+        SubresourceRange fullRange = SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels);
+        CallUpdateOnBoth(&s, &f, fullRange, [](const SubresourceRange&, int* data) { *data = 35; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Color, true);
+}
+
+// A regression test for an issue found while reworking the implementation where
+// RecompressAspect didn't correctly check that each layer was compressed but only that
+// their 0th value was the same.
+TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) {
+    SubresourceStorage<int> s(Aspect::Color, 2, 2);
+    FakeStorage<int> f(Aspect::Color, 2, 2);
+
+    // Update 0th mip levels to some value, it should decompress the aspect and both layers.
+    {
+        SubresourceRange range(Aspect::Color, {0, 2}, {0, 1});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 17; });
+    }
+
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+
+    // Update the whole resource by doing +1. The aspects and layers should stay decompressed.
+    {
+        SubresourceRange range = SubresourceRange::MakeFull(Aspect::Color, 2, 2);
         CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
-
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 2, true);
-        CheckLayerCompressed(s, Aspect::Color, 3, false);
-        CheckLayerCompressed(s, Aspect::Color, 4, true);
     }
 
-    // Test updating a single subresource on a multi-aspect storage.
-    TEST(SubresourceStorageTest, SingleSubresourceUpdateMultiAspect) {
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 5, 3);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 5, 3);
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+}
 
-        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Stencil, 1, 2);
-        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
+// The tests for Merge() all follow the same pattern as the Update() tests except that they use
+// Update() to set up the test storages.
 
-        CheckAspectCompressed(s, Aspect::Depth, true);
-        CheckAspectCompressed(s, Aspect::Stencil, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 0, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 1, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+// Similar to CallUpdateOnBoth but for Merge
+template <typename T, typename U, typename F>
+void CallMergeOnBoth(SubresourceStorage<T>* s,
+                     FakeStorage<T>* f,
+                     const SubresourceStorage<U>& other,
+                     F&& mergeFunc) {
+    RangeTracker tracker(*s);
+
+    s->Merge(other, [&](const SubresourceRange& range, T* data, const U& otherData) {
+        tracker.Track(range);
+        mergeFunc(range, data, otherData);
+    });
+    f->Merge(other, mergeFunc);
+
+    tracker.CheckTrackedExactly(
+        SubresourceRange::MakeFull(f->mAspects, f->mArrayLayerCount, f->mMipLevelCount));
+    f->CheckSameAs(*s);
+}
+
+// Test merging two fully compressed single-aspect resources.
+TEST(SubresourceStorageTest, MergeFullWithFullSingleAspect) {
+    SubresourceStorage<int> s(Aspect::Color, 4, 6);
+    FakeStorage<int> f(Aspect::Color, 4, 6);
+
+    // Merge the whole resource in a single call.
+    SubresourceStorage<bool> other(Aspect::Color, 4, 6, true);
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
+        if (other) {
+            *data = 13;
+        }
+    });
+
+    CheckAspectCompressed(s, Aspect::Color, true);
+}
+
+// Test merging two fully compressed multi-aspect resources.
+TEST(SubresourceStorageTest, MergeFullWithFullMultiAspect) {
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 6, 7);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 6, 7);
+
+    // Merge the whole resource in a single call.
+    SubresourceStorage<bool> other(Aspect::Depth | Aspect::Stencil, 6, 7, true);
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
+        if (other) {
+            *data = 13;
+        }
+    });
+
+    CheckAspectCompressed(s, Aspect::Depth, true);
+    CheckAspectCompressed(s, Aspect::Stencil, true);
+}
+
+// Test merging a fully compressed resource in a resource with the "cross band" pattern.
+//  - The first band is full layers [2, 3] on both aspects
+//  - The second band is full mips [5, 6] on one aspect.
+// This provides coverage of using a single piece of data from `other` to update all of `s`
+TEST(SubresourceStorageTest, MergeFullInTwoBand) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+
+    // Update the two bands
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 5; });
     }
 
-    // Test updating as a stipple pattern on one of two aspects then updating it completely.
-    TEST(SubresourceStorageTest, UpdateStipple) {
-        const uint32_t kLayers = 10;
-        const uint32_t kLevels = 7;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    // Merge the fully compressed resource.
+    SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 17);
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
 
-        // Update with a stipple.
-        for (uint32_t layer = 0; layer < kLayers; layer++) {
-            for (uint32_t level = 0; level < kLevels; level++) {
-                if ((layer + level) % 2 == 0) {
-                    SubresourceRange range =
-                        SubresourceRange::MakeSingle(Aspect::Depth, layer, level);
-                    CallUpdateOnBoth(&s, &f, range,
-                                     [](const SubresourceRange&, int* data) { *data += 17; });
-                }
+    // The layers traversed by the mip band are still uncompressed.
+    CheckLayerCompressed(s, Aspect::Depth, 1, false);
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Depth, 4, false);
+
+    // Stencil is decompressed but all its layers are still compressed because there wasn't the
+    // mip band.
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 4, true);
+}
+// Test the reverse, merging two-bands in a full resource. This provides coverage for
+// decompressing aspects and partially decompressing layers to match the compression of `other`
+TEST(SubresourceStorageTest, MergeTwoBandInFull) {
+    const uint32_t kLayers = 5;
+    const uint32_t kLevels = 9;
+    SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
+    FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
+
+    // Update the two bands
+    SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    {
+        SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
+        other.Update(range, [](const SubresourceRange&, int* data) { *data += 3; });
+    }
+    {
+        SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
+        other.Update(range, [](const SubresourceRange&, int* data) { *data += 5; });
+    }
+
+    // Merge the two-band resource into the fully compressed one.
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
+
+    // The layers traversed by the mip band are still uncompressed.
+    CheckLayerCompressed(s, Aspect::Depth, 1, false);
+    CheckLayerCompressed(s, Aspect::Depth, 2, false);
+    CheckLayerCompressed(s, Aspect::Depth, 3, false);
+    CheckLayerCompressed(s, Aspect::Depth, 4, false);
+
+    // Stencil is decompressed but all its layers are still compressed because there wasn't the
+    // mip band.
+    CheckAspectCompressed(s, Aspect::Stencil, false);
+    CheckLayerCompressed(s, Aspect::Stencil, 1, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 2, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+    CheckLayerCompressed(s, Aspect::Stencil, 4, true);
+}
+
+// Test merging storage with a layer band in a stipple patterned storage. This provides coverage
+// for the code path that uses the same layer data for other multiple times.
+TEST(SubresourceStorageTest, MergeLayerBandInStipple) {
+    const uint32_t kLayers = 3;
+    const uint32_t kLevels = 5;
+
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+    SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
+
+    for (uint32_t layer = 0; layer < kLayers; layer++) {
+        for (uint32_t level = 0; level < kLevels; level++) {
+            if ((layer + level) % 2 == 0) {
+                SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, layer, level);
+                CallUpdateOnBoth(&s, &f, range,
+                                 [](const SubresourceRange&, int* data) { *data += 17; });
             }
         }
-
-        // The depth should be fully uncompressed while the stencil stayed compressed.
-        CheckAspectCompressed(s, Aspect::Stencil, true);
-        CheckAspectCompressed(s, Aspect::Depth, false);
-        for (uint32_t layer = 0; layer < kLayers; layer++) {
-            CheckLayerCompressed(s, Aspect::Depth, layer, false);
+        if (layer % 2 == 0) {
+            other.Update({Aspect::Color, {layer, 1}, {0, kLevels}},
+                         [](const SubresourceRange&, int* data) { *data += 8; });
         }
-
-        // Update completely with a single value. Recompression should happen!
-        {
-            SubresourceRange fullRange =
-                SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-            CallUpdateOnBoth(&s, &f, fullRange,
-                             [](const SubresourceRange&, int* data) { *data = 31; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Depth, true);
-        CheckAspectCompressed(s, Aspect::Stencil, true);
     }
 
-    // Test updating as a crossing band pattern:
-    //  - The first band is full layers [2, 3] on both aspects
-    //  - The second band is full mips [5, 6] on one aspect.
-    // Then updating completely.
-    TEST(SubresourceStorageTest, UpdateTwoBand) {
-        const uint32_t kLayers = 5;
-        const uint32_t kLevels = 9;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
+    // Merge the band in the stipple.
+    CallMergeOnBoth(&s, &f, other,
+                    [](const SubresourceRange&, int* data, int other) { *data += other; });
 
-        // Update the two bands
-        {
-            SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
+    // None of the resulting layers are compressed.
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+    CheckLayerCompressed(s, Aspect::Color, 1, false);
+    CheckLayerCompressed(s, Aspect::Color, 2, false);
+}
 
-        // The layers were fully updated so they should stay compressed.
-        CheckLayerCompressed(s, Aspect::Depth, 2, true);
-        CheckLayerCompressed(s, Aspect::Depth, 3, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
+// Regression test for a missing check that layer 0 is compressed when recompressing.
+TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) {
+    const uint32_t kLayers = 2;
+    const uint32_t kLevels = 2;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
 
-        {
-            SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
-        }
-
-        // The layers had to be decompressed in depth
-        CheckLayerCompressed(s, Aspect::Depth, 2, false);
-        CheckLayerCompressed(s, Aspect::Depth, 3, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
-
-        // Update completely. Without a single value recompression shouldn't happen.
-        {
-            SubresourceRange fullRange =
-                SubresourceRange::MakeFull(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-            CallUpdateOnBoth(&s, &f, fullRange,
-                             [](const SubresourceRange&, int* data) { *data += 12; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Depth, false);
-        CheckAspectCompressed(s, Aspect::Stencil, false);
+    // Set up s with zeros except (0, 1) which is garbage.
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
     }
 
-    // Test updating with extremal subresources
-    //    - Then half of the array layers in full.
-    //    - Then updating completely.
-    TEST(SubresourceStorageTest, UpdateExtremas) {
-        const uint32_t kLayers = 6;
-        const uint32_t kLevels = 4;
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
+    // Other is 2x2 of zeroes
+    SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
 
-        // Update the two extrema
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, kLevels - 1);
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, kLayers - 1, 0);
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data *= 3; });
-        }
+    // Fake updating F with other which is fully compressed and will trigger recompression.
+    CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int*, int) {});
 
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, true);
-        CheckLayerCompressed(s, Aspect::Color, kLayers - 2, true);
-        CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
+    // The Color aspect should not have been recompressed.
+    CheckAspectCompressed(s, Aspect::Color, false);
+    CheckLayerCompressed(s, Aspect::Color, 0, false);
+}
 
-        // Update half of the layers in full with constant values. Some recompression should happen.
-        {
-            SubresourceRange range(Aspect::Color, {0, kLayers / 2}, {0, kLevels});
-            CallUpdateOnBoth(&s, &f, range,
-                             [](const SubresourceRange&, int* data) { *data = 123; });
-        }
+// Regression test for aspect decompression not copying to layer 0
+TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) {
+    const uint32_t kLayers = 2;
+    const uint32_t kLevels = 2;
+    SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels, 3);
+    FakeStorage<int> f(Aspect::Color, kLayers, kLevels, 3);
 
-        CheckLayerCompressed(s, Aspect::Color, 0, true);
-        CheckLayerCompressed(s, Aspect::Color, 1, true);
-        CheckLayerCompressed(s, Aspect::Color, kLayers - 1, false);
-
-        // Update completely. Recompression should happen!
-        {
-            SubresourceRange fullRange =
-                SubresourceRange::MakeFull(Aspect::Color, kLayers, kLevels);
-            CallUpdateOnBoth(&s, &f, fullRange,
-                             [](const SubresourceRange&, int* data) { *data = 35; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Color, true);
+    // Cause decompression by writing to a single subresource.
+    {
+        SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1);
+        CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 0xABC; });
     }
 
-    // A regression test for an issue found while reworking the implementation where
-    // RecompressAspect didn't correctly check that each each layer was compressed but only that
-    // their 0th value was the same.
-    TEST(SubresourceStorageTest, UpdateLevel0sHappenToMatch) {
-        SubresourceStorage<int> s(Aspect::Color, 2, 2);
-        FakeStorage<int> f(Aspect::Color, 2, 2);
+    // Check that the aspect's value of 3 was correctly decompressed in layer 0.
+    CheckLayerCompressed(s, Aspect::Color, 0, true);
+    EXPECT_EQ(3, s.Get(Aspect::Color, 0, 0));
+    EXPECT_EQ(3, s.Get(Aspect::Color, 0, 1));
+}
 
-        // Update 0th mip levels to some value, it should decompress the aspect and both layers.
-        {
-            SubresourceRange range(Aspect::Color, {0, 2}, {0, 1});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data = 17; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, false);
-
-        // Update the whole resource by doing +1. The aspects and layers should stay decompressed.
-        {
-            SubresourceRange range = SubresourceRange::MakeFull(Aspect::Color, 2, 2);
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 1; });
-        }
-
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, false);
-    }
-
-    // The tests for Merge() all follow the same as the Update() tests except that they use Update()
-    // to set up the test storages.
-
-    // Similar to CallUpdateOnBoth but for Merge
-    template <typename T, typename U, typename F>
-    void CallMergeOnBoth(SubresourceStorage<T>* s,
-                         FakeStorage<T>* f,
-                         const SubresourceStorage<U>& other,
-                         F&& mergeFunc) {
-        RangeTracker tracker(*s);
-
-        s->Merge(other, [&](const SubresourceRange& range, T* data, const U& otherData) {
-            tracker.Track(range);
-            mergeFunc(range, data, otherData);
-        });
-        f->Merge(other, mergeFunc);
-
-        tracker.CheckTrackedExactly(
-            SubresourceRange::MakeFull(f->mAspects, f->mArrayLayerCount, f->mMipLevelCount));
-        f->CheckSameAs(*s);
-    }
-
-    // Test merging two fully compressed single-aspect resources.
-    TEST(SubresourceStorageTest, MergeFullWithFullSingleAspect) {
-        SubresourceStorage<int> s(Aspect::Color, 4, 6);
-        FakeStorage<int> f(Aspect::Color, 4, 6);
-
-        // Merge the whole resource in a single call.
-        SubresourceStorage<bool> other(Aspect::Color, 4, 6, true);
-        CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
-            if (other) {
-                *data = 13;
-            }
-        });
-
-        CheckAspectCompressed(s, Aspect::Color, true);
-    }
-
-    // Test merging two fully compressed multi-aspect resources.
-    TEST(SubresourceStorageTest, MergeFullWithFullMultiAspect) {
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, 6, 7);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, 6, 7);
-
-        // Merge the whole resource in a single call.
-        SubresourceStorage<bool> other(Aspect::Depth | Aspect::Stencil, 6, 7, true);
-        CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int* data, bool other) {
-            if (other) {
-                *data = 13;
-            }
-        });
-
-        CheckAspectCompressed(s, Aspect::Depth, true);
-        CheckAspectCompressed(s, Aspect::Stencil, true);
-    }
-
-    // Test merging a fully compressed resource in a resource with the "cross band" pattern.
-    //  - The first band is full layers [2, 3] on both aspects
-    //  - The second band is full mips [5, 6] on one aspect.
-    // This provides coverage of using a single piece of data from `other` to update all of `s`
-    TEST(SubresourceStorageTest, MergeFullInTwoBand) {
-        const uint32_t kLayers = 5;
-        const uint32_t kLevels = 9;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-
-        // Update the two bands
-        {
-            SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
-        {
-            SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
-            CallUpdateOnBoth(&s, &f, range, [](const SubresourceRange&, int* data) { *data += 5; });
-        }
-
-        // Merge the fully compressed resource.
-        SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 17);
-        CallMergeOnBoth(&s, &f, other,
-                        [](const SubresourceRange&, int* data, int other) { *data += other; });
-
-        // The layers traversed by the mip band are still uncompressed.
-        CheckLayerCompressed(s, Aspect::Depth, 1, false);
-        CheckLayerCompressed(s, Aspect::Depth, 2, false);
-        CheckLayerCompressed(s, Aspect::Depth, 3, false);
-        CheckLayerCompressed(s, Aspect::Depth, 4, false);
-
-        // Stencil is decompressed but all its layers are still compressed because there wasn't the
-        // mip band.
-        CheckAspectCompressed(s, Aspect::Stencil, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 1, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 4, true);
-    }
-    // Test the reverse, mergign two-bands in a full resource. This provides coverage for
-    // decompressing aspects / and partilly layers to match the compression of `other`
-    TEST(SubresourceStorageTest, MergeTwoBandInFull) {
-        const uint32_t kLayers = 5;
-        const uint32_t kLevels = 9;
-        SubresourceStorage<int> s(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
-        FakeStorage<int> f(Aspect::Depth | Aspect::Stencil, kLayers, kLevels, 75);
-
-        // Update the two bands
-        SubresourceStorage<int> other(Aspect::Depth | Aspect::Stencil, kLayers, kLevels);
-        {
-            SubresourceRange range(Aspect::Depth | Aspect::Stencil, {2, 2}, {0, kLevels});
-            other.Update(range, [](const SubresourceRange&, int* data) { *data += 3; });
-        }
-        {
-            SubresourceRange range(Aspect::Depth, {0, kLayers}, {5, 2});
-            other.Update(range, [](const SubresourceRange&, int* data) { *data += 5; });
-        }
-
-        // Merge the fully compressed resource.
-        CallMergeOnBoth(&s, &f, other,
-                        [](const SubresourceRange&, int* data, int other) { *data += other; });
-
-        // The layers traversed by the mip band are still uncompressed.
-        CheckLayerCompressed(s, Aspect::Depth, 1, false);
-        CheckLayerCompressed(s, Aspect::Depth, 2, false);
-        CheckLayerCompressed(s, Aspect::Depth, 3, false);
-        CheckLayerCompressed(s, Aspect::Depth, 4, false);
-
-        // Stencil is decompressed but all its layers are still compressed because there wasn't the
-        // mip band.
-        CheckAspectCompressed(s, Aspect::Stencil, false);
-        CheckLayerCompressed(s, Aspect::Stencil, 1, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 2, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 3, true);
-        CheckLayerCompressed(s, Aspect::Stencil, 4, true);
-    }
-
-    // Test merging storage with a layer band in a stipple patterned storage. This provide coverage
-    // for the code path that uses the same layer data for other multiple times.
-    TEST(SubresourceStorageTest, MergeLayerBandInStipple) {
-        const uint32_t kLayers = 3;
-        const uint32_t kLevels = 5;
-
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
-        SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
-
-        for (uint32_t layer = 0; layer < kLayers; layer++) {
-            for (uint32_t level = 0; level < kLevels; level++) {
-                if ((layer + level) % 2 == 0) {
-                    SubresourceRange range =
-                        SubresourceRange::MakeSingle(Aspect::Color, layer, level);
-                    CallUpdateOnBoth(&s, &f, range,
-                                     [](const SubresourceRange&, int* data) { *data += 17; });
-                }
-            }
-            if (layer % 2 == 0) {
-                other.Update({Aspect::Color, {layer, 1}, {0, kLevels}},
-                             [](const SubresourceRange&, int* data) { *data += 8; });
-            }
-        }
-
-        // Merge the band in the stipple.
-        CallMergeOnBoth(&s, &f, other,
-                        [](const SubresourceRange&, int* data, int other) { *data += other; });
-
-        // None of the resulting layers are compressed.
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-        CheckLayerCompressed(s, Aspect::Color, 1, false);
-        CheckLayerCompressed(s, Aspect::Color, 2, false);
-    }
-
-    // Regression test for a missing check that layer 0 is compressed when recompressing.
-    TEST(SubresourceStorageTest, Layer0NotCompressedBlocksAspectRecompression) {
-        const uint32_t kLayers = 2;
-        const uint32_t kLevels = 2;
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels);
-
-        // Set up s with zeros except (0, 1) which is garbage.
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 0, 1);
-            CallUpdateOnBoth(&s, &f, range,
-                             [](const SubresourceRange&, int* data) { *data += 0xABC; });
-        }
-
-        // Other is 2x2 of zeroes
-        SubresourceStorage<int> other(Aspect::Color, kLayers, kLevels);
-
-        // Fake updating F with other which is fully compressed and will trigger recompression.
-        CallMergeOnBoth(&s, &f, other, [](const SubresourceRange&, int*, int) {});
-
-        // The Color aspect should not have been recompressed.
-        CheckAspectCompressed(s, Aspect::Color, false);
-        CheckLayerCompressed(s, Aspect::Color, 0, false);
-    }
-
-    // Regression test for aspect decompression not copying to layer 0
-    TEST(SubresourceStorageTest, AspectDecompressionUpdatesLayer0) {
-        const uint32_t kLayers = 2;
-        const uint32_t kLevels = 2;
-        SubresourceStorage<int> s(Aspect::Color, kLayers, kLevels, 3);
-        FakeStorage<int> f(Aspect::Color, kLayers, kLevels, 3);
-
-        // Cause decompression by writing to a single subresource.
-        {
-            SubresourceRange range = SubresourceRange::MakeSingle(Aspect::Color, 1, 1);
-            CallUpdateOnBoth(&s, &f, range,
-                             [](const SubresourceRange&, int* data) { *data += 0xABC; });
-        }
-
-        // Check that the aspect's value of 3 was correctly decompressed in layer 0.
-        CheckLayerCompressed(s, Aspect::Color, 0, true);
-        EXPECT_EQ(3, s.Get(Aspect::Color, 0, 0));
-        EXPECT_EQ(3, s.Get(Aspect::Color, 0, 1));
-    }
-
-    // Bugs found while testing:
-    //  - mLayersCompressed not initialized to true.
-    //  - DecompressLayer setting Compressed to true instead of false.
-    //  - Get() checking for !compressed instead of compressed for the early exit.
-    //  - ASSERT in RecompressLayers was inverted.
-    //  - Two != being converted to == during a rework.
-    //  - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed.
-    //  - Missing decompression of layer 0 after introducing mInlineAspectData.
+// Bugs found while testing:
+//  - mLayersCompressed not initialized to true.
+//  - DecompressLayer setting Compressed to true instead of false.
+//  - Get() checking for !compressed instead of compressed for the early exit.
+//  - ASSERT in RecompressLayers was inverted.
+//  - Two != being converted to == during a rework.
+//  - (with ASSERT) that RecompressAspect didn't check that aspect 0 was compressed.
+//  - Missing decompression of layer 0 after introducing mInlineAspectData.
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/ToBackendTests.cpp b/src/dawn/tests/unittests/ToBackendTests.cpp
index 2ee3a75..8d64fce 100644
--- a/src/dawn/tests/unittests/ToBackendTests.cpp
+++ b/src/dawn/tests/unittests/ToBackendTests.cpp
@@ -21,7 +21,7 @@
 
 // Make our own Base - Backend object pair, reusing the AdapterBase name
 namespace dawn::native {
-    class AdapterBase : public RefCounted {};
+class AdapterBase : public RefCounted {};
 
 class MyAdapter : public AdapterBase {};
 
diff --git a/src/dawn/tests/unittests/VersionTests.cpp b/src/dawn/tests/unittests/VersionTests.cpp
index 173456c..ae7ea8b 100644
--- a/src/dawn/tests/unittests/VersionTests.cpp
+++ b/src/dawn/tests/unittests/VersionTests.cpp
@@ -18,13 +18,15 @@
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
-namespace dawn { namespace {
+namespace dawn {
+namespace {
 
-    using ::testing::SizeIs;
+using ::testing::SizeIs;
 
-    TEST(VersionTests, GitCommitHashLength) {
-        // Git hashes should be 40 characters long.
-        EXPECT_THAT(std::string(kGitHash), SizeIs(40));
-    }
+TEST(VersionTests, GitCommitHashLength) {
+    // Git hashes should be 40 characters long.
+    EXPECT_THAT(std::string(kGitHash), SizeIs(40));
+}
 
-}}  // namespace dawn::
+}  // namespace
+}  // namespace dawn
diff --git a/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
index 1f94b98..1bb99398 100644
--- a/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
+++ b/src/dawn/tests/unittests/d3d12/CopySplitTests.cpp
@@ -25,515 +25,505 @@
 #include "gtest/gtest.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
+namespace {
 
-        struct TextureSpec {
-            uint32_t x;
-            uint32_t y;
-            uint32_t z;
-            uint32_t width;
-            uint32_t height;
-            uint32_t depthOrArrayLayers;
-            uint32_t texelBlockSizeInBytes;
-            uint32_t blockWidth = 1;
-            uint32_t blockHeight = 1;
-        };
+struct TextureSpec {
+    uint32_t x;
+    uint32_t y;
+    uint32_t z;
+    uint32_t width;
+    uint32_t height;
+    uint32_t depthOrArrayLayers;
+    uint32_t texelBlockSizeInBytes;
+    uint32_t blockWidth = 1;
+    uint32_t blockHeight = 1;
+};
 
-        struct BufferSpec {
-            uint64_t offset;
-            uint32_t bytesPerRow;
-            uint32_t rowsPerImage;
-        };
+struct BufferSpec {
+    uint64_t offset;
+    uint32_t bytesPerRow;
+    uint32_t rowsPerImage;
+};
 
-        // Check that each copy region fits inside the buffer footprint
-        void ValidateFootprints(const TextureSpec& textureSpec,
-                                const BufferSpec& bufferSpec,
-                                const TextureCopySubresource& copySplit,
-                                wgpu::TextureDimension dimension) {
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width);
-                ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height);
-                ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers,
-                          copy.bufferSize.depthOrArrayLayers);
+// Check that each copy region fits inside the buffer footprint
+void ValidateFootprints(const TextureSpec& textureSpec,
+                        const BufferSpec& bufferSpec,
+                        const TextureCopySubresource& copySplit,
+                        wgpu::TextureDimension dimension) {
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        ASSERT_LE(copy.bufferOffset.x + copy.copySize.width, copy.bufferSize.width);
+        ASSERT_LE(copy.bufferOffset.y + copy.copySize.height, copy.bufferSize.height);
+        ASSERT_LE(copy.bufferOffset.z + copy.copySize.depthOrArrayLayers,
+                  copy.bufferSize.depthOrArrayLayers);
 
-                // If there are multiple layers, 2D texture splitter actually splits each layer
-                // independently. See the details in Compute2DTextureCopySplits(). As a result,
-                // if we simply expand a copy region generated by 2D texture splitter to all
-                // layers, the copy region might be OOB. But that is not the approach that the
-                // current 2D texture splitter is doing, although Compute2DTextureCopySubresource
-                // forwards "copySize.depthOrArrayLayers" to the copy region it generated. So skip
-                // the test below for 2D textures with multiple layers.
-                if (textureSpec.depthOrArrayLayers <= 1 ||
-                    dimension == wgpu::TextureDimension::e3D) {
-                    uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth;
-                    uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight;
-                    uint64_t minimumRequiredBufferSize =
-                        bufferSpec.offset +
-                        utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, bufferSpec.rowsPerImage,
-                                                   widthInBlocks, heightInBlocks,
-                                                   textureSpec.depthOrArrayLayers,
-                                                   textureSpec.texelBlockSizeInBytes);
+        // If there are multiple layers, 2D texture splitter actually splits each layer
+        // independently. See the details in Compute2DTextureCopySplits(). As a result,
+        // if we simply expand a copy region generated by 2D texture splitter to all
+        // layers, the copy region might be OOB. But that is not the approach that the
+        // current 2D texture splitter is doing, although Compute2DTextureCopySubresource
+        // forwards "copySize.depthOrArrayLayers" to the copy region it generated. So skip
+        // the test below for 2D textures with multiple layers.
+        if (textureSpec.depthOrArrayLayers <= 1 || dimension == wgpu::TextureDimension::e3D) {
+            uint32_t widthInBlocks = textureSpec.width / textureSpec.blockWidth;
+            uint32_t heightInBlocks = textureSpec.height / textureSpec.blockHeight;
+            uint64_t minimumRequiredBufferSize =
+                bufferSpec.offset +
+                utils::RequiredBytesInCopy(
+                    bufferSpec.bytesPerRow, bufferSpec.rowsPerImage, widthInBlocks, heightInBlocks,
+                    textureSpec.depthOrArrayLayers, textureSpec.texelBlockSizeInBytes);
 
-                    // The last pixel (buffer footprint) of each copy region depends on its
-                    // bufferOffset and copySize. It is not the last pixel where the bufferSize
-                    // ends.
-                    ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u);
-                    ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u);
-                    uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width;
-                    ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u);
-                    uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth;
+            // The last pixel (buffer footprint) of each copy region depends on its
+            // bufferOffset and copySize. It is not the last pixel where the bufferSize
+            // ends.
+            ASSERT_EQ(copy.bufferOffset.x % textureSpec.blockWidth, 0u);
+            ASSERT_EQ(copy.copySize.width % textureSpec.blockWidth, 0u);
+            uint32_t footprintWidth = copy.bufferOffset.x + copy.copySize.width;
+            ASSERT_EQ(footprintWidth % textureSpec.blockWidth, 0u);
+            uint32_t footprintWidthInBlocks = footprintWidth / textureSpec.blockWidth;
 
-                    ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u);
-                    ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u);
-                    uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height;
-                    ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u);
-                    uint32_t footprintHeightInBlocks = footprintHeight / textureSpec.blockHeight;
+            ASSERT_EQ(copy.bufferOffset.y % textureSpec.blockHeight, 0u);
+            ASSERT_EQ(copy.copySize.height % textureSpec.blockHeight, 0u);
+            uint32_t footprintHeight = copy.bufferOffset.y + copy.copySize.height;
+            ASSERT_EQ(footprintHeight % textureSpec.blockHeight, 0u);
+            uint32_t footprintHeightInBlocks = footprintHeight / textureSpec.blockHeight;
 
-                    uint64_t bufferSizeForFootprint =
-                        copy.alignedOffset +
-                        utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height,
-                                                   footprintWidthInBlocks, footprintHeightInBlocks,
-                                                   copy.bufferSize.depthOrArrayLayers,
-                                                   textureSpec.texelBlockSizeInBytes);
+            uint64_t bufferSizeForFootprint =
+                copy.alignedOffset +
+                utils::RequiredBytesInCopy(bufferSpec.bytesPerRow, copy.bufferSize.height,
+                                           footprintWidthInBlocks, footprintHeightInBlocks,
+                                           copy.bufferSize.depthOrArrayLayers,
+                                           textureSpec.texelBlockSizeInBytes);
 
-                    // The buffer footprint of each copy region should not exceed the minimum
-                    // required buffer size. Otherwise, pixels accessed by copy may be OOB.
-                    ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize);
-                }
-            }
+            // The buffer footprint of each copy region should not exceed the minimum
+            // required buffer size. Otherwise, pixels accessed by copy may be OOB.
+            ASSERT_LE(bufferSizeForFootprint, minimumRequiredBufferSize);
         }
+    }
+}
 
-        // Check that the offset is aligned
-        void ValidateOffset(const TextureCopySubresource& copySplit) {
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                ASSERT_TRUE(Align(copySplit.copies[i].alignedOffset,
-                                  D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
-                            copySplit.copies[i].alignedOffset);
-            }
+// Check that the offset is aligned
+void ValidateOffset(const TextureCopySubresource& copySplit) {
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        ASSERT_TRUE(
+            Align(copySplit.copies[i].alignedOffset, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT) ==
+            copySplit.copies[i].alignedOffset);
+    }
+}
+
+bool InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) {
+    return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB);
+}
+
+// Check that no pair of copy regions intersect each other
+void ValidateDisjoint(const TextureCopySubresource& copySplit) {
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& a = copySplit.copies[i];
+        for (uint32_t j = i + 1; j < copySplit.count; ++j) {
+            const auto& b = copySplit.copies[j];
+            // If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and
+            // 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be
+            // [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are
+            // included.
+            bool overlapX =
+                InclusiveRangesOverlap(a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1,
+                                       b.textureOffset.x, b.textureOffset.x + b.copySize.width - 1);
+            bool overlapY = InclusiveRangesOverlap(
+                a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1, b.textureOffset.y,
+                b.textureOffset.y + b.copySize.height - 1);
+            bool overlapZ = InclusiveRangesOverlap(
+                a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1,
+                b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1);
+            ASSERT_TRUE(!overlapX || !overlapY || !overlapZ);
         }
+    }
+}
 
-        bool InclusiveRangesOverlap(uint32_t minA, uint32_t maxA, uint32_t minB, uint32_t maxB) {
-            return (minA <= minB && minB <= maxA) || (minB <= minA && minA <= maxB);
+// Check that the union of the copy regions exactly covers the texture region
+void ValidateTextureBounds(const TextureSpec& textureSpec,
+                           const TextureCopySubresource& copySplit) {
+    ASSERT_GT(copySplit.count, 0u);
+
+    uint32_t minX = copySplit.copies[0].textureOffset.x;
+    uint32_t minY = copySplit.copies[0].textureOffset.y;
+    uint32_t minZ = copySplit.copies[0].textureOffset.z;
+    uint32_t maxX = copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
+    uint32_t maxY = copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
+    uint32_t maxZ =
+        copySplit.copies[0].textureOffset.z + copySplit.copies[0].copySize.depthOrArrayLayers;
+
+    for (uint32_t i = 1; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        minX = std::min(minX, copy.textureOffset.x);
+        minY = std::min(minY, copy.textureOffset.y);
+        minZ = std::min(minZ, copy.textureOffset.z);
+        maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width);
+        maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height);
+        maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers);
+    }
+
+    ASSERT_EQ(minX, textureSpec.x);
+    ASSERT_EQ(minY, textureSpec.y);
+    ASSERT_EQ(minZ, textureSpec.z);
+    ASSERT_EQ(maxX, textureSpec.x + textureSpec.width);
+    ASSERT_EQ(maxY, textureSpec.y + textureSpec.height);
+    ASSERT_EQ(maxZ, textureSpec.z + textureSpec.depthOrArrayLayers);
+}
+
+// Validate that the number of pixels copied is exactly equal to the number of pixels in the
+// texture region
+void ValidatePixelCount(const TextureSpec& textureSpec, const TextureCopySubresource& copySplit) {
+    uint32_t count = 0;
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        uint32_t copiedPixels =
+            copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers;
+        ASSERT_GT(copiedPixels, 0u);
+        count += copiedPixels;
+    }
+    ASSERT_EQ(count, textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
+}
+
+// Check that every buffer offset is at the correct pixel location
+void ValidateBufferOffset(const TextureSpec& textureSpec,
+                          const BufferSpec& bufferSpec,
+                          const TextureCopySubresource& copySplit,
+                          wgpu::TextureDimension dimension) {
+    ASSERT_GT(copySplit.count, 0u);
+
+    uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight;
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+
+        uint32_t bytesPerRowInTexels =
+            bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
+        uint32_t slicePitchInTexels =
+            bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight);
+        uint32_t absoluteTexelOffset =
+            copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock +
+            copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock +
+            copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels;
+
+        // There is one empty row at most in a 2D copy region. However, it is not true for
+        // a 3D texture copy region when we are copying the last row of each slice. We may
+        // need to offset a lot of rows and copy.bufferOffset.y may be big.
+        if (dimension == wgpu::TextureDimension::e2D) {
+            ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight);
         }
+        ASSERT_EQ(copy.bufferOffset.z, 0u);
 
-        // Check that no pair of copy regions intersect each other
-        void ValidateDisjoint(const TextureCopySubresource& copySplit) {
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& a = copySplit.copies[i];
-                for (uint32_t j = i + 1; j < copySplit.count; ++j) {
-                    const auto& b = copySplit.copies[j];
-                    // If textureOffset.x is 0, and copySize.width is 2, we are copying pixel 0 and
-                    // 1. We never touch pixel 2 on x-axis. So the copied range on x-axis should be
-                    // [textureOffset.x, textureOffset.x + copySize.width - 1] and both ends are
-                    // included.
-                    bool overlapX = InclusiveRangesOverlap(
-                        a.textureOffset.x, a.textureOffset.x + a.copySize.width - 1,
-                        b.textureOffset.x, b.textureOffset.x + b.copySize.width - 1);
-                    bool overlapY = InclusiveRangesOverlap(
-                        a.textureOffset.y, a.textureOffset.y + a.copySize.height - 1,
-                        b.textureOffset.y, b.textureOffset.y + b.copySize.height - 1);
-                    bool overlapZ = InclusiveRangesOverlap(
-                        a.textureOffset.z, a.textureOffset.z + a.copySize.depthOrArrayLayers - 1,
-                        b.textureOffset.z, b.textureOffset.z + b.copySize.depthOrArrayLayers - 1);
-                    ASSERT_TRUE(!overlapX || !overlapY || !overlapZ);
-                }
-            }
-        }
+        ASSERT_GE(absoluteTexelOffset,
+                  bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock);
+        uint32_t relativeTexelOffset = absoluteTexelOffset - bufferSpec.offset /
+                                                                 textureSpec.texelBlockSizeInBytes *
+                                                                 texelsPerBlock;
 
-        // Check that the union of the copy regions exactly covers the texture region
-        void ValidateTextureBounds(const TextureSpec& textureSpec,
-                                   const TextureCopySubresource& copySplit) {
-            ASSERT_GT(copySplit.count, 0u);
+        uint32_t z = relativeTexelOffset / slicePitchInTexels;
+        uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels;
+        uint32_t x = relativeTexelOffset % bytesPerRowInTexels;
 
-            uint32_t minX = copySplit.copies[0].textureOffset.x;
-            uint32_t minY = copySplit.copies[0].textureOffset.y;
-            uint32_t minZ = copySplit.copies[0].textureOffset.z;
-            uint32_t maxX =
-                copySplit.copies[0].textureOffset.x + copySplit.copies[0].copySize.width;
-            uint32_t maxY =
-                copySplit.copies[0].textureOffset.y + copySplit.copies[0].copySize.height;
-            uint32_t maxZ = copySplit.copies[0].textureOffset.z +
-                            copySplit.copies[0].copySize.depthOrArrayLayers;
+        ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x);
+        ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y);
+        ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z);
+    }
+}
 
-            for (uint32_t i = 1; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                minX = std::min(minX, copy.textureOffset.x);
-                minY = std::min(minY, copy.textureOffset.y);
-                minZ = std::min(minZ, copy.textureOffset.z);
-                maxX = std::max(maxX, copy.textureOffset.x + copy.copySize.width);
-                maxY = std::max(maxY, copy.textureOffset.y + copy.copySize.height);
-                maxZ = std::max(maxZ, copy.textureOffset.z + copy.copySize.depthOrArrayLayers);
-            }
+void ValidateCopySplit(const TextureSpec& textureSpec,
+                       const BufferSpec& bufferSpec,
+                       const TextureCopySubresource& copySplit,
+                       wgpu::TextureDimension dimension) {
+    ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension);
+    ValidateOffset(copySplit);
+    ValidateDisjoint(copySplit);
+    ValidateTextureBounds(textureSpec, copySplit);
+    ValidatePixelCount(textureSpec, copySplit);
+    ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension);
+}
 
-            ASSERT_EQ(minX, textureSpec.x);
-            ASSERT_EQ(minY, textureSpec.y);
-            ASSERT_EQ(minZ, textureSpec.z);
-            ASSERT_EQ(maxX, textureSpec.x + textureSpec.width);
-            ASSERT_EQ(maxY, textureSpec.y + textureSpec.height);
-            ASSERT_EQ(maxZ, textureSpec.z + textureSpec.depthOrArrayLayers);
-        }
+std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) {
+    os << "TextureSpec("
+       << "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), ("
+       << textureSpec.width << ", " << textureSpec.height << ", " << textureSpec.depthOrArrayLayers
+       << ")], " << textureSpec.texelBlockSizeInBytes << ")";
+    return os;
+}
 
-        // Validate that the number of pixels copied is exactly equal to the number of pixels in the
-        // texture region
-        void ValidatePixelCount(const TextureSpec& textureSpec,
-                                const TextureCopySubresource& copySplit) {
-            uint32_t count = 0;
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                uint32_t copiedPixels =
-                    copy.copySize.width * copy.copySize.height * copy.copySize.depthOrArrayLayers;
-                ASSERT_GT(copiedPixels, 0u);
-                count += copiedPixels;
-            }
-            ASSERT_EQ(count,
-                      textureSpec.width * textureSpec.height * textureSpec.depthOrArrayLayers);
-        }
+std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) {
+    os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", "
+       << bufferSpec.rowsPerImage << ")";
+    return os;
+}
 
-        // Check that every buffer offset is at the correct pixel location
-        void ValidateBufferOffset(const TextureSpec& textureSpec,
-                                  const BufferSpec& bufferSpec,
-                                  const TextureCopySubresource& copySplit,
-                                  wgpu::TextureDimension dimension) {
-            ASSERT_GT(copySplit.count, 0u);
+std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) {
+    os << "CopySplit" << std::endl;
+    for (uint32_t i = 0; i < copySplit.count; ++i) {
+        const auto& copy = copySplit.copies[i];
+        os << "  " << i << ": Texture at (" << copy.textureOffset.x << ", " << copy.textureOffset.y
+           << ", " << copy.textureOffset.z << "), size (" << copy.copySize.width << ", "
+           << copy.copySize.height << ", " << copy.copySize.depthOrArrayLayers << ")" << std::endl;
+        os << "  " << i << ": Buffer at (" << copy.bufferOffset.x << ", " << copy.bufferOffset.y
+           << ", " << copy.bufferOffset.z << "), footprint (" << copy.bufferSize.width << ", "
+           << copy.bufferSize.height << ", " << copy.bufferSize.depthOrArrayLayers << ")"
+           << std::endl;
+    }
+    return os;
+}
 
-            uint32_t texelsPerBlock = textureSpec.blockWidth * textureSpec.blockHeight;
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
+// Define base texture sizes and offsets to test with: some aligned, some unaligned
+constexpr TextureSpec kBaseTextureSpecs[] = {
+    {0, 0, 0, 1, 1, 1, 4},
+    {0, 0, 0, 64, 1, 1, 4},
+    {0, 0, 0, 128, 1, 1, 4},
+    {0, 0, 0, 192, 1, 1, 4},
+    {31, 16, 0, 1, 1, 1, 4},
+    {64, 16, 0, 1, 1, 1, 4},
+    {64, 16, 8, 1, 1, 1, 4},
 
-                uint32_t bytesPerRowInTexels =
-                    bufferSpec.bytesPerRow / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
-                uint32_t slicePitchInTexels =
-                    bytesPerRowInTexels * (bufferSpec.rowsPerImage / textureSpec.blockHeight);
-                uint32_t absoluteTexelOffset =
-                    copy.alignedOffset / textureSpec.texelBlockSizeInBytes * texelsPerBlock +
-                    copy.bufferOffset.x / textureSpec.blockWidth * texelsPerBlock +
-                    copy.bufferOffset.y / textureSpec.blockHeight * bytesPerRowInTexels;
+    {0, 0, 0, 64, 2, 1, 4},
+    {0, 0, 0, 64, 1, 2, 4},
+    {0, 0, 0, 64, 2, 2, 4},
+    {0, 0, 0, 128, 2, 1, 4},
+    {0, 0, 0, 128, 1, 2, 4},
+    {0, 0, 0, 128, 2, 2, 4},
+    {0, 0, 0, 192, 2, 1, 4},
+    {0, 0, 0, 192, 1, 2, 4},
+    {0, 0, 0, 192, 2, 2, 4},
 
-                // There is one empty row at most in a 2D copy region. However, it is not true for
-                // a 3D texture copy region when we are copying the last row of each slice. We may
-                // need to offset a lot rows and copy.bufferOffset.y may be big.
-                if (dimension == wgpu::TextureDimension::e2D) {
-                    ASSERT_LE(copy.bufferOffset.y, textureSpec.blockHeight);
-                }
-                ASSERT_EQ(copy.bufferOffset.z, 0u);
+    {0, 0, 0, 1024, 1024, 1, 4},
+    {256, 512, 0, 1024, 1024, 1, 4},
+    {64, 48, 0, 1024, 1024, 1, 4},
+    {64, 48, 16, 1024, 1024, 1024, 4},
 
-                ASSERT_GE(absoluteTexelOffset,
-                          bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock);
-                uint32_t relativeTexelOffset =
-                    absoluteTexelOffset -
-                    bufferSpec.offset / textureSpec.texelBlockSizeInBytes * texelsPerBlock;
+    {0, 0, 0, 257, 31, 1, 4},
+    {0, 0, 0, 17, 93, 1, 4},
+    {59, 13, 0, 257, 31, 1, 4},
+    {17, 73, 0, 17, 93, 1, 4},
+    {17, 73, 59, 17, 93, 99, 4},
 
-                uint32_t z = relativeTexelOffset / slicePitchInTexels;
-                uint32_t y = (relativeTexelOffset % slicePitchInTexels) / bytesPerRowInTexels;
-                uint32_t x = relativeTexelOffset % bytesPerRowInTexels;
+    {0, 0, 0, 4, 4, 1, 8, 4, 4},
+    {64, 16, 0, 4, 4, 1, 8, 4, 4},
+    {64, 16, 8, 4, 4, 1, 8, 4, 4},
+    {0, 0, 0, 4, 4, 1, 16, 4, 4},
+    {64, 16, 0, 4, 4, 1, 16, 4, 4},
+    {64, 16, 8, 4, 4, 1, 16, 4, 4},
 
-                ASSERT_EQ(copy.textureOffset.x - textureSpec.x, x);
-                ASSERT_EQ(copy.textureOffset.y - textureSpec.y, y);
-                ASSERT_EQ(copy.textureOffset.z - textureSpec.z, z);
-            }
-        }
+    {0, 0, 0, 1024, 1024, 1, 8, 4, 4},
+    {256, 512, 0, 1024, 1024, 1, 8, 4, 4},
+    {64, 48, 0, 1024, 1024, 1, 8, 4, 4},
+    {64, 48, 16, 1024, 1024, 1, 8, 4, 4},
+    {0, 0, 0, 1024, 1024, 1, 16, 4, 4},
+    {256, 512, 0, 1024, 1024, 1, 16, 4, 4},
+    {64, 48, 0, 1024, 1024, 1, 4, 16, 4},
+    {64, 48, 16, 1024, 1024, 1, 16, 4, 4},
+};
 
-        void ValidateCopySplit(const TextureSpec& textureSpec,
-                               const BufferSpec& bufferSpec,
-                               const TextureCopySubresource& copySplit,
-                               wgpu::TextureDimension dimension) {
-            ValidateFootprints(textureSpec, bufferSpec, copySplit, dimension);
-            ValidateOffset(copySplit);
-            ValidateDisjoint(copySplit);
-            ValidateTextureBounds(textureSpec, copySplit);
-            ValidatePixelCount(textureSpec, copySplit);
-            ValidateBufferOffset(textureSpec, bufferSpec, copySplit, dimension);
-        }
+// Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow
+// is the minimum required
+std::array<BufferSpec, 15> BaseBufferSpecs(const TextureSpec& textureSpec) {
+    uint32_t bytesPerRow =
+        Align(textureSpec.texelBlockSizeInBytes * textureSpec.width, kTextureBytesPerRowAlignment);
 
-        std::ostream& operator<<(std::ostream& os, const TextureSpec& textureSpec) {
-            os << "TextureSpec("
-               << "[(" << textureSpec.x << ", " << textureSpec.y << ", " << textureSpec.z << "), ("
-               << textureSpec.width << ", " << textureSpec.height << ", "
-               << textureSpec.depthOrArrayLayers << ")], " << textureSpec.texelBlockSizeInBytes
-               << ")";
-            return os;
-        }
-
-        std::ostream& operator<<(std::ostream& os, const BufferSpec& bufferSpec) {
-            os << "BufferSpec(" << bufferSpec.offset << ", " << bufferSpec.bytesPerRow << ", "
-               << bufferSpec.rowsPerImage << ")";
-            return os;
-        }
-
-        std::ostream& operator<<(std::ostream& os, const TextureCopySubresource& copySplit) {
-            os << "CopySplit" << std::endl;
-            for (uint32_t i = 0; i < copySplit.count; ++i) {
-                const auto& copy = copySplit.copies[i];
-                os << "  " << i << ": Texture at (" << copy.textureOffset.x << ", "
-                   << copy.textureOffset.y << ", " << copy.textureOffset.z << "), size ("
-                   << copy.copySize.width << ", " << copy.copySize.height << ", "
-                   << copy.copySize.depthOrArrayLayers << ")" << std::endl;
-                os << "  " << i << ": Buffer at (" << copy.bufferOffset.x << ", "
-                   << copy.bufferOffset.y << ", " << copy.bufferOffset.z << "), footprint ("
-                   << copy.bufferSize.width << ", " << copy.bufferSize.height << ", "
-                   << copy.bufferSize.depthOrArrayLayers << ")" << std::endl;
-            }
-            return os;
-        }
-
-        // Define base texture sizes and offsets to test with: some aligned, some unaligned
-        constexpr TextureSpec kBaseTextureSpecs[] = {
-            {0, 0, 0, 1, 1, 1, 4},
-            {0, 0, 0, 64, 1, 1, 4},
-            {0, 0, 0, 128, 1, 1, 4},
-            {0, 0, 0, 192, 1, 1, 4},
-            {31, 16, 0, 1, 1, 1, 4},
-            {64, 16, 0, 1, 1, 1, 4},
-            {64, 16, 8, 1, 1, 1, 4},
-
-            {0, 0, 0, 64, 2, 1, 4},
-            {0, 0, 0, 64, 1, 2, 4},
-            {0, 0, 0, 64, 2, 2, 4},
-            {0, 0, 0, 128, 2, 1, 4},
-            {0, 0, 0, 128, 1, 2, 4},
-            {0, 0, 0, 128, 2, 2, 4},
-            {0, 0, 0, 192, 2, 1, 4},
-            {0, 0, 0, 192, 1, 2, 4},
-            {0, 0, 0, 192, 2, 2, 4},
-
-            {0, 0, 0, 1024, 1024, 1, 4},
-            {256, 512, 0, 1024, 1024, 1, 4},
-            {64, 48, 0, 1024, 1024, 1, 4},
-            {64, 48, 16, 1024, 1024, 1024, 4},
-
-            {0, 0, 0, 257, 31, 1, 4},
-            {0, 0, 0, 17, 93, 1, 4},
-            {59, 13, 0, 257, 31, 1, 4},
-            {17, 73, 0, 17, 93, 1, 4},
-            {17, 73, 59, 17, 93, 99, 4},
-
-            {0, 0, 0, 4, 4, 1, 8, 4, 4},
-            {64, 16, 0, 4, 4, 1, 8, 4, 4},
-            {64, 16, 8, 4, 4, 1, 8, 4, 4},
-            {0, 0, 0, 4, 4, 1, 16, 4, 4},
-            {64, 16, 0, 4, 4, 1, 16, 4, 4},
-            {64, 16, 8, 4, 4, 1, 16, 4, 4},
-
-            {0, 0, 0, 1024, 1024, 1, 8, 4, 4},
-            {256, 512, 0, 1024, 1024, 1, 8, 4, 4},
-            {64, 48, 0, 1024, 1024, 1, 8, 4, 4},
-            {64, 48, 16, 1024, 1024, 1, 8, 4, 4},
-            {0, 0, 0, 1024, 1024, 1, 16, 4, 4},
-            {256, 512, 0, 1024, 1024, 1, 16, 4, 4},
-            {64, 48, 0, 1024, 1024, 1, 4, 16, 4},
-            {64, 48, 16, 1024, 1024, 1, 16, 4, 4},
-        };
-
-        // Define base buffer sizes to work with: some offsets aligned, some unaligned. bytesPerRow
-        // is the minimum required
-        std::array<BufferSpec, 15> BaseBufferSpecs(const TextureSpec& textureSpec) {
-            uint32_t bytesPerRow = Align(textureSpec.texelBlockSizeInBytes * textureSpec.width,
-                                         kTextureBytesPerRowAlignment);
-
-            auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t {
-                return value == 0 ? 0 : ((value - 1) / size + 1) * size;
-            };
-
-            return {
-                BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height * 2},
-
-                BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height * 2},
-
-                BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height},
-                BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
-                           textureSpec.height * 2},
-            };
-        }
-
-        // Define a list of values to set properties in the spec structs
-        constexpr uint32_t kCheckValues[] = {
-            1,  2,  3,  4,   5,   6,   7,    8,     // small values
-            16, 32, 64, 128, 256, 512, 1024, 2048,  // powers of 2
-            15, 31, 63, 127, 257, 511, 1023, 2047,  // misalignments
-            17, 33, 65, 129, 257, 513, 1025, 2049};
-
-    }  // namespace
-
-    class CopySplitTest : public testing::TestWithParam<wgpu::TextureDimension> {
-      protected:
-        void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) {
-            ASSERT(textureSpec.width % textureSpec.blockWidth == 0 &&
-                   textureSpec.height % textureSpec.blockHeight == 0);
-
-            wgpu::TextureDimension dimension = GetParam();
-            TextureCopySubresource copySplit;
-            switch (dimension) {
-                case wgpu::TextureDimension::e2D: {
-                    copySplit = Compute2DTextureCopySubresource(
-                        {textureSpec.x, textureSpec.y, textureSpec.z},
-                        {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
-                        {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
-                         textureSpec.blockHeight},
-                        bufferSpec.offset, bufferSpec.bytesPerRow);
-                    break;
-                }
-                case wgpu::TextureDimension::e3D: {
-                    copySplit = Compute3DTextureCopySplits(
-                        {textureSpec.x, textureSpec.y, textureSpec.z},
-                        {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
-                        {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
-                         textureSpec.blockHeight},
-                        bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
-                    break;
-                }
-                default:
-                    UNREACHABLE();
-                    break;
-            }
-
-            ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension);
-
-            if (HasFatalFailure()) {
-                std::ostringstream message;
-                message << "Failed generating splits: " << textureSpec << ", " << bufferSpec
-                        << std::endl
-                        << dimension << " " << copySplit << std::endl;
-                FAIL() << message.str();
-            }
-        }
+    auto alignNonPow2 = [](uint32_t value, uint32_t size) -> uint32_t {
+        return value == 0 ? 0 : ((value - 1) / size + 1) * size;
     };
 
-    TEST_P(CopySplitTest, General) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+    return {
+        BufferSpec{alignNonPow2(0, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(256, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(512, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1024, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height * 2},
+
+        BufferSpec{alignNonPow2(32, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(64, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height * 2},
+
+        BufferSpec{alignNonPow2(31, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(257, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(384, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(511, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(513, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height},
+        BufferSpec{alignNonPow2(1023, textureSpec.texelBlockSizeInBytes), bytesPerRow,
+                   textureSpec.height * 2},
+    };
+}
+
+// Define a list of values to set properties in the spec structs
+constexpr uint32_t kCheckValues[] = {1,  2,  3,  4,   5,   6,   7,    8,     // small values
+                                     16, 32, 64, 128, 256, 512, 1024, 2048,  // powers of 2
+                                     15, 31, 63, 127, 257, 511, 1023, 2047,  // misalignments
+                                     17, 33, 65, 129, 257, 513, 1025, 2049};
+
+}  // namespace
+
+class CopySplitTest : public testing::TestWithParam<wgpu::TextureDimension> {
+  protected:
+    void DoTest(const TextureSpec& textureSpec, const BufferSpec& bufferSpec) {
+        ASSERT(textureSpec.width % textureSpec.blockWidth == 0 &&
+               textureSpec.height % textureSpec.blockHeight == 0);
+
+        wgpu::TextureDimension dimension = GetParam();
+        TextureCopySubresource copySplit;
+        switch (dimension) {
+            case wgpu::TextureDimension::e2D: {
+                copySplit = Compute2DTextureCopySubresource(
+                    {textureSpec.x, textureSpec.y, textureSpec.z},
+                    {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
+                    {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
+                     textureSpec.blockHeight},
+                    bufferSpec.offset, bufferSpec.bytesPerRow);
+                break;
+            }
+            case wgpu::TextureDimension::e3D: {
+                copySplit = Compute3DTextureCopySplits(
+                    {textureSpec.x, textureSpec.y, textureSpec.z},
+                    {textureSpec.width, textureSpec.height, textureSpec.depthOrArrayLayers},
+                    {textureSpec.texelBlockSizeInBytes, textureSpec.blockWidth,
+                     textureSpec.blockHeight},
+                    bufferSpec.offset, bufferSpec.bytesPerRow, bufferSpec.rowsPerImage);
+                break;
+            }
+            default:
+                UNREACHABLE();
+                break;
+        }
+
+        ValidateCopySplit(textureSpec, bufferSpec, copySplit, dimension);
+
+        if (HasFatalFailure()) {
+            std::ostringstream message;
+            message << "Failed generating splits: " << textureSpec << ", " << bufferSpec
+                    << std::endl
+                    << dimension << " " << copySplit << std::endl;
+            FAIL() << message.str();
+        }
+    }
+};
+
+TEST_P(CopySplitTest, General) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            DoTest(textureSpec, bufferSpec);
+        }
+    }
+}
+
+TEST_P(CopySplitTest, TextureWidth) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            if (val % textureSpec.blockWidth != 0) {
+                continue;
+            }
+            textureSpec.width = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
                 DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    TEST_P(CopySplitTest, TextureWidth) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                if (val % textureSpec.blockWidth != 0) {
-                    continue;
-                }
-                textureSpec.width = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
+TEST_P(CopySplitTest, TextureHeight) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            if (val % textureSpec.blockHeight != 0) {
+                continue;
             }
-        }
-    }
-
-    TEST_P(CopySplitTest, TextureHeight) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                if (val % textureSpec.blockHeight != 0) {
-                    continue;
-                }
-                textureSpec.height = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, TextureX) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                textureSpec.x = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, TextureY) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t val : kCheckValues) {
-                textureSpec.y = val;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, TexelSize) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
-            for (uint32_t texelSize : {4, 8, 16, 32, 64}) {
-                textureSpec.texelBlockSizeInBytes = texelSize;
-                for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                    DoTest(textureSpec, bufferSpec);
-                }
-            }
-        }
-    }
-
-    TEST_P(CopySplitTest, BufferOffset) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+            textureSpec.height = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                for (uint32_t val : kCheckValues) {
-                    bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val;
-
-                    DoTest(textureSpec, bufferSpec);
-                }
+                DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    TEST_P(CopySplitTest, RowPitch) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+TEST_P(CopySplitTest, TextureX) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            textureSpec.x = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                uint32_t baseRowPitch = bufferSpec.bytesPerRow;
-                for (uint32_t i = 0; i < 5; ++i) {
-                    bufferSpec.bytesPerRow = baseRowPitch + i * 256;
-
-                    DoTest(textureSpec, bufferSpec);
-                }
+                DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    TEST_P(CopySplitTest, ImageHeight) {
-        for (TextureSpec textureSpec : kBaseTextureSpecs) {
+TEST_P(CopySplitTest, TextureY) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t val : kCheckValues) {
+            textureSpec.y = val;
             for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
-                uint32_t baseImageHeight = bufferSpec.rowsPerImage;
-                for (uint32_t i = 0; i < 5; ++i) {
-                    bufferSpec.rowsPerImage = baseImageHeight + i * 256;
-
-                    DoTest(textureSpec, bufferSpec);
-                }
+                DoTest(textureSpec, bufferSpec);
             }
         }
     }
+}
 
-    INSTANTIATE_TEST_SUITE_P(,
-                             CopySplitTest,
-                             testing::Values(wgpu::TextureDimension::e2D,
-                                             wgpu::TextureDimension::e3D));
+TEST_P(CopySplitTest, TexelSize) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (uint32_t texelSize : {4, 8, 16, 32, 64}) {
+            textureSpec.texelBlockSizeInBytes = texelSize;
+            for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, BufferOffset) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            for (uint32_t val : kCheckValues) {
+                bufferSpec.offset = textureSpec.texelBlockSizeInBytes * val;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, RowPitch) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            uint32_t baseRowPitch = bufferSpec.bytesPerRow;
+            for (uint32_t i = 0; i < 5; ++i) {
+                bufferSpec.bytesPerRow = baseRowPitch + i * 256;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+TEST_P(CopySplitTest, ImageHeight) {
+    for (TextureSpec textureSpec : kBaseTextureSpecs) {
+        for (BufferSpec bufferSpec : BaseBufferSpecs(textureSpec)) {
+            uint32_t baseImageHeight = bufferSpec.rowsPerImage;
+            for (uint32_t i = 0; i < 5; ++i) {
+                bufferSpec.rowsPerImage = baseImageHeight + i * 256;
+
+                DoTest(textureSpec, bufferSpec);
+            }
+        }
+    }
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+                         CopySplitTest,
+                         testing::Values(wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D));
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/unittests/native/CacheKeyTests.cpp b/src/dawn/tests/unittests/native/CacheKeyTests.cpp
index 42cd3e4..abd1acc 100644
--- a/src/dawn/tests/unittests/native/CacheKeyTests.cpp
+++ b/src/dawn/tests/unittests/native/CacheKeyTests.cpp
@@ -23,162 +23,161 @@
 
 namespace dawn::native {
 
-    // Testing classes with mock serializing implemented for testing.
-    class A {
-      public:
-        MOCK_METHOD(void, SerializeMock, (CacheKey*, const A&), (const));
-    };
-    template <>
-    void CacheKeySerializer<A>::Serialize(CacheKey* key, const A& t) {
-        t.SerializeMock(key, t);
+// Testing classes with mock serializing implemented for testing.
+class A {
+  public:
+    MOCK_METHOD(void, SerializeMock, (CacheKey*, const A&), (const));
+};
+template <>
+void CacheKeySerializer<A>::Serialize(CacheKey* key, const A& t) {
+    t.SerializeMock(key, t);
+}
+
+// Custom printer for CacheKey for clearer debug testing messages.
+void PrintTo(const CacheKey& key, std::ostream* stream) {
+    *stream << std::hex;
+    for (const int b : key) {
+        *stream << std::setfill('0') << std::setw(2) << b << " ";
+    }
+    *stream << std::dec;
+}
+
+namespace {
+
+using ::testing::InSequence;
+using ::testing::NotNull;
+using ::testing::PrintToString;
+using ::testing::Ref;
+
+// Matcher to compare CacheKeys for easier testing.
+MATCHER_P(CacheKeyEq, key, PrintToString(key)) {
+    return arg.size() == key.size() && memcmp(arg.data(), key.data(), key.size()) == 0;
+}
+
+TEST(CacheKeyTests, RecordSingleMember) {
+    CacheKey key;
+
+    A a;
+    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+    EXPECT_THAT(key.Record(a), CacheKeyEq(CacheKey()));
+}
+
+TEST(CacheKeyTests, RecordManyMembers) {
+    constexpr size_t kNumMembers = 100;
+
+    CacheKey key;
+    for (size_t i = 0; i < kNumMembers; ++i) {
+        A a;
+        EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        key.Record(a);
+    }
+    EXPECT_THAT(key, CacheKeyEq(CacheKey()));
+}
+
+TEST(CacheKeyTests, RecordIterable) {
+    constexpr size_t kIterableSize = 100;
+
+    // Expecting the size of the container.
+    CacheKey expected;
+    expected.Record(kIterableSize);
+
+    std::vector<A> iterable(kIterableSize);
+    {
+        InSequence seq;
+        for (const auto& a : iterable) {
+            EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        }
+        for (const auto& a : iterable) {
+            EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        }
     }
 
-    // Custom printer for CacheKey for clearer debug testing messages.
-    void PrintTo(const CacheKey& key, std::ostream* stream) {
-        *stream << std::hex;
-        for (const int b : key) {
-            *stream << std::setfill('0') << std::setw(2) << b << " ";
-        }
-        *stream << std::dec;
+    EXPECT_THAT(CacheKey().RecordIterable(iterable), CacheKeyEq(expected));
+    EXPECT_THAT(CacheKey().RecordIterable(iterable.data(), kIterableSize), CacheKeyEq(expected));
+}
+
+TEST(CacheKeyTests, RecordNested) {
+    CacheKey expected;
+    CacheKey actual;
+    {
+        // Recording a single member.
+        A a;
+        EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
+        actual.Record(CacheKey().Record(a));
     }
-
-    namespace {
-
-        using ::testing::InSequence;
-        using ::testing::NotNull;
-        using ::testing::PrintToString;
-        using ::testing::Ref;
-
-        // Matcher to compare CacheKeys for easier testing.
-        MATCHER_P(CacheKeyEq, key, PrintToString(key)) {
-            return arg.size() == key.size() && memcmp(arg.data(), key.data(), key.size()) == 0;
-        }
-
-        TEST(CacheKeyTests, RecordSingleMember) {
-            CacheKey key;
-
+    {
+        // Recording multiple members.
+        constexpr size_t kNumMembers = 2;
+        CacheKey sub;
+        for (size_t i = 0; i < kNumMembers; ++i) {
             A a;
             EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-            EXPECT_THAT(key.Record(a), CacheKeyEq(CacheKey()));
+            sub.Record(a);
         }
-
-        TEST(CacheKeyTests, RecordManyMembers) {
-            constexpr size_t kNumMembers = 100;
-
-            CacheKey key;
-            for (size_t i = 0; i < kNumMembers; ++i) {
-                A a;
+        actual.Record(sub);
+    }
+    {
+        // Record an iterable.
+        constexpr size_t kIterableSize = 2;
+        expected.Record(kIterableSize);
+        std::vector<A> iterable(kIterableSize);
+        {
+            InSequence seq;
+            for (const auto& a : iterable) {
                 EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                key.Record(a);
             }
-            EXPECT_THAT(key, CacheKeyEq(CacheKey()));
         }
+        actual.Record(CacheKey().RecordIterable(iterable));
+    }
+    EXPECT_THAT(actual, CacheKeyEq(expected));
+}
 
-        TEST(CacheKeyTests, RecordIterable) {
-            constexpr size_t kIterableSize = 100;
+TEST(CacheKeySerializerTests, IntegralTypes) {
+    // Only testing explicitly sized types for simplicity, and using 0s for larger types to
+    // avoid dealing with endianess.
+    EXPECT_THAT(CacheKey().Record('c'), CacheKeyEq(CacheKey({'c'})));
+    EXPECT_THAT(CacheKey().Record(uint8_t(255)), CacheKeyEq(CacheKey({255})));
+    EXPECT_THAT(CacheKey().Record(uint16_t(0)), CacheKeyEq(CacheKey({0, 0})));
+    EXPECT_THAT(CacheKey().Record(uint32_t(0)), CacheKeyEq(CacheKey({0, 0, 0, 0})));
+}
 
-            // Expecting the size of the container.
-            CacheKey expected;
-            expected.Record(kIterableSize);
+TEST(CacheKeySerializerTests, FloatingTypes) {
+    // Using 0s to avoid dealing with implementation specific float details.
+    EXPECT_THAT(CacheKey().Record(float{0}), CacheKeyEq(CacheKey(sizeof(float), 0)));
+    EXPECT_THAT(CacheKey().Record(double{0}), CacheKeyEq(CacheKey(sizeof(double), 0)));
+}
 
-            std::vector<A> iterable(kIterableSize);
-            {
-                InSequence seq;
-                for (const auto& a : iterable) {
-                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                }
-                for (const auto& a : iterable) {
-                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                }
-            }
+TEST(CacheKeySerializerTests, LiteralStrings) {
+    // Using a std::string here to help with creating the expected result.
+    std::string str = "string";
 
-            EXPECT_THAT(CacheKey().RecordIterable(iterable), CacheKeyEq(expected));
-            EXPECT_THAT(CacheKey().RecordIterable(iterable.data(), kIterableSize),
-                        CacheKeyEq(expected));
-        }
+    CacheKey expected;
+    expected.Record(size_t(7));
+    expected.insert(expected.end(), str.begin(), str.end());
+    expected.push_back('\0');
 
-        TEST(CacheKeyTests, RecordNested) {
-            CacheKey expected;
-            CacheKey actual;
-            {
-                // Recording a single member.
-                A a;
-                EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                actual.Record(CacheKey().Record(a));
-            }
-            {
-                // Recording multiple members.
-                constexpr size_t kNumMembers = 2;
-                CacheKey sub;
-                for (size_t i = 0; i < kNumMembers; ++i) {
-                    A a;
-                    EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                    sub.Record(a);
-                }
-                actual.Record(sub);
-            }
-            {
-                // Record an iterable.
-                constexpr size_t kIterableSize = 2;
-                expected.Record(kIterableSize);
-                std::vector<A> iterable(kIterableSize);
-                {
-                    InSequence seq;
-                    for (const auto& a : iterable) {
-                        EXPECT_CALL(a, SerializeMock(NotNull(), Ref(a))).Times(1);
-                    }
-                }
-                actual.Record(CacheKey().RecordIterable(iterable));
-            }
-            EXPECT_THAT(actual, CacheKeyEq(expected));
-        }
+    EXPECT_THAT(CacheKey().Record("string"), CacheKeyEq(expected));
+}
 
-        TEST(CacheKeySerializerTests, IntegralTypes) {
-            // Only testing explicitly sized types for simplicity, and using 0s for larger types to
-            // avoid dealing with endianess.
-            EXPECT_THAT(CacheKey().Record('c'), CacheKeyEq(CacheKey({'c'})));
-            EXPECT_THAT(CacheKey().Record(uint8_t(255)), CacheKeyEq(CacheKey({255})));
-            EXPECT_THAT(CacheKey().Record(uint16_t(0)), CacheKeyEq(CacheKey({0, 0})));
-            EXPECT_THAT(CacheKey().Record(uint32_t(0)), CacheKeyEq(CacheKey({0, 0, 0, 0})));
-        }
+TEST(CacheKeySerializerTests, StdStrings) {
+    std::string str = "string";
 
-        TEST(CacheKeySerializerTests, FloatingTypes) {
-            // Using 0s to avoid dealing with implementation specific float details.
-            EXPECT_THAT(CacheKey().Record(float{0}), CacheKeyEq(CacheKey(sizeof(float), 0)));
-            EXPECT_THAT(CacheKey().Record(double{0}), CacheKeyEq(CacheKey(sizeof(double), 0)));
-        }
+    CacheKey expected;
+    expected.Record((size_t)6);
+    expected.insert(expected.end(), str.begin(), str.end());
 
-        TEST(CacheKeySerializerTests, LiteralStrings) {
-            // Using a std::string here to help with creating the expected result.
-            std::string str = "string";
+    EXPECT_THAT(CacheKey().Record(str), CacheKeyEq(expected));
+}
 
-            CacheKey expected;
-            expected.Record(size_t(7));
-            expected.insert(expected.end(), str.begin(), str.end());
-            expected.push_back('\0');
+TEST(CacheKeySerializerTests, CacheKeys) {
+    CacheKey data = {'d', 'a', 't', 'a'};
 
-            EXPECT_THAT(CacheKey().Record("string"), CacheKeyEq(expected));
-        }
+    CacheKey expected;
+    expected.insert(expected.end(), data.begin(), data.end());
 
-        TEST(CacheKeySerializerTests, StdStrings) {
-            std::string str = "string";
+    EXPECT_THAT(CacheKey().Record(data), CacheKeyEq(expected));
+}
 
-            CacheKey expected;
-            expected.Record((size_t)6);
-            expected.insert(expected.end(), str.begin(), str.end());
-
-            EXPECT_THAT(CacheKey().Record(str), CacheKeyEq(expected));
-        }
-
-        TEST(CacheKeySerializerTests, CacheKeys) {
-            CacheKey data = {'d', 'a', 't', 'a'};
-
-            CacheKey expected;
-            expected.insert(expected.end(), data.begin(), data.end());
-
-            EXPECT_THAT(CacheKey().Record(data), CacheKeyEq(expected));
-        }
-
-    }  // namespace
+}  // namespace
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
index 6a6f19f..cf76918 100644
--- a/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
+++ b/src/dawn/tests/unittests/native/CommandBufferEncodingTests.cpp
@@ -23,293 +23,290 @@
 
 namespace dawn::native {
 
-    class CommandBufferEncodingTests : public DawnNativeTest {
-      protected:
-        void ExpectCommands(
-            dawn::native::CommandIterator* commands,
-            std::vector<std::pair<dawn::native::Command,
-                                  std::function<void(dawn::native::CommandIterator*)>>>
-                expectedCommands) {
-            dawn::native::Command commandId;
-            for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) {
-                ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command";
-                ASSERT_EQ(commandId, expectedCommands[commandIndex].first)
-                    << "at command " << commandIndex;
-                expectedCommands[commandIndex].second(commands);
+class CommandBufferEncodingTests : public DawnNativeTest {
+  protected:
+    void ExpectCommands(dawn::native::CommandIterator* commands,
+                        std::vector<std::pair<dawn::native::Command,
+                                              std::function<void(dawn::native::CommandIterator*)>>>
+                            expectedCommands) {
+        dawn::native::Command commandId;
+        for (uint32_t commandIndex = 0; commands->NextCommandId(&commandId); ++commandIndex) {
+            ASSERT_LT(commandIndex, expectedCommands.size()) << "Unexpected command";
+            ASSERT_EQ(commandId, expectedCommands[commandIndex].first)
+                << "at command " << commandIndex;
+            expectedCommands[commandIndex].second(commands);
+        }
+    }
+};
+
+// Indirect dispatch validation changes the bind groups in the middle
+// of a pass. Test that bindings are restored after the validation runs.
+TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) {
+    wgpu::BindGroupLayout staticLayout =
+        utils::MakeBindGroupLayout(device, {{
+                                               0,
+                                               wgpu::ShaderStage::Compute,
+                                               wgpu::BufferBindingType::Uniform,
+                                           }});
+
+    wgpu::BindGroupLayout dynamicLayout =
+        utils::MakeBindGroupLayout(device, {{
+                                               0,
+                                               wgpu::ShaderStage::Compute,
+                                               wgpu::BufferBindingType::Uniform,
+                                               true,
+                                           }});
+
+    // Create a simple pipeline
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
+        @stage(compute) @workgroup_size(1, 1, 1)
+        fn main() {
+        })");
+    csDesc.compute.entryPoint = "main";
+
+    wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout});
+    csDesc.layout = pl0;
+    wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc);
+
+    wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout});
+    csDesc.layout = pl1;
+    wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc);
+
+    // Create buffers to use for both the indirect buffer and the bind groups.
+    wgpu::Buffer indirectBuffer =
+        utils::CreateBufferFromData<uint32_t>(device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
+
+    wgpu::BufferDescriptor uniformBufferDesc = {};
+    uniformBufferDesc.size = 512;
+    uniformBufferDesc.usage = wgpu::BufferUsage::Uniform;
+    wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc);
+
+    wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}});
+
+    wgpu::BindGroup dynamicBG =
+        utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}});
+
+    uint32_t dynamicOffset = 256;
+    std::vector<uint32_t> emptyDynamicOffsets = {};
+    std::vector<uint32_t> singleDynamicOffset = {dynamicOffset};
+
+    // Begin encoding commands.
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+    CommandBufferStateTracker* stateTracker =
+        FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
+
+    // Perform a dispatch indirect which will be preceded by a validation dispatch.
+    pass.SetPipeline(pipeline0);
+    pass.SetBindGroup(0, staticBG);
+    pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset);
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+
+    pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
+
+    // Dispatch again to check that the restored state can be used.
+    // Also pass an indirect offset which should get replaced with the offset
+    // into the scratch indirect buffer (0).
+    pass.DispatchWorkgroupsIndirect(indirectBuffer, 4);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
+
+    // Change the pipeline
+    pass.SetPipeline(pipeline1);
+    pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset);
+    pass.SetBindGroup(1, staticBG);
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
+
+    pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
+
+    // Expect restored state.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset);
+    EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get());
+    EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets);
+
+    pass.End();
+
+    wgpu::CommandBuffer commandBuffer = encoder.Finish();
+
+    auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) {
+        return [pipeline](CommandIterator* commands) {
+            auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
+            EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get());
+        };
+    };
+
+    auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg,
+                                 std::vector<uint32_t> offsets = {}) {
+        return [index, bg, offsets](CommandIterator* commands) {
+            auto* cmd = commands->NextCommand<SetBindGroupCmd>();
+            uint32_t* dynamicOffsets = nullptr;
+            if (cmd->dynamicOffsetCount > 0) {
+                dynamicOffsets = commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
             }
+
+            ASSERT_EQ(cmd->index, BindGroupIndex(index));
+            ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get());
+            ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size());
+            for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) {
+                ASSERT_EQ(dynamicOffsets[i], offsets[i]);
+            }
+        };
+    };
+
+    // Initialize as null. Once we know the pointer, we'll check
+    // that it's the same buffer every time.
+    WGPUBuffer indirectScratchBuffer = nullptr;
+    auto ExpectDispatchIndirect = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<DispatchIndirectCmd>();
+        if (indirectScratchBuffer == nullptr) {
+            indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get());
+        }
+        ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer);
+        ASSERT_EQ(cmd->indirectOffset, uint64_t(0));
+    };
+
+    // Initialize as null. Once we know the pointer, we'll check
+    // that it's the same pipeline every time.
+    WGPUComputePipeline validationPipeline = nullptr;
+    auto ExpectSetValidationPipeline = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
+        WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get());
+        if (validationPipeline != nullptr) {
+            EXPECT_EQ(pipeline, validationPipeline);
+        } else {
+            EXPECT_NE(pipeline, nullptr);
+            validationPipeline = pipeline;
         }
     };
 
-    // Indirect dispatch validation changes the bind groups in the middle
-    // of a pass. Test that bindings are restored after the validation runs.
-    TEST_F(CommandBufferEncodingTests, ComputePassEncoderIndirectDispatchStateRestoration) {
-        wgpu::BindGroupLayout staticLayout =
-            utils::MakeBindGroupLayout(device, {{
-                                                   0,
-                                                   wgpu::ShaderStage::Compute,
-                                                   wgpu::BufferBindingType::Uniform,
-                                               }});
+    auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<SetBindGroupCmd>();
+        ASSERT_EQ(cmd->index, BindGroupIndex(0));
+        ASSERT_NE(cmd->group.Get(), nullptr);
+        ASSERT_EQ(cmd->dynamicOffsetCount, 0u);
+    };
 
-        wgpu::BindGroupLayout dynamicLayout =
-            utils::MakeBindGroupLayout(device, {{
-                                                   0,
-                                                   wgpu::ShaderStage::Compute,
-                                                   wgpu::BufferBindingType::Uniform,
-                                                   true,
-                                               }});
+    auto ExpectSetValidationDispatch = [&](CommandIterator* commands) {
+        auto* cmd = commands->NextCommand<DispatchCmd>();
+        ASSERT_EQ(cmd->x, 1u);
+        ASSERT_EQ(cmd->y, 1u);
+        ASSERT_EQ(cmd->z, 1u);
+    };
 
-        // Create a simple pipeline
-        wgpu::ComputePipelineDescriptor csDesc;
-        csDesc.compute.module = utils::CreateShaderModule(device, R"(
+    ExpectCommands(
+        FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(),
+        {
+            {Command::BeginComputePass,
+             [&](CommandIterator* commands) { SkipCommand(commands, Command::BeginComputePass); }},
+            // Expect the state to be set.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            // Expect the state to be set (new pipeline).
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
+
+            // Expect the validation.
+            {Command::SetComputePipeline, ExpectSetValidationPipeline},
+            {Command::SetBindGroup, ExpectSetValidationBindGroup},
+            {Command::Dispatch, ExpectSetValidationDispatch},
+
+            // Expect the state to be restored.
+            {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
+            {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
+            {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
+
+            // Expect the dispatchIndirect.
+            {Command::DispatchIndirect, ExpectDispatchIndirect},
+
+            {Command::EndComputePass,
+             [&](CommandIterator* commands) { commands->NextCommand<EndComputePassCmd>(); }},
+        });
+}
+
+// Test that after restoring state, it is fully applied to the state tracker
+// and does not leak state changes that occurred between a snapshot and the
+// state restoration.
+TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+    CommandBufferStateTracker* stateTracker =
+        FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
+
+    // Snapshot the state.
+    CommandBufferStateTracker snapshot = *stateTracker;
+    // Expect no pipeline in the snapshot
+    EXPECT_FALSE(snapshot.HasPipeline());
+
+    // Create a simple pipeline
+    wgpu::ComputePipelineDescriptor csDesc;
+    csDesc.compute.module = utils::CreateShaderModule(device, R"(
         @stage(compute) @workgroup_size(1, 1, 1)
         fn main() {
         })");
-        csDesc.compute.entryPoint = "main";
+    csDesc.compute.entryPoint = "main";
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
 
-        wgpu::PipelineLayout pl0 = utils::MakePipelineLayout(device, {staticLayout, dynamicLayout});
-        csDesc.layout = pl0;
-        wgpu::ComputePipeline pipeline0 = device.CreateComputePipeline(&csDesc);
+    // Set the pipeline.
+    pass.SetPipeline(pipeline);
 
-        wgpu::PipelineLayout pl1 = utils::MakePipelineLayout(device, {dynamicLayout, staticLayout});
-        csDesc.layout = pl1;
-        wgpu::ComputePipeline pipeline1 = device.CreateComputePipeline(&csDesc);
+    // Expect the pipeline to be set.
+    EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get());
 
-        // Create buffers to use for both the indirect buffer and the bind groups.
-        wgpu::Buffer indirectBuffer = utils::CreateBufferFromData<uint32_t>(
-            device, wgpu::BufferUsage::Indirect, {1, 2, 3, 4});
+    // Restore the state.
+    FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot));
 
-        wgpu::BufferDescriptor uniformBufferDesc = {};
-        uniformBufferDesc.size = 512;
-        uniformBufferDesc.usage = wgpu::BufferUsage::Uniform;
-        wgpu::Buffer uniformBuffer = device.CreateBuffer(&uniformBufferDesc);
-
-        wgpu::BindGroup staticBG = utils::MakeBindGroup(device, staticLayout, {{0, uniformBuffer}});
-
-        wgpu::BindGroup dynamicBG =
-            utils::MakeBindGroup(device, dynamicLayout, {{0, uniformBuffer, 0, 256}});
-
-        uint32_t dynamicOffset = 256;
-        std::vector<uint32_t> emptyDynamicOffsets = {};
-        std::vector<uint32_t> singleDynamicOffset = {dynamicOffset};
-
-        // Begin encoding commands.
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-        CommandBufferStateTracker* stateTracker =
-            FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
-
-        // Perform a dispatch indirect which will be preceded by a validation dispatch.
-        pass.SetPipeline(pipeline0);
-        pass.SetBindGroup(0, staticBG);
-        pass.SetBindGroup(1, dynamicBG, 1, &dynamicOffset);
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
-
-        pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
-
-        // Expect restored state.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
-
-        // Dispatch again to check that the restored state can be used.
-        // Also pass an indirect offset which should get replaced with the offset
-        // into the scratch indirect buffer (0).
-        pass.DispatchWorkgroupsIndirect(indirectBuffer, 4);
-
-        // Expect restored state.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl0.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), staticBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), emptyDynamicOffsets);
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), dynamicBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), singleDynamicOffset);
-
-        // Change the pipeline
-        pass.SetPipeline(pipeline1);
-        pass.SetBindGroup(0, dynamicBG, 1, &dynamicOffset);
-        pass.SetBindGroup(1, staticBG);
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
-
-        pass.DispatchWorkgroupsIndirect(indirectBuffer, 0);
-
-        // Expect restored state.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline1.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetPipelineLayout()), pl1.Get());
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(0))), dynamicBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(0)), singleDynamicOffset);
-        EXPECT_EQ(ToAPI(stateTracker->GetBindGroup(BindGroupIndex(1))), staticBG.Get());
-        EXPECT_EQ(stateTracker->GetDynamicOffsets(BindGroupIndex(1)), emptyDynamicOffsets);
-
-        pass.End();
-
-        wgpu::CommandBuffer commandBuffer = encoder.Finish();
-
-        auto ExpectSetPipeline = [](wgpu::ComputePipeline pipeline) {
-            return [pipeline](CommandIterator* commands) {
-                auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
-                EXPECT_EQ(ToAPI(cmd->pipeline.Get()), pipeline.Get());
-            };
-        };
-
-        auto ExpectSetBindGroup = [](uint32_t index, wgpu::BindGroup bg,
-                                     std::vector<uint32_t> offsets = {}) {
-            return [index, bg, offsets](CommandIterator* commands) {
-                auto* cmd = commands->NextCommand<SetBindGroupCmd>();
-                uint32_t* dynamicOffsets = nullptr;
-                if (cmd->dynamicOffsetCount > 0) {
-                    dynamicOffsets = commands->NextData<uint32_t>(cmd->dynamicOffsetCount);
-                }
-
-                ASSERT_EQ(cmd->index, BindGroupIndex(index));
-                ASSERT_EQ(ToAPI(cmd->group.Get()), bg.Get());
-                ASSERT_EQ(cmd->dynamicOffsetCount, offsets.size());
-                for (uint32_t i = 0; i < cmd->dynamicOffsetCount; ++i) {
-                    ASSERT_EQ(dynamicOffsets[i], offsets[i]);
-                }
-            };
-        };
-
-        // Initialize as null. Once we know the pointer, we'll check
-        // that it's the same buffer every time.
-        WGPUBuffer indirectScratchBuffer = nullptr;
-        auto ExpectDispatchIndirect = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<DispatchIndirectCmd>();
-            if (indirectScratchBuffer == nullptr) {
-                indirectScratchBuffer = ToAPI(cmd->indirectBuffer.Get());
-            }
-            ASSERT_EQ(ToAPI(cmd->indirectBuffer.Get()), indirectScratchBuffer);
-            ASSERT_EQ(cmd->indirectOffset, uint64_t(0));
-        };
-
-        // Initialize as null. Once we know the pointer, we'll check
-        // that it's the same pipeline every time.
-        WGPUComputePipeline validationPipeline = nullptr;
-        auto ExpectSetValidationPipeline = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<SetComputePipelineCmd>();
-            WGPUComputePipeline pipeline = ToAPI(cmd->pipeline.Get());
-            if (validationPipeline != nullptr) {
-                EXPECT_EQ(pipeline, validationPipeline);
-            } else {
-                EXPECT_NE(pipeline, nullptr);
-                validationPipeline = pipeline;
-            }
-        };
-
-        auto ExpectSetValidationBindGroup = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<SetBindGroupCmd>();
-            ASSERT_EQ(cmd->index, BindGroupIndex(0));
-            ASSERT_NE(cmd->group.Get(), nullptr);
-            ASSERT_EQ(cmd->dynamicOffsetCount, 0u);
-        };
-
-        auto ExpectSetValidationDispatch = [&](CommandIterator* commands) {
-            auto* cmd = commands->NextCommand<DispatchCmd>();
-            ASSERT_EQ(cmd->x, 1u);
-            ASSERT_EQ(cmd->y, 1u);
-            ASSERT_EQ(cmd->z, 1u);
-        };
-
-        ExpectCommands(
-            FromAPI(commandBuffer.Get())->GetCommandIteratorForTesting(),
-            {
-                {Command::BeginComputePass,
-                 [&](CommandIterator* commands) {
-                     SkipCommand(commands, Command::BeginComputePass);
-                 }},
-                // Expect the state to be set.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
-
-                // Expect the validation.
-                {Command::SetComputePipeline, ExpectSetValidationPipeline},
-                {Command::SetBindGroup, ExpectSetValidationBindGroup},
-                {Command::Dispatch, ExpectSetValidationDispatch},
-
-                // Expect the state to be restored.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
-
-                // Expect the dispatchIndirect.
-                {Command::DispatchIndirect, ExpectDispatchIndirect},
-
-                // Expect the validation.
-                {Command::SetComputePipeline, ExpectSetValidationPipeline},
-                {Command::SetBindGroup, ExpectSetValidationBindGroup},
-                {Command::Dispatch, ExpectSetValidationDispatch},
-
-                // Expect the state to be restored.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline0)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, staticBG)},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, dynamicBG, {dynamicOffset})},
-
-                // Expect the dispatchIndirect.
-                {Command::DispatchIndirect, ExpectDispatchIndirect},
-
-                // Expect the state to be set (new pipeline).
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
-
-                // Expect the validation.
-                {Command::SetComputePipeline, ExpectSetValidationPipeline},
-                {Command::SetBindGroup, ExpectSetValidationBindGroup},
-                {Command::Dispatch, ExpectSetValidationDispatch},
-
-                // Expect the state to be restored.
-                {Command::SetComputePipeline, ExpectSetPipeline(pipeline1)},
-                {Command::SetBindGroup, ExpectSetBindGroup(0, dynamicBG, {dynamicOffset})},
-                {Command::SetBindGroup, ExpectSetBindGroup(1, staticBG)},
-
-                // Expect the dispatchIndirect.
-                {Command::DispatchIndirect, ExpectDispatchIndirect},
-
-                {Command::EndComputePass,
-                 [&](CommandIterator* commands) { commands->NextCommand<EndComputePassCmd>(); }},
-            });
-    }
-
-    // Test that after restoring state, it is fully applied to the state tracker
-    // and does not leak state changes that occured between a snapshot and the
-    // state restoration.
-    TEST_F(CommandBufferEncodingTests, StateNotLeakedAfterRestore) {
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-        CommandBufferStateTracker* stateTracker =
-            FromAPI(pass.Get())->GetCommandBufferStateTrackerForTesting();
-
-        // Snapshot the state.
-        CommandBufferStateTracker snapshot = *stateTracker;
-        // Expect no pipeline in the snapshot
-        EXPECT_FALSE(snapshot.HasPipeline());
-
-        // Create a simple pipeline
-        wgpu::ComputePipelineDescriptor csDesc;
-        csDesc.compute.module = utils::CreateShaderModule(device, R"(
-        @stage(compute) @workgroup_size(1, 1, 1)
-        fn main() {
-        })");
-        csDesc.compute.entryPoint = "main";
-        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&csDesc);
-
-        // Set the pipeline.
-        pass.SetPipeline(pipeline);
-
-        // Expect the pipeline to be set.
-        EXPECT_EQ(ToAPI(stateTracker->GetComputePipeline()), pipeline.Get());
-
-        // Restore the state.
-        FromAPI(pass.Get())->RestoreCommandBufferStateForTesting(std::move(snapshot));
-
-        // Expect no pipeline
-        EXPECT_FALSE(stateTracker->HasPipeline());
-    }
+    // Expect no pipeline
+    EXPECT_FALSE(stateTracker->HasPipeline());
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
index 47481c5..dd57ed5 100644
--- a/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
+++ b/src/dawn/tests/unittests/native/DestroyObjectTests.cpp
@@ -33,795 +33,786 @@
 #include "mocks/SwapChainMock.h"
 #include "mocks/TextureMock.h"
 
-namespace dawn::native { namespace {
+namespace dawn::native {
+namespace {
 
-    using ::testing::_;
-    using ::testing::ByMove;
-    using ::testing::InSequence;
-    using ::testing::Return;
-    using ::testing::Test;
+using ::testing::_;
+using ::testing::ByMove;
+using ::testing::InSequence;
+using ::testing::Return;
+using ::testing::Test;
 
-    class DestroyObjectTests : public Test {
-      public:
-        DestroyObjectTests() : Test() {
-            // Skipping validation on descriptors as coverage for validation is already present.
-            mDevice.SetToggle(Toggle::SkipValidation, true);
-        }
+class DestroyObjectTests : public Test {
+  public:
+    DestroyObjectTests() : Test() {
+        // Skipping validation on descriptors as coverage for validation is already present.
+        mDevice.SetToggle(Toggle::SkipValidation, true);
+    }
 
-        Ref<TextureMock> GetTexture() {
-            if (mTexture != nullptr) {
-                return mTexture;
-            }
-            mTexture =
-                AcquireRef(new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal));
-            EXPECT_CALL(*mTexture.Get(), DestroyImpl).Times(1);
+    Ref<TextureMock> GetTexture() {
+        if (mTexture != nullptr) {
             return mTexture;
         }
+        mTexture = AcquireRef(new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal));
+        EXPECT_CALL(*mTexture.Get(), DestroyImpl).Times(1);
+        return mTexture;
+    }
 
-        Ref<PipelineLayoutMock> GetPipelineLayout() {
-            if (mPipelineLayout != nullptr) {
-                return mPipelineLayout;
-            }
-            mPipelineLayout = AcquireRef(new PipelineLayoutMock(&mDevice));
-            EXPECT_CALL(*mPipelineLayout.Get(), DestroyImpl).Times(1);
+    Ref<PipelineLayoutMock> GetPipelineLayout() {
+        if (mPipelineLayout != nullptr) {
             return mPipelineLayout;
         }
+        mPipelineLayout = AcquireRef(new PipelineLayoutMock(&mDevice));
+        EXPECT_CALL(*mPipelineLayout.Get(), DestroyImpl).Times(1);
+        return mPipelineLayout;
+    }
 
-        Ref<ShaderModuleMock> GetVertexShaderModule() {
-            if (mVsModule != nullptr) {
-                return mVsModule;
-            }
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(
-                mVsModule, ShaderModuleMock::Create(&mDevice, R"(
+    Ref<ShaderModuleMock> GetVertexShaderModule() {
+        if (mVsModule != nullptr) {
+            return mVsModule;
+        }
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(
+            mVsModule, ShaderModuleMock::Create(&mDevice, R"(
             @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 0.0, 0.0, 1.0);
             })"),
-                { ASSERT(false); }, mVsModule);
-            EXPECT_CALL(*mVsModule.Get(), DestroyImpl).Times(1);
-            return mVsModule;
-        }
+            { ASSERT(false); }, mVsModule);
+        EXPECT_CALL(*mVsModule.Get(), DestroyImpl).Times(1);
+        return mVsModule;
+    }
 
-        Ref<ShaderModuleMock> GetComputeShaderModule() {
-            if (mCsModule != nullptr) {
-                return mCsModule;
-            }
-            DAWN_TRY_ASSIGN_WITH_CLEANUP(
-                mCsModule, ShaderModuleMock::Create(&mDevice, R"(
-            @stage(compute) @workgroup_size(1) fn main() {
-            })"),
-                { ASSERT(false); }, mCsModule);
-            EXPECT_CALL(*mCsModule.Get(), DestroyImpl).Times(1);
+    Ref<ShaderModuleMock> GetComputeShaderModule() {
+        if (mCsModule != nullptr) {
             return mCsModule;
         }
-
-      protected:
-        DeviceMock mDevice;
-
-        // The following lazy-initialized objects are used to facilitate creation of dependent
-        // objects under test.
-        Ref<TextureMock> mTexture;
-        Ref<PipelineLayoutMock> mPipelineLayout;
-        Ref<ShaderModuleMock> mVsModule;
-        Ref<ShaderModuleMock> mCsModule;
-    };
-
-    TEST_F(DestroyObjectTests, BindGroupExplicit) {
-        BindGroupMock bindGroupMock(&mDevice);
-        EXPECT_CALL(bindGroupMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(bindGroupMock.IsAlive());
-        bindGroupMock.Destroy();
-        EXPECT_FALSE(bindGroupMock.IsAlive());
+        DAWN_TRY_ASSIGN_WITH_CLEANUP(
+            mCsModule, ShaderModuleMock::Create(&mDevice, R"(
+            @stage(compute) @workgroup_size(1) fn main() {
+            })"),
+            { ASSERT(false); }, mCsModule);
+        EXPECT_CALL(*mCsModule.Get(), DestroyImpl).Times(1);
+        return mCsModule;
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, BindGroupImplicit) {
-        BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
-        EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+  protected:
+    DeviceMock mDevice;
+
+    // The following lazy-initialized objects are used to facilitate creation of dependent
+    // objects under test.
+    Ref<TextureMock> mTexture;
+    Ref<PipelineLayoutMock> mPipelineLayout;
+    Ref<ShaderModuleMock> mVsModule;
+    Ref<ShaderModuleMock> mCsModule;
+};
+
+TEST_F(DestroyObjectTests, BindGroupExplicit) {
+    BindGroupMock bindGroupMock(&mDevice);
+    EXPECT_CALL(bindGroupMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(bindGroupMock.IsAlive());
+    bindGroupMock.Destroy();
+    EXPECT_FALSE(bindGroupMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, BindGroupImplicit) {
+    BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
+    EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+    {
+        BindGroupDescriptor desc = {};
+        Ref<BindGroupBase> bindGroup;
+        EXPECT_CALL(mDevice, CreateBindGroupImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
+
+        EXPECT_TRUE(bindGroup->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, BindGroupLayoutExplicit) {
+    BindGroupLayoutMock bindGroupLayoutMock(&mDevice);
+    EXPECT_CALL(bindGroupLayoutMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(bindGroupLayoutMock.IsAlive());
+    bindGroupLayoutMock.Destroy();
+    EXPECT_FALSE(bindGroupLayoutMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, BindGroupLayoutImplicit) {
+    BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
+    EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
+    {
+        BindGroupLayoutDescriptor desc = {};
+        Ref<BindGroupLayoutBase> bindGroupLayout;
+        EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
+
+        EXPECT_TRUE(bindGroupLayout->IsAlive());
+        EXPECT_TRUE(bindGroupLayout->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, BufferExplicit) {
+    {
+        BufferMock bufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+        EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(bufferMock.IsAlive());
+        bufferMock.Destroy();
+        EXPECT_FALSE(bufferMock.IsAlive());
+    }
+    {
+        BufferMock bufferMock(&mDevice, BufferBase::BufferState::Mapped);
         {
-            BindGroupDescriptor desc = {};
-            Ref<BindGroupBase> bindGroup;
-            EXPECT_CALL(mDevice, CreateBindGroupImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
-
-            EXPECT_TRUE(bindGroup->IsAlive());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, BindGroupLayoutExplicit) {
-        BindGroupLayoutMock bindGroupLayoutMock(&mDevice);
-        EXPECT_CALL(bindGroupLayoutMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(bindGroupLayoutMock.IsAlive());
-        bindGroupLayoutMock.Destroy();
-        EXPECT_FALSE(bindGroupLayoutMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, BindGroupLayoutImplicit) {
-        BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
-        EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
-        {
-            BindGroupLayoutDescriptor desc = {};
-            Ref<BindGroupLayoutBase> bindGroupLayout;
-            EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
-
-            EXPECT_TRUE(bindGroupLayout->IsAlive());
-            EXPECT_TRUE(bindGroupLayout->IsCachedReference());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, BufferExplicit) {
-        {
-            BufferMock bufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+            InSequence seq;
             EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
-
-            EXPECT_TRUE(bufferMock.IsAlive());
-            bufferMock.Destroy();
-            EXPECT_FALSE(bufferMock.IsAlive());
+            EXPECT_CALL(bufferMock, UnmapImpl).Times(1);
         }
-        {
-            BufferMock bufferMock(&mDevice, BufferBase::BufferState::Mapped);
-            {
-                InSequence seq;
-                EXPECT_CALL(bufferMock, DestroyImpl).Times(1);
-                EXPECT_CALL(bufferMock, UnmapImpl).Times(1);
-            }
 
-            EXPECT_TRUE(bufferMock.IsAlive());
-            bufferMock.Destroy();
-            EXPECT_FALSE(bufferMock.IsAlive());
+        EXPECT_TRUE(bufferMock.IsAlive());
+        bufferMock.Destroy();
+        EXPECT_FALSE(bufferMock.IsAlive());
+    }
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, BufferImplicit) {
+    {
+        BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+        EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
+        {
+            BufferDescriptor desc = {};
+            Ref<BufferBase> buffer;
+            EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+            DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+
+            EXPECT_TRUE(buffer->IsAlive());
         }
     }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, BufferImplicit) {
+    {
+        BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Mapped);
         {
-            BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+            InSequence seq;
             EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
-            {
-                BufferDescriptor desc = {};
-                Ref<BufferBase> buffer;
-                EXPECT_CALL(mDevice, CreateBufferImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(bufferMock))));
-                DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
-
-                EXPECT_TRUE(buffer->IsAlive());
-            }
+            EXPECT_CALL(*bufferMock, UnmapImpl).Times(1);
         }
         {
-            BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Mapped);
-            {
-                InSequence seq;
-                EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
-                EXPECT_CALL(*bufferMock, UnmapImpl).Times(1);
-            }
-            {
-                BufferDescriptor desc = {};
-                Ref<BufferBase> buffer;
-                EXPECT_CALL(mDevice, CreateBufferImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(bufferMock))));
-                DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+            BufferDescriptor desc = {};
+            Ref<BufferBase> buffer;
+            EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+            DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
 
-                EXPECT_TRUE(buffer->IsAlive());
-            }
+            EXPECT_TRUE(buffer->IsAlive());
         }
     }
+}
 
-    TEST_F(DestroyObjectTests, CommandBufferExplicit) {
-        CommandBufferMock commandBufferMock(&mDevice);
-        EXPECT_CALL(commandBufferMock, DestroyImpl).Times(1);
+TEST_F(DestroyObjectTests, CommandBufferExplicit) {
+    CommandBufferMock commandBufferMock(&mDevice);
+    EXPECT_CALL(commandBufferMock, DestroyImpl).Times(1);
 
-        EXPECT_TRUE(commandBufferMock.IsAlive());
-        commandBufferMock.Destroy();
-        EXPECT_FALSE(commandBufferMock.IsAlive());
+    EXPECT_TRUE(commandBufferMock.IsAlive());
+    commandBufferMock.Destroy();
+    EXPECT_FALSE(commandBufferMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, CommandBufferImplicit) {
+    CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+    EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
+    {
+        CommandBufferDescriptor desc = {};
+        Ref<CommandBufferBase> commandBuffer;
+        EXPECT_CALL(mDevice, CreateCommandBuffer)
+            .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
+        DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
+
+        EXPECT_TRUE(commandBuffer->IsAlive());
     }
+}
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, CommandBufferImplicit) {
-        CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+TEST_F(DestroyObjectTests, ComputePipelineExplicit) {
+    ComputePipelineMock computePipelineMock(&mDevice);
+    EXPECT_CALL(computePipelineMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(computePipelineMock.IsAlive());
+    computePipelineMock.Destroy();
+    EXPECT_FALSE(computePipelineMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, ComputePipelineImplicit) {
+    // ComputePipelines usually set their hash values at construction, but the mock does not, so
+    // we set it here.
+    constexpr size_t hash = 0x12345;
+    ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
+    computePipelineMock->SetContentHash(hash);
+    ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+    // Compute pipelines are initialized during their creation via the device.
+    EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
+    EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
+
+    {
+        ComputePipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.compute.module = GetComputeShaderModule().Get();
+
+        Ref<ComputePipelineBase> computePipeline;
+        EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
+
+        EXPECT_TRUE(computePipeline->IsAlive());
+        EXPECT_TRUE(computePipeline->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, ExternalTextureExplicit) {
+    ExternalTextureMock externalTextureMock(&mDevice);
+    EXPECT_CALL(externalTextureMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(externalTextureMock.IsAlive());
+    externalTextureMock.Destroy();
+    EXPECT_FALSE(externalTextureMock.IsAlive());
+}
+
+TEST_F(DestroyObjectTests, ExternalTextureImplicit) {
+    ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
+    EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
+    {
+        ExternalTextureDescriptor desc = {};
+        Ref<ExternalTextureBase> externalTexture;
+        EXPECT_CALL(mDevice, CreateExternalTextureImpl)
+            .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
+        DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
+
+        EXPECT_TRUE(externalTexture->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, PipelineLayoutExplicit) {
+    PipelineLayoutMock pipelineLayoutMock(&mDevice);
+    EXPECT_CALL(pipelineLayoutMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(pipelineLayoutMock.IsAlive());
+    pipelineLayoutMock.Destroy();
+    EXPECT_FALSE(pipelineLayoutMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, PipelineLayoutImplicit) {
+    PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
+    EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
+    {
+        PipelineLayoutDescriptor desc = {};
+        Ref<PipelineLayoutBase> pipelineLayout;
+        EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
+
+        EXPECT_TRUE(pipelineLayout->IsAlive());
+        EXPECT_TRUE(pipelineLayout->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, QuerySetExplicit) {
+    QuerySetMock querySetMock(&mDevice);
+    EXPECT_CALL(querySetMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(querySetMock.IsAlive());
+    querySetMock.Destroy();
+    EXPECT_FALSE(querySetMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, QuerySetImplicit) {
+    QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
+    EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
+    {
+        QuerySetDescriptor desc = {};
+        Ref<QuerySetBase> querySet;
+        EXPECT_CALL(mDevice, CreateQuerySetImpl).WillOnce(Return(ByMove(AcquireRef(querySetMock))));
+        DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
+
+        EXPECT_TRUE(querySet->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, RenderPipelineExplicit) {
+    RenderPipelineMock renderPipelineMock(&mDevice);
+    EXPECT_CALL(renderPipelineMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(renderPipelineMock.IsAlive());
+    renderPipelineMock.Destroy();
+    EXPECT_FALSE(renderPipelineMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, RenderPipelineImplicit) {
+    // RenderPipelines usually set their hash values at construction, but the mock does not, so
+    // we set it here.
+    constexpr size_t hash = 0x12345;
+    RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
+    renderPipelineMock->SetContentHash(hash);
+    ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
+
+    // Render pipelines are initialized during their creation via the device.
+    EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
+    EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
+
+    {
+        RenderPipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.vertex.module = GetVertexShaderModule().Get();
+
+        Ref<RenderPipelineBase> renderPipeline;
+        EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
+
+        EXPECT_TRUE(renderPipeline->IsAlive());
+        EXPECT_TRUE(renderPipeline->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, SamplerExplicit) {
+    SamplerMock samplerMock(&mDevice);
+    EXPECT_CALL(samplerMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(samplerMock.IsAlive());
+    samplerMock.Destroy();
+    EXPECT_FALSE(samplerMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, SamplerImplicit) {
+    SamplerMock* samplerMock = new SamplerMock(&mDevice);
+    EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
+    {
+        SamplerDescriptor desc = {};
+        Ref<SamplerBase> sampler;
+        EXPECT_CALL(mDevice, CreateSamplerImpl).WillOnce(Return(ByMove(AcquireRef(samplerMock))));
+        DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
+
+        EXPECT_TRUE(sampler->IsAlive());
+        EXPECT_TRUE(sampler->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, ShaderModuleExplicit) {
+    ShaderModuleMock shaderModuleMock(&mDevice);
+    EXPECT_CALL(shaderModuleMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(shaderModuleMock.IsAlive());
+    shaderModuleMock.Destroy();
+    EXPECT_FALSE(shaderModuleMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, ShaderModuleImplicit) {
+    ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
+    EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
+    {
+        ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = R"(
+                @stage(compute) @workgroup_size(1) fn main() {
+                }
+            )";
+        ShaderModuleDescriptor desc = {};
+        desc.nextInChain = &wgslDesc;
+        Ref<ShaderModuleBase> shaderModule;
+        EXPECT_CALL(mDevice, CreateShaderModuleImpl)
+            .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
+        DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+
+        EXPECT_TRUE(shaderModule->IsAlive());
+        EXPECT_TRUE(shaderModule->IsCachedReference());
+    }
+}
+
+TEST_F(DestroyObjectTests, SwapChainExplicit) {
+    SwapChainMock swapChainMock(&mDevice);
+    EXPECT_CALL(swapChainMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(swapChainMock.IsAlive());
+    swapChainMock.Destroy();
+    EXPECT_FALSE(swapChainMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, SwapChainImplicit) {
+    SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
+    EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
+    {
+        SwapChainDescriptor desc = {};
+        Ref<SwapChainBase> swapChain;
+        EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
+            .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
+        DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
+
+        EXPECT_TRUE(swapChain->IsAlive());
+    }
+}
+
+TEST_F(DestroyObjectTests, TextureExplicit) {
+    {
+        TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+        EXPECT_CALL(textureMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(textureMock.IsAlive());
+        textureMock.Destroy();
+        EXPECT_FALSE(textureMock.IsAlive());
+    }
+    {
+        TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
+        EXPECT_CALL(textureMock, DestroyImpl).Times(1);
+
+        EXPECT_TRUE(textureMock.IsAlive());
+        textureMock.Destroy();
+        EXPECT_FALSE(textureMock.IsAlive());
+    }
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, TextureImplicit) {
+    {
+        TextureMock* textureMock =
+            new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+        EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+        {
+            TextureDescriptor desc = {};
+            Ref<TextureBase> texture;
+            EXPECT_CALL(mDevice, CreateTextureImpl)
+                .WillOnce(Return(ByMove(AcquireRef(textureMock))));
+            DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+
+            EXPECT_TRUE(texture->IsAlive());
+        }
+    }
+    {
+        TextureMock* textureMock =
+            new TextureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
+        EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+        {
+            TextureDescriptor desc = {};
+            Ref<TextureBase> texture;
+            EXPECT_CALL(mDevice, CreateTextureImpl)
+                .WillOnce(Return(ByMove(AcquireRef(textureMock))));
+            DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+
+            EXPECT_TRUE(texture->IsAlive());
+        }
+    }
+}
+
+TEST_F(DestroyObjectTests, TextureViewExplicit) {
+    TextureViewMock textureViewMock(GetTexture().Get());
+    EXPECT_CALL(textureViewMock, DestroyImpl).Times(1);
+
+    EXPECT_TRUE(textureViewMock.IsAlive());
+    textureViewMock.Destroy();
+    EXPECT_FALSE(textureViewMock.IsAlive());
+}
+
+// If the reference count on API objects reaches 0, they should delete themselves. Note that GTest
+// will also complain if there is a memory leak.
+TEST_F(DestroyObjectTests, TextureViewImplicit) {
+    TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
+    EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
+    {
+        TextureViewDescriptor desc = {};
+        Ref<TextureViewBase> textureView;
+        EXPECT_CALL(mDevice, CreateTextureViewImpl)
+            .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
+        DAWN_ASSERT_AND_ASSIGN(textureView, mDevice.CreateTextureView(GetTexture().Get(), &desc));
+
+        EXPECT_TRUE(textureView->IsAlive());
+    }
+}
+
+// Destroying the objects on the mDevice should result in all created objects being destroyed in
+// order.
+TEST_F(DestroyObjectTests, DestroyObjects) {
+    BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
+    BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
+    BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
+    CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
+    ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
+    ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
+    PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
+    QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
+    RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
+    SamplerMock* samplerMock = new SamplerMock(&mDevice);
+    ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
+    SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
+    TextureMock* textureMock = new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
+    TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
+    {
+        InSequence seq;
         EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
-        {
-            CommandBufferDescriptor desc = {};
-            Ref<CommandBufferBase> commandBuffer;
-            EXPECT_CALL(mDevice, CreateCommandBuffer)
-                .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
-            DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
-
-            EXPECT_TRUE(commandBuffer->IsAlive());
-        }
+        EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
+        EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
     }
 
-    TEST_F(DestroyObjectTests, ComputePipelineExplicit) {
-        ComputePipelineMock computePipelineMock(&mDevice);
-        EXPECT_CALL(computePipelineMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(computePipelineMock.IsAlive());
-        computePipelineMock.Destroy();
-        EXPECT_FALSE(computePipelineMock.IsAlive());
+    Ref<BindGroupBase> bindGroup;
+    {
+        BindGroupDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateBindGroupImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
+        EXPECT_TRUE(bindGroup->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, ComputePipelineImplicit) {
-        // ComputePipelines usually set their hash values at construction, but the mock does not, so
-        // we set it here.
+    Ref<BindGroupLayoutBase> bindGroupLayout;
+    {
+        BindGroupLayoutDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
+        EXPECT_TRUE(bindGroupLayout->IsAlive());
+        EXPECT_TRUE(bindGroupLayout->IsCachedReference());
+    }
+
+    Ref<BufferBase> buffer;
+    {
+        BufferDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
+        DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
+        EXPECT_TRUE(buffer->IsAlive());
+    }
+
+    Ref<CommandBufferBase> commandBuffer;
+    {
+        CommandBufferDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateCommandBuffer)
+            .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
+        DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
+        EXPECT_TRUE(commandBuffer->IsAlive());
+    }
+
+    Ref<ComputePipelineBase> computePipeline;
+    {
+        // Compute pipelines usually set their hash values at construction, but the mock does
+        // not, so we set it here.
         constexpr size_t hash = 0x12345;
-        ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
         computePipelineMock->SetContentHash(hash);
         ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
 
         // Compute pipelines are initialized during their creation via the device.
         EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
-        EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
 
-        {
-            ComputePipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.compute.module = GetComputeShaderModule().Get();
-
-            Ref<ComputePipelineBase> computePipeline;
-            EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
-
-            EXPECT_TRUE(computePipeline->IsAlive());
-            EXPECT_TRUE(computePipeline->IsCachedReference());
-        }
+        ComputePipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.compute.module = GetComputeShaderModule().Get();
+        EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
+        EXPECT_TRUE(computePipeline->IsAlive());
+        EXPECT_TRUE(computePipeline->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, ExternalTextureExplicit) {
-        ExternalTextureMock externalTextureMock(&mDevice);
-        EXPECT_CALL(externalTextureMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(externalTextureMock.IsAlive());
-        externalTextureMock.Destroy();
-        EXPECT_FALSE(externalTextureMock.IsAlive());
+    Ref<ExternalTextureBase> externalTexture;
+    {
+        ExternalTextureDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateExternalTextureImpl)
+            .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
+        DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
+        EXPECT_TRUE(externalTexture->IsAlive());
     }
 
-    TEST_F(DestroyObjectTests, ExternalTextureImplicit) {
-        ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
-        EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
-        {
-            ExternalTextureDescriptor desc = {};
-            Ref<ExternalTextureBase> externalTexture;
-            EXPECT_CALL(mDevice, CreateExternalTextureImpl)
-                .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
-            DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
-
-            EXPECT_TRUE(externalTexture->IsAlive());
-        }
+    Ref<PipelineLayoutBase> pipelineLayout;
+    {
+        PipelineLayoutDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
+            .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
+        DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
+        EXPECT_TRUE(pipelineLayout->IsAlive());
+        EXPECT_TRUE(pipelineLayout->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, PipelineLayoutExplicit) {
-        PipelineLayoutMock pipelineLayoutMock(&mDevice);
-        EXPECT_CALL(pipelineLayoutMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(pipelineLayoutMock.IsAlive());
-        pipelineLayoutMock.Destroy();
-        EXPECT_FALSE(pipelineLayoutMock.IsAlive());
+    Ref<QuerySetBase> querySet;
+    {
+        QuerySetDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateQuerySetImpl).WillOnce(Return(ByMove(AcquireRef(querySetMock))));
+        DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
+        EXPECT_TRUE(querySet->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, PipelineLayoutImplicit) {
-        PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
-        EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
-        {
-            PipelineLayoutDescriptor desc = {};
-            Ref<PipelineLayoutBase> pipelineLayout;
-            EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
-
-            EXPECT_TRUE(pipelineLayout->IsAlive());
-            EXPECT_TRUE(pipelineLayout->IsCachedReference());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, QuerySetExplicit) {
-        QuerySetMock querySetMock(&mDevice);
-        EXPECT_CALL(querySetMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(querySetMock.IsAlive());
-        querySetMock.Destroy();
-        EXPECT_FALSE(querySetMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, QuerySetImplicit) {
-        QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
-        EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
-        {
-            QuerySetDescriptor desc = {};
-            Ref<QuerySetBase> querySet;
-            EXPECT_CALL(mDevice, CreateQuerySetImpl)
-                .WillOnce(Return(ByMove(AcquireRef(querySetMock))));
-            DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
-
-            EXPECT_TRUE(querySet->IsAlive());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, RenderPipelineExplicit) {
-        RenderPipelineMock renderPipelineMock(&mDevice);
-        EXPECT_CALL(renderPipelineMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(renderPipelineMock.IsAlive());
-        renderPipelineMock.Destroy();
-        EXPECT_FALSE(renderPipelineMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, RenderPipelineImplicit) {
-        // RenderPipelines usually set their hash values at construction, but the mock does not, so
-        // we set it here.
+    Ref<RenderPipelineBase> renderPipeline;
+    {
+        // Render pipelines usually set their hash values at construction, but the mock does
+        // not, so we set it here.
         constexpr size_t hash = 0x12345;
-        RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
         renderPipelineMock->SetContentHash(hash);
         ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
 
         // Render pipelines are initialized during their creation via the device.
         EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
-        EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
 
-        {
-            RenderPipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.vertex.module = GetVertexShaderModule().Get();
-
-            Ref<RenderPipelineBase> renderPipeline;
-            EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
-
-            EXPECT_TRUE(renderPipeline->IsAlive());
-            EXPECT_TRUE(renderPipeline->IsCachedReference());
-        }
+        RenderPipelineDescriptor desc = {};
+        desc.layout = GetPipelineLayout().Get();
+        desc.vertex.module = GetVertexShaderModule().Get();
+        EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
+            .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
+        DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
+        EXPECT_TRUE(renderPipeline->IsAlive());
+        EXPECT_TRUE(renderPipeline->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, SamplerExplicit) {
-        SamplerMock samplerMock(&mDevice);
-        EXPECT_CALL(samplerMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(samplerMock.IsAlive());
-        samplerMock.Destroy();
-        EXPECT_FALSE(samplerMock.IsAlive());
+    Ref<SamplerBase> sampler;
+    {
+        SamplerDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateSamplerImpl).WillOnce(Return(ByMove(AcquireRef(samplerMock))));
+        DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
+        EXPECT_TRUE(sampler->IsAlive());
+        EXPECT_TRUE(sampler->IsCachedReference());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, SamplerImplicit) {
-        SamplerMock* samplerMock = new SamplerMock(&mDevice);
-        EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
-        {
-            SamplerDescriptor desc = {};
-            Ref<SamplerBase> sampler;
-            EXPECT_CALL(mDevice, CreateSamplerImpl)
-                .WillOnce(Return(ByMove(AcquireRef(samplerMock))));
-            DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
-
-            EXPECT_TRUE(sampler->IsAlive());
-            EXPECT_TRUE(sampler->IsCachedReference());
-        }
-    }
-
-    TEST_F(DestroyObjectTests, ShaderModuleExplicit) {
-        ShaderModuleMock shaderModuleMock(&mDevice);
-        EXPECT_CALL(shaderModuleMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(shaderModuleMock.IsAlive());
-        shaderModuleMock.Destroy();
-        EXPECT_FALSE(shaderModuleMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, ShaderModuleImplicit) {
-        ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
-        EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
-        {
-            ShaderModuleWGSLDescriptor wgslDesc;
-            wgslDesc.source = R"(
+    Ref<ShaderModuleBase> shaderModule;
+    {
+        ShaderModuleWGSLDescriptor wgslDesc;
+        wgslDesc.source = R"(
                 @stage(compute) @workgroup_size(1) fn main() {
                 }
             )";
-            ShaderModuleDescriptor desc = {};
-            desc.nextInChain = &wgslDesc;
-            Ref<ShaderModuleBase> shaderModule;
-            EXPECT_CALL(mDevice, CreateShaderModuleImpl)
-                .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
-            DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+        ShaderModuleDescriptor desc = {};
+        desc.nextInChain = &wgslDesc;
 
-            EXPECT_TRUE(shaderModule->IsAlive());
-            EXPECT_TRUE(shaderModule->IsCachedReference());
-        }
+        EXPECT_CALL(mDevice, CreateShaderModuleImpl)
+            .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
+        DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
+        EXPECT_TRUE(shaderModule->IsAlive());
+        EXPECT_TRUE(shaderModule->IsCachedReference());
     }
 
-    TEST_F(DestroyObjectTests, SwapChainExplicit) {
-        SwapChainMock swapChainMock(&mDevice);
-        EXPECT_CALL(swapChainMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(swapChainMock.IsAlive());
-        swapChainMock.Destroy();
-        EXPECT_FALSE(swapChainMock.IsAlive());
+    Ref<SwapChainBase> swapChain;
+    {
+        SwapChainDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
+            .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
+        DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
+        EXPECT_TRUE(swapChain->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, SwapChainImplicit) {
-        SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
-        EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
-        {
-            SwapChainDescriptor desc = {};
-            Ref<SwapChainBase> swapChain;
-            EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
-                .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
-            DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
-
-            EXPECT_TRUE(swapChain->IsAlive());
-        }
+    Ref<TextureBase> texture;
+    {
+        TextureDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateTextureImpl).WillOnce(Return(ByMove(AcquireRef(textureMock))));
+        DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+        EXPECT_TRUE(texture->IsAlive());
     }
 
-    TEST_F(DestroyObjectTests, TextureExplicit) {
-        {
-            TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
-            EXPECT_CALL(textureMock, DestroyImpl).Times(1);
-
-            EXPECT_TRUE(textureMock.IsAlive());
-            textureMock.Destroy();
-            EXPECT_FALSE(textureMock.IsAlive());
-        }
-        {
-            TextureMock textureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
-            EXPECT_CALL(textureMock, DestroyImpl).Times(1);
-
-            EXPECT_TRUE(textureMock.IsAlive());
-            textureMock.Destroy();
-            EXPECT_FALSE(textureMock.IsAlive());
-        }
+    Ref<TextureViewBase> textureView;
+    {
+        TextureViewDescriptor desc = {};
+        EXPECT_CALL(mDevice, CreateTextureViewImpl)
+            .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
+        DAWN_ASSERT_AND_ASSIGN(textureView, mDevice.CreateTextureView(GetTexture().Get(), &desc));
+        EXPECT_TRUE(textureView->IsAlive());
     }
 
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, TextureImplicit) {
-        {
-            TextureMock* textureMock =
-                new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
-            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
-            {
-                TextureDescriptor desc = {};
-                Ref<TextureBase> texture;
-                EXPECT_CALL(mDevice, CreateTextureImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(textureMock))));
-                DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
+    mDevice.DestroyObjects();
+    EXPECT_FALSE(bindGroup->IsAlive());
+    EXPECT_FALSE(bindGroupLayout->IsAlive());
+    EXPECT_FALSE(buffer->IsAlive());
+    EXPECT_FALSE(commandBuffer->IsAlive());
+    EXPECT_FALSE(computePipeline->IsAlive());
+    EXPECT_FALSE(externalTexture->IsAlive());
+    EXPECT_FALSE(pipelineLayout->IsAlive());
+    EXPECT_FALSE(querySet->IsAlive());
+    EXPECT_FALSE(renderPipeline->IsAlive());
+    EXPECT_FALSE(sampler->IsAlive());
+    EXPECT_FALSE(shaderModule->IsAlive());
+    EXPECT_FALSE(swapChain->IsAlive());
+    EXPECT_FALSE(texture->IsAlive());
+    EXPECT_FALSE(textureView->IsAlive());
+}
 
-                EXPECT_TRUE(texture->IsAlive());
-            }
-        }
-        {
-            TextureMock* textureMock =
-                new TextureMock(&mDevice, TextureBase::TextureState::OwnedExternal);
-            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
-            {
-                TextureDescriptor desc = {};
-                Ref<TextureBase> texture;
-                EXPECT_CALL(mDevice, CreateTextureImpl)
-                    .WillOnce(Return(ByMove(AcquireRef(textureMock))));
-                DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
-
-                EXPECT_TRUE(texture->IsAlive());
-            }
-        }
-    }
-
-    TEST_F(DestroyObjectTests, TextureViewExplicit) {
-        TextureViewMock textureViewMock(GetTexture().Get());
-        EXPECT_CALL(textureViewMock, DestroyImpl).Times(1);
-
-        EXPECT_TRUE(textureViewMock.IsAlive());
-        textureViewMock.Destroy();
-        EXPECT_FALSE(textureViewMock.IsAlive());
-    }
-
-    // If the reference count on API objects reach 0, they should delete themselves. Note that GTest
-    // will also complain if there is a memory leak.
-    TEST_F(DestroyObjectTests, TextureViewImplicit) {
-        TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
-        EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
-        {
-            TextureViewDescriptor desc = {};
-            Ref<TextureViewBase> textureView;
-            EXPECT_CALL(mDevice, CreateTextureViewImpl)
-                .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
-            DAWN_ASSERT_AND_ASSIGN(textureView,
-                                   mDevice.CreateTextureView(GetTexture().Get(), &desc));
-
-            EXPECT_TRUE(textureView->IsAlive());
-        }
-    }
-
-    // Destroying the objects on the mDevice should result in all created objects being destroyed in
-    // order.
-    TEST_F(DestroyObjectTests, DestroyObjects) {
-        BindGroupMock* bindGroupMock = new BindGroupMock(&mDevice);
-        BindGroupLayoutMock* bindGroupLayoutMock = new BindGroupLayoutMock(&mDevice);
-        BufferMock* bufferMock = new BufferMock(&mDevice, BufferBase::BufferState::Unmapped);
-        CommandBufferMock* commandBufferMock = new CommandBufferMock(&mDevice);
-        ComputePipelineMock* computePipelineMock = new ComputePipelineMock(&mDevice);
-        ExternalTextureMock* externalTextureMock = new ExternalTextureMock(&mDevice);
-        PipelineLayoutMock* pipelineLayoutMock = new PipelineLayoutMock(&mDevice);
-        QuerySetMock* querySetMock = new QuerySetMock(&mDevice);
-        RenderPipelineMock* renderPipelineMock = new RenderPipelineMock(&mDevice);
-        SamplerMock* samplerMock = new SamplerMock(&mDevice);
-        ShaderModuleMock* shaderModuleMock = new ShaderModuleMock(&mDevice);
-        SwapChainMock* swapChainMock = new SwapChainMock(&mDevice);
-        TextureMock* textureMock =
-            new TextureMock(&mDevice, TextureBase::TextureState::OwnedInternal);
-        TextureViewMock* textureViewMock = new TextureViewMock(GetTexture().Get());
-        {
-            InSequence seq;
-            EXPECT_CALL(*commandBufferMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*renderPipelineMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*computePipelineMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*pipelineLayoutMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*swapChainMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*bindGroupMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*bindGroupLayoutMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*shaderModuleMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*externalTextureMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*textureViewMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*textureMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*querySetMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*samplerMock, DestroyImpl).Times(1);
-            EXPECT_CALL(*bufferMock, DestroyImpl).Times(1);
-        }
-
-        Ref<BindGroupBase> bindGroup;
-        {
-            BindGroupDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateBindGroupImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroup, mDevice.CreateBindGroup(&desc));
-            EXPECT_TRUE(bindGroup->IsAlive());
-        }
-
-        Ref<BindGroupLayoutBase> bindGroupLayout;
-        {
-            BindGroupLayoutDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateBindGroupLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(bindGroupLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(bindGroupLayout, mDevice.CreateBindGroupLayout(&desc));
-            EXPECT_TRUE(bindGroupLayout->IsAlive());
-            EXPECT_TRUE(bindGroupLayout->IsCachedReference());
-        }
-
-        Ref<BufferBase> buffer;
-        {
-            BufferDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateBufferImpl).WillOnce(Return(ByMove(AcquireRef(bufferMock))));
-            DAWN_ASSERT_AND_ASSIGN(buffer, mDevice.CreateBuffer(&desc));
-            EXPECT_TRUE(buffer->IsAlive());
-        }
-
-        Ref<CommandBufferBase> commandBuffer;
-        {
-            CommandBufferDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateCommandBuffer)
-                .WillOnce(Return(ByMove(AcquireRef(commandBufferMock))));
-            DAWN_ASSERT_AND_ASSIGN(commandBuffer, mDevice.CreateCommandBuffer(nullptr, &desc));
-            EXPECT_TRUE(commandBuffer->IsAlive());
-        }
-
-        Ref<ComputePipelineBase> computePipeline;
-        {
-            // Compute pipelines usually set their hash values at construction, but the mock does
-            // not, so we set it here.
-            constexpr size_t hash = 0x12345;
-            computePipelineMock->SetContentHash(hash);
-            ON_CALL(*computePipelineMock, ComputeContentHash).WillByDefault(Return(hash));
-
-            // Compute pipelines are initialized during their creation via the device.
-            EXPECT_CALL(*computePipelineMock, Initialize).Times(1);
-
-            ComputePipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.compute.module = GetComputeShaderModule().Get();
-            EXPECT_CALL(mDevice, CreateUninitializedComputePipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(computePipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(computePipeline, mDevice.CreateComputePipeline(&desc));
-            EXPECT_TRUE(computePipeline->IsAlive());
-            EXPECT_TRUE(computePipeline->IsCachedReference());
-        }
-
-        Ref<ExternalTextureBase> externalTexture;
-        {
-            ExternalTextureDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateExternalTextureImpl)
-                .WillOnce(Return(ByMove(AcquireRef(externalTextureMock))));
-            DAWN_ASSERT_AND_ASSIGN(externalTexture, mDevice.CreateExternalTextureImpl(&desc));
-            EXPECT_TRUE(externalTexture->IsAlive());
-        }
-
-        Ref<PipelineLayoutBase> pipelineLayout;
-        {
-            PipelineLayoutDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreatePipelineLayoutImpl)
-                .WillOnce(Return(ByMove(AcquireRef(pipelineLayoutMock))));
-            DAWN_ASSERT_AND_ASSIGN(pipelineLayout, mDevice.CreatePipelineLayout(&desc));
-            EXPECT_TRUE(pipelineLayout->IsAlive());
-            EXPECT_TRUE(pipelineLayout->IsCachedReference());
-        }
-
-        Ref<QuerySetBase> querySet;
-        {
-            QuerySetDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateQuerySetImpl)
-                .WillOnce(Return(ByMove(AcquireRef(querySetMock))));
-            DAWN_ASSERT_AND_ASSIGN(querySet, mDevice.CreateQuerySet(&desc));
-            EXPECT_TRUE(querySet->IsAlive());
-        }
-
-        Ref<RenderPipelineBase> renderPipeline;
-        {
-            // Render pipelines usually set their hash values at construction, but the mock does
-            // not, so we set it here.
-            constexpr size_t hash = 0x12345;
-            renderPipelineMock->SetContentHash(hash);
-            ON_CALL(*renderPipelineMock, ComputeContentHash).WillByDefault(Return(hash));
-
-            // Render pipelines are initialized during their creation via the device.
-            EXPECT_CALL(*renderPipelineMock, Initialize).Times(1);
-
-            RenderPipelineDescriptor desc = {};
-            desc.layout = GetPipelineLayout().Get();
-            desc.vertex.module = GetVertexShaderModule().Get();
-            EXPECT_CALL(mDevice, CreateUninitializedRenderPipelineImpl)
-                .WillOnce(Return(ByMove(AcquireRef(renderPipelineMock))));
-            DAWN_ASSERT_AND_ASSIGN(renderPipeline, mDevice.CreateRenderPipeline(&desc));
-            EXPECT_TRUE(renderPipeline->IsAlive());
-            EXPECT_TRUE(renderPipeline->IsCachedReference());
-        }
-
-        Ref<SamplerBase> sampler;
-        {
-            SamplerDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateSamplerImpl)
-                .WillOnce(Return(ByMove(AcquireRef(samplerMock))));
-            DAWN_ASSERT_AND_ASSIGN(sampler, mDevice.CreateSampler(&desc));
-            EXPECT_TRUE(sampler->IsAlive());
-            EXPECT_TRUE(sampler->IsCachedReference());
-        }
-
-        Ref<ShaderModuleBase> shaderModule;
-        {
-            ShaderModuleWGSLDescriptor wgslDesc;
-            wgslDesc.source = R"(
-                @stage(compute) @workgroup_size(1) fn main() {
-                }
-            )";
-            ShaderModuleDescriptor desc = {};
-            desc.nextInChain = &wgslDesc;
-
-            EXPECT_CALL(mDevice, CreateShaderModuleImpl)
-                .WillOnce(Return(ByMove(AcquireRef(shaderModuleMock))));
-            DAWN_ASSERT_AND_ASSIGN(shaderModule, mDevice.CreateShaderModule(&desc));
-            EXPECT_TRUE(shaderModule->IsAlive());
-            EXPECT_TRUE(shaderModule->IsCachedReference());
-        }
-
-        Ref<SwapChainBase> swapChain;
-        {
-            SwapChainDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateSwapChainImpl(_))
-                .WillOnce(Return(ByMove(AcquireRef(swapChainMock))));
-            DAWN_ASSERT_AND_ASSIGN(swapChain, mDevice.CreateSwapChain(nullptr, &desc));
-            EXPECT_TRUE(swapChain->IsAlive());
-        }
-
-        Ref<TextureBase> texture;
-        {
-            TextureDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateTextureImpl)
-                .WillOnce(Return(ByMove(AcquireRef(textureMock))));
-            DAWN_ASSERT_AND_ASSIGN(texture, mDevice.CreateTexture(&desc));
-            EXPECT_TRUE(texture->IsAlive());
-        }
-
-        Ref<TextureViewBase> textureView;
-        {
-            TextureViewDescriptor desc = {};
-            EXPECT_CALL(mDevice, CreateTextureViewImpl)
-                .WillOnce(Return(ByMove(AcquireRef(textureViewMock))));
-            DAWN_ASSERT_AND_ASSIGN(textureView,
-                                   mDevice.CreateTextureView(GetTexture().Get(), &desc));
-            EXPECT_TRUE(textureView->IsAlive());
-        }
-
-        mDevice.DestroyObjects();
-        EXPECT_FALSE(bindGroup->IsAlive());
-        EXPECT_FALSE(bindGroupLayout->IsAlive());
-        EXPECT_FALSE(buffer->IsAlive());
-        EXPECT_FALSE(commandBuffer->IsAlive());
-        EXPECT_FALSE(computePipeline->IsAlive());
-        EXPECT_FALSE(externalTexture->IsAlive());
-        EXPECT_FALSE(pipelineLayout->IsAlive());
-        EXPECT_FALSE(querySet->IsAlive());
-        EXPECT_FALSE(renderPipeline->IsAlive());
-        EXPECT_FALSE(sampler->IsAlive());
-        EXPECT_FALSE(shaderModule->IsAlive());
-        EXPECT_FALSE(swapChain->IsAlive());
-        EXPECT_FALSE(texture->IsAlive());
-        EXPECT_FALSE(textureView->IsAlive());
-    }
-
-    static constexpr std::string_view kComputeShader = R"(
+static constexpr std::string_view kComputeShader = R"(
         @stage(compute) @workgroup_size(1) fn main() {}
     )";
 
-    static constexpr std::string_view kVertexShader = R"(
+static constexpr std::string_view kVertexShader = R"(
         @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
             return vec4<f32>(0.0, 0.0, 0.0, 0.0);
         }
     )";
 
-    static constexpr std::string_view kFragmentShader = R"(
+static constexpr std::string_view kFragmentShader = R"(
         @stage(fragment) fn main() {}
     )";
 
-    class DestroyObjectRegressionTests : public DawnNativeTest {};
+class DestroyObjectRegressionTests : public DawnNativeTest {};
 
-    // LastRefInCommand* tests are regression test(s) for https://crbug.com/chromium/1318792. The
-    // regression tests here are not exhuastive. In order to have an exhuastive test case for this
-    // class of failures, we should test every possible command with the commands holding the last
-    // references (or as last as possible) of their needed objects. For now, including simple cases
-    // including a stripped-down case from the original bug.
+// LastRefInCommand* tests are regression test(s) for https://crbug.com/chromium/1318792. The
+// regression tests here are not exhaustive. In order to have an exhaustive test case for this
+// class of failures, we should test every possible command with the commands holding the last
+// references (or as last as possible) of their needed objects. For now, we include simple cases
+// and a stripped-down case from the original bug.
 
-    // Tests that when a RenderPipeline's last reference is held in a command in an unfinished
-    // CommandEncoder, that destroying the device still works as expected (and does not cause
-    // double-free).
-    TEST_F(DestroyObjectRegressionTests, LastRefInCommandRenderPipeline) {
-        utils::BasicRenderPass pass = utils::CreateBasicRenderPass(device, 1, 1);
+// Tests that when a RenderPipeline's last reference is held in a command in an unfinished
+// CommandEncoder, that destroying the device still works as expected (and does not cause
+// double-free).
+TEST_F(DestroyObjectRegressionTests, LastRefInCommandRenderPipeline) {
+    utils::BasicRenderPass pass = utils::CreateBasicRenderPass(device, 1, 1);
 
-        utils::ComboRenderPassDescriptor passDesc{};
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::RenderPassEncoder renderEncoder = encoder.BeginRenderPass(&pass.renderPassInfo);
+    utils::ComboRenderPassDescriptor passDesc{};
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::RenderPassEncoder renderEncoder = encoder.BeginRenderPass(&pass.renderPassInfo);
 
-        utils::ComboRenderPipelineDescriptor pipelineDesc;
-        pipelineDesc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-        pipelineDesc.vertex.module = utils::CreateShaderModule(device, kVertexShader.data());
-        pipelineDesc.vertex.entryPoint = "main";
-        pipelineDesc.cFragment.module = utils::CreateShaderModule(device, kFragmentShader.data());
-        pipelineDesc.cFragment.entryPoint = "main";
-        renderEncoder.SetPipeline(device.CreateRenderPipeline(&pipelineDesc));
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, kVertexShader.data());
+    pipelineDesc.vertex.entryPoint = "main";
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, kFragmentShader.data());
+    pipelineDesc.cFragment.entryPoint = "main";
+    renderEncoder.SetPipeline(device.CreateRenderPipeline(&pipelineDesc));
 
-        device.Destroy();
-    }
+    device.Destroy();
+}
 
-    // Tests that when a ComputePipelines's last reference is held in a command in an unfinished
-    // CommandEncoder, that destroying the device still works as expected (and does not cause
-    // double-free).
-    TEST_F(DestroyObjectRegressionTests, LastRefInCommandComputePipeline) {
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        wgpu::ComputePassEncoder computeEncoder = encoder.BeginComputePass();
+// Tests that when a ComputePipeline's last reference is held in a command in an unfinished
+// CommandEncoder, that destroying the device still works as expected (and does not cause
+// double-free).
+TEST_F(DestroyObjectRegressionTests, LastRefInCommandComputePipeline) {
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    wgpu::ComputePassEncoder computeEncoder = encoder.BeginComputePass();
 
-        wgpu::ComputePipelineDescriptor pipelineDesc;
-        pipelineDesc.compute.module = utils::CreateShaderModule(device, kComputeShader.data());
-        pipelineDesc.compute.entryPoint = "main";
-        computeEncoder.SetPipeline(device.CreateComputePipeline(&pipelineDesc));
+    wgpu::ComputePipelineDescriptor pipelineDesc;
+    pipelineDesc.compute.module = utils::CreateShaderModule(device, kComputeShader.data());
+    pipelineDesc.compute.entryPoint = "main";
+    computeEncoder.SetPipeline(device.CreateComputePipeline(&pipelineDesc));
 
-        device.Destroy();
-    }
+    device.Destroy();
+}
 
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::native::
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/DeviceCreationTests.cpp b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
index f7553ec..09fe994 100644
--- a/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
+++ b/src/dawn/tests/unittests/native/DeviceCreationTests.cpp
@@ -25,166 +25,166 @@
 
 namespace {
 
-    using testing::Contains;
-    using testing::MockCallback;
-    using testing::NotNull;
-    using testing::SaveArg;
-    using testing::StrEq;
+using testing::Contains;
+using testing::MockCallback;
+using testing::NotNull;
+using testing::SaveArg;
+using testing::StrEq;
 
-    class DeviceCreationTest : public testing::Test {
-      protected:
-        void SetUp() override {
-            dawnProcSetProcs(&dawn::native::GetProcs());
+class DeviceCreationTest : public testing::Test {
+  protected:
+    void SetUp() override {
+        dawnProcSetProcs(&dawn::native::GetProcs());
 
-            instance = std::make_unique<dawn::native::Instance>();
-            instance->DiscoverDefaultAdapters();
-            for (dawn::native::Adapter& nativeAdapter : instance->GetAdapters()) {
-                wgpu::AdapterProperties properties;
-                nativeAdapter.GetProperties(&properties);
+        instance = std::make_unique<dawn::native::Instance>();
+        instance->DiscoverDefaultAdapters();
+        for (dawn::native::Adapter& nativeAdapter : instance->GetAdapters()) {
+            wgpu::AdapterProperties properties;
+            nativeAdapter.GetProperties(&properties);
 
-                if (properties.backendType == wgpu::BackendType::Null) {
-                    adapter = wgpu::Adapter(nativeAdapter.Get());
-                    break;
-                }
+            if (properties.backendType == wgpu::BackendType::Null) {
+                adapter = wgpu::Adapter(nativeAdapter.Get());
+                break;
             }
-            ASSERT_NE(adapter, nullptr);
         }
-
-        void TearDown() override {
-            adapter = nullptr;
-            instance = nullptr;
-            dawnProcSetProcs(nullptr);
-        }
-
-        std::unique_ptr<dawn::native::Instance> instance;
-        wgpu::Adapter adapter;
-    };
-
-    // Test successful call to CreateDevice with no descriptor
-    TEST_F(DeviceCreationTest, CreateDeviceNoDescriptorSuccess) {
-        wgpu::Device device = adapter.CreateDevice();
-        EXPECT_NE(device, nullptr);
+        ASSERT_NE(adapter, nullptr);
     }
 
-    // Test successful call to CreateDevice with descriptor.
-    TEST_F(DeviceCreationTest, CreateDeviceSuccess) {
+    void TearDown() override {
+        adapter = nullptr;
+        instance = nullptr;
+        dawnProcSetProcs(nullptr);
+    }
+
+    std::unique_ptr<dawn::native::Instance> instance;
+    wgpu::Adapter adapter;
+};
+
+// Test successful call to CreateDevice with no descriptor
+TEST_F(DeviceCreationTest, CreateDeviceNoDescriptorSuccess) {
+    wgpu::Device device = adapter.CreateDevice();
+    EXPECT_NE(device, nullptr);
+}
+
+// Test successful call to CreateDevice with descriptor.
+TEST_F(DeviceCreationTest, CreateDeviceSuccess) {
+    wgpu::DeviceDescriptor desc = {};
+    wgpu::Device device = adapter.CreateDevice(&desc);
+    EXPECT_NE(device, nullptr);
+}
+
+// Test successful call to CreateDevice with toggle descriptor.
+TEST_F(DeviceCreationTest, CreateDeviceWithTogglesSuccess) {
+    wgpu::DeviceDescriptor desc = {};
+    wgpu::DawnTogglesDeviceDescriptor togglesDesc = {};
+    desc.nextInChain = &togglesDesc;
+
+    const char* toggle = "skip_validation";
+    togglesDesc.forceEnabledToggles = &toggle;
+    togglesDesc.forceEnabledTogglesCount = 1;
+
+    wgpu::Device device = adapter.CreateDevice(&desc);
+    EXPECT_NE(device, nullptr);
+
+    auto toggles = dawn::native::GetTogglesUsed(device.Get());
+    EXPECT_THAT(toggles, Contains(StrEq(toggle)));
+}
+
+TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) {
+    // Default device descriptor should have the same cache key as a device descriptor with a
+    // default cache descriptor.
+    {
         wgpu::DeviceDescriptor desc = {};
-        wgpu::Device device = adapter.CreateDevice(&desc);
-        EXPECT_NE(device, nullptr);
-    }
+        wgpu::Device device1 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device1, nullptr);
 
-    // Test successful call to CreateDevice with toggle descriptor.
-    TEST_F(DeviceCreationTest, CreateDeviceWithTogglesSuccess) {
+        wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+        desc.nextInChain = &cacheDesc;
+        wgpu::Device device2 = adapter.CreateDevice(&desc);
+
+        EXPECT_EQ(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
+                  dawn::native::FromAPI(device2.Get())->GetCacheKey());
+    }
+    // Default device descriptor should not have the same cache key as a device descriptor with
+    // a non-default cache descriptor.
+    {
         wgpu::DeviceDescriptor desc = {};
-        wgpu::DawnTogglesDeviceDescriptor togglesDesc = {};
-        desc.nextInChain = &togglesDesc;
+        wgpu::Device device1 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device1, nullptr);
 
-        const char* toggle = "skip_validation";
-        togglesDesc.forceEnabledToggles = &toggle;
-        togglesDesc.forceEnabledTogglesCount = 1;
+        wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+        desc.nextInChain = &cacheDesc;
+        const char* isolationKey = "isolation key";
+        cacheDesc.isolationKey = isolationKey;
+        wgpu::Device device2 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device2, nullptr);
 
-        wgpu::Device device = adapter.CreateDevice(&desc);
-        EXPECT_NE(device, nullptr);
-
-        auto toggles = dawn::native::GetTogglesUsed(device.Get());
-        EXPECT_THAT(toggles, Contains(StrEq(toggle)));
+        EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
+                  dawn::native::FromAPI(device2.Get())->GetCacheKey());
     }
+    // Two non-default cache descriptors should not have the same cache key.
+    {
+        wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
+        const char* isolationKey1 = "isolation key 1";
+        const char* isolationKey2 = "isolation key 2";
+        wgpu::DeviceDescriptor desc = {};
+        desc.nextInChain = &cacheDesc;
 
-    TEST_F(DeviceCreationTest, CreateDeviceWithCacheSuccess) {
-        // Default device descriptor should have the same cache key as a device descriptor with a
-        // default cache descriptor.
-        {
-            wgpu::DeviceDescriptor desc = {};
-            wgpu::Device device1 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device1, nullptr);
+        cacheDesc.isolationKey = isolationKey1;
+        wgpu::Device device1 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device1, nullptr);
 
-            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
-            desc.nextInChain = &cacheDesc;
-            wgpu::Device device2 = adapter.CreateDevice(&desc);
+        cacheDesc.isolationKey = isolationKey2;
+        wgpu::Device device2 = adapter.CreateDevice(&desc);
+        EXPECT_NE(device2, nullptr);
 
-            EXPECT_EQ(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
-                      dawn::native::FromAPI(device2.Get())->GetCacheKey());
-        }
-        // Default device descriptor should not have the same cache key as a device descriptor with
-        // a non-default cache descriptor.
-        {
-            wgpu::DeviceDescriptor desc = {};
-            wgpu::Device device1 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device1, nullptr);
-
-            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
-            desc.nextInChain = &cacheDesc;
-            const char* isolationKey = "isolation key";
-            cacheDesc.isolationKey = isolationKey;
-            wgpu::Device device2 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device2, nullptr);
-
-            EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
-                      dawn::native::FromAPI(device2.Get())->GetCacheKey());
-        }
-        // Two non-default cache descriptors should not have the same cache key.
-        {
-            wgpu::DawnCacheDeviceDescriptor cacheDesc = {};
-            const char* isolationKey1 = "isolation key 1";
-            const char* isolationKey2 = "isolation key 2";
-            wgpu::DeviceDescriptor desc = {};
-            desc.nextInChain = &cacheDesc;
-
-            cacheDesc.isolationKey = isolationKey1;
-            wgpu::Device device1 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device1, nullptr);
-
-            cacheDesc.isolationKey = isolationKey2;
-            wgpu::Device device2 = adapter.CreateDevice(&desc);
-            EXPECT_NE(device2, nullptr);
-
-            EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
-                      dawn::native::FromAPI(device2.Get())->GetCacheKey());
-        }
+        EXPECT_NE(dawn::native::FromAPI(device1.Get())->GetCacheKey(),
+                  dawn::native::FromAPI(device2.Get())->GetCacheKey());
     }
+}
 
-    // Test successful call to RequestDevice with descriptor
-    TEST_F(DeviceCreationTest, RequestDeviceSuccess) {
-        WGPUDevice cDevice;
-        {
-            MockCallback<WGPURequestDeviceCallback> cb;
-            EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
-                .WillOnce(SaveArg<1>(&cDevice));
-
-            wgpu::DeviceDescriptor desc = {};
-            adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
-        }
-
-        wgpu::Device device = wgpu::Device::Acquire(cDevice);
-        EXPECT_NE(device, nullptr);
-    }
-
-    // Test successful call to RequestDevice with a null descriptor
-    TEST_F(DeviceCreationTest, RequestDeviceNullDescriptorSuccess) {
-        WGPUDevice cDevice;
-        {
-            MockCallback<WGPURequestDeviceCallback> cb;
-            EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
-                .WillOnce(SaveArg<1>(&cDevice));
-
-            adapter.RequestDevice(nullptr, cb.Callback(), cb.MakeUserdata(this));
-        }
-
-        wgpu::Device device = wgpu::Device::Acquire(cDevice);
-        EXPECT_NE(device, nullptr);
-    }
-
-    // Test failing call to RequestDevice with invalid feature
-    TEST_F(DeviceCreationTest, RequestDeviceFailure) {
+// Test successful call to RequestDevice with descriptor
+TEST_F(DeviceCreationTest, RequestDeviceSuccess) {
+    WGPUDevice cDevice;
+    {
         MockCallback<WGPURequestDeviceCallback> cb;
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(SaveArg<1>(&cDevice));
 
         wgpu::DeviceDescriptor desc = {};
-        wgpu::FeatureName invalidFeature = static_cast<wgpu::FeatureName>(WGPUFeatureName_Force32);
-        desc.requiredFeatures = &invalidFeature;
-        desc.requiredFeaturesCount = 1;
-
         adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
     }
 
+    wgpu::Device device = wgpu::Device::Acquire(cDevice);
+    EXPECT_NE(device, nullptr);
+}
+
+// Test successful call to RequestDevice with a null descriptor
+TEST_F(DeviceCreationTest, RequestDeviceNullDescriptorSuccess) {
+    WGPUDevice cDevice;
+    {
+        MockCallback<WGPURequestDeviceCallback> cb;
+        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(SaveArg<1>(&cDevice));
+
+        adapter.RequestDevice(nullptr, cb.Callback(), cb.MakeUserdata(this));
+    }
+
+    wgpu::Device device = wgpu::Device::Acquire(cDevice);
+    EXPECT_NE(device, nullptr);
+}
+
+// Test failing call to RequestDevice with invalid feature
+TEST_F(DeviceCreationTest, RequestDeviceFailure) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+
+    wgpu::DeviceDescriptor desc = {};
+    wgpu::FeatureName invalidFeature = static_cast<wgpu::FeatureName>(WGPUFeatureName_Force32);
+    desc.requiredFeatures = &invalidFeature;
+    desc.requiredFeaturesCount = 1;
+
+    adapter.RequestDevice(&desc, cb.Callback(), cb.MakeUserdata(this));
+}
+
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h b/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
index a1ab605..7276889 100644
--- a/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
+++ b/src/dawn/tests/unittests/native/mocks/BindGroupLayoutMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class BindGroupLayoutMock final : public BindGroupLayoutBase {
-      public:
-        explicit BindGroupLayoutMock(DeviceBase* device) : BindGroupLayoutBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->BindGroupLayoutBase::DestroyImpl();
-            });
-        }
-        ~BindGroupLayoutMock() override = default;
+class BindGroupLayoutMock final : public BindGroupLayoutBase {
+  public:
+    explicit BindGroupLayoutMock(DeviceBase* device) : BindGroupLayoutBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->BindGroupLayoutBase::DestroyImpl();
+        });
+    }
+    ~BindGroupLayoutMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/BindGroupMock.h b/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
index 5661f2f..8f5ce34 100644
--- a/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
+++ b/src/dawn/tests/unittests/native/mocks/BindGroupMock.h
@@ -22,17 +22,15 @@
 
 namespace dawn::native {
 
-    class BindGroupMock : public BindGroupBase {
-      public:
-        explicit BindGroupMock(DeviceBase* device) : BindGroupBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->BindGroupBase::DestroyImpl();
-            });
-        }
-        ~BindGroupMock() override = default;
+class BindGroupMock : public BindGroupBase {
+  public:
+    explicit BindGroupMock(DeviceBase* device) : BindGroupBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->BindGroupBase::DestroyImpl(); });
+    }
+    ~BindGroupMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/BufferMock.h b/src/dawn/tests/unittests/native/mocks/BufferMock.h
index f44dd8b..d9d2211 100644
--- a/src/dawn/tests/unittests/native/mocks/BufferMock.h
+++ b/src/dawn/tests/unittests/native/mocks/BufferMock.h
@@ -22,27 +22,25 @@
 
 namespace dawn::native {
 
-    class BufferMock : public BufferBase {
-      public:
-        BufferMock(DeviceBase* device, BufferBase::BufferState state) : BufferBase(device, state) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->BufferBase::DestroyImpl();
-            });
-        }
-        ~BufferMock() override = default;
+class BufferMock : public BufferBase {
+  public:
+    BufferMock(DeviceBase* device, BufferBase::BufferState state) : BufferBase(device, state) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->BufferBase::DestroyImpl(); });
+    }
+    ~BufferMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
 
-        MOCK_METHOD(MaybeError, MapAtCreationImpl, (), (override));
-        MOCK_METHOD(MaybeError,
-                    MapAsyncImpl,
-                    (wgpu::MapMode mode, size_t offset, size_t size),
-                    (override));
-        MOCK_METHOD(void, UnmapImpl, (), (override));
-        MOCK_METHOD(void*, GetMappedPointerImpl, (), (override));
+    MOCK_METHOD(MaybeError, MapAtCreationImpl, (), (override));
+    MOCK_METHOD(MaybeError,
+                MapAsyncImpl,
+                (wgpu::MapMode mode, size_t offset, size_t size),
+                (override));
+    MOCK_METHOD(void, UnmapImpl, (), (override));
+    MOCK_METHOD(void*, GetMappedPointerImpl, (), (override));
 
-        MOCK_METHOD(bool, IsCPUWritableAtCreation, (), (const, override));
-    };
+    MOCK_METHOD(bool, IsCPUWritableAtCreation, (), (const, override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h b/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
index 5e16e0e..01843e8 100644
--- a/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
+++ b/src/dawn/tests/unittests/native/mocks/CommandBufferMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class CommandBufferMock : public CommandBufferBase {
-      public:
-        explicit CommandBufferMock(DeviceBase* device) : CommandBufferBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->CommandBufferBase::DestroyImpl();
-            });
-        }
-        ~CommandBufferMock() override = default;
+class CommandBufferMock : public CommandBufferBase {
+  public:
+    explicit CommandBufferMock(DeviceBase* device) : CommandBufferBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->CommandBufferBase::DestroyImpl();
+        });
+    }
+    ~CommandBufferMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h b/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
index f4fd5a1..7f6a598 100644
--- a/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
+++ b/src/dawn/tests/unittests/native/mocks/ComputePipelineMock.h
@@ -22,19 +22,19 @@
 
 namespace dawn::native {
 
-    class ComputePipelineMock : public ComputePipelineBase {
-      public:
-        explicit ComputePipelineMock(DeviceBase* device) : ComputePipelineBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->ComputePipelineBase::DestroyImpl();
-            });
-        }
-        ~ComputePipelineMock() override = default;
+class ComputePipelineMock : public ComputePipelineBase {
+  public:
+    explicit ComputePipelineMock(DeviceBase* device) : ComputePipelineBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->ComputePipelineBase::DestroyImpl();
+        });
+    }
+    ~ComputePipelineMock() override = default;
 
-        MOCK_METHOD(MaybeError, Initialize, (), (override));
-        MOCK_METHOD(size_t, ComputeContentHash, (), (override));
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(MaybeError, Initialize, (), (override));
+    MOCK_METHOD(size_t, ComputeContentHash, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/DeviceMock.h b/src/dawn/tests/unittests/native/mocks/DeviceMock.h
index ba54b2b..540a8f5 100644
--- a/src/dawn/tests/unittests/native/mocks/DeviceMock.h
+++ b/src/dawn/tests/unittests/native/mocks/DeviceMock.h
@@ -23,99 +23,98 @@
 
 namespace dawn::native {
 
-    class DeviceMock : public DeviceBase {
-      public:
-        // Exposes some protected functions for testing purposes.
-        using DeviceBase::DestroyObjects;
-        using DeviceBase::SetToggle;
+class DeviceMock : public DeviceBase {
+  public:
+    // Exposes some protected functions for testing purposes.
+    using DeviceBase::DestroyObjects;
+    using DeviceBase::SetToggle;
 
-        MOCK_METHOD(ResultOrError<Ref<CommandBufferBase>>,
-                    CreateCommandBuffer,
-                    (CommandEncoder*, const CommandBufferDescriptor*),
-                    (override));
+    MOCK_METHOD(ResultOrError<Ref<CommandBufferBase>>,
+                CreateCommandBuffer,
+                (CommandEncoder*, const CommandBufferDescriptor*),
+                (override));
 
-        MOCK_METHOD(ResultOrError<std::unique_ptr<StagingBufferBase>>,
-                    CreateStagingBuffer,
-                    (size_t),
-                    (override));
-        MOCK_METHOD(MaybeError,
-                    CopyFromStagingToBuffer,
-                    (StagingBufferBase*, uint64_t, BufferBase*, uint64_t, uint64_t),
-                    (override));
-        MOCK_METHOD(
-            MaybeError,
-            CopyFromStagingToTexture,
-            (const StagingBufferBase*, const TextureDataLayout&, TextureCopy*, const Extent3D&),
-            (override));
+    MOCK_METHOD(ResultOrError<std::unique_ptr<StagingBufferBase>>,
+                CreateStagingBuffer,
+                (size_t),
+                (override));
+    MOCK_METHOD(MaybeError,
+                CopyFromStagingToBuffer,
+                (StagingBufferBase*, uint64_t, BufferBase*, uint64_t, uint64_t),
+                (override));
+    MOCK_METHOD(MaybeError,
+                CopyFromStagingToTexture,
+                (const StagingBufferBase*, const TextureDataLayout&, TextureCopy*, const Extent3D&),
+                (override));
 
-        MOCK_METHOD(uint32_t, GetOptimalBytesPerRowAlignment, (), (const, override));
-        MOCK_METHOD(uint64_t, GetOptimalBufferToTextureCopyOffsetAlignment, (), (const, override));
+    MOCK_METHOD(uint32_t, GetOptimalBytesPerRowAlignment, (), (const, override));
+    MOCK_METHOD(uint64_t, GetOptimalBufferToTextureCopyOffsetAlignment, (), (const, override));
 
-        MOCK_METHOD(float, GetTimestampPeriodInNS, (), (const, override));
+    MOCK_METHOD(float, GetTimestampPeriodInNS, (), (const, override));
 
-        MOCK_METHOD(ResultOrError<Ref<BindGroupBase>>,
-                    CreateBindGroupImpl,
-                    (const BindGroupDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<BindGroupLayoutBase>>,
-                    CreateBindGroupLayoutImpl,
-                    (const BindGroupLayoutDescriptor*, PipelineCompatibilityToken),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<BufferBase>>,
-                    CreateBufferImpl,
-                    (const BufferDescriptor*),
-                    (override));
-        MOCK_METHOD(Ref<ComputePipelineBase>,
-                    CreateUninitializedComputePipelineImpl,
-                    (const ComputePipelineDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<ExternalTextureBase>>,
-                    CreateExternalTextureImpl,
-                    (const ExternalTextureDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<PipelineLayoutBase>>,
-                    CreatePipelineLayoutImpl,
-                    (const PipelineLayoutDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<QuerySetBase>>,
-                    CreateQuerySetImpl,
-                    (const QuerySetDescriptor*),
-                    (override));
-        MOCK_METHOD(Ref<RenderPipelineBase>,
-                    CreateUninitializedRenderPipelineImpl,
-                    (const RenderPipelineDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<SamplerBase>>,
-                    CreateSamplerImpl,
-                    (const SamplerDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<ShaderModuleBase>>,
-                    CreateShaderModuleImpl,
-                    (const ShaderModuleDescriptor*, ShaderModuleParseResult*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<SwapChainBase>>,
-                    CreateSwapChainImpl,
-                    (const SwapChainDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<NewSwapChainBase>>,
-                    CreateSwapChainImpl,
-                    (Surface*, NewSwapChainBase*, const SwapChainDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<TextureBase>>,
-                    CreateTextureImpl,
-                    (const TextureDescriptor*),
-                    (override));
-        MOCK_METHOD(ResultOrError<Ref<TextureViewBase>>,
-                    CreateTextureViewImpl,
-                    (TextureBase*, const TextureViewDescriptor*),
-                    (override));
+    MOCK_METHOD(ResultOrError<Ref<BindGroupBase>>,
+                CreateBindGroupImpl,
+                (const BindGroupDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<BindGroupLayoutBase>>,
+                CreateBindGroupLayoutImpl,
+                (const BindGroupLayoutDescriptor*, PipelineCompatibilityToken),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<BufferBase>>,
+                CreateBufferImpl,
+                (const BufferDescriptor*),
+                (override));
+    MOCK_METHOD(Ref<ComputePipelineBase>,
+                CreateUninitializedComputePipelineImpl,
+                (const ComputePipelineDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<ExternalTextureBase>>,
+                CreateExternalTextureImpl,
+                (const ExternalTextureDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<PipelineLayoutBase>>,
+                CreatePipelineLayoutImpl,
+                (const PipelineLayoutDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<QuerySetBase>>,
+                CreateQuerySetImpl,
+                (const QuerySetDescriptor*),
+                (override));
+    MOCK_METHOD(Ref<RenderPipelineBase>,
+                CreateUninitializedRenderPipelineImpl,
+                (const RenderPipelineDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<SamplerBase>>,
+                CreateSamplerImpl,
+                (const SamplerDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<ShaderModuleBase>>,
+                CreateShaderModuleImpl,
+                (const ShaderModuleDescriptor*, ShaderModuleParseResult*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<SwapChainBase>>,
+                CreateSwapChainImpl,
+                (const SwapChainDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<NewSwapChainBase>>,
+                CreateSwapChainImpl,
+                (Surface*, NewSwapChainBase*, const SwapChainDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<TextureBase>>,
+                CreateTextureImpl,
+                (const TextureDescriptor*),
+                (override));
+    MOCK_METHOD(ResultOrError<Ref<TextureViewBase>>,
+                CreateTextureViewImpl,
+                (TextureBase*, const TextureViewDescriptor*),
+                (override));
 
-        MOCK_METHOD(MaybeError, TickImpl, (), (override));
+    MOCK_METHOD(MaybeError, TickImpl, (), (override));
 
-        MOCK_METHOD(ResultOrError<ExecutionSerial>, CheckAndUpdateCompletedSerials, (), (override));
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-        MOCK_METHOD(MaybeError, WaitForIdleForDestruction, (), (override));
-    };
+    MOCK_METHOD(ResultOrError<ExecutionSerial>, CheckAndUpdateCompletedSerials, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(MaybeError, WaitForIdleForDestruction, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h b/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
index c5df066..4114fe8 100644
--- a/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
+++ b/src/dawn/tests/unittests/native/mocks/ExternalTextureMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class ExternalTextureMock : public ExternalTextureBase {
-      public:
-        explicit ExternalTextureMock(DeviceBase* device) : ExternalTextureBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->ExternalTextureBase::DestroyImpl();
-            });
-        }
-        ~ExternalTextureMock() override = default;
+class ExternalTextureMock : public ExternalTextureBase {
+  public:
+    explicit ExternalTextureMock(DeviceBase* device) : ExternalTextureBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->ExternalTextureBase::DestroyImpl();
+        });
+    }
+    ~ExternalTextureMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h b/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
index 090bee6..754cc95 100644
--- a/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
+++ b/src/dawn/tests/unittests/native/mocks/PipelineLayoutMock.h
@@ -22,17 +22,17 @@
 
 namespace dawn::native {
 
-    class PipelineLayoutMock : public PipelineLayoutBase {
-      public:
-        explicit PipelineLayoutMock(DeviceBase* device) : PipelineLayoutBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->PipelineLayoutBase::DestroyImpl();
-            });
-        }
-        ~PipelineLayoutMock() override = default;
+class PipelineLayoutMock : public PipelineLayoutBase {
+  public:
+    explicit PipelineLayoutMock(DeviceBase* device) : PipelineLayoutBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->PipelineLayoutBase::DestroyImpl();
+        });
+    }
+    ~PipelineLayoutMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/QuerySetMock.h b/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
index 65c5726..0d081b7 100644
--- a/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
+++ b/src/dawn/tests/unittests/native/mocks/QuerySetMock.h
@@ -22,17 +22,15 @@
 
 namespace dawn::native {
 
-    class QuerySetMock : public QuerySetBase {
-      public:
-        explicit QuerySetMock(DeviceBase* device) : QuerySetBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->QuerySetBase::DestroyImpl();
-            });
-        }
-        ~QuerySetMock() override = default;
+class QuerySetMock : public QuerySetBase {
+  public:
+    explicit QuerySetMock(DeviceBase* device) : QuerySetBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->QuerySetBase::DestroyImpl(); });
+    }
+    ~QuerySetMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h b/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
index 0aaa5e2..a7b0b62 100644
--- a/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
+++ b/src/dawn/tests/unittests/native/mocks/RenderPipelineMock.h
@@ -22,19 +22,19 @@
 
 namespace dawn::native {
 
-    class RenderPipelineMock : public RenderPipelineBase {
-      public:
-        explicit RenderPipelineMock(DeviceBase* device) : RenderPipelineBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->RenderPipelineBase::DestroyImpl();
-            });
-        }
-        ~RenderPipelineMock() override = default;
+class RenderPipelineMock : public RenderPipelineBase {
+  public:
+    explicit RenderPipelineMock(DeviceBase* device) : RenderPipelineBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
+            this->RenderPipelineBase::DestroyImpl();
+        });
+    }
+    ~RenderPipelineMock() override = default;
 
-        MOCK_METHOD(MaybeError, Initialize, (), (override));
-        MOCK_METHOD(size_t, ComputeContentHash, (), (override));
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(MaybeError, Initialize, (), (override));
+    MOCK_METHOD(size_t, ComputeContentHash, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/SamplerMock.h b/src/dawn/tests/unittests/native/mocks/SamplerMock.h
index 2427075..7e75255 100644
--- a/src/dawn/tests/unittests/native/mocks/SamplerMock.h
+++ b/src/dawn/tests/unittests/native/mocks/SamplerMock.h
@@ -22,17 +22,15 @@
 
 namespace dawn::native {
 
-    class SamplerMock : public SamplerBase {
-      public:
-        explicit SamplerMock(DeviceBase* device) : SamplerBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->SamplerBase::DestroyImpl();
-            });
-        }
-        ~SamplerMock() override = default;
+class SamplerMock : public SamplerBase {
+  public:
+    explicit SamplerMock(DeviceBase* device) : SamplerBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->SamplerBase::DestroyImpl(); });
+    }
+    ~SamplerMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
index 710737c..d497b75 100644
--- a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
+++ b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.cpp
@@ -16,25 +16,23 @@
 
 namespace dawn::native {
 
-    ShaderModuleMock::ShaderModuleMock(DeviceBase* device) : ShaderModuleBase(device) {
-        ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-            this->ShaderModuleBase::DestroyImpl();
-        });
-    }
+ShaderModuleMock::ShaderModuleMock(DeviceBase* device) : ShaderModuleBase(device) {
+    ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->ShaderModuleBase::DestroyImpl(); });
+}
 
-    ResultOrError<Ref<ShaderModuleMock>> ShaderModuleMock::Create(DeviceBase* device,
-                                                                  const char* source) {
-        ShaderModuleMock* mock = new ShaderModuleMock(device);
+ResultOrError<Ref<ShaderModuleMock>> ShaderModuleMock::Create(DeviceBase* device,
+                                                              const char* source) {
+    ShaderModuleMock* mock = new ShaderModuleMock(device);
 
-        ShaderModuleWGSLDescriptor wgslDesc;
-        wgslDesc.source = source;
-        ShaderModuleDescriptor desc;
-        desc.nextInChain = &wgslDesc;
+    ShaderModuleWGSLDescriptor wgslDesc;
+    wgslDesc.source = source;
+    ShaderModuleDescriptor desc;
+    desc.nextInChain = &wgslDesc;
 
-        ShaderModuleParseResult parseResult;
-        DAWN_TRY(ValidateShaderModuleDescriptor(device, &desc, &parseResult, nullptr));
-        DAWN_TRY(mock->InitializeBase(&parseResult));
-        return AcquireRef(mock);
-    }
+    ShaderModuleParseResult parseResult;
+    DAWN_TRY(ValidateShaderModuleDescriptor(device, &desc, &parseResult, nullptr));
+    DAWN_TRY(mock->InitializeBase(&parseResult));
+    return AcquireRef(mock);
+}
 
 }  // namespace dawn::native
diff --git a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
index 5857a6e5..d35a319 100644
--- a/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
+++ b/src/dawn/tests/unittests/native/mocks/ShaderModuleMock.h
@@ -25,16 +25,16 @@
 
 namespace dawn::native {
 
-    class ShaderModuleMock : public ShaderModuleBase {
-      public:
-        explicit ShaderModuleMock(DeviceBase* device);
-        ~ShaderModuleMock() override = default;
+class ShaderModuleMock : public ShaderModuleBase {
+  public:
+    explicit ShaderModuleMock(DeviceBase* device);
+    ~ShaderModuleMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
 
-        // Creates a shader module mock based on the wgsl source.
-        static ResultOrError<Ref<ShaderModuleMock>> Create(DeviceBase* device, const char* source);
-    };
+    // Creates a shader module mock based on the wgsl source.
+    static ResultOrError<Ref<ShaderModuleMock>> Create(DeviceBase* device, const char* source);
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/SwapChainMock.h b/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
index c0aa249..8f3386c 100644
--- a/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
+++ b/src/dawn/tests/unittests/native/mocks/SwapChainMock.h
@@ -22,24 +22,22 @@
 
 namespace dawn::native {
 
-    class SwapChainMock : public SwapChainBase {
-      public:
-        explicit SwapChainMock(DeviceBase* device) : SwapChainBase(device) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->SwapChainBase::DestroyImpl();
-            });
-        }
-        ~SwapChainMock() override = default;
+class SwapChainMock : public SwapChainBase {
+  public:
+    explicit SwapChainMock(DeviceBase* device) : SwapChainBase(device) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->SwapChainBase::DestroyImpl(); });
+    }
+    ~SwapChainMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
+    MOCK_METHOD(void, DestroyImpl, (), (override));
 
-        MOCK_METHOD(void,
-                    APIConfigure,
-                    (wgpu::TextureFormat, wgpu::TextureUsage, uint32_t, uint32_t),
-                    (override));
-        MOCK_METHOD(TextureViewBase*, APIGetCurrentTextureView, (), (override));
-        MOCK_METHOD(void, APIPresent, (), (override));
-    };
+    MOCK_METHOD(void,
+                APIConfigure,
+                (wgpu::TextureFormat, wgpu::TextureUsage, uint32_t, uint32_t),
+                (override));
+    MOCK_METHOD(TextureViewBase*, APIGetCurrentTextureView, (), (override));
+    MOCK_METHOD(void, APIPresent, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/native/mocks/TextureMock.h b/src/dawn/tests/unittests/native/mocks/TextureMock.h
index a6ea6fb..b371b1a 100644
--- a/src/dawn/tests/unittests/native/mocks/TextureMock.h
+++ b/src/dawn/tests/unittests/native/mocks/TextureMock.h
@@ -22,27 +22,23 @@
 
 namespace dawn::native {
 
-    class TextureMock : public TextureBase {
-      public:
-        TextureMock(DeviceBase* device, TextureBase::TextureState state)
-            : TextureBase(device, state) {
-            ON_CALL(*this, DestroyImpl).WillByDefault([this]() {
-                this->TextureBase::DestroyImpl();
-            });
-        }
-        ~TextureMock() override = default;
+class TextureMock : public TextureBase {
+  public:
+    TextureMock(DeviceBase* device, TextureBase::TextureState state) : TextureBase(device, state) {
+        ON_CALL(*this, DestroyImpl).WillByDefault([this]() { this->TextureBase::DestroyImpl(); });
+    }
+    ~TextureMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
-    class TextureViewMock : public TextureViewBase {
-      public:
-        explicit TextureViewMock(TextureBase* texture) : TextureViewBase(texture) {
-        }
-        ~TextureViewMock() override = default;
+class TextureViewMock : public TextureViewBase {
+  public:
+    explicit TextureViewMock(TextureBase* texture) : TextureViewBase(texture) {}
+    ~TextureViewMock() override = default;
 
-        MOCK_METHOD(void, DestroyImpl, (), (override));
-    };
+    MOCK_METHOD(void, DestroyImpl, (), (override));
+};
 
 }  // namespace dawn::native
 
diff --git a/src/dawn/tests/unittests/validation/BufferValidationTests.cpp b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
index 1c0abff..a063c4c 100644
--- a/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/BufferValidationTests.cpp
@@ -15,8 +15,8 @@
 #include <limits>
 #include <memory>
 
-#include "gmock/gmock.h"
 #include "dawn/tests/unittests/validation/ValidationTest.h"
+#include "gmock/gmock.h"
 
 using testing::_;
 using testing::InvokeWithoutArgs;
diff --git a/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp b/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
index 4876e92..e2f1906 100644
--- a/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/DrawVertexAndIndexBufferOOBValidationTests.cpp
@@ -20,729 +20,721 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    constexpr uint32_t kRTSize = 4;
-    constexpr uint32_t kFloat32x2Stride = 2 * sizeof(float);
-    constexpr uint32_t kFloat32x4Stride = 4 * sizeof(float);
+constexpr uint32_t kRTSize = 4;
+constexpr uint32_t kFloat32x2Stride = 2 * sizeof(float);
+constexpr uint32_t kFloat32x4Stride = 4 * sizeof(float);
 
-    class DrawVertexAndIndexBufferOOBValidationTests : public ValidationTest {
-      public:
-        // Parameters for testing index buffer
-        struct IndexBufferParams {
-            wgpu::IndexFormat indexFormat;
-            uint64_t indexBufferSize;              // Size for creating index buffer
-            uint64_t indexBufferOffsetForEncoder;  // Offset for SetIndexBuffer in encoder
-            uint64_t indexBufferSizeForEncoder;    // Size for SetIndexBuffer in encoder
-            uint32_t maxValidIndexNumber;  // max number of {indexCount + firstIndex} for this set
-                                           // of parameters
-        };
+class DrawVertexAndIndexBufferOOBValidationTests : public ValidationTest {
+  public:
+    // Parameters for testing index buffer
+    struct IndexBufferParams {
+        wgpu::IndexFormat indexFormat;
+        uint64_t indexBufferSize;              // Size for creating index buffer
+        uint64_t indexBufferOffsetForEncoder;  // Offset for SetIndexBuffer in encoder
+        uint64_t indexBufferSizeForEncoder;    // Size for SetIndexBuffer in encoder
+        uint32_t maxValidIndexNumber;  // max number of {indexCount + firstIndex} for this set
+                                       // of parameters
+    };
 
-        // Parameters for testing vertex-step-mode and instance-step-mode vertex buffer
-        struct VertexBufferParams {
-            uint32_t bufferStride;
-            uint64_t bufferSize;              // Size for creating vertex buffer
-            uint64_t bufferOffsetForEncoder;  // Offset for SetVertexBuffer in encoder
-            uint64_t bufferSizeForEncoder;    // Size for SetVertexBuffer in encoder
-            uint32_t maxValidAccessNumber;    // max number of valid access time for this set of
-                                              // parameters, i.e. {vertexCount + firstVertex} for
-            // vertex-step-mode, and {instanceCount + firstInstance}
-            // for instance-step-mode
-        };
+    // Parameters for testing vertex-step-mode and instance-step-mode vertex buffer
+    struct VertexBufferParams {
+        uint32_t bufferStride;
+        uint64_t bufferSize;              // Size for creating vertex buffer
+        uint64_t bufferOffsetForEncoder;  // Offset for SetVertexBuffer in encoder
+        uint64_t bufferSizeForEncoder;    // Size for SetVertexBuffer in encoder
+        uint32_t maxValidAccessNumber;    // max number of valid access time for this set of
+                                          // parameters, i.e. {vertexCount + firstVertex} for
+        // vertex-step-mode, and {instanceCount + firstInstance}
+        // for instance-step-mode
+    };
 
-        // Parameters for setIndexBuffer
-        struct IndexBufferDesc {
-            const wgpu::Buffer buffer;
-            wgpu::IndexFormat indexFormat;
-            uint64_t offset = 0;
-            uint64_t size = wgpu::kWholeSize;
-        };
+    // Parameters for setIndexBuffer
+    struct IndexBufferDesc {
+        const wgpu::Buffer buffer;
+        wgpu::IndexFormat indexFormat;
+        uint64_t offset = 0;
+        uint64_t size = wgpu::kWholeSize;
+    };
 
-        // Parameters for setVertexBuffer
-        struct VertexBufferSpec {
-            uint32_t slot;
-            const wgpu::Buffer buffer;
-            uint64_t offset = 0;
-            uint64_t size = wgpu::kWholeSize;
-        };
-        using VertexBufferList = std::vector<VertexBufferSpec>;
+    // Parameters for setVertexBuffer
+    struct VertexBufferSpec {
+        uint32_t slot;
+        const wgpu::Buffer buffer;
+        uint64_t offset = 0;
+        uint64_t size = wgpu::kWholeSize;
+    };
+    using VertexBufferList = std::vector<VertexBufferSpec>;
 
-        // Buffer layout parameters for creating pipeline
-        struct PipelineVertexBufferAttributeDesc {
-            uint32_t shaderLocation;
-            wgpu::VertexFormat format;
-            uint64_t offset = 0;
-        };
-        struct PipelineVertexBufferDesc {
-            uint64_t arrayStride;
-            wgpu::VertexStepMode stepMode;
-            std::vector<PipelineVertexBufferAttributeDesc> attributes = {};
-        };
+    // Buffer layout parameters for creating pipeline
+    struct PipelineVertexBufferAttributeDesc {
+        uint32_t shaderLocation;
+        wgpu::VertexFormat format;
+        uint64_t offset = 0;
+    };
+    struct PipelineVertexBufferDesc {
+        uint64_t arrayStride;
+        wgpu::VertexStepMode stepMode;
+        std::vector<PipelineVertexBufferAttributeDesc> attributes = {};
+    };
 
-        void SetUp() override {
-            ValidationTest::SetUp();
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+        renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
-            fsModule = utils::CreateShaderModule(device, R"(
+        fsModule = utils::CreateShaderModule(device, R"(
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 return vec4<f32>(0.0, 1.0, 0.0, 1.0);
             })");
-        }
+    }
 
-        const wgpu::RenderPassDescriptor* GetBasicRenderPassDescriptor() const {
-            return &renderPass.renderPassInfo;
-        }
+    const wgpu::RenderPassDescriptor* GetBasicRenderPassDescriptor() const {
+        return &renderPass.renderPassInfo;
+    }
 
-        wgpu::Buffer CreateBuffer(uint64_t size,
-                                  wgpu::BufferUsage usage = wgpu::BufferUsage::Vertex) {
-            wgpu::BufferDescriptor descriptor;
-            descriptor.size = size;
-            descriptor.usage = usage;
+    wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage = wgpu::BufferUsage::Vertex) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
 
-            return device.CreateBuffer(&descriptor);
-        }
+        return device.CreateBuffer(&descriptor);
+    }
 
-        wgpu::ShaderModule CreateVertexShaderModuleWithBuffer(
-            std::vector<PipelineVertexBufferDesc> bufferDescList) {
-            uint32_t attributeCount = 0;
-            std::stringstream inputStringStream;
+    wgpu::ShaderModule CreateVertexShaderModuleWithBuffer(
+        std::vector<PipelineVertexBufferDesc> bufferDescList) {
+        uint32_t attributeCount = 0;
+        std::stringstream inputStringStream;
 
-            for (auto buffer : bufferDescList) {
-                for (auto attr : buffer.attributes) {
-                    // @location({shaderLocation}) var_{id} : {typeString},
-                    inputStringStream << "@location(" << attr.shaderLocation << ") var_"
-                                      << attributeCount << " : vec4<f32>,";
-                    attributeCount++;
-                }
+        for (auto buffer : bufferDescList) {
+            for (auto attr : buffer.attributes) {
+                // @location({shaderLocation}) var_{id} : {typeString},
+                inputStringStream << "@location(" << attr.shaderLocation << ") var_"
+                                  << attributeCount << " : vec4<f32>,";
+                attributeCount++;
             }
+        }
 
-            std::stringstream shaderStringStream;
+        std::stringstream shaderStringStream;
 
-            shaderStringStream << R"(
+        shaderStringStream << R"(
             @stage(vertex)
             fn main()" << inputStringStream.str()
-                               << R"() -> @builtin(position) vec4<f32> {
+                           << R"() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 1.0, 0.0, 1.0);
             })";
 
-            return utils::CreateShaderModule(device, shaderStringStream.str().c_str());
-        }
+        return utils::CreateShaderModule(device, shaderStringStream.str().c_str());
+    }
 
-        // Create a render pipeline with given buffer layout description, using a vertex shader
-        // module automatically generated from the buffer description.
-        wgpu::RenderPipeline CreateRenderPipelineWithBufferDesc(
-            std::vector<PipelineVertexBufferDesc> bufferDescList) {
-            utils::ComboRenderPipelineDescriptor descriptor;
+    // Create a render pipeline with given buffer layout description, using a vertex shader
+    // module automatically generated from the buffer description.
+    wgpu::RenderPipeline CreateRenderPipelineWithBufferDesc(
+        std::vector<PipelineVertexBufferDesc> bufferDescList) {
+        utils::ComboRenderPipelineDescriptor descriptor;
 
-            descriptor.vertex.module = CreateVertexShaderModuleWithBuffer(bufferDescList);
-            descriptor.cFragment.module = fsModule;
-            descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
+        descriptor.vertex.module = CreateVertexShaderModuleWithBuffer(bufferDescList);
+        descriptor.cFragment.module = fsModule;
+        descriptor.primitive.topology = wgpu::PrimitiveTopology::TriangleList;
 
-            descriptor.vertex.bufferCount = bufferDescList.size();
+        descriptor.vertex.bufferCount = bufferDescList.size();
 
-            size_t attributeCount = 0;
+        size_t attributeCount = 0;
 
-            for (size_t bufferCount = 0; bufferCount < bufferDescList.size(); bufferCount++) {
-                auto bufferDesc = bufferDescList[bufferCount];
-                descriptor.cBuffers[bufferCount].arrayStride = bufferDesc.arrayStride;
-                descriptor.cBuffers[bufferCount].stepMode = bufferDesc.stepMode;
-                if (bufferDesc.attributes.size() > 0) {
-                    descriptor.cBuffers[bufferCount].attributeCount = bufferDesc.attributes.size();
-                    descriptor.cBuffers[bufferCount].attributes =
-                        &descriptor.cAttributes[attributeCount];
-                    for (auto attribute : bufferDesc.attributes) {
-                        descriptor.cAttributes[attributeCount].shaderLocation =
-                            attribute.shaderLocation;
-                        descriptor.cAttributes[attributeCount].format = attribute.format;
-                        descriptor.cAttributes[attributeCount].offset = attribute.offset;
-                        attributeCount++;
-                    }
-                } else {
-                    descriptor.cBuffers[bufferCount].attributeCount = 0;
-                    descriptor.cBuffers[bufferCount].attributes = nullptr;
+        for (size_t bufferCount = 0; bufferCount < bufferDescList.size(); bufferCount++) {
+            auto bufferDesc = bufferDescList[bufferCount];
+            descriptor.cBuffers[bufferCount].arrayStride = bufferDesc.arrayStride;
+            descriptor.cBuffers[bufferCount].stepMode = bufferDesc.stepMode;
+            if (bufferDesc.attributes.size() > 0) {
+                descriptor.cBuffers[bufferCount].attributeCount = bufferDesc.attributes.size();
+                descriptor.cBuffers[bufferCount].attributes =
+                    &descriptor.cAttributes[attributeCount];
+                for (auto attribute : bufferDesc.attributes) {
+                    descriptor.cAttributes[attributeCount].shaderLocation =
+                        attribute.shaderLocation;
+                    descriptor.cAttributes[attributeCount].format = attribute.format;
+                    descriptor.cAttributes[attributeCount].offset = attribute.offset;
+                    attributeCount++;
                 }
-            }
-
-            descriptor.cTargets[0].format = renderPass.colorFormat;
-
-            return device.CreateRenderPipeline(&descriptor);
-        }
-
-        // Create a render pipeline using only one vertex-step-mode Float32x4 buffer
-        wgpu::RenderPipeline CreateBasicRenderPipeline(uint32_t bufferStride = kFloat32x4Stride) {
-            DAWN_ASSERT(bufferStride >= kFloat32x4Stride);
-
-            std::vector<PipelineVertexBufferDesc> bufferDescList = {
-                {bufferStride, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
-            };
-
-            return CreateRenderPipelineWithBufferDesc(bufferDescList);
-        }
-
-        // Create a render pipeline using one vertex-step-mode Float32x4 buffer and one
-        // instance-step-mode Float32x2 buffer
-        wgpu::RenderPipeline CreateBasicRenderPipelineWithInstance(
-            uint32_t bufferStride1 = kFloat32x4Stride,
-            uint32_t bufferStride2 = kFloat32x2Stride) {
-            DAWN_ASSERT(bufferStride1 >= kFloat32x4Stride);
-            DAWN_ASSERT(bufferStride2 >= kFloat32x2Stride);
-
-            std::vector<PipelineVertexBufferDesc> bufferDescList = {
-                {bufferStride1, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
-                {bufferStride2,
-                 wgpu::VertexStepMode::Instance,
-                 {{3, wgpu::VertexFormat::Float32x2}}},
-            };
-
-            return CreateRenderPipelineWithBufferDesc(bufferDescList);
-        }
-
-        // Create a render pipeline using one vertex-step-mode and one instance-step-mode buffer,
-        // both with a zero array stride. The minimal size of vertex step mode buffer should be 28,
-        // and the minimal size of instance step mode buffer should be 20.
-        wgpu::RenderPipeline CreateBasicRenderPipelineWithZeroArrayStride() {
-            std::vector<PipelineVertexBufferDesc> bufferDescList = {
-                {0,
-                 wgpu::VertexStepMode::Vertex,
-                 {{0, wgpu::VertexFormat::Float32x4, 0}, {1, wgpu::VertexFormat::Float32x2, 20}}},
-                {0,
-                 wgpu::VertexStepMode::Instance,
-                 // Two attributes are overlapped within this instance step mode vertex buffer
-                 {{3, wgpu::VertexFormat::Float32x4, 4}, {7, wgpu::VertexFormat::Float32x3, 0}}},
-            };
-
-            return CreateRenderPipelineWithBufferDesc(bufferDescList);
-        }
-
-        void TestRenderPassDraw(const wgpu::RenderPipeline& pipeline,
-                                VertexBufferList vertexBufferList,
-                                uint32_t vertexCount,
-                                uint32_t instanceCount,
-                                uint32_t firstVertex,
-                                uint32_t firstInstance,
-                                bool isSuccess) {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder renderPassEncoder =
-                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-            renderPassEncoder.SetPipeline(pipeline);
-
-            for (auto vertexBufferParam : vertexBufferList) {
-                renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
-                                                  vertexBufferParam.offset, vertexBufferParam.size);
-            }
-            renderPassEncoder.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
-            renderPassEncoder.End();
-
-            if (isSuccess) {
-                encoder.Finish();
             } else {
-                ASSERT_DEVICE_ERROR(encoder.Finish());
+                descriptor.cBuffers[bufferCount].attributeCount = 0;
+                descriptor.cBuffers[bufferCount].attributes = nullptr;
             }
         }
 
-        void TestRenderPassDrawIndexed(const wgpu::RenderPipeline& pipeline,
-                                       IndexBufferDesc indexBuffer,
-                                       VertexBufferList vertexBufferList,
-                                       uint32_t indexCount,
-                                       uint32_t instanceCount,
-                                       uint32_t firstIndex,
-                                       int32_t baseVertex,
-                                       uint32_t firstInstance,
-                                       bool isSuccess) {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder renderPassEncoder =
-                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-            renderPassEncoder.SetPipeline(pipeline);
+        descriptor.cTargets[0].format = renderPass.colorFormat;
 
-            renderPassEncoder.SetIndexBuffer(indexBuffer.buffer, indexBuffer.indexFormat,
-                                             indexBuffer.offset, indexBuffer.size);
+        return device.CreateRenderPipeline(&descriptor);
+    }
 
-            for (auto vertexBufferParam : vertexBufferList) {
-                renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
-                                                  vertexBufferParam.offset, vertexBufferParam.size);
-            }
-            renderPassEncoder.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex,
-                                          firstInstance);
-            renderPassEncoder.End();
+    // Create a render pipeline using only one vertex-step-mode Float32x4 buffer
+    wgpu::RenderPipeline CreateBasicRenderPipeline(uint32_t bufferStride = kFloat32x4Stride) {
+        DAWN_ASSERT(bufferStride >= kFloat32x4Stride);
 
-            if (isSuccess) {
-                encoder.Finish();
-            } else {
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
+        std::vector<PipelineVertexBufferDesc> bufferDescList = {
+            {bufferStride, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
+        };
+
+        return CreateRenderPipelineWithBufferDesc(bufferDescList);
+    }
+
+    // Create a render pipeline using one vertex-step-mode Float32x4 buffer and one
+    // instance-step-mode Float32x2 buffer
+    wgpu::RenderPipeline CreateBasicRenderPipelineWithInstance(
+        uint32_t bufferStride1 = kFloat32x4Stride,
+        uint32_t bufferStride2 = kFloat32x2Stride) {
+        DAWN_ASSERT(bufferStride1 >= kFloat32x4Stride);
+        DAWN_ASSERT(bufferStride2 >= kFloat32x2Stride);
+
+        std::vector<PipelineVertexBufferDesc> bufferDescList = {
+            {bufferStride1, wgpu::VertexStepMode::Vertex, {{0, wgpu::VertexFormat::Float32x4}}},
+            {bufferStride2, wgpu::VertexStepMode::Instance, {{3, wgpu::VertexFormat::Float32x2}}},
+        };
+
+        return CreateRenderPipelineWithBufferDesc(bufferDescList);
+    }
+
+    // Create a render pipeline using one vertex-step-mode and one instance-step-mode buffer,
+    // both with a zero array stride. The minimal size of vertex step mode buffer should be 28,
+    // and the minimal size of instance step mode buffer should be 20.
+    wgpu::RenderPipeline CreateBasicRenderPipelineWithZeroArrayStride() {
+        std::vector<PipelineVertexBufferDesc> bufferDescList = {
+            {0,
+             wgpu::VertexStepMode::Vertex,
+             {{0, wgpu::VertexFormat::Float32x4, 0}, {1, wgpu::VertexFormat::Float32x2, 20}}},
+            {0,
+             wgpu::VertexStepMode::Instance,
+             // Two attributes are overlapped within this instance step mode vertex buffer
+             {{3, wgpu::VertexFormat::Float32x4, 4}, {7, wgpu::VertexFormat::Float32x3, 0}}},
+        };
+
+        return CreateRenderPipelineWithBufferDesc(bufferDescList);
+    }
+
+    void TestRenderPassDraw(const wgpu::RenderPipeline& pipeline,
+                            VertexBufferList vertexBufferList,
+                            uint32_t vertexCount,
+                            uint32_t instanceCount,
+                            uint32_t firstVertex,
+                            uint32_t firstInstance,
+                            bool isSuccess) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder =
+            encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+        renderPassEncoder.SetPipeline(pipeline);
+
+        for (auto vertexBufferParam : vertexBufferList) {
+            renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
+                                              vertexBufferParam.offset, vertexBufferParam.size);
         }
+        renderPassEncoder.Draw(vertexCount, instanceCount, firstVertex, firstInstance);
+        renderPassEncoder.End();
 
-        // Parameters list for index buffer. Should cover all IndexFormat, and the zero/non-zero
-        // offset and size case in SetIndexBuffer
-        const std::vector<IndexBufferParams> kIndexParamsList = {
-            {wgpu::IndexFormat::Uint32, 12 * sizeof(uint32_t), 0, wgpu::kWholeSize, 12},
-            {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), sizeof(uint32_t), wgpu::kWholeSize,
-             12},
-            {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), 0, 12 * sizeof(uint32_t), 12},
-            {wgpu::IndexFormat::Uint32, 14 * sizeof(uint32_t), sizeof(uint32_t),
-             12 * sizeof(uint32_t), 12},
+        if (isSuccess) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
 
-            {wgpu::IndexFormat::Uint16, 12 * sizeof(uint16_t), 0, wgpu::kWholeSize, 12},
-            {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), sizeof(uint16_t), wgpu::kWholeSize,
-             12},
-            {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), 0, 12 * sizeof(uint16_t), 12},
-            {wgpu::IndexFormat::Uint16, 14 * sizeof(uint16_t), sizeof(uint16_t),
-             12 * sizeof(uint16_t), 12},
-        };
-        // Parameters list for vertex-step-mode buffer. These parameters should cover different
-        // stride, buffer size, SetVertexBuffer size and offset.
-        const std::vector<VertexBufferParams> kVertexParamsList = {
-            // For stride = kFloat32x4Stride
-            {kFloat32x4Stride, 3 * kFloat32x4Stride, 0, wgpu::kWholeSize, 3},
-            // Non-zero offset
-            {kFloat32x4Stride, 4 * kFloat32x4Stride, kFloat32x4Stride, wgpu::kWholeSize, 3},
-            // Non-default size
-            {kFloat32x4Stride, 4 * kFloat32x4Stride, 0, 3 * kFloat32x4Stride, 3},
-            // Non-zero offset and size
-            {kFloat32x4Stride, 5 * kFloat32x4Stride, kFloat32x4Stride, 3 * kFloat32x4Stride, 3},
-            // For stride = 2 * kFloat32x4Stride
-            {(2 * kFloat32x4Stride), 3 * (2 * kFloat32x4Stride), 0, wgpu::kWholeSize, 3},
-            // Non-zero offset
-            {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
-             wgpu::kWholeSize, 3},
-            // Non-default size
-            {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), 0, 3 * (2 * kFloat32x4Stride), 3},
-            // Non-zero offset and size
-            {(2 * kFloat32x4Stride), 5 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
-             3 * (2 * kFloat32x4Stride), 3},
-        };
-        // Parameters list for instance-step-mode buffer.
-        const std::vector<VertexBufferParams> kInstanceParamsList = {
-            // For stride = kFloat32x2Stride
-            {kFloat32x2Stride, 5 * kFloat32x2Stride, 0, wgpu::kWholeSize, 5},
-            // Non-zero offset
-            {kFloat32x2Stride, 6 * kFloat32x2Stride, kFloat32x2Stride, wgpu::kWholeSize, 5},
-            // Non-default size
-            {kFloat32x2Stride, 6 * kFloat32x2Stride, 0, 5 * kFloat32x2Stride, 5},
-            // Non-zero offset and size
-            {kFloat32x2Stride, 7 * kFloat32x2Stride, kFloat32x2Stride, 5 * kFloat32x2Stride, 5},
-            // For stride = 3 * kFloat32x2Stride
-            {(3 * kFloat32x2Stride), 5 * (3 * kFloat32x2Stride), 0, wgpu::kWholeSize, 5},
-            // Non-zero offset
-            {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
-             wgpu::kWholeSize, 5},
-            // Non-default size
-            {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), 0, 5 * (3 * kFloat32x2Stride), 5},
-            // Non-zero offset and size
-            {(3 * kFloat32x2Stride), 7 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
-             5 * (3 * kFloat32x2Stride), 5},
-        };
+    void TestRenderPassDrawIndexed(const wgpu::RenderPipeline& pipeline,
+                                   IndexBufferDesc indexBuffer,
+                                   VertexBufferList vertexBufferList,
+                                   uint32_t indexCount,
+                                   uint32_t instanceCount,
+                                   uint32_t firstIndex,
+                                   int32_t baseVertex,
+                                   uint32_t firstInstance,
+                                   bool isSuccess) {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder =
+            encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+        renderPassEncoder.SetPipeline(pipeline);
 
-      private:
-        wgpu::ShaderModule fsModule;
-        utils::BasicRenderPass renderPass;
+        renderPassEncoder.SetIndexBuffer(indexBuffer.buffer, indexBuffer.indexFormat,
+                                         indexBuffer.offset, indexBuffer.size);
+
+        for (auto vertexBufferParam : vertexBufferList) {
+            renderPassEncoder.SetVertexBuffer(vertexBufferParam.slot, vertexBufferParam.buffer,
+                                              vertexBufferParam.offset, vertexBufferParam.size);
+        }
+        renderPassEncoder.DrawIndexed(indexCount, instanceCount, firstIndex, baseVertex,
+                                      firstInstance);
+        renderPassEncoder.End();
+
+        if (isSuccess) {
+            encoder.Finish();
+        } else {
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Parameters list for index buffer. Should cover all IndexFormat, and the zero/non-zero
+    // offset and size case in SetIndexBuffer
+    const std::vector<IndexBufferParams> kIndexParamsList = {
+        {wgpu::IndexFormat::Uint32, 12 * sizeof(uint32_t), 0, wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), sizeof(uint32_t), wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint32, 13 * sizeof(uint32_t), 0, 12 * sizeof(uint32_t), 12},
+        {wgpu::IndexFormat::Uint32, 14 * sizeof(uint32_t), sizeof(uint32_t), 12 * sizeof(uint32_t),
+         12},
+
+        {wgpu::IndexFormat::Uint16, 12 * sizeof(uint16_t), 0, wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), sizeof(uint16_t), wgpu::kWholeSize, 12},
+        {wgpu::IndexFormat::Uint16, 13 * sizeof(uint16_t), 0, 12 * sizeof(uint16_t), 12},
+        {wgpu::IndexFormat::Uint16, 14 * sizeof(uint16_t), sizeof(uint16_t), 12 * sizeof(uint16_t),
+         12},
+    };
+    // Parameters list for vertex-step-mode buffer. These parameters should cover different
+    // stride, buffer size, SetVertexBuffer size and offset.
+    const std::vector<VertexBufferParams> kVertexParamsList = {
+        // For stride = kFloat32x4Stride
+        {kFloat32x4Stride, 3 * kFloat32x4Stride, 0, wgpu::kWholeSize, 3},
+        // Non-zero offset
+        {kFloat32x4Stride, 4 * kFloat32x4Stride, kFloat32x4Stride, wgpu::kWholeSize, 3},
+        // Non-default size
+        {kFloat32x4Stride, 4 * kFloat32x4Stride, 0, 3 * kFloat32x4Stride, 3},
+        // Non-zero offset and size
+        {kFloat32x4Stride, 5 * kFloat32x4Stride, kFloat32x4Stride, 3 * kFloat32x4Stride, 3},
+        // For stride = 2 * kFloat32x4Stride
+        {(2 * kFloat32x4Stride), 3 * (2 * kFloat32x4Stride), 0, wgpu::kWholeSize, 3},
+        // Non-zero offset
+        {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
+         wgpu::kWholeSize, 3},
+        // Non-default size
+        {(2 * kFloat32x4Stride), 4 * (2 * kFloat32x4Stride), 0, 3 * (2 * kFloat32x4Stride), 3},
+        // Non-zero offset and size
+        {(2 * kFloat32x4Stride), 5 * (2 * kFloat32x4Stride), (2 * kFloat32x4Stride),
+         3 * (2 * kFloat32x4Stride), 3},
+    };
+    // Parameters list for instance-step-mode buffer.
+    const std::vector<VertexBufferParams> kInstanceParamsList = {
+        // For stride = kFloat32x2Stride
+        {kFloat32x2Stride, 5 * kFloat32x2Stride, 0, wgpu::kWholeSize, 5},
+        // Non-zero offset
+        {kFloat32x2Stride, 6 * kFloat32x2Stride, kFloat32x2Stride, wgpu::kWholeSize, 5},
+        // Non-default size
+        {kFloat32x2Stride, 6 * kFloat32x2Stride, 0, 5 * kFloat32x2Stride, 5},
+        // Non-zero offset and size
+        {kFloat32x2Stride, 7 * kFloat32x2Stride, kFloat32x2Stride, 5 * kFloat32x2Stride, 5},
+        // For stride = 3 * kFloat32x2Stride
+        {(3 * kFloat32x2Stride), 5 * (3 * kFloat32x2Stride), 0, wgpu::kWholeSize, 5},
+        // Non-zero offset
+        {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
+         wgpu::kWholeSize, 5},
+        // Non-default size
+        {(3 * kFloat32x2Stride), 6 * (3 * kFloat32x2Stride), 0, 5 * (3 * kFloat32x2Stride), 5},
+        // Non-zero offset and size
+        {(3 * kFloat32x2Stride), 7 * (3 * kFloat32x2Stride), (3 * kFloat32x2Stride),
+         5 * (3 * kFloat32x2Stride), 5},
     };
 
-    // Control case for Draw
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawBasic) {
-        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+  private:
+    wgpu::ShaderModule fsModule;
+    utils::BasicRenderPass renderPass;
+};
 
-        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+// Control case for Draw
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawBasic) {
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
 
-        {
-            // Implicit size
-            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, true);
-        }
+    wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
 
-        {
-            // Explicit zero size
-            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, 0}};
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, false);
-        }
+    {
+        // Implicit size
+        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, true);
     }
 
-    // Verify vertex buffer OOB for non-instanced Draw are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithoutInstance) {
-        for (VertexBufferParams params : kVertexParamsList) {
-            // Create a render pipeline without instance step mode buffer
-            wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline(params.bufferStride);
+    {
+        // Explicit zero size
+        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, 0}};
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 1, 0, 0, false);
+    }
+}
 
-            // Build vertex buffer for 3 vertices
-            wgpu::Buffer vertexBuffer = CreateBuffer(params.bufferSize);
+// Verify vertex buffer OOB for non-instanced Draw are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithoutInstance) {
+    for (VertexBufferParams params : kVertexParamsList) {
+        // Create a render pipeline without instance step mode buffer
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline(params.bufferStride);
+
+        // Build vertex buffer for 3 vertices
+        wgpu::Buffer vertexBuffer = CreateBuffer(params.bufferSize);
+        VertexBufferList vertexBufferList = {
+            {0, vertexBuffer, params.bufferOffsetForEncoder, params.bufferSizeForEncoder}};
+
+        uint32_t n = params.maxValidAccessNumber;
+        // It is ok to draw n vertices with vertex buffer
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 0, 0, true);
+        // It is ok to draw n-1 vertices with offset 1
+        TestRenderPassDraw(pipeline, vertexBufferList, n - 1, 1, 1, 0, true);
+        // Drawing more vertices will cause OOB, even if not enough for another primitive
+        TestRenderPassDraw(pipeline, vertexBufferList, n + 1, 1, 0, 0, false);
+        // Drawing n vertices with non-zero offset will cause OOB
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 1, 0, false);
+        // It is ok to draw any number of instances, as we have no instance-mode buffer
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 0, true);
+        TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 5, true);
+    }
+}
+
+// Verify vertex buffer OOB for instanced Draw are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithInstance) {
+    for (VertexBufferParams vertexParams : kVertexParamsList) {
+        for (VertexBufferParams instanceParams : kInstanceParamsList) {
+            // Create pipeline with given buffer stride
+            wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
+                vertexParams.bufferStride, instanceParams.bufferStride);
+
+            // Build vertex buffer
+            wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+            wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
             VertexBufferList vertexBufferList = {
-                {0, vertexBuffer, params.bufferOffsetForEncoder, params.bufferSizeForEncoder}};
+                {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                 vertexParams.bufferSizeForEncoder},
+                {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                 instanceParams.bufferSizeForEncoder},
+            };
 
-            uint32_t n = params.maxValidAccessNumber;
-            // It is ok to draw n vertices with vertex buffer
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 0, 0, true);
-            // It is ok to draw n-1 vertices with offset 1
-            TestRenderPassDraw(pipeline, vertexBufferList, n - 1, 1, 1, 0, true);
-            // Drawing more vertices will cause OOB, even if not enough for another primitive
-            TestRenderPassDraw(pipeline, vertexBufferList, n + 1, 1, 0, 0, false);
-            // Drawing n vertices will non-zero offset will cause OOB
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 1, 1, 0, false);
-            // It is ok to draw any number of instances, as we have no instance-mode buffer
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 0, true);
-            TestRenderPassDraw(pipeline, vertexBufferList, n, 5, 0, 5, true);
+            uint32_t vert = vertexParams.maxValidAccessNumber;
+            uint32_t inst = instanceParams.maxValidAccessNumber;
+            // It is ok to draw vert vertices
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 0, 0, true);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert - 1, 1, 1, 0, true);
+            // It is ok to draw vert vertices and inst instances
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 0, true);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst - 1, 0, 1, true);
+            // more vertices causing OOB
+            TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, 1, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 1, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, inst, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 0, false);
+            // more instances causing OOB
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 1, false);
+            // Both OOB
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
+            TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 1, false);
         }
     }
+}
 
-    // Verify vertex buffer OOB for instanced Draw are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawVertexBufferOutOfBoundWithInstance) {
-        for (VertexBufferParams vertexParams : kVertexParamsList) {
-            for (VertexBufferParams instanceParams : kInstanceParamsList) {
-                // Create pipeline with given buffer stride
-                wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
-                    vertexParams.bufferStride, instanceParams.bufferStride);
+// Control case for DrawIndexed
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedBasic) {
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
 
-                // Build vertex buffer
-                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
-                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+    // Build index buffer for 12 indexes
+    wgpu::Buffer indexBuffer = CreateBuffer(12 * sizeof(uint32_t), wgpu::BufferUsage::Index);
 
-                VertexBufferList vertexBufferList = {
-                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
-                     vertexParams.bufferSizeForEncoder},
-                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
-                     instanceParams.bufferSizeForEncoder},
-                };
+    // Build vertex buffer for 3 vertices
+    wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+    VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
 
-                uint32_t vert = vertexParams.maxValidAccessNumber;
-                uint32_t inst = instanceParams.maxValidAccessNumber;
-                // It is ok to draw vert vertices
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 0, 0, true);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert - 1, 1, 1, 0, true);
-                // It is ok to draw vert vertices and inst instences
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 0, true);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst - 1, 0, 1, true);
-                // more vertices causing OOB
-                TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, 1, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, 1, 1, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert + 1, inst, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 0, false);
-                // more instances causing OOB
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 0, 1, false);
-                // Both OOB
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst + 1, 0, 0, false);
-                TestRenderPassDraw(pipeline, vertexBufferList, vert, inst, 1, 1, false);
+    IndexBufferDesc indexBufferDesc = {indexBuffer, wgpu::IndexFormat::Uint32};
+
+    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 1, 0, 0, 0, true);
+}
+
+// Verify index buffer OOB for DrawIndexed are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedIndexBufferOOB) {
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
+
+    for (IndexBufferParams params : kIndexParamsList) {
+        // Build index buffer using the given params
+        wgpu::Buffer indexBuffer = CreateBuffer(params.indexBufferSize, wgpu::BufferUsage::Index);
+        // Build vertex buffer for 3 vertices
+        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
+        // Build vertex buffer for 5 instances
+        wgpu::Buffer instanceBuffer = CreateBuffer(5 * kFloat32x2Stride);
+
+        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize},
+                                             {1, instanceBuffer, 0, wgpu::kWholeSize}};
+
+        IndexBufferDesc indexBufferDesc = {indexBuffer, params.indexFormat,
+                                           params.indexBufferOffsetForEncoder,
+                                           params.indexBufferSizeForEncoder};
+
+        uint32_t n = params.maxValidIndexNumber;
+
+        // Control case
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 0, 0, true);
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n - 1, 5, 1, 0, 0,
+                                  true);
+        // Index buffer OOB, indexCount too large
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0, 0, 0,
+                                  false);
+        // Index buffer OOB, indexCount + firstIndex too large
+        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 1, 0, 0,
+                                  false);
+
+        if (!HasToggleEnabled("disable_base_vertex")) {
+            // baseVertex is not considered in CPU validation and has no effect on validation
+            // Although baseVertex is too large, it will still pass
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 100, 0,
+                                      true);
+            // Index buffer OOB, indexCount too large
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0, 100,
+                                      0, false);
+        }
+    }
+}
+
+// Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedVertexBufferOOB) {
+    for (VertexBufferParams vertexParams : kVertexParamsList) {
+        for (VertexBufferParams instanceParams : kInstanceParamsList) {
+            // Create pipeline with given buffer stride
+            wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
+                vertexParams.bufferStride, instanceParams.bufferStride);
+
+            auto indexFormat = wgpu::IndexFormat::Uint32;
+            auto indexStride = sizeof(uint32_t);
+
+            // Build index buffer for 12 indexes
+            wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+            // Build vertex buffer for vertices
+            wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+            // Build vertex buffer for instances
+            wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
+            VertexBufferList vertexBufferList = {
+                {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                 vertexParams.bufferSizeForEncoder},
+                {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                 instanceParams.bufferSizeForEncoder}};
+
+            IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
+
+            uint32_t inst = instanceParams.maxValidAccessNumber;
+            // Control case
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst, 0, 0,
+                                      0, true);
+            // Vertex buffer (stepMode = instance) OOB, instanceCount too large
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst + 1, 0,
+                                      0, 0, false);
+
+            if (!HasToggleEnabled("disable_base_instance")) {
+                // firstInstance is considered in CPU validation
+                // Vertex buffer (stepMode = instance) in bound
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst - 1,
+                                          0, 0, 1, true);
+                // Vertex buffer (stepMode = instance) OOB, instanceCount + firstInstance too
+                // large
+                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst, 0,
+                                          0, 1, false);
             }
         }
     }
+}
 
-    // Control case for DrawIndexed
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedBasic) {
-        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+// Verify vertex buffer OOB with zero array stride are caught in command encoder
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, ZeroArrayStrideVertexBufferOOB) {
+    // In this test, we use VertexBufferParams.maxValidAccessNumber > 0 to indicate that such
+    // buffer parameter meet the requirement of pipeline, and maxValidAccessNumber == 0 to
+    // indicate that such buffer parameter will cause OOB.
+    const std::vector<VertexBufferParams> kVertexParamsListForZeroStride = {
+        // Control case
+        {0, 28, 0, wgpu::kWholeSize, 1},
+        // Non-zero offset
+        {0, 28, 4, wgpu::kWholeSize, 0},
+        {0, 28, 28, wgpu::kWholeSize, 0},
+        // Non-default size
+        {0, 28, 0, 28, 1},
+        {0, 28, 0, 27, 0},
+        // Non-zero offset and size
+        {0, 32, 4, 28, 1},
+        {0, 31, 4, 27, 0},
+        {0, 31, 4, wgpu::kWholeSize, 0},
+    };
 
-        // Build index buffer for 12 indexes
-        wgpu::Buffer indexBuffer = CreateBuffer(12 * sizeof(uint32_t), wgpu::BufferUsage::Index);
+    const std::vector<VertexBufferParams> kInstanceParamsListForZeroStride = {
+        // Control case
+        {0, 20, 0, wgpu::kWholeSize, 1},
+        // Non-zero offset
+        {0, 24, 4, wgpu::kWholeSize, 1},
+        {0, 23, 4, wgpu::kWholeSize, 0},
+        {0, 20, 4, wgpu::kWholeSize, 0},
+        {0, 20, 20, wgpu::kWholeSize, 0},
+        // Non-default size
+        {0, 21, 0, 20, 1},
+        {0, 20, 0, 19, 0},
+        // Non-zero offset and size
+        {0, 30, 4, 20, 1},
+        {0, 30, 4, 19, 0},
+    };
 
-        // Build vertex buffer for 3 vertices
-        wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
-        VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize}};
+    // Build a pipeline that require a vertex step mode vertex buffer no smaller than 28 bytes
+    // and an instance step mode buffer no smaller than 20 bytes
+    wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithZeroArrayStride();
 
-        IndexBufferDesc indexBufferDesc = {indexBuffer, wgpu::IndexFormat::Uint32};
+    for (VertexBufferParams vertexParams : kVertexParamsListForZeroStride) {
+        for (VertexBufferParams instanceParams : kInstanceParamsListForZeroStride) {
+            auto indexFormat = wgpu::IndexFormat::Uint32;
+            auto indexStride = sizeof(uint32_t);
 
-        TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 1, 0, 0, 0,
-                                  true);
+            // Build index buffer for 12 indexes
+            wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+            // Build vertex buffer for vertices
+            wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
+            // Build vertex buffer for instances
+            wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
+
+            VertexBufferList vertexBufferList = {
+                {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
+                 vertexParams.bufferSizeForEncoder},
+                {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
+                 instanceParams.bufferSizeForEncoder}};
+
+            IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
+
+            const bool isSuccess = (vertexParams.maxValidAccessNumber > 0) &&
+                                   (instanceParams.maxValidAccessNumber > 0);
+            // vertexCount and instanceCount doesn't matter, as array stride is zero and all
+            // vertex/instance access the same space of buffer
+            TestRenderPassDraw(pipeline, vertexBufferList, 100, 100, 0, 0, isSuccess);
+            // indexCount doesn't matter as long as no index buffer OOB happened
+            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 100, 0, 0, 0,
+                                      isSuccess);
+        }
     }
+}
 
-    // Verify index buffer OOB for DrawIndexed are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedIndexBufferOOB) {
+// Verify that if setVertexBuffer and/or setIndexBuffer for multiple times, only the last one is
+// taken into account
+TEST_F(DrawVertexAndIndexBufferOOBValidationTests, SetBufferMultipleTime) {
+    wgpu::IndexFormat indexFormat = wgpu::IndexFormat::Uint32;
+    uint32_t indexStride = sizeof(uint32_t);
+
+    // Build index buffer for 11 indexes
+    wgpu::Buffer indexBuffer11 = CreateBuffer(11 * indexStride, wgpu::BufferUsage::Index);
+    // Build index buffer for 12 indexes
+    wgpu::Buffer indexBuffer12 = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
+    // Build vertex buffer for 2 vertices
+    wgpu::Buffer vertexBuffer2 = CreateBuffer(2 * kFloat32x4Stride);
+    // Build vertex buffer for 3 vertices
+    wgpu::Buffer vertexBuffer3 = CreateBuffer(3 * kFloat32x4Stride);
+    // Build vertex buffer for 4 instances
+    wgpu::Buffer instanceBuffer4 = CreateBuffer(4 * kFloat32x2Stride);
+    // Build vertex buffer for 5 instances
+    wgpu::Buffer instanceBuffer5 = CreateBuffer(5 * kFloat32x2Stride);
+
+    // Test for setting vertex buffer for multiple times
+    {
         wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
 
-        for (IndexBufferParams params : kIndexParamsList) {
-            // Build index buffer use given params
-            wgpu::Buffer indexBuffer =
-                CreateBuffer(params.indexBufferSize, wgpu::BufferUsage::Index);
-            // Build vertex buffer for 3 vertices
-            wgpu::Buffer vertexBuffer = CreateBuffer(3 * kFloat32x4Stride);
-            // Build vertex buffer for 5 instances
-            wgpu::Buffer instanceBuffer = CreateBuffer(5 * kFloat32x2Stride);
+        // Set to vertexBuffer3 and instanceBuffer5 at last
+        VertexBufferList vertexBufferList = {{0, vertexBuffer2, 0, wgpu::kWholeSize},
+                                             {1, instanceBuffer4, 0, wgpu::kWholeSize},
+                                             {1, instanceBuffer5, 0, wgpu::kWholeSize},
+                                             {0, vertexBuffer3, 0, wgpu::kWholeSize}};
 
-            VertexBufferList vertexBufferList = {{0, vertexBuffer, 0, wgpu::kWholeSize},
-                                                 {1, instanceBuffer, 0, wgpu::kWholeSize}};
+        // For Draw, the max vertexCount is 3 and the max instanceCount is 5
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 5, 0, 0, true);
+        TestRenderPassDraw(pipeline, vertexBufferList, 4, 5, 0, 0, false);
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 6, 0, 0, false);
+        // For DrawIndex, the max instanceCount is 5
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 5,
+                                  0, 0, 0, true);
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 6,
+                                  0, 0, 0, false);
 
-            IndexBufferDesc indexBufferDesc = {indexBuffer, params.indexFormat,
-                                               params.indexBufferOffsetForEncoder,
-                                               params.indexBufferSizeForEncoder};
+        // Set to vertexBuffer2 and instanceBuffer4 at last
+        vertexBufferList = VertexBufferList{{0, vertexBuffer3, 0, wgpu::kWholeSize},
+                                            {1, instanceBuffer5, 0, wgpu::kWholeSize},
+                                            {0, vertexBuffer2, 0, wgpu::kWholeSize},
+                                            {1, instanceBuffer4, 0, wgpu::kWholeSize}};
 
-            uint32_t n = params.maxValidIndexNumber;
-
-            // Control case
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 0, 0,
-                                      true);
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n - 1, 5, 1, 0,
-                                      0, true);
-            // Index buffer OOB, indexCount too large
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0, 0,
-                                      0, false);
-            // Index buffer OOB, indexCount + firstIndex too large
-            TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 1, 0, 0,
-                                      false);
-
-            if (!HasToggleEnabled("disable_base_vertex")) {
-                // baseVertex is not considered in CPU validation and has no effect on validation
-                // Although baseVertex is too large, it will still pass
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n, 5, 0, 100,
-                                          0, true);
-                // Index buffer OOB, indexCount too large
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, n + 1, 5, 0,
-                                          100, 0, false);
-            }
-        }
+        // For Draw, the max vertexCount is 2 and the max instanceCount is 4
+        TestRenderPassDraw(pipeline, vertexBufferList, 2, 4, 0, 0, true);
+        TestRenderPassDraw(pipeline, vertexBufferList, 3, 4, 0, 0, false);
+        TestRenderPassDraw(pipeline, vertexBufferList, 2, 5, 0, 0, false);
+        // For DrawIndexed, the max instanceCount is 4
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 4,
+                                  0, 0, 0, true);
+        TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12, 5,
+                                  0, 0, 0, false);
     }
 
-    // Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, DrawIndexedVertexBufferOOB) {
-        for (VertexBufferParams vertexParams : kVertexParamsList) {
-            for (VertexBufferParams instanceParams : kInstanceParamsList) {
-                // Create pipeline with given buffer stride
-                wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance(
-                    vertexParams.bufferStride, instanceParams.bufferStride);
+    // Test for setIndexBuffer multiple times
+    {
+        wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
 
-                auto indexFormat = wgpu::IndexFormat::Uint32;
-                auto indexStride = sizeof(uint32_t);
-
-                // Build index buffer for 12 indexes
-                wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
-                // Build vertex buffer for vertices
-                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
-                // Build vertex buffer for instances
-                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
-
-                VertexBufferList vertexBufferList = {
-                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
-                     vertexParams.bufferSizeForEncoder},
-                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
-                     instanceParams.bufferSizeForEncoder}};
-
-                IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
-
-                uint32_t inst = instanceParams.maxValidAccessNumber;
-                // Control case
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst, 0,
-                                          0, 0, true);
-                // Vertex buffer (stepMode = instance) OOB, instanceCount too large
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst + 1,
-                                          0, 0, 0, false);
-
-                if (!HasToggleEnabled("disable_base_instance")) {
-                    // firstInstance is considered in CPU validation
-                    // Vertex buffer (stepMode = instance) in bound
-                    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12,
-                                              inst - 1, 0, 0, 1, true);
-                    // Vertex buffer (stepMode = instance) OOB, instanceCount + firstInstance too
-                    // large
-                    TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, inst,
-                                              0, 0, 1, false);
-                }
-            }
-        }
-    }
-
-    // Verify instance mode vertex buffer OOB for DrawIndexed are caught in command encoder
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, ZeroArrayStrideVertexBufferOOB) {
-        // In this test, we use VertexBufferParams.maxValidAccessNumber > 0 to indicate that such
-        // buffer parameter meet the requirement of pipeline, and maxValidAccessNumber == 0 to
-        // indicate that such buffer parameter will cause OOB.
-        const std::vector<VertexBufferParams> kVertexParamsListForZeroStride = {
-            // Control case
-            {0, 28, 0, wgpu::kWholeSize, 1},
-            // Non-zero offset
-            {0, 28, 4, wgpu::kWholeSize, 0},
-            {0, 28, 28, wgpu::kWholeSize, 0},
-            // Non-default size
-            {0, 28, 0, 28, 1},
-            {0, 28, 0, 27, 0},
-            // Non-zero offset and size
-            {0, 32, 4, 28, 1},
-            {0, 31, 4, 27, 0},
-            {0, 31, 4, wgpu::kWholeSize, 0},
-        };
-
-        const std::vector<VertexBufferParams> kInstanceParamsListForZeroStride = {
-            // Control case
-            {0, 20, 0, wgpu::kWholeSize, 1},
-            // Non-zero offset
-            {0, 24, 4, wgpu::kWholeSize, 1},
-            {0, 23, 4, wgpu::kWholeSize, 0},
-            {0, 20, 4, wgpu::kWholeSize, 0},
-            {0, 20, 20, wgpu::kWholeSize, 0},
-            // Non-default size
-            {0, 21, 0, 20, 1},
-            {0, 20, 0, 19, 0},
-            // Non-zero offset and size
-            {0, 30, 4, 20, 1},
-            {0, 30, 4, 19, 0},
-        };
-
-        // Build a pipeline that require a vertex step mode vertex buffer no smaller than 28 bytes
-        // and an instance step mode buffer no smaller than 20 bytes
-        wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithZeroArrayStride();
-
-        for (VertexBufferParams vertexParams : kVertexParamsListForZeroStride) {
-            for (VertexBufferParams instanceParams : kInstanceParamsListForZeroStride) {
-                auto indexFormat = wgpu::IndexFormat::Uint32;
-                auto indexStride = sizeof(uint32_t);
-
-                // Build index buffer for 12 indexes
-                wgpu::Buffer indexBuffer = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
-                // Build vertex buffer for vertices
-                wgpu::Buffer vertexBuffer = CreateBuffer(vertexParams.bufferSize);
-                // Build vertex buffer for instances
-                wgpu::Buffer instanceBuffer = CreateBuffer(instanceParams.bufferSize);
-
-                VertexBufferList vertexBufferList = {
-                    {0, vertexBuffer, vertexParams.bufferOffsetForEncoder,
-                     vertexParams.bufferSizeForEncoder},
-                    {1, instanceBuffer, instanceParams.bufferOffsetForEncoder,
-                     instanceParams.bufferSizeForEncoder}};
-
-                IndexBufferDesc indexBufferDesc = {indexBuffer, indexFormat};
-
-                const bool isSuccess = (vertexParams.maxValidAccessNumber > 0) &&
-                                       (instanceParams.maxValidAccessNumber > 0);
-                // vertexCount and instanceCount doesn't matter, as array stride is zero and all
-                // vertex/instance access the same space of buffer
-                TestRenderPassDraw(pipeline, vertexBufferList, 100, 100, 0, 0, isSuccess);
-                // indexCount doesn't matter as long as no index buffer OOB happened
-                TestRenderPassDrawIndexed(pipeline, indexBufferDesc, vertexBufferList, 12, 100, 0,
-                                          0, 0, isSuccess);
-            }
-        }
-    }
-
-    // Verify that if setVertexBuffer and/or setIndexBuffer for multiple times, only the last one is
-    // taken into account
-    TEST_F(DrawVertexAndIndexBufferOOBValidationTests, SetBufferMultipleTime) {
-        wgpu::IndexFormat indexFormat = wgpu::IndexFormat::Uint32;
-        uint32_t indexStride = sizeof(uint32_t);
-
-        // Build index buffer for 11 indexes
-        wgpu::Buffer indexBuffer11 = CreateBuffer(11 * indexStride, wgpu::BufferUsage::Index);
-        // Build index buffer for 12 indexes
-        wgpu::Buffer indexBuffer12 = CreateBuffer(12 * indexStride, wgpu::BufferUsage::Index);
-        // Build vertex buffer for 2 vertices
-        wgpu::Buffer vertexBuffer2 = CreateBuffer(2 * kFloat32x4Stride);
-        // Build vertex buffer for 3 vertices
-        wgpu::Buffer vertexBuffer3 = CreateBuffer(3 * kFloat32x4Stride);
-        // Build vertex buffer for 4 instances
-        wgpu::Buffer instanceBuffer4 = CreateBuffer(4 * kFloat32x2Stride);
-        // Build vertex buffer for 5 instances
-        wgpu::Buffer instanceBuffer5 = CreateBuffer(5 * kFloat32x2Stride);
-
-        // Test for setting vertex buffer for multiple times
         {
-            wgpu::RenderPipeline pipeline = CreateBasicRenderPipelineWithInstance();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            // Set to vertexBuffer3 and instanceBuffer5 at last
-            VertexBufferList vertexBufferList = {{0, vertexBuffer2, 0, wgpu::kWholeSize},
-                                                 {1, instanceBuffer4, 0, wgpu::kWholeSize},
-                                                 {1, instanceBuffer5, 0, wgpu::kWholeSize},
-                                                 {0, vertexBuffer3, 0, wgpu::kWholeSize}};
+            // Index buffer is set to indexBuffer12 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
 
-            // For Draw, the max vertexCount is 3 and the max instanceCount is 5
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 5, 0, 0, true);
-            TestRenderPassDraw(pipeline, vertexBufferList, 4, 5, 0, 0, false);
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 6, 0, 0, false);
-            // For DrawIndex, the max instanceCount is 5
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      5, 0, 0, 0, true);
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      6, 0, 0, 0, false);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be ok to draw 12 indices
+            renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-            // Set to vertexBuffer2 and instanceBuffer4 at last
-            vertexBufferList = VertexBufferList{{0, vertexBuffer3, 0, wgpu::kWholeSize},
-                                                {1, instanceBuffer5, 0, wgpu::kWholeSize},
-                                                {0, vertexBuffer2, 0, wgpu::kWholeSize},
-                                                {1, instanceBuffer4, 0, wgpu::kWholeSize}};
-
-            // For Draw, the max vertexCount is 2 and the max instanceCount is 4
-            TestRenderPassDraw(pipeline, vertexBufferList, 2, 4, 0, 0, true);
-            TestRenderPassDraw(pipeline, vertexBufferList, 3, 4, 0, 0, false);
-            TestRenderPassDraw(pipeline, vertexBufferList, 2, 5, 0, 0, false);
-            // For DrawIndex, the max instanceCount is 4
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      4, 0, 0, 0, true);
-            TestRenderPassDrawIndexed(pipeline, {indexBuffer12, indexFormat}, vertexBufferList, 12,
-                                      5, 0, 0, 0, false);
+            // Expect success
+            encoder.Finish();
         }
 
-        // Test for setIndexBuffer multiple times
         {
-            wgpu::RenderPipeline pipeline = CreateBasicRenderPipeline();
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
+            // Index buffer is set to indexBuffer12 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
 
-                // Index buffer is set to indexBuffer12 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be index buffer OOB to draw 13 indices
+            renderPassEncoder.DrawIndexed(13, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be ok to draw 12 index
-                renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
-                renderPassEncoder.End();
+            // Expect failure
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
 
-                // Expect success
-                encoder.Finish();
-            }
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
+            // Index buffer is set to indexBuffer11 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
 
-                // Index buffer is set to indexBuffer12 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be ok to draw 11 indices
+            renderPassEncoder.DrawIndexed(11, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be index buffer OOB to draw 13 index
-                renderPassEncoder.DrawIndexed(13, 1, 0, 0, 0);
-                renderPassEncoder.End();
+            // Expect success
+            encoder.Finish();
+        }
 
-                // Expect failure
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder renderPassEncoder =
+                encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
+            renderPassEncoder.SetPipeline(pipeline);
 
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
+            // Index buffer is set to indexBuffer11 at last
+            renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
+            renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
 
-                // Index buffer is set to indexBuffer11 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
+            renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
+            // It should be index buffer OOB to draw 12 indices
+            renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
+            renderPassEncoder.End();
 
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be ok to draw 11 index
-                renderPassEncoder.DrawIndexed(11, 1, 0, 0, 0);
-                renderPassEncoder.End();
-
-                // Expect success
-                encoder.Finish();
-            }
-
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder renderPassEncoder =
-                    encoder.BeginRenderPass(GetBasicRenderPassDescriptor());
-                renderPassEncoder.SetPipeline(pipeline);
-
-                // Index buffer is set to indexBuffer11 at last
-                renderPassEncoder.SetIndexBuffer(indexBuffer12, indexFormat);
-                renderPassEncoder.SetIndexBuffer(indexBuffer11, indexFormat);
-
-                renderPassEncoder.SetVertexBuffer(0, vertexBuffer3);
-                // It should be index buffer OOB to draw 12 index
-                renderPassEncoder.DrawIndexed(12, 1, 0, 0, 0);
-                renderPassEncoder.End();
-
-                // Expect failure
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
+            // Expect failure
+            ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp b/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
index 3c4aba3..340b8f7 100644
--- a/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
+++ b/src/dawn/tests/unittests/validation/ExternalTextureTests.cpp
@@ -18,469 +18,468 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    class ExternalTextureTest : public ValidationTest {
-      public:
-        wgpu::TextureDescriptor CreateTextureDescriptor(
-            wgpu::TextureFormat format = kDefaultTextureFormat) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.size.width = kWidth;
-            descriptor.size.height = kHeight;
-            descriptor.size.depthOrArrayLayers = kDefaultDepth;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            descriptor.sampleCount = kDefaultSampleCount;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.format = format;
-            descriptor.usage = kDefaultUsage;
-            return descriptor;
-        }
-
-      protected:
-        void SetUp() override {
-            ValidationTest::SetUp();
-
-            queue = device.GetQueue();
-        }
-
-        static constexpr uint32_t kWidth = 32;
-        static constexpr uint32_t kHeight = 32;
-        static constexpr uint32_t kDefaultDepth = 1;
-        static constexpr uint32_t kDefaultMipLevels = 1;
-        static constexpr uint32_t kDefaultSampleCount = 1;
-        static constexpr wgpu::TextureUsage kDefaultUsage =
-            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
-
-        static constexpr wgpu::TextureFormat kDefaultTextureFormat =
-            wgpu::TextureFormat::RGBA8Unorm;
-        static constexpr wgpu::TextureFormat kBiplanarPlane0Format = wgpu::TextureFormat::R8Unorm;
-        static constexpr wgpu::TextureFormat kBiplanarPlane1Format = wgpu::TextureFormat::RG8Unorm;
-
-        wgpu::Queue queue;
-    };
-
-    TEST_F(ExternalTextureTest, CreateExternalTextureValidation) {
-        // Creating an external texture from a 2D, single-subresource texture should succeed.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture.CreateView();
-            device.CreateExternalTexture(&externalDesc);
-        }
-
-        // Creating an external texture from a non-2D texture should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.dimension = wgpu::TextureDimension::e3D;
-            textureDescriptor.usage = wgpu::TextureUsage::TextureBinding;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture from a texture with mip count > 1 should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.mipLevelCount = 2;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture from a texture without TextureUsage::TextureBinding should
-        // fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.mipLevelCount = 2;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture with an unsupported format should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.format = wgpu::TextureFormat::R8Uint;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture with an multisampled texture should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            textureDescriptor.sampleCount = 4;
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = internalTexture.CreateView();
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating an external texture with an error texture view should fail.
-        {
-            wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-            wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
-
-            wgpu::TextureViewDescriptor errorViewDescriptor;
-            errorViewDescriptor.format = kDefaultTextureFormat;
-            errorViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
-            errorViewDescriptor.mipLevelCount = 1;
-            errorViewDescriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(wgpu::TextureView errorTextureView =
-                                    internalTexture.CreateView(&errorViewDescriptor));
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = errorTextureView;
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
+class ExternalTextureTest : public ValidationTest {
+  public:
+    wgpu::TextureDescriptor CreateTextureDescriptor(
+        wgpu::TextureFormat format = kDefaultTextureFormat) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size.width = kWidth;
+        descriptor.size.height = kHeight;
+        descriptor.size.depthOrArrayLayers = kDefaultDepth;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        descriptor.sampleCount = kDefaultSampleCount;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.format = format;
+        descriptor.usage = kDefaultUsage;
+        return descriptor;
     }
 
-    // Test that external texture creation works as expected in multiplane scenarios.
-    TEST_F(ExternalTextureTest, CreateMultiplanarExternalTextureValidation) {
-        // Creating an external texture from two 2D, single-subresource textures with a biplanar
-        // format should succeed.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane0Format);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane1Format);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-
-            device.CreateExternalTexture(&externalDesc);
-        }
-
-        // Creating a multiplanar external texture with an unsupported format for plane0 should
-        // result in an error.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kDefaultTextureFormat);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane1Format);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating a multiplanar external texture with an unsupported format for plane1 should
-        // result in an error.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane0Format);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kDefaultTextureFormat);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
-
-        // Creating a multiplanar external texture with a non-sRGB color space should fail.
-        {
-            wgpu::TextureDescriptor plane0TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane0Format);
-            wgpu::TextureDescriptor plane1TextureDescriptor =
-                CreateTextureDescriptor(kBiplanarPlane1Format);
-            wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
-            wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
-
-            wgpu::ExternalTextureDescriptor externalDesc;
-            externalDesc.plane0 = texture0.CreateView();
-            externalDesc.plane1 = texture1.CreateView();
-            externalDesc.colorSpace = wgpu::PredefinedColorSpace::Undefined;
-            ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
-        }
+        queue = device.GetQueue();
     }
 
-    // Test that submitting a render pass that contains a destroyed external texture results in
-    // an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInRenderPass) {
+    static constexpr uint32_t kWidth = 32;
+    static constexpr uint32_t kHeight = 32;
+    static constexpr uint32_t kDefaultDepth = 1;
+    static constexpr uint32_t kDefaultMipLevels = 1;
+    static constexpr uint32_t kDefaultSampleCount = 1;
+    static constexpr wgpu::TextureUsage kDefaultUsage =
+        wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+
+    static constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+    static constexpr wgpu::TextureFormat kBiplanarPlane0Format = wgpu::TextureFormat::R8Unorm;
+    static constexpr wgpu::TextureFormat kBiplanarPlane1Format = wgpu::TextureFormat::RG8Unorm;
+
+    wgpu::Queue queue;
+};
+
+TEST_F(ExternalTextureTest, CreateExternalTextureValidation) {
+    // Creating an external texture from a 2D, single-subresource texture should succeed.
+    {
         wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
         wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
 
         wgpu::ExternalTextureDescriptor externalDesc;
         externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+        device.CreateExternalTexture(&externalDesc);
+    }
 
-        // Create a bind group that contains the external texture.
+    // Creating an external texture from a non-2D texture should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.dimension = wgpu::TextureDimension::e3D;
+        textureDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture from a texture with mip count > 1 should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.mipLevelCount = 2;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture from a texture without TextureUsage::TextureBinding should
+    // fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.mipLevelCount = 2;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture with an unsupported format should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.format = wgpu::TextureFormat::R8Uint;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture with a multisampled texture should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        textureDescriptor.sampleCount = 4;
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = internalTexture.CreateView();
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating an external texture with an error texture view should fail.
+    {
+        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+        wgpu::Texture internalTexture = device.CreateTexture(&textureDescriptor);
+
+        wgpu::TextureViewDescriptor errorViewDescriptor;
+        errorViewDescriptor.format = kDefaultTextureFormat;
+        errorViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+        errorViewDescriptor.mipLevelCount = 1;
+        errorViewDescriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(wgpu::TextureView errorTextureView =
+                                internalTexture.CreateView(&errorViewDescriptor));
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = errorTextureView;
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+}
+
+// Test that external texture creation works as expected in multiplane scenarios.
+TEST_F(ExternalTextureTest, CreateMultiplanarExternalTextureValidation) {
+    // Creating an external texture from two 2D, single-subresource textures with a biplanar
+    // format should succeed.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane0Format);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane1Format);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+
+        device.CreateExternalTexture(&externalDesc);
+    }
+
+    // Creating a multiplanar external texture with an unsupported format for plane0 should
+    // result in an error.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kDefaultTextureFormat);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane1Format);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating a multiplanar external texture with an unsupported format for plane1 should
+    // result in an error.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane0Format);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kDefaultTextureFormat);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+
+    // Creating a multiplanar external texture with a non-sRGB color space should fail.
+    {
+        wgpu::TextureDescriptor plane0TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane0Format);
+        wgpu::TextureDescriptor plane1TextureDescriptor =
+            CreateTextureDescriptor(kBiplanarPlane1Format);
+        wgpu::Texture texture0 = device.CreateTexture(&plane0TextureDescriptor);
+        wgpu::Texture texture1 = device.CreateTexture(&plane1TextureDescriptor);
+
+        wgpu::ExternalTextureDescriptor externalDesc;
+        externalDesc.plane0 = texture0.CreateView();
+        externalDesc.plane1 = texture1.CreateView();
+        externalDesc.colorSpace = wgpu::PredefinedColorSpace::Undefined;
+        ASSERT_DEVICE_ERROR(device.CreateExternalTexture(&externalDesc));
+    }
+}
+
+// Test that submitting a render pass that contains a destroyed external texture results in
+// an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInRenderPass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    // Create another texture to use as a color attachment.
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+    wgpu::TextureView renderView = renderTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying the external texture should result in an error.
+    {
+        externalTexture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test that submitting a render pass that contains a dereferenced external texture results in
+// success.
+TEST_F(ExternalTextureTest, SubmitDereferencedExternalTextureInRenderPass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    // Create another texture to use as a color attachment.
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+    wgpu::TextureView renderView = renderTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Dereferencing the external texture should not result in a use-after-free error.
+    {
+        externalTexture = nullptr;
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+}
+
+// Test that submitting a render pass that contains a destroyed external texture plane
+// results in an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInRenderPass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    // Create another texture to use as a color attachment.
+    wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
+    wgpu::TextureView renderView = renderTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying an external texture underlying plane should result in an error.
+    {
+        texture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test that submitting a compute pass that contains a destroyed external texture results in
+// an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInComputePass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    wgpu::ComputePassDescriptor computePass;
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying the external texture should result in an error.
+    {
+        externalTexture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Test that submitting a compute pass that contains a destroyed external texture plane
+// results in an error.
+TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInComputePass) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Create a bind group that contains the external texture.
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
+    wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
+
+    wgpu::ComputePassDescriptor computePass;
+
+    // Control case should succeed.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    // Destroying an external texture underlying plane should result in an error.
+    {
+        texture.Destroy();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
+        {
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    }
+}
+
+// Ensure that bind group validation catches external textures mismatched from the BGL.
+TEST_F(ExternalTextureTest, BindGroupDoesNotMatchLayout) {
+    wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
+
+    wgpu::ExternalTextureDescriptor externalDesc;
+    externalDesc.plane0 = texture.CreateView();
+    wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
+
+    // Control case should succeed.
+    {
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
             device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        // Create another texture to use as a color attachment.
-        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
-        wgpu::TextureView renderView = renderTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying the external texture should result in an error.
-        {
-            externalTexture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
+        utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
     }
 
-    // Test that submitting a render pass that contains a dereferenced external texture results in
-    // success
-    TEST_F(ExternalTextureTest, SubmitDereferencedExternalTextureInRenderPass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
+    // Bind group creation should fail when an external texture is not present in the
+    // corresponding slot of the bind group layout.
+    {
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        // Create another texture to use as a color attachment.
-        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
-        wgpu::TextureView renderView = renderTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Dereferencing the external texture should not result in a use-after-free error.
-        {
-            externalTexture = nullptr;
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
+        ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, externalTexture}}));
     }
-
-    // Test that submitting a render pass that contains a destroyed external texture plane
-    // results in an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInRenderPass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
-        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        // Create another texture to use as a color attachment.
-        wgpu::TextureDescriptor renderTextureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture renderTexture = device.CreateTexture(&renderTextureDescriptor);
-        wgpu::TextureView renderView = renderTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({renderView}, nullptr);
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying an external texture underlying plane should result in an error.
-        {
-            texture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
-    }
-
-    // Test that submitting a compute pass that contains a destroyed external texture results in
-    // an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTextureInComputePass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
-        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        wgpu::ComputePassDescriptor computePass;
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying the external texture should result in an error.
-        {
-            externalTexture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
-    }
-
-    // Test that submitting a compute pass that contains a destroyed external texture plane
-    // results in an error.
-    TEST_F(ExternalTextureTest, SubmitDestroyedExternalTexturePlaneInComputePass) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Create a bind group that contains the external texture.
-        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-        wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-
-        wgpu::ComputePassDescriptor computePass;
-
-        // Control case should succeed.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Destroying an external texture underlying plane should result in an error.
-        {
-            texture.Destroy();
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass(&computePass);
-            {
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-        }
-    }
-
-    // Ensure that bind group validation catches external textures mimatched from the BGL.
-    TEST_F(ExternalTextureTest, BindGroupDoesNotMatchLayout) {
-        wgpu::TextureDescriptor textureDescriptor = CreateTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&textureDescriptor);
-
-        wgpu::ExternalTextureDescriptor externalDesc;
-        externalDesc.plane0 = texture.CreateView();
-        wgpu::ExternalTexture externalTexture = device.CreateExternalTexture(&externalDesc);
-
-        // Control case should succeed.
-        {
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, &utils::kExternalTextureBindingLayout}});
-            utils::MakeBindGroup(device, bgl, {{0, externalTexture}});
-        }
-
-        // Bind group creation should fail when an external texture is not present in the
-        // corresponding slot of the bind group layout.
-        {
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform}});
-            ASSERT_DEVICE_ERROR(utils::MakeBindGroup(device, bgl, {{0, externalTexture}}));
-        }
-    }
+}
 
 }  // namespace
diff --git a/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp b/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
index 4a5abc1..ae213f4 100644
--- a/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/MinimumBufferSizeValidationTests.cpp
@@ -22,135 +22,134 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    // Helper for describing bindings throughout the tests
-    struct BindingDescriptor {
-        uint32_t group;
-        uint32_t binding;
-        std::string decl;
-        std::string ref_type;
-        std::string ref_mem;
-        uint64_t size;
-        wgpu::BufferBindingType type = wgpu::BufferBindingType::Storage;
-        wgpu::ShaderStage visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
-    };
+// Helper for describing bindings throughout the tests
+struct BindingDescriptor {
+    uint32_t group;
+    uint32_t binding;
+    std::string decl;
+    std::string ref_type;
+    std::string ref_mem;
+    uint64_t size;
+    wgpu::BufferBindingType type = wgpu::BufferBindingType::Storage;
+    wgpu::ShaderStage visibility = wgpu::ShaderStage::Compute | wgpu::ShaderStage::Fragment;
+};
 
-    // Runs |func| with a modified version of |originalSizes| as an argument, adding |offset| to
-    // each element one at a time This is useful to verify some behavior happens if any element is
-    // offset from original
-    template <typename F>
-    void WithEachSizeOffsetBy(int64_t offset, const std::vector<uint64_t>& originalSizes, F func) {
-        std::vector<uint64_t> modifiedSizes = originalSizes;
-        for (size_t i = 0; i < originalSizes.size(); ++i) {
-            if (offset < 0) {
-                ASSERT(originalSizes[i] >= static_cast<uint64_t>(-offset));
+// Runs |func| with a modified version of |originalSizes| as an argument, adding |offset| to
+// each element one at a time This is useful to verify some behavior happens if any element is
+// offset from original
+template <typename F>
+void WithEachSizeOffsetBy(int64_t offset, const std::vector<uint64_t>& originalSizes, F func) {
+    std::vector<uint64_t> modifiedSizes = originalSizes;
+    for (size_t i = 0; i < originalSizes.size(); ++i) {
+        if (offset < 0) {
+            ASSERT(originalSizes[i] >= static_cast<uint64_t>(-offset));
+        }
+        // Run the function with an element offset, and restore element afterwards
+        modifiedSizes[i] += offset;
+        func(modifiedSizes);
+        modifiedSizes[i] -= offset;
+    }
+}
+
+// Runs |func| with |correctSizes|, and an expectation of success and failure
+template <typename F>
+void CheckSizeBounds(const std::vector<uint64_t>& correctSizes, F func) {
+    // To validate size:
+    // Check invalid with bind group with one less
+    // Check valid with bind group with correct size
+
+    // Make sure (every size - 1) produces an error
+    WithEachSizeOffsetBy(-1, correctSizes,
+                         [&](const std::vector<uint64_t>& sizes) { func(sizes, false); });
+
+    // Make sure correct sizes work
+    func(correctSizes, true);
+
+    // Make sure (every size + 1) works
+    WithEachSizeOffsetBy(1, correctSizes,
+                         [&](const std::vector<uint64_t>& sizes) { func(sizes, true); });
+}
+
+// Creates a bind group with given bindings for shader text
+std::string GenerateBindingString(const std::vector<BindingDescriptor>& bindings) {
+    std::ostringstream ostream;
+    size_t index = 0;
+    for (const BindingDescriptor& b : bindings) {
+        ostream << "struct S" << index << " { " << b.decl << "}\n";
+        ostream << "@group(" << b.group << ") @binding(" << b.binding << ") ";
+        switch (b.type) {
+            case wgpu::BufferBindingType::Uniform:
+                ostream << "var<uniform> b" << index << " : S" << index << ";\n";
+                break;
+            case wgpu::BufferBindingType::Storage:
+                ostream << "var<storage, read_write> b" << index << " : S" << index << ";\n";
+                break;
+            case wgpu::BufferBindingType::ReadOnlyStorage:
+                ostream << "var<storage, read> b" << index << " : S" << index << ";\n";
+                break;
+            default:
+                UNREACHABLE();
+        }
+        index++;
+    }
+    return ostream.str();
+}
+
+std::string GenerateReferenceString(const std::vector<BindingDescriptor>& bindings,
+                                    wgpu::ShaderStage stage) {
+    std::ostringstream ostream;
+    size_t index = 0;
+    for (const BindingDescriptor& b : bindings) {
+        if (b.visibility & stage) {
+            if (!b.ref_type.empty() && !b.ref_mem.empty()) {
+                ostream << "var r" << index << " : " << b.ref_type << " = b" << index << "."
+                        << b.ref_mem << ";\n";
             }
-            // Run the function with an element offset, and restore element afterwards
-            modifiedSizes[i] += offset;
-            func(modifiedSizes);
-            modifiedSizes[i] -= offset;
         }
+        index++;
     }
+    return ostream.str();
+}
 
-    // Runs |func| with |correctSizes|, and an expectation of success and failure
-    template <typename F>
-    void CheckSizeBounds(const std::vector<uint64_t>& correctSizes, F func) {
-        // To validate size:
-        // Check invalid with bind group with one less
-        // Check valid with bind group with correct size
+// Used for adding custom types available throughout the tests
+// NOLINTNEXTLINE(runtime/string)
+static const std::string kStructs = "struct ThreeFloats {f1 : f32, f2 : f32, f3 : f32,}\n";
 
-        // Make sure (every size - 1) produces an error
-        WithEachSizeOffsetBy(-1, correctSizes,
-                             [&](const std::vector<uint64_t>& sizes) { func(sizes, false); });
+// Creates a compute shader with given bindings
+std::string CreateComputeShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+    return kStructs + GenerateBindingString(bindings) +
+           "@stage(compute) @workgroup_size(1,1,1) fn main() {\n" +
+           GenerateReferenceString(bindings, wgpu::ShaderStage::Compute) + "}";
+}
 
-        // Make sure correct sizes work
-        func(correctSizes, true);
+// Creates a vertex shader with given bindings
+std::string CreateVertexShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+    return kStructs + GenerateBindingString(bindings) +
+           "@stage(vertex) fn main() -> @builtin(position) vec4<f32> {\n" +
+           GenerateReferenceString(bindings, wgpu::ShaderStage::Vertex) +
+           "\n   return vec4<f32>(); " + "}";
+}
 
-        // Make sure (every size + 1) works
-        WithEachSizeOffsetBy(1, correctSizes,
-                             [&](const std::vector<uint64_t>& sizes) { func(sizes, true); });
+// Creates a fragment shader with given bindings
+std::string CreateFragmentShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
+    return kStructs + GenerateBindingString(bindings) + "@stage(fragment) fn main() {\n" +
+           GenerateReferenceString(bindings, wgpu::ShaderStage::Fragment) + "}";
+}
+
+// Concatenates vectors containing BindingDescriptor
+std::vector<BindingDescriptor> CombineBindings(
+    std::initializer_list<std::vector<BindingDescriptor>> bindings) {
+    std::vector<BindingDescriptor> result;
+    for (const std::vector<BindingDescriptor>& b : bindings) {
+        result.insert(result.end(), b.begin(), b.end());
     }
-
-    // Creates a bind group with given bindings for shader text
-    std::string GenerateBindingString(const std::vector<BindingDescriptor>& bindings) {
-        std::ostringstream ostream;
-        size_t index = 0;
-        for (const BindingDescriptor& b : bindings) {
-            ostream << "struct S" << index << " { " << b.decl << "}\n";
-            ostream << "@group(" << b.group << ") @binding(" << b.binding << ") ";
-            switch (b.type) {
-                case wgpu::BufferBindingType::Uniform:
-                    ostream << "var<uniform> b" << index << " : S" << index << ";\n";
-                    break;
-                case wgpu::BufferBindingType::Storage:
-                    ostream << "var<storage, read_write> b" << index << " : S" << index << ";\n";
-                    break;
-                case wgpu::BufferBindingType::ReadOnlyStorage:
-                    ostream << "var<storage, read> b" << index << " : S" << index << ";\n";
-                    break;
-                default:
-                    UNREACHABLE();
-            }
-            index++;
-        }
-        return ostream.str();
-    }
-
-    std::string GenerateReferenceString(const std::vector<BindingDescriptor>& bindings,
-                                        wgpu::ShaderStage stage) {
-        std::ostringstream ostream;
-        size_t index = 0;
-        for (const BindingDescriptor& b : bindings) {
-            if (b.visibility & stage) {
-                if (!b.ref_type.empty() && !b.ref_mem.empty()) {
-                    ostream << "var r" << index << " : " << b.ref_type << " = b" << index << "."
-                            << b.ref_mem << ";\n";
-                }
-            }
-            index++;
-        }
-        return ostream.str();
-    }
-
-    // Used for adding custom types available throughout the tests
-    static const std::string kStructs = "struct ThreeFloats {f1 : f32, f2 : f32, f3 : f32,}\n";
-
-    // Creates a compute shader with given bindings
-    std::string CreateComputeShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
-        return kStructs + GenerateBindingString(bindings) +
-               "@stage(compute) @workgroup_size(1,1,1) fn main() {\n" +
-               GenerateReferenceString(bindings, wgpu::ShaderStage::Compute) + "}";
-    }
-
-    // Creates a vertex shader with given bindings
-    std::string CreateVertexShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
-        return kStructs + GenerateBindingString(bindings) +
-               "@stage(vertex) fn main() -> @builtin(position) vec4<f32> {\n" +
-               GenerateReferenceString(bindings, wgpu::ShaderStage::Vertex) +
-               "\n   return vec4<f32>(); " + "}";
-    }
-
-    // Creates a fragment shader with given bindings
-    std::string CreateFragmentShaderWithBindings(const std::vector<BindingDescriptor>& bindings) {
-        return kStructs + GenerateBindingString(bindings) + "@stage(fragment) fn main() {\n" +
-               GenerateReferenceString(bindings, wgpu::ShaderStage::Fragment) + "}";
-    }
-
-    // Concatenates vectors containing BindingDescriptor
-    std::vector<BindingDescriptor> CombineBindings(
-        std::initializer_list<std::vector<BindingDescriptor>> bindings) {
-        std::vector<BindingDescriptor> result;
-        for (const std::vector<BindingDescriptor>& b : bindings) {
-            result.insert(result.end(), b.begin(), b.end());
-        }
-        return result;
-    }
+    return result;
+}
 }  // namespace
 
 class MinBufferSizeTestsBase : public ValidationTest {
   public:
-    void SetUp() override {
-        ValidationTest::SetUp();
-    }
+    void SetUp() override { ValidationTest::SetUp(); }
 
     wgpu::Buffer CreateBuffer(uint64_t bufferSize, wgpu::BufferUsage usage) {
         wgpu::BufferDescriptor bufferDescriptor;
diff --git a/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp b/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
index bbb8589..49195c0 100644
--- a/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
+++ b/src/dawn/tests/unittests/validation/PipelineAndPassCompatibilityTests.cpp
@@ -25,158 +25,157 @@
 
 namespace {
 
-    class RenderPipelineAndPassCompatibilityTests : public ValidationTest {
-      public:
-        wgpu::RenderPipeline CreatePipeline(wgpu::TextureFormat format,
-                                            bool enableDepthWrite,
-                                            bool enableStencilWrite) {
-            // Create a NoOp pipeline
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+class RenderPipelineAndPassCompatibilityTests : public ValidationTest {
+  public:
+    wgpu::RenderPipeline CreatePipeline(wgpu::TextureFormat format,
+                                        bool enableDepthWrite,
+                                        bool enableStencilWrite) {
+        // Create a NoOp pipeline
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
-            pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
                 @stage(fragment) fn main() {
                 })");
-            pipelineDescriptor.cFragment.targets = nullptr;
-            pipelineDescriptor.cFragment.targetCount = 0;
+        pipelineDescriptor.cFragment.targets = nullptr;
+        pipelineDescriptor.cFragment.targetCount = 0;
 
-            // Enable depth/stencil write if needed
-            wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil(format);
-            if (enableDepthWrite) {
-                depthStencil->depthWriteEnabled = true;
-            }
-            if (enableStencilWrite) {
-                depthStencil->stencilFront.failOp = wgpu::StencilOperation::Replace;
-            }
-            return device.CreateRenderPipeline(&pipelineDescriptor);
+        // Enable depth/stencil write if needed
+        wgpu::DepthStencilState* depthStencil = pipelineDescriptor.EnableDepthStencil(format);
+        if (enableDepthWrite) {
+            depthStencil->depthWriteEnabled = true;
+        }
+        if (enableStencilWrite) {
+            depthStencil->stencilFront.failOp = wgpu::StencilOperation::Replace;
+        }
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
+
+    utils::ComboRenderPassDescriptor CreateRenderPassDescriptor(wgpu::TextureFormat format,
+                                                                bool depthReadOnly,
+                                                                bool stencilReadOnly) {
+        wgpu::TextureDescriptor textureDescriptor = {};
+        textureDescriptor.size = {kSize, kSize, 1};
+        textureDescriptor.format = format;
+        textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
+        wgpu::Texture depthStencilTexture = device.CreateTexture(&textureDescriptor);
+
+        utils::ComboRenderPassDescriptor passDescriptor({}, depthStencilTexture.CreateView());
+        if (depthReadOnly) {
+            passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
+            passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
         }
 
-        utils::ComboRenderPassDescriptor CreateRenderPassDescriptor(wgpu::TextureFormat format,
-                                                                    bool depthReadOnly,
-                                                                    bool stencilReadOnly) {
-            wgpu::TextureDescriptor textureDescriptor = {};
-            textureDescriptor.size = {kSize, kSize, 1};
-            textureDescriptor.format = format;
-            textureDescriptor.usage = wgpu::TextureUsage::RenderAttachment;
-            wgpu::Texture depthStencilTexture = device.CreateTexture(&textureDescriptor);
-
-            utils::ComboRenderPassDescriptor passDescriptor({}, depthStencilTexture.CreateView());
-            if (depthReadOnly) {
-                passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
-                passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-                passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            }
-
-            if (stencilReadOnly) {
-                passDescriptor.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-                passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-                passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp =
-                    wgpu::StoreOp::Undefined;
-            }
-
-            return passDescriptor;
+        if (stencilReadOnly) {
+            passDescriptor.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+            passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
         }
-    };
 
-    // Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
-    // depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
-    TEST_F(RenderPipelineAndPassCompatibilityTests, WriteAndReadOnlyConflictForDepthStencil) {
-        for (bool depthStencilReadOnlyInPass : {true, false}) {
-            for (bool depthWriteInPipeline : {true, false}) {
-                for (bool stencilWriteInPipeline : {true, false}) {
-                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                    utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
-                        kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
-                    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
-                    wgpu::RenderPipeline pipeline =
-                        CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
-                    pass.SetPipeline(pipeline);
-                    pass.Draw(3);
-                    pass.End();
-                    if (depthStencilReadOnlyInPass &&
-                        (depthWriteInPipeline || stencilWriteInPipeline)) {
-                        ASSERT_DEVICE_ERROR(encoder.Finish());
-                    } else {
-                        encoder.Finish();
-                    }
+        return passDescriptor;
+    }
+};
+
+// Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
+// depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
+TEST_F(RenderPipelineAndPassCompatibilityTests, WriteAndReadOnlyConflictForDepthStencil) {
+    for (bool depthStencilReadOnlyInPass : {true, false}) {
+        for (bool depthWriteInPipeline : {true, false}) {
+            for (bool stencilWriteInPipeline : {true, false}) {
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
+                    kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+                wgpu::RenderPipeline pipeline =
+                    CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+                pass.SetPipeline(pipeline);
+                pass.Draw(3);
+                pass.End();
+                if (depthStencilReadOnlyInPass &&
+                    (depthWriteInPipeline || stencilWriteInPipeline)) {
+                    ASSERT_DEVICE_ERROR(encoder.Finish());
+                } else {
+                    encoder.Finish();
                 }
             }
         }
     }
+}
 
-    // Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
-    // depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle.
-    TEST_F(RenderPipelineAndPassCompatibilityTests,
-           WriteAndReadOnlyConflictForDepthStencilBetweenPipelineAndBundle) {
+// Test depthWrite/stencilWrite in DepthStencilState in render pipeline vs
+// depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle.
+TEST_F(RenderPipelineAndPassCompatibilityTests,
+       WriteAndReadOnlyConflictForDepthStencilBetweenPipelineAndBundle) {
+    for (bool depthStencilReadOnlyInBundle : {true, false}) {
+        utils::ComboRenderBundleEncoderDescriptor desc = {};
+        desc.depthStencilFormat = kFormat;
+        desc.depthReadOnly = depthStencilReadOnlyInBundle;
+        desc.stencilReadOnly = depthStencilReadOnlyInBundle;
+
+        for (bool depthWriteInPipeline : {true, false}) {
+            for (bool stencilWriteInPipeline : {true, false}) {
+                wgpu::RenderBundleEncoder renderBundleEncoder =
+                    device.CreateRenderBundleEncoder(&desc);
+                wgpu::RenderPipeline pipeline =
+                    CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+                renderBundleEncoder.SetPipeline(pipeline);
+                renderBundleEncoder.Draw(3);
+                if (depthStencilReadOnlyInBundle &&
+                    (depthWriteInPipeline || stencilWriteInPipeline)) {
+                    ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
+                } else {
+                    renderBundleEncoder.Finish();
+                }
+            }
+        }
+    }
+}
+
+// Test depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle vs
+// depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
+TEST_F(RenderPipelineAndPassCompatibilityTests,
+       WriteAndReadOnlyConflictForDepthStencilBetweenBundleAndPass) {
+    for (bool depthStencilReadOnlyInPass : {true, false}) {
         for (bool depthStencilReadOnlyInBundle : {true, false}) {
-            utils::ComboRenderBundleEncoderDescriptor desc = {};
-            desc.depthStencilFormat = kFormat;
-            desc.depthReadOnly = depthStencilReadOnlyInBundle;
-            desc.stencilReadOnly = depthStencilReadOnlyInBundle;
-
-            for (bool depthWriteInPipeline : {true, false}) {
-                for (bool stencilWriteInPipeline : {true, false}) {
-                    wgpu::RenderBundleEncoder renderBundleEncoder =
-                        device.CreateRenderBundleEncoder(&desc);
-                    wgpu::RenderPipeline pipeline =
-                        CreatePipeline(kFormat, depthWriteInPipeline, stencilWriteInPipeline);
+            for (bool emptyBundle : {true, false}) {
+                // Create render bundle, with or without a pipeline
+                utils::ComboRenderBundleEncoderDescriptor desc = {};
+                desc.depthStencilFormat = kFormat;
+                desc.depthReadOnly = depthStencilReadOnlyInBundle;
+                desc.stencilReadOnly = depthStencilReadOnlyInBundle;
+                wgpu::RenderBundleEncoder renderBundleEncoder =
+                    device.CreateRenderBundleEncoder(&desc);
+                if (!emptyBundle) {
+                    wgpu::RenderPipeline pipeline = CreatePipeline(
+                        kFormat, !depthStencilReadOnlyInBundle, !depthStencilReadOnlyInBundle);
                     renderBundleEncoder.SetPipeline(pipeline);
                     renderBundleEncoder.Draw(3);
-                    if (depthStencilReadOnlyInBundle &&
-                        (depthWriteInPipeline || stencilWriteInPipeline)) {
-                        ASSERT_DEVICE_ERROR(renderBundleEncoder.Finish());
-                    } else {
-                        renderBundleEncoder.Finish();
-                    }
+                }
+                wgpu::RenderBundle bundle = renderBundleEncoder.Finish();
+
+                // Create render pass and call ExecuteBundles()
+                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+                utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
+                    kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
+                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+                pass.ExecuteBundles(1, &bundle);
+                pass.End();
+                if (!depthStencilReadOnlyInPass || depthStencilReadOnlyInBundle) {
+                    encoder.Finish();
+                } else {
+                    ASSERT_DEVICE_ERROR(encoder.Finish());
                 }
             }
         }
     }
+}
 
-    // Test depthReadOnly/stencilReadOnly in RenderBundleEncoderDescriptor in render bundle vs
-    // depthReadOnly/stencilReadOnly in DepthStencilAttachment in render pass.
-    TEST_F(RenderPipelineAndPassCompatibilityTests,
-           WriteAndReadOnlyConflictForDepthStencilBetweenBundleAndPass) {
-        for (bool depthStencilReadOnlyInPass : {true, false}) {
-            for (bool depthStencilReadOnlyInBundle : {true, false}) {
-                for (bool emptyBundle : {true, false}) {
-                    // Create render bundle, with or without a pipeline
-                    utils::ComboRenderBundleEncoderDescriptor desc = {};
-                    desc.depthStencilFormat = kFormat;
-                    desc.depthReadOnly = depthStencilReadOnlyInBundle;
-                    desc.stencilReadOnly = depthStencilReadOnlyInBundle;
-                    wgpu::RenderBundleEncoder renderBundleEncoder =
-                        device.CreateRenderBundleEncoder(&desc);
-                    if (!emptyBundle) {
-                        wgpu::RenderPipeline pipeline = CreatePipeline(
-                            kFormat, !depthStencilReadOnlyInBundle, !depthStencilReadOnlyInBundle);
-                        renderBundleEncoder.SetPipeline(pipeline);
-                        renderBundleEncoder.Draw(3);
-                    }
-                    wgpu::RenderBundle bundle = renderBundleEncoder.Finish();
-
-                    // Create render pass and call ExecuteBundles()
-                    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                    utils::ComboRenderPassDescriptor passDescriptor = CreateRenderPassDescriptor(
-                        kFormat, depthStencilReadOnlyInPass, depthStencilReadOnlyInPass);
-                    wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
-                    pass.ExecuteBundles(1, &bundle);
-                    pass.End();
-                    if (!depthStencilReadOnlyInPass || depthStencilReadOnlyInBundle) {
-                        encoder.Finish();
-                    } else {
-                        ASSERT_DEVICE_ERROR(encoder.Finish());
-                    }
-                }
-            }
-        }
-    }
-
-    // TODO(dawn:485): add more tests. For example:
-    //   - depth/stencil attachment should be designated if depth/stencil test is enabled.
-    //   - pipeline and pass compatibility tests for color attachment(s).
-    //   - pipeline and pass compatibility tests for compute.
+// TODO(dawn:485): add more tests. For example:
+//   - depth/stencil attachment should be designated if depth/stencil test is enabled.
+//   - pipeline and pass compatibility tests for color attachment(s).
+//   - pipeline and pass compatibility tests for compute.
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
index 903f6af..b099e79 100644
--- a/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/QueueSubmitValidationTests.cpp
@@ -19,347 +19,347 @@
 
 namespace {
 
-    class QueueSubmitValidationTest : public ValidationTest {};
+class QueueSubmitValidationTest : public ValidationTest {};
 
-    // Test submitting with a mapped buffer is disallowed
-    TEST_F(QueueSubmitValidationTest, SubmitWithMappedBuffer) {
-        // Create a map-write buffer.
-        const uint64_t kBufferSize = 4;
-        wgpu::BufferDescriptor descriptor;
-        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
-        descriptor.size = kBufferSize;
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+// Test submitting with a mapped buffer is disallowed
+TEST_F(QueueSubmitValidationTest, SubmitWithMappedBuffer) {
+    // Create a map-write buffer.
+    const uint64_t kBufferSize = 4;
+    wgpu::BufferDescriptor descriptor;
+    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    descriptor.size = kBufferSize;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
 
-        // Create a fake copy destination buffer
-        descriptor.usage = wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+    // Create a fake copy destination buffer
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
 
-        // Create a command buffer that reads from the mappable buffer.
-        wgpu::CommandBuffer commands;
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        wgpu::Queue queue = device.GetQueue();
-
-        // Submitting when the buffer has never been mapped should succeed
-        queue.Submit(1, &commands);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        // Map the buffer, submitting when the buffer is mapped should fail
-        buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
-
-        // Try submitting before the callback is fired.
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-
-        WaitForAllOperations(device);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        // Try submitting after the callback is fired.
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
-
-        // Unmap the buffer, queue submit should succeed
-        buffer.Unmap();
-        queue.Submit(1, &commands);
+    // Create a command buffer that reads from the mappable buffer.
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test it is invalid to submit a command buffer twice
-    TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedTwice) {
-        wgpu::CommandBuffer commandBuffer = device.CreateCommandEncoder().Finish();
-        wgpu::Queue queue = device.GetQueue();
+    wgpu::Queue queue = device.GetQueue();
 
-        // Should succeed
-        queue.Submit(1, &commandBuffer);
+    // Submitting when the buffer has never been mapped should succeed
+    queue.Submit(1, &commands);
 
-        // Should fail because command buffer was already submitted
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commandBuffer));
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test resubmitting failed command buffers
-    TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedFailed) {
-        // Create a map-write buffer
-        const uint64_t kBufferSize = 4;
-        wgpu::BufferDescriptor descriptor;
-        descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
-        descriptor.size = kBufferSize;
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    // Map the buffer, submitting when the buffer is mapped should fail
+    buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
 
-        // Create a destination buffer for the b2b copy
-        descriptor.usage = wgpu::BufferUsage::CopyDst;
-        descriptor.size = kBufferSize;
-        wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+    // Try submitting before the callback is fired.
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
 
-        // Create a command buffer that reads from the mappable buffer
-        wgpu::CommandBuffer commands;
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
-            commands = encoder.Finish();
-        }
+    WaitForAllOperations(device);
 
-        wgpu::Queue queue = device.GetQueue();
-
-        // Map the source buffer to force a failure
-        buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
-
-        // Submitting a command buffer with a mapped buffer should fail
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-
-        // Unmap buffer to fix the failure
-        buffer.Unmap();
-
-        // Resubmitting any command buffer, even if the problem was fixed, should fail
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test that submitting in a buffer mapping callback doesn't cause re-entrance problems.
-    TEST_F(QueueSubmitValidationTest, SubmitInBufferMapCallback) {
-        // Create a buffer for mapping, to run our callback.
-        wgpu::BufferDescriptor descriptor;
-        descriptor.size = 4;
-        descriptor.usage = wgpu::BufferUsage::MapWrite;
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    // Try submitting after the callback is fired.
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
 
-        struct CallbackData {
-            wgpu::Device device;
-            wgpu::Buffer buffer;
-        } callbackData = {device, buffer};
-
-        const auto callback = [](WGPUBufferMapAsyncStatus status, void* userdata) {
-            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
-
-            data->buffer.Unmap();
-
-            wgpu::Queue queue = data->device.GetQueue();
-            queue.Submit(0, nullptr);
-        };
-
-        buffer.MapAsync(wgpu::MapMode::Write, 0, descriptor.size, callback, &callbackData);
-
-        WaitForAllOperations(device);
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
     }
 
-    // Test that submitting in a render pipeline creation callback doesn't cause re-entrance
-    // problems.
-    TEST_F(QueueSubmitValidationTest, SubmitInCreateRenderPipelineAsyncCallback) {
-        struct CallbackData {
-            wgpu::Device device;
-        } callbackData = {device};
+    // Unmap the buffer, queue submit should succeed
+    buffer.Unmap();
+    queue.Submit(1, &commands);
+}
 
-        const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
-                                 char const* message, void* userdata) {
-            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+// Test it is invalid to submit a command buffer twice
+TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedTwice) {
+    wgpu::CommandBuffer commandBuffer = device.CreateCommandEncoder().Finish();
+    wgpu::Queue queue = device.GetQueue();
 
-            wgpuRenderPipelineRelease(pipeline);
+    // Should succeed
+    queue.Submit(1, &commandBuffer);
 
-            wgpu::Queue queue = data->device.GetQueue();
-            queue.Submit(0, nullptr);
-        };
+    // Should fail because command buffer was already submitted
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commandBuffer));
+}
 
-        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+// Test resubmitting failed command buffers
+TEST_F(QueueSubmitValidationTest, CommandBufferSubmittedFailed) {
+    // Create a map-write buffer
+    const uint64_t kBufferSize = 4;
+    wgpu::BufferDescriptor descriptor;
+    descriptor.usage = wgpu::BufferUsage::MapWrite | wgpu::BufferUsage::CopySrc;
+    descriptor.size = kBufferSize;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    // Create a destination buffer for the b2b copy
+    descriptor.usage = wgpu::BufferUsage::CopyDst;
+    descriptor.size = kBufferSize;
+    wgpu::Buffer targetBuffer = device.CreateBuffer(&descriptor);
+
+    // Create a command buffer that reads from the mappable buffer
+    wgpu::CommandBuffer commands;
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(buffer, 0, targetBuffer, 0, kBufferSize);
+        commands = encoder.Finish();
+    }
+
+    wgpu::Queue queue = device.GetQueue();
+
+    // Map the source buffer to force a failure
+    buffer.MapAsync(wgpu::MapMode::Write, 0, kBufferSize, nullptr, nullptr);
+
+    // Submitting a command buffer with a mapped buffer should fail
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+
+    // Unmap buffer to fix the failure
+    buffer.Unmap();
+
+    // Resubmitting any command buffer, even if the problem was fixed, should fail
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Test that submitting in a buffer mapping callback doesn't cause re-entrance problems.
+TEST_F(QueueSubmitValidationTest, SubmitInBufferMapCallback) {
+    // Create a buffer for mapping, to run our callback.
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4;
+    descriptor.usage = wgpu::BufferUsage::MapWrite;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    struct CallbackData {
+        wgpu::Device device;
+        wgpu::Buffer buffer;
+    } callbackData = {device, buffer};
+
+    const auto callback = [](WGPUBufferMapAsyncStatus status, void* userdata) {
+        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+
+        data->buffer.Unmap();
+
+        wgpu::Queue queue = data->device.GetQueue();
+        queue.Submit(0, nullptr);
+    };
+
+    buffer.MapAsync(wgpu::MapMode::Write, 0, descriptor.size, callback, &callbackData);
+
+    WaitForAllOperations(device);
+}
+
+// Test that submitting in a render pipeline creation callback doesn't cause re-entrance
+// problems.
+TEST_F(QueueSubmitValidationTest, SubmitInCreateRenderPipelineAsyncCallback) {
+    struct CallbackData {
+        wgpu::Device device;
+    } callbackData = {device};
+
+    const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline,
+                             char const* message, void* userdata) {
+        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+
+        wgpuRenderPipelineRelease(pipeline);
+
+        wgpu::Queue queue = data->device.GetQueue();
+        queue.Submit(0, nullptr);
+    };
+
+    wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
             @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 0.0, 0.0, 1.0);
             })");
 
-        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+    wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 return vec4<f32>(0.0, 1.0, 0.0, 1.0);
             })");
 
-        utils::ComboRenderPipelineDescriptor descriptor;
-        descriptor.vertex.module = vsModule;
-        descriptor.cFragment.module = fsModule;
-        device.CreateRenderPipelineAsync(&descriptor, callback, &callbackData);
+    utils::ComboRenderPipelineDescriptor descriptor;
+    descriptor.vertex.module = vsModule;
+    descriptor.cFragment.module = fsModule;
+    device.CreateRenderPipelineAsync(&descriptor, callback, &callbackData);
 
-        WaitForAllOperations(device);
-    }
+    WaitForAllOperations(device);
+}
 
-    // Test that submitting in a compute pipeline creation callback doesn't cause re-entrance
-    // problems.
-    TEST_F(QueueSubmitValidationTest, SubmitInCreateComputePipelineAsyncCallback) {
-        struct CallbackData {
-            wgpu::Device device;
-        } callbackData = {device};
+// Test that submitting in a compute pipeline creation callback doesn't cause re-entrance
+// problems.
+TEST_F(QueueSubmitValidationTest, SubmitInCreateComputePipelineAsyncCallback) {
+    struct CallbackData {
+        wgpu::Device device;
+    } callbackData = {device};
 
-        const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
-                                 char const* message, void* userdata) {
-            CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
+    const auto callback = [](WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline,
+                             char const* message, void* userdata) {
+        CallbackData* data = reinterpret_cast<CallbackData*>(userdata);
 
-            wgpuComputePipelineRelease(pipeline);
+        wgpuComputePipelineRelease(pipeline);
 
-            wgpu::Queue queue = data->device.GetQueue();
-            queue.Submit(0, nullptr);
-        };
+        wgpu::Queue queue = data->device.GetQueue();
+        queue.Submit(0, nullptr);
+    };
 
-        wgpu::ComputePipelineDescriptor descriptor;
-        descriptor.compute.module = utils::CreateShaderModule(device, R"(
+    wgpu::ComputePipelineDescriptor descriptor;
+    descriptor.compute.module = utils::CreateShaderModule(device, R"(
             @stage(compute) @workgroup_size(1) fn main() {
             })");
-        descriptor.compute.entryPoint = "main";
-        device.CreateComputePipelineAsync(&descriptor, callback, &callbackData);
+    descriptor.compute.entryPoint = "main";
+    device.CreateComputePipelineAsync(&descriptor, callback, &callbackData);
 
-        WaitForAllOperations(device);
-    }
+    WaitForAllOperations(device);
+}
 
-    // Test that buffers in unused compute pass bindgroups are still checked for in
-    // Queue::Submit validation.
-    TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeBuffer) {
-        wgpu::Queue queue = device.GetQueue();
+// Test that buffers in unused compute pass bindgroups are still checked for in
+// Queue::Submit validation.
+TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeBuffer) {
+    wgpu::Queue queue = device.GetQueue();
 
-        wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
-        wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
 
-        wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
 
-        // In this test we check that BindGroup 1 is checked, the texture test will check
-        // BindGroup 2. This is to provide coverage of for loops in validation code.
-        wgpu::ComputePipelineDescriptor cpDesc;
-        cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, testBGL});
-        cpDesc.compute.entryPoint = "main";
-        cpDesc.compute.module =
-            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
-        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+    // In this test we check that BindGroup 1 is checked, the texture test will check
+    // BindGroup 2. This is to provide coverage of for loops in validation code.
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, testBGL});
+    cpDesc.compute.entryPoint = "main";
+    cpDesc.compute.module =
+        utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
 
-        wgpu::BufferDescriptor bufDesc;
-        bufDesc.size = 4;
-        bufDesc.usage = wgpu::BufferUsage::Storage;
+    wgpu::BufferDescriptor bufDesc;
+    bufDesc.size = 4;
+    bufDesc.usage = wgpu::BufferUsage::Storage;
 
-        // Test that completely unused bindgroups still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
-            wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
+    // Test that completely unused bindgroups still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
 
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(1, unusedBG);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(1, unusedBG);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
 
-            if (destroy) {
-                unusedBuffer.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
-        }
-
-        // Test that unused bindgroups because they were replaced still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
-            wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
-
-            wgpu::Buffer usedBuffer = device.CreateBuffer(&bufDesc);
-            wgpu::BindGroup usedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, emptyBG);
-            pass.SetBindGroup(1, unusedBG);
-            pass.SetBindGroup(1, usedBG);
-            pass.SetPipeline(pipeline);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            if (destroy) {
-                unusedBuffer.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
+        if (destroy) {
+            unusedBuffer.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
         }
     }
 
-    // Test that textures in unused compute pass bindgroups are still checked for in
-    // Queue::Submit validation.
-    TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeTextures) {
-        wgpu::Queue queue = device.GetQueue();
+    // Test that unused bindgroups because they were replaced still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Buffer unusedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup unusedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
 
-        wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
-        wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+        wgpu::Buffer usedBuffer = device.CreateBuffer(&bufDesc);
+        wgpu::BindGroup usedBG = utils::MakeBindGroup(device, testBGL, {{0, unusedBuffer}});
 
-        wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, emptyBG);
+        pass.SetBindGroup(1, unusedBG);
+        pass.SetBindGroup(1, usedBG);
+        pass.SetPipeline(pipeline);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
 
-        wgpu::ComputePipelineDescriptor cpDesc;
-        cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, emptyBGL, testBGL});
-        cpDesc.compute.entryPoint = "main";
-        cpDesc.compute.module =
-            utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
-        wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
-
-        wgpu::TextureDescriptor texDesc;
-        texDesc.size = {1, 1, 1};
-        texDesc.usage = wgpu::TextureUsage::TextureBinding;
-        texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-
-        // Test that completely unused bindgroups still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
-            wgpu::BindGroup unusedBG =
-                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(2, unusedBG);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            if (destroy) {
-                unusedTexture.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
-        }
-
-        // Test that unused bindgroups because they were replaced still have their buffers checked.
-        for (bool destroy : {true, false}) {
-            wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
-            wgpu::BindGroup unusedBG =
-                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
-
-            wgpu::Texture usedTexture = device.CreateTexture(&texDesc);
-            wgpu::BindGroup usedBG =
-                utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, emptyBG);
-            pass.SetBindGroup(1, emptyBG);
-            pass.SetBindGroup(2, unusedBG);
-            pass.SetBindGroup(2, usedBG);
-            pass.SetPipeline(pipeline);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            if (destroy) {
-                unusedTexture.Destroy();
-                ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
-            } else {
-                queue.Submit(1, &commands);
-            }
+        if (destroy) {
+            unusedBuffer.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
         }
     }
+}
+
+// Test that textures in unused compute pass bindgroups are still checked for in
+// Queue::Submit validation.
+TEST_F(QueueSubmitValidationTest, SubmitWithUnusedComputeTextures) {
+    wgpu::Queue queue = device.GetQueue();
+
+    wgpu::BindGroupLayout emptyBGL = utils::MakeBindGroupLayout(device, {});
+    wgpu::BindGroup emptyBG = utils::MakeBindGroup(device, emptyBGL, {});
+
+    wgpu::BindGroupLayout testBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+
+    wgpu::ComputePipelineDescriptor cpDesc;
+    cpDesc.layout = utils::MakePipelineLayout(device, {emptyBGL, emptyBGL, testBGL});
+    cpDesc.compute.entryPoint = "main";
+    cpDesc.compute.module =
+        utils::CreateShaderModule(device, "@stage(compute) @workgroup_size(1) fn main() {}");
+    wgpu::ComputePipeline pipeline = device.CreateComputePipeline(&cpDesc);
+
+    wgpu::TextureDescriptor texDesc;
+    texDesc.size = {1, 1, 1};
+    texDesc.usage = wgpu::TextureUsage::TextureBinding;
+    texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Test that completely unused bindgroups still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup unusedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(2, unusedBG);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedTexture.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+
+    // Test that unused bindgroups because they were replaced still have their buffers checked.
+    for (bool destroy : {true, false}) {
+        wgpu::Texture unusedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup unusedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::Texture usedTexture = device.CreateTexture(&texDesc);
+        wgpu::BindGroup usedBG =
+            utils::MakeBindGroup(device, testBGL, {{0, unusedTexture.CreateView()}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, emptyBG);
+        pass.SetBindGroup(1, emptyBG);
+        pass.SetBindGroup(2, unusedBG);
+        pass.SetBindGroup(2, usedBG);
+        pass.SetPipeline(pipeline);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        if (destroy) {
+            unusedTexture.Destroy();
+            ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        } else {
+            queue.Submit(1, &commands);
+        }
+    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp b/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
index 371931f..b5fe723 100644
--- a/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/QueueWriteTextureValidationTests.cpp
@@ -22,789 +22,767 @@
 
 namespace {
 
-    class QueueWriteTextureValidationTest : public ValidationTest {
-      private:
-        void SetUp() override {
-            ValidationTest::SetUp();
-            queue = device.GetQueue();
-        }
-
-      protected:
-        wgpu::Texture Create2DTexture(wgpu::Extent3D size,
-                                      uint32_t mipLevelCount,
-                                      wgpu::TextureFormat format,
-                                      wgpu::TextureUsage usage,
-                                      uint32_t sampleCount = 1) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size.width = size.width;
-            descriptor.size.height = size.height;
-            descriptor.size.depthOrArrayLayers = size.depthOrArrayLayers;
-            descriptor.sampleCount = sampleCount;
-            descriptor.format = format;
-            descriptor.mipLevelCount = mipLevelCount;
-            descriptor.usage = usage;
-            wgpu::Texture tex = device.CreateTexture(&descriptor);
-            return tex;
-        }
-
-        void TestWriteTexture(size_t dataSize,
-                              uint32_t dataOffset,
-                              uint32_t dataBytesPerRow,
-                              uint32_t dataRowsPerImage,
-                              wgpu::Texture texture,
-                              uint32_t texLevel,
-                              wgpu::Origin3D texOrigin,
-                              wgpu::Extent3D size,
-                              wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
-            std::vector<uint8_t> data(dataSize);
-
-            wgpu::TextureDataLayout textureDataLayout;
-            textureDataLayout.offset = dataOffset;
-            textureDataLayout.bytesPerRow = dataBytesPerRow;
-            textureDataLayout.rowsPerImage = dataRowsPerImage;
-
-            wgpu::ImageCopyTexture imageCopyTexture =
-                utils::CreateImageCopyTexture(texture, texLevel, texOrigin, aspect);
-
-            queue.WriteTexture(&imageCopyTexture, data.data(), dataSize, &textureDataLayout, &size);
-        }
-
-        void TestWriteTextureExactDataSize(uint32_t bytesPerRow,
-                                           uint32_t rowsPerImage,
-                                           wgpu::Texture texture,
-                                           wgpu::TextureFormat textureFormat,
-                                           wgpu::Origin3D origin,
-                                           wgpu::Extent3D extent3D) {
-            // Check the minimal valid dataSize.
-            uint64_t dataSize =
-                utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, extent3D, textureFormat);
-            TestWriteTexture(dataSize, 0, bytesPerRow, rowsPerImage, texture, 0, origin, extent3D);
-
-            // Check dataSize was indeed minimal.
-            uint64_t invalidSize = dataSize - 1;
-            ASSERT_DEVICE_ERROR(TestWriteTexture(invalidSize, 0, bytesPerRow, rowsPerImage, texture,
-                                                 0, origin, extent3D));
-        }
-
-        wgpu::Queue queue;
-    };
-
-    // Test the success case for WriteTexture
-    TEST_F(QueueWriteTextureValidationTest, Success) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // Different copies, including some that touch the OOB condition
-        {
-            // Copy 4x4 block in corner of first mip.
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
-            // Copy 4x4 block in opposite corner of first mip.
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 12, 0}, {4, 4, 1});
-            // Copy 4x4 block in the 4x4 mip.
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {0, 0, 0}, {4, 4, 1});
-            // Copy with a data offset
-            TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
-            TestWriteTexture(dataSize, dataSize - 4, 256, wgpu::kCopyStrideUndefined, destination,
-                             0, {0, 0, 0}, {1, 1, 1});
-        }
-
-        // Copies with a 256-byte aligned bytes per row but unaligned texture region
-        {
-            // Unaligned region
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {3, 4, 1});
-            // Unaligned region with texture offset
-            TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {5, 7, 0}, {2, 3, 1});
-            // Unaligned region, with data offset
-            TestWriteTexture(dataSize, 31 * 4, 256, 3, destination, 0, {0, 0, 0}, {3, 3, 1});
-        }
-
-        // Empty copies are valid
-        {
-            // An empty copy
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
-                             {0, 0, 1});
-            // An empty copy with depth = 0
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 0});
-            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
-                             {0, 0, 0});
-            // An empty copy touching the end of the data
-            TestWriteTexture(dataSize, dataSize, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, dataSize, 0, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {0, 0, 1});
-            // An empty copy touching the side of the texture
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {16, 16, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0,
-                             {16, 16, 0}, {0, 0, 1});
-            // An empty copy with depth = 1 and bytesPerRow > 0
-            TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
-            TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {0, 0, 1});
-            // An empty copy with height > 0, depth = 0, bytesPerRow > 0 and rowsPerImage > 0
-            TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {0, 1, 0});
-            TestWriteTexture(dataSize, 0, 256, 1, destination, 0, {0, 0, 0}, {0, 1, 0});
-            TestWriteTexture(dataSize, 0, 256, 16, destination, 0, {0, 0, 0}, {0, 1, 0});
-        }
+class QueueWriteTextureValidationTest : public ValidationTest {
+  private:
+    void SetUp() override {
+        ValidationTest::SetUp();
+        queue = device.GetQueue();
     }
 
-    // Test OOB conditions on the data
-    TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnData) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // OOB on the data because we copy too many pixels
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 5, 1}));
-
-        // OOB on the data because of the offset
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 4, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
-
-        // OOB on the data because utils::RequiredBytesInCopy overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 512, 3, destination, 0, {0, 0, 0}, {4, 3, 1}));
-
-        // Not OOB on the data although bytes per row * height overflows
-        // but utils::RequiredBytesInCopy * depth does not overflow
-        {
-            uint32_t sourceDataSize =
-                utils::RequiredBytesInCopy(256, 0, {7, 3, 1}, wgpu::TextureFormat::RGBA8Unorm);
-            ASSERT_TRUE(256 * 3 > sourceDataSize) << "bytes per row * height should overflow data";
-
-            TestWriteTexture(sourceDataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {7, 3, 1});
-        }
+  protected:
+    wgpu::Texture Create2DTexture(wgpu::Extent3D size,
+                                  uint32_t mipLevelCount,
+                                  wgpu::TextureFormat format,
+                                  wgpu::TextureUsage usage,
+                                  uint32_t sampleCount = 1) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = size.width;
+        descriptor.size.height = size.height;
+        descriptor.size.depthOrArrayLayers = size.depthOrArrayLayers;
+        descriptor.sampleCount = sampleCount;
+        descriptor.format = format;
+        descriptor.mipLevelCount = mipLevelCount;
+        descriptor.usage = usage;
+        wgpu::Texture tex = device.CreateTexture(&descriptor);
+        return tex;
     }
 
-    // Test OOB conditions on the texture
-    TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnTexture) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 2}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
+    void TestWriteTexture(size_t dataSize,
+                          uint32_t dataOffset,
+                          uint32_t dataBytesPerRow,
+                          uint32_t dataRowsPerImage,
+                          wgpu::Texture texture,
+                          uint32_t texLevel,
+                          wgpu::Origin3D texOrigin,
+                          wgpu::Extent3D size,
+                          wgpu::TextureAspect aspect = wgpu::TextureAspect::All) {
+        std::vector<uint8_t> data(dataSize);
 
-        // OOB on the texture because x + width overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {13, 12, 0}, {4, 4, 1}));
+        wgpu::TextureDataLayout textureDataLayout;
+        textureDataLayout.offset = dataOffset;
+        textureDataLayout.bytesPerRow = dataBytesPerRow;
+        textureDataLayout.rowsPerImage = dataRowsPerImage;
 
-        // OOB on the texture because y + width overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 13, 0}, {4, 4, 1}));
+        wgpu::ImageCopyTexture imageCopyTexture =
+            utils::CreateImageCopyTexture(texture, texLevel, texOrigin, aspect);
 
-        // OOB on the texture because we overflow a non-zero mip
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {1, 0, 0}, {4, 4, 1}));
-
-        // OOB on the texture even on an empty copy when we copy to a non-existent mip.
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 5, {0, 0, 0}, {0, 0, 1}));
-
-        // OOB on the texture because slice overflows
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 2}, {0, 0, 1}));
+        queue.WriteTexture(&imageCopyTexture, data.data(), dataSize, &textureDataLayout, &size);
     }
 
-    // Test that we force Depth=1 on writes to 2D textures
-    TEST_F(QueueWriteTextureValidationTest, DepthConstraintFor2DTextures) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(0, 0, {0, 0, 2}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // Depth > 1 on an empty copy still errors
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 2}));
-    }
-
-    // Test WriteTexture with incorrect texture usage
-    TEST_F(QueueWriteTextureValidationTest, IncorrectUsage) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture sampled = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                wgpu::TextureUsage::TextureBinding);
-
-        // Incorrect destination usage
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, sampled, 0, {0, 0, 0}, {4, 4, 1}));
-    }
-
-    // Test incorrect values of bytesPerRow and that values not divisible by 256 are allowed.
-    TEST_F(QueueWriteTextureValidationTest, BytesPerRowConstraints) {
-        wgpu::Texture destination = Create2DTexture({3, 7, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // bytesPerRow = 0 or wgpu::kCopyStrideUndefined
-        {
-            // copyHeight > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
-            TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {0, 7, 1});
-            ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 7, destination,
-                                                 0, {0, 0, 0}, {0, 7, 1}));
-
-            // copyDepth > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
-            TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {0, 1, 2});
-            ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination,
-                                                 0, {0, 0, 0}, {0, 1, 2}));
-
-            // copyHeight = 1 and copyDepth = 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
-            TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination, 0, {0, 0, 0},
-                             {3, 1, 1});
-        }
-
-        // bytesPerRow = 11 is invalid since a row takes 12 bytes.
-        {
-            // copyHeight > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
-            // copyHeight == 0
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 0, destination, 0, {0, 0, 0}, {3, 0, 1}));
-
-            // copyDepth > 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
-            // copyDepth == 0
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 0}));
-
-            // copyHeight = 1 and copyDepth = 1
-            ASSERT_DEVICE_ERROR(
-                TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
-        }
-
-        // bytesPerRow = 12 is valid since a row takes 12 bytes.
-        TestWriteTexture(128, 0, 12, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
-
-        // bytesPerRow = 13 is valid since a row takes 12 bytes.
-        TestWriteTexture(128, 0, 13, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
-    }
-
-    // Test that if rowsPerImage is greater than 0, it must be at least copy height.
-    TEST_F(QueueWriteTextureValidationTest, RowsPerImageConstraints) {
+    void TestWriteTextureExactDataSize(uint32_t bytesPerRow,
+                                       uint32_t rowsPerImage,
+                                       wgpu::Texture texture,
+                                       wgpu::TextureFormat textureFormat,
+                                       wgpu::Origin3D origin,
+                                       wgpu::Extent3D extent3D) {
+        // Check the minimal valid dataSize.
         uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 5, {4, 4, 2}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
+            utils::RequiredBytesInCopy(bytesPerRow, rowsPerImage, extent3D, textureFormat);
+        TestWriteTexture(dataSize, 0, bytesPerRow, rowsPerImage, texture, 0, origin, extent3D);
 
-        // rowsPerImage is wgpu::kCopyStrideUndefined
-        TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
-                         {4, 4, 1});
+        // Check dataSize was indeed minimal.
+        uint64_t invalidSize = dataSize - 1;
+        ASSERT_DEVICE_ERROR(TestWriteTexture(invalidSize, 0, bytesPerRow, rowsPerImage, texture, 0,
+                                             origin, extent3D));
+    }
 
-        // rowsPerImage is equal to copy height (Valid)
+    wgpu::Queue queue;
+};
+
+// Test the success case for WriteTexture
+TEST_F(QueueWriteTextureValidationTest, Success) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // Different copies, including some that touch the OOB condition
+    {
+        // Copy 4x4 block in corner of first mip.
         TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
-
-        // rowsPerImage is larger than copy height (Valid)
-        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 1});
-        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 2});
-
-        // rowsPerImage is less than copy height (Invalid)
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {4, 4, 1}));
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {4, 4, 1}));
-    }
-
-    // Test WriteTexture with data offset
-    TEST_F(QueueWriteTextureValidationTest, DataOffset) {
-        uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-
-        // Offset aligned
+        // Copy 4x4 block in opposite corner of first mip.
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 12, 0}, {4, 4, 1});
+        // Copy 4x4 block in the 4x4 mip.
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {0, 0, 0}, {4, 4, 1});
+        // Copy with a data offset
         TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
-        // Offset not aligned
-        TestWriteTexture(dataSize, dataSize - 5, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
-        // Offset+size too large
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, dataSize - 3, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1}));
+        TestWriteTexture(dataSize, dataSize - 4, 256, wgpu::kCopyStrideUndefined, destination, 0,
+                         {0, 0, 0}, {1, 1, 1});
     }
 
-    // Test multisampled textures can be used in WriteTexture.
-    TEST_F(QueueWriteTextureValidationTest, WriteToMultisampledTexture) {
-        uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {2, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({2, 2, 1}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst, 4);
-
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 2, destination, 0, {0, 0, 0}, {2, 2, 1}));
+    // Copies with a 256-byte aligned bytes per row but unaligned texture region
+    {
+        // Unaligned region
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {3, 4, 1});
+        // Unaligned region with texture offset
+        TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {5, 7, 0}, {2, 3, 1});
+        // Unaligned region, with data offset
+        TestWriteTexture(dataSize, 31 * 4, 256, 3, destination, 0, {0, 0, 0}, {3, 3, 1});
     }
 
-    // Test that WriteTexture cannot be run with a destroyed texture.
-    TEST_F(QueueWriteTextureValidationTest, DestroyedTexture) {
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 4, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
-        destination.Destroy();
+    // Empty copies are valid
+    {
+        // An empty copy
+        TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 0, 1});
+        // An empty copy with depth = 0
+        TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 0});
+        TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 0, 0});
+        // An empty copy touching the end of the data
+        TestWriteTexture(dataSize, dataSize, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, dataSize, 0, wgpu::kCopyStrideUndefined, destination, 0,
+                         {0, 0, 0}, {0, 0, 1});
+        // An empty copy touching the side of the texture
+        TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {16, 16, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, 0, 0, wgpu::kCopyStrideUndefined, destination, 0, {16, 16, 0},
+                         {0, 0, 1});
+        // An empty copy with depth = 1 and bytesPerRow > 0
+        TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {0, 0, 1});
+        TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 0, 1});
+        // An empty copy with height > 0, depth = 0, bytesPerRow > 0 and rowsPerImage > 0
+        TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                         {0, 1, 0});
+        TestWriteTexture(dataSize, 0, 256, 1, destination, 0, {0, 0, 0}, {0, 1, 0});
+        TestWriteTexture(dataSize, 0, 256, 16, destination, 0, {0, 0, 0}, {0, 1, 0});
+    }
+}
 
-        ASSERT_DEVICE_ERROR(
-            TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+// Test OOB conditions on the data
+TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnData) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // OOB on the data because we copy too many pixels
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 5, 1}));
+
+    // OOB on the data because of the offset
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 4, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+
+    // OOB on the data because utils::RequiredBytesInCopy overflows
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 512, 3, destination, 0, {0, 0, 0}, {4, 3, 1}));
+
+    // Not OOB on the data although bytes per row * height overflows
+    // but utils::RequiredBytesInCopy * depth does not overflow
+    {
+        uint32_t sourceDataSize =
+            utils::RequiredBytesInCopy(256, 0, {7, 3, 1}, wgpu::TextureFormat::RGBA8Unorm);
+        ASSERT_TRUE(256 * 3 > sourceDataSize) << "bytes per row * height should overflow data";
+
+        TestWriteTexture(sourceDataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {7, 3, 1});
+    }
+}
+
+// Test OOB conditions on the texture
+TEST_F(QueueWriteTextureValidationTest, OutOfBoundsOnTexture) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 2}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // OOB on the texture because x + width overflows
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {13, 12, 0}, {4, 4, 1}));
+
+    // OOB on the texture because y + width overflows
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {12, 13, 0}, {4, 4, 1}));
+
+    // OOB on the texture because we overflow a non-zero mip
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 2, {1, 0, 0}, {4, 4, 1}));
+
+    // OOB on the texture even on an empty copy when we copy to a non-existent mip.
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 0, 0, destination, 5, {0, 0, 0}, {0, 0, 1}));
+
+    // OOB on the texture because slice overflows
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 2}, {0, 0, 1}));
+}
+
+// Test that we force Depth=1 on writes to 2D textures
+TEST_F(QueueWriteTextureValidationTest, DepthConstraintFor2DTextures) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(0, 0, {0, 0, 2}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // Depth > 1 on an empty copy still errors
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 0, 0, destination, 0, {0, 0, 0}, {0, 0, 2}));
+}
+
+// Test WriteTexture with incorrect texture usage
+TEST_F(QueueWriteTextureValidationTest, IncorrectUsage) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture sampled = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                            wgpu::TextureUsage::TextureBinding);
+
+    // Incorrect destination usage
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 4, sampled, 0, {0, 0, 0}, {4, 4, 1}));
+}
+
+// Test incorrect values of bytesPerRow and that values not divisible by 256 are allowed.
+TEST_F(QueueWriteTextureValidationTest, BytesPerRowConstraints) {
+    wgpu::Texture destination =
+        Create2DTexture({3, 7, 2}, 1, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
+
+    // bytesPerRow = 0 or wgpu::kCopyStrideUndefined
+    {
+        // copyHeight > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
+        TestWriteTexture(128, 0, 0, 7, destination, 0, {0, 0, 0}, {0, 7, 1});
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 7, destination, 0,
+                                             {0, 0, 0}, {0, 7, 1}));
+
+        // copyDepth > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
+        TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {0, 1, 2});
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination, 0,
+                                             {0, 0, 0}, {0, 1, 2}));
+
+        // copyHeight = 1 and copyDepth = 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 0, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
+        TestWriteTexture(128, 0, wgpu::kCopyStrideUndefined, 1, destination, 0, {0, 0, 0},
+                         {3, 1, 1});
     }
 
-    // Test WriteTexture with texture in error state causes errors.
-    TEST_F(QueueWriteTextureValidationTest, TextureInErrorState) {
-        wgpu::TextureDescriptor errorTextureDescriptor;
-        errorTextureDescriptor.size.depthOrArrayLayers = 0;
-        ASSERT_DEVICE_ERROR(wgpu::Texture errorTexture =
-                                device.CreateTexture(&errorTextureDescriptor));
-        wgpu::ImageCopyTexture errorImageCopyTexture =
-            utils::CreateImageCopyTexture(errorTexture, 0, {0, 0, 0});
+    // bytesPerRow = 11 is invalid since a row takes 12 bytes.
+    {
+        // copyHeight > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 7, destination, 0, {0, 0, 0}, {3, 7, 1}));
+        // copyHeight == 0
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 0, destination, 0, {0, 0, 0}, {3, 0, 1}));
 
-        wgpu::Extent3D extent3D = {0, 0, 0};
+        // copyDepth > 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 2}));
+        // copyDepth == 0
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 0}));
 
-        {
-            std::vector<uint8_t> data(4);
-            wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 0, 0);
+        // copyHeight = 1 and copyDepth = 1
+        ASSERT_DEVICE_ERROR(TestWriteTexture(128, 0, 11, 1, destination, 0, {0, 0, 0}, {3, 1, 1}));
+    }
 
-            ASSERT_DEVICE_ERROR(queue.WriteTexture(&errorImageCopyTexture, data.data(), 4,
-                                                   &textureDataLayout, &extent3D));
+    // bytesPerRow = 12 is valid since a row takes 12 bytes.
+    TestWriteTexture(128, 0, 12, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
+
+    // bytesPerRow = 13 is valid since a row takes 12 bytes.
+    TestWriteTexture(128, 0, 13, 7, destination, 0, {0, 0, 0}, {3, 7, 1});
+}
+
+// Test that if rowsPerImage is greater than 0, it must be at least copy height.
+TEST_F(QueueWriteTextureValidationTest, RowsPerImageConstraints) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 5, {4, 4, 2}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 2}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // rowsPerImage is wgpu::kCopyStrideUndefined
+    TestWriteTexture(dataSize, 0, 256, wgpu::kCopyStrideUndefined, destination, 0, {0, 0, 0},
+                     {4, 4, 1});
+
+    // rowsPerImage is equal to copy height (Valid)
+    TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1});
+
+    // rowsPerImage is larger than copy height (Valid)
+    TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 1});
+    TestWriteTexture(dataSize, 0, 256, 5, destination, 0, {0, 0, 0}, {4, 4, 2});
+
+    // rowsPerImage is less than copy height (Invalid)
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 3, destination, 0, {0, 0, 0}, {4, 4, 1}));
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 0, destination, 0, {0, 0, 0}, {4, 4, 1}));
+}
+
+// Test WriteTexture with data offset
+TEST_F(QueueWriteTextureValidationTest, DataOffset) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 1}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // Offset aligned
+    TestWriteTexture(dataSize, dataSize - 4, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
+    // Offset not aligned
+    TestWriteTexture(dataSize, dataSize - 5, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1});
+    // Offset+size too large
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, dataSize - 3, 256, 1, destination, 0, {0, 0, 0}, {1, 1, 1}));
+}
+
+// Test multisampled textures can be used in WriteTexture.
+TEST_F(QueueWriteTextureValidationTest, WriteToMultisampledTexture) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {2, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({2, 2, 1}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst, 4);
+
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 2, destination, 0, {0, 0, 0}, {2, 2, 1}));
+}
+
+// Test that WriteTexture cannot be run with a destroyed texture.
+TEST_F(QueueWriteTextureValidationTest, DestroyedTexture) {
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 4, {4, 4, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::Texture destination = Create2DTexture({16, 16, 4}, 5, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+    destination.Destroy();
+
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 4, destination, 0, {0, 0, 0}, {4, 4, 1}));
+}
+
+// Test WriteTexture with texture in error state causes errors.
+TEST_F(QueueWriteTextureValidationTest, TextureInErrorState) {
+    wgpu::TextureDescriptor errorTextureDescriptor;
+    errorTextureDescriptor.size.depthOrArrayLayers = 0;
+    ASSERT_DEVICE_ERROR(wgpu::Texture errorTexture = device.CreateTexture(&errorTextureDescriptor));
+    wgpu::ImageCopyTexture errorImageCopyTexture =
+        utils::CreateImageCopyTexture(errorTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D extent3D = {0, 0, 0};
+
+    {
+        std::vector<uint8_t> data(4);
+        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 0, 0);
+
+        ASSERT_DEVICE_ERROR(queue.WriteTexture(&errorImageCopyTexture, data.data(), 4,
+                                               &textureDataLayout, &extent3D));
+    }
+}
+
+// Test that WriteTexture throws an error when requiredBytesInCopy overflows uint64_t
+TEST_F(QueueWriteTextureValidationTest, RequiredBytesInCopyOverflow) {
+    wgpu::Texture destination = Create2DTexture({1, 1, 16}, 1, wgpu::TextureFormat::RGBA8Unorm,
+                                                wgpu::TextureUsage::CopyDst);
+
+    // success because depth = 1.
+    TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0, {0, 0, 0}, {1, 1, 1});
+    // failure because bytesPerImage * (depth - 1) overflows.
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0, {0, 0, 0}, {1, 1, 16}));
+}
+
+// Regression tests for a bug in the computation of texture data size in Dawn.
+TEST_F(QueueWriteTextureValidationTest, TextureWriteDataSizeLastRowComputation) {
+    constexpr uint32_t kBytesPerRow = 256;
+    constexpr uint32_t kWidth = 4;
+    constexpr uint32_t kHeight = 4;
+
+    constexpr std::array<wgpu::TextureFormat, 2> kFormats = {wgpu::TextureFormat::RGBA8Unorm,
+                                                             wgpu::TextureFormat::RG8Unorm};
+
+    {
+        // kBytesPerRow * (kHeight - 1) + kWidth is not large enough to be the valid data size
+        // in this test because the data sizes in WriteTexture are not in texels but in bytes.
+        constexpr uint32_t kInvalidDataSize = kBytesPerRow * (kHeight - 1) + kWidth;
+
+        for (wgpu::TextureFormat format : kFormats) {
+            wgpu::Texture destination =
+                Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
+            ASSERT_DEVICE_ERROR(TestWriteTexture(kInvalidDataSize, 0, kBytesPerRow, kHeight,
+                                                 destination, 0, {0, 0, 0}, {kWidth, kHeight, 1}));
         }
     }
 
-    // Test that WriteTexture throws an error when requiredBytesInCopy overflows uint64_t
-    TEST_F(QueueWriteTextureValidationTest, RequiredBytesInCopyOverflow) {
-        wgpu::Texture destination = Create2DTexture({1, 1, 16}, 1, wgpu::TextureFormat::RGBA8Unorm,
-                                                    wgpu::TextureUsage::CopyDst);
+    {
+        for (wgpu::TextureFormat format : kFormats) {
+            uint32_t validDataSize =
+                utils::RequiredBytesInCopy(kBytesPerRow, 0, {kWidth, kHeight, 1}, format);
+            wgpu::Texture destination =
+                Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
 
-        // success because depth = 1.
-        TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0, {0, 0, 0}, {1, 1, 1});
-        // failure because bytesPerImage * (depth - 1) overflows.
-        ASSERT_DEVICE_ERROR(TestWriteTexture(10000, 0, (1 << 31), (1 << 31), destination, 0,
-                                             {0, 0, 0}, {1, 1, 16}));
-    }
-
-    // Regression tests for a bug in the computation of texture data size in Dawn.
-    TEST_F(QueueWriteTextureValidationTest, TextureWriteDataSizeLastRowComputation) {
-        constexpr uint32_t kBytesPerRow = 256;
-        constexpr uint32_t kWidth = 4;
-        constexpr uint32_t kHeight = 4;
-
-        constexpr std::array<wgpu::TextureFormat, 2> kFormats = {wgpu::TextureFormat::RGBA8Unorm,
-                                                                 wgpu::TextureFormat::RG8Unorm};
-
-        {
-            // kBytesPerRow * (kHeight - 1) + kWidth is not large enough to be the valid data size
-            // in this test because the data sizes in WriteTexture are not in texels but in bytes.
-            constexpr uint32_t kInvalidDataSize = kBytesPerRow * (kHeight - 1) + kWidth;
-
-            for (wgpu::TextureFormat format : kFormats) {
-                wgpu::Texture destination =
-                    Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
-                ASSERT_DEVICE_ERROR(TestWriteTexture(kInvalidDataSize, 0, kBytesPerRow, kHeight,
+            // Verify the return value of RequiredBytesInCopy() is exactly the minimum valid
+            // data size in this test.
+            {
+                uint32_t invalidDataSize = validDataSize - 1;
+                ASSERT_DEVICE_ERROR(TestWriteTexture(invalidDataSize, 0, kBytesPerRow, kHeight,
                                                      destination, 0, {0, 0, 0},
                                                      {kWidth, kHeight, 1}));
             }
-        }
 
-        {
-            for (wgpu::TextureFormat format : kFormats) {
-                uint32_t validDataSize =
-                    utils::RequiredBytesInCopy(kBytesPerRow, 0, {kWidth, kHeight, 1}, format);
-                wgpu::Texture destination =
-                    Create2DTexture({kWidth, kHeight, 1}, 1, format, wgpu::TextureUsage::CopyDst);
-
-                // Verify the return value of RequiredBytesInCopy() is exactly the minimum valid
-                // data size in this test.
-                {
-                    uint32_t invalidDataSize = validDataSize - 1;
-                    ASSERT_DEVICE_ERROR(TestWriteTexture(invalidDataSize, 0, kBytesPerRow, kHeight,
-                                                         destination, 0, {0, 0, 0},
-                                                         {kWidth, kHeight, 1}));
-                }
-
-                {
-                    TestWriteTexture(validDataSize, 0, kBytesPerRow, kHeight, destination, 0,
-                                     {0, 0, 0}, {kWidth, kHeight, 1});
-                }
+            {
+                TestWriteTexture(validDataSize, 0, kBytesPerRow, kHeight, destination, 0, {0, 0, 0},
+                                 {kWidth, kHeight, 1});
             }
         }
     }
+}
 
-    // Test write from data to mip map of non square texture
-    TEST_F(QueueWriteTextureValidationTest, WriteToMipmapOfNonSquareTexture) {
-        uint64_t dataSize =
-            utils::RequiredBytesInCopy(256, 0, {4, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
-        uint32_t maxMipmapLevel = 3;
-        wgpu::Texture destination =
-            Create2DTexture({4, 2, 1}, maxMipmapLevel, wgpu::TextureFormat::RGBA8Unorm,
-                            wgpu::TextureUsage::CopyDst);
+// Test write from data to mip map of non square texture
+TEST_F(QueueWriteTextureValidationTest, WriteToMipmapOfNonSquareTexture) {
+    uint64_t dataSize =
+        utils::RequiredBytesInCopy(256, 0, {4, 2, 1}, wgpu::TextureFormat::RGBA8Unorm);
+    uint32_t maxMipmapLevel = 3;
+    wgpu::Texture destination = Create2DTexture(
+        {4, 2, 1}, maxMipmapLevel, wgpu::TextureFormat::RGBA8Unorm, wgpu::TextureUsage::CopyDst);
 
-        // Copy to top level mip map
-        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 1, {0, 0, 0},
-                         {1, 1, 1});
-        // Copy to high level mip map
-        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2, {0, 0, 0},
-                         {2, 1, 1});
-        // Mip level out of range
-        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel,
-                                             {0, 0, 0}, {1, 1, 1}));
-        // Copy origin out of range
-        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2,
-                                             {1, 0, 0}, {2, 1, 1}));
-        // Copy size out of range
-        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 2, destination, maxMipmapLevel - 2,
-                                             {0, 0, 0}, {2, 2, 1}));
+    // Copy to top level mip map
+    TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 1, {0, 0, 0}, {1, 1, 1});
+    // Copy to high level mip map
+    TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2, {0, 0, 0}, {2, 1, 1});
+    // Mip level out of range
+    ASSERT_DEVICE_ERROR(
+        TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel, {0, 0, 0}, {1, 1, 1}));
+    // Copy origin out of range
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 1, destination, maxMipmapLevel - 2,
+                                         {1, 0, 0}, {2, 1, 1}));
+    // Copy size out of range
+    ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, 256, 2, destination, maxMipmapLevel - 2,
+                                         {0, 0, 0}, {2, 2, 1}));
+}
+
+// Test writes to multiple array layers of an uncompressed texture
+TEST_F(QueueWriteTextureValidationTest, WriteToMultipleArrayLayers) {
+    wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+        {4, 2, 5}, 1, wgpu::TextureFormat::RGBA8Unorm,
+        wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
+
+    // Write to all array layers
+    TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 0},
+                                  {4, 2, 5});
+
+    // Write to the highest array layer
+    TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 4},
+                                  {4, 2, 1});
+
+    // Write to array layers in the middle
+    TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 1},
+                                  {4, 2, 3});
+
+    // Copy with a non-packed rowsPerImage
+    TestWriteTextureExactDataSize(256, 3, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 0},
+                                  {4, 2, 5});
+
+    // Copy with bytesPerRow = 500
+    TestWriteTextureExactDataSize(500, 2, destination, wgpu::TextureFormat::RGBA8Unorm, {0, 0, 1},
+                                  {4, 2, 3});
+}
+
+// Test it is invalid to write into a depth texture.
+TEST_F(QueueWriteTextureValidationTest, WriteToDepthAspect) {
+    uint32_t bytesPerRow = sizeof(float) * 4;
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1}, wgpu::TextureFormat::Depth32Float);
+
+    // Invalid to write into depth32float
+    {
+        wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth32Float, wgpu::TextureUsage::CopyDst);
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::All));
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::DepthOnly));
     }
 
-    // Test writes to multiple array layers of an uncompressed texture
-    TEST_F(QueueWriteTextureValidationTest, WriteToMultipleArrayLayers) {
+    // Invalid to write into depth24plus
+    {
         wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-            {4, 2, 5}, 1, wgpu::TextureFormat::RGBA8Unorm,
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::All));
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::DepthOnly));
+    }
+}
+
+// Test write texture to the stencil aspect
+TEST_F(QueueWriteTextureValidationTest, WriteToStencilAspect) {
+    uint32_t bytesPerRow = 4;
+    const uint64_t dataSize =
+        utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1}, wgpu::TextureFormat::R8Uint);
+
+    // It is valid to write into the stencil aspect of depth24plus-stencil8
+    {
+        wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth24PlusStencil8, wgpu::TextureUsage::CopyDst);
+
+        TestWriteTexture(dataSize, 0, bytesPerRow, wgpu::kCopyStrideUndefined, destination, 0,
+                         {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::StencilOnly);
+
+        // And that it fails if the data is one byte too small
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize - 1, 0, bytesPerRow, 4, destination, 0,
+                                             {0, 0, 0}, {4, 4, 1},
+                                             wgpu::TextureAspect::StencilOnly));
+
+        // It is invalid to write just part of the subresource size
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 3, destination, 0, {0, 0, 0},
+                                             {3, 3, 1}, wgpu::TextureAspect::StencilOnly));
+    }
+
+    // It is invalid to write into the stencil aspect of depth24plus (no stencil)
+    {
+        wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
+            {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
+
+        ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0, {0, 0, 0},
+                                             {4, 4, 1}, wgpu::TextureAspect::StencilOnly));
+    }
+}
+
+class WriteTextureTest_CompressedTextureFormats : public QueueWriteTextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
+                                                 wgpu::FeatureName::TextureCompressionETC2,
+                                                 wgpu::FeatureName::TextureCompressionASTC};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 3;
+        return adapter.CreateDevice(&descriptor);
+    }
+
+    wgpu::Texture Create2DTexture(wgpu::TextureFormat format,
+                                  uint32_t mipmapLevels = 1,
+                                  uint32_t width = kWidth,
+                                  uint32_t height = kHeight) {
+        constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst;
+        constexpr uint32_t kArrayLayers = 1;
+        return QueueWriteTextureValidationTest::Create2DTexture({width, height, kArrayLayers},
+                                                                mipmapLevels, format, kUsage, 1);
+    }
+
+    void TestWriteTexture(size_t dataSize,
+                          uint32_t dataOffset,
+                          uint32_t dataBytesPerRow,
+                          uint32_t dataRowsPerImage,
+                          wgpu::Texture texture,
+                          uint32_t textLevel,
+                          wgpu::Origin3D textOrigin,
+                          wgpu::Extent3D size) {
+        QueueWriteTextureValidationTest::TestWriteTexture(dataSize, dataOffset, dataBytesPerRow,
+                                                          dataRowsPerImage, texture, textLevel,
+                                                          textOrigin, size);
+    }
+
+    static constexpr uint32_t kWidth = 120;
+    static constexpr uint32_t kHeight = 120;
+};
+
+// Tests to verify that the data offset need not be a multiple of the compressed texture block size
+TEST_F(WriteTextureTest_CompressedTextureFormats, DataOffset) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Valid if aligned.
+        {
+            uint32_t kAlignedOffset = utils::GetTexelBlockSizeInBytes(format);
+            TestWriteTexture(1024, kAlignedOffset, 256, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+
+        // Still valid if not aligned.
+        {
+            uint32_t kUnalignedOffset = utils::GetTexelBlockSizeInBytes(format) - 1;
+            TestWriteTexture(1024, kUnalignedOffset, 256, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+    }
+}
+
+// Tests to verify that bytesPerRow must not be less than (width / blockWidth) *
+// blockSizeInBytes and that it doesn't have to be a multiple of the compressed
+// texture block width.
+TEST_F(WriteTextureTest_CompressedTextureFormats, BytesPerRow) {
+    // Used to compute test width and height.
+    constexpr uint32_t kTestBytesPerRow = 320;
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t blockByteSize = utils::GetTexelBlockSizeInBytes(format);
+        uint32_t testWidth = kTestBytesPerRow * blockWidth / blockByteSize;
+        uint32_t testHeight = kTestBytesPerRow * blockHeight / blockByteSize;
+        wgpu::Texture texture = Create2DTexture(format, 1, testWidth, testHeight);
+
+        // Failures on the BytesPerRow that is not large enough.
+        {
+            uint32_t kSmallBytesPerRow = kTestBytesPerRow - blockByteSize;
+            ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, kSmallBytesPerRow, 4, texture, 0,
+                                                 {0, 0, 0}, {testWidth, blockHeight, 1}));
+        }
+
+        // Test it is valid to use a BytesPerRow that is not a multiple of 256.
+        {
+            TestWriteTexture(1024, 0, kTestBytesPerRow, 4, texture, 0, {0, 0, 0},
+                             {testWidth, blockHeight, 1});
+        }
+
+        // Valid usage of bytesPerRow in WriteTexture with compressed texture formats.
+        {
+            TestWriteTexture(512, 0, blockByteSize, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+
+        // Valid usage of bytesPerRow in WriteTexture with compressed texture formats. Note that
+        // BytesPerRow is not a multiple of the blockByteSize (but is greater than it).
+        {
+            TestWriteTexture(512, 0, blockByteSize + 1, 4, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight, 1});
+        }
+    }
+}
+
+// rowsPerImage must be >= heightInBlocks.
+TEST_F(WriteTextureTest_CompressedTextureFormats, RowsPerImage) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Valid usages of rowsPerImage in WriteTexture with compressed texture formats.
+        {
+            constexpr uint32_t kValidRowsPerImage = 5;
+            TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight * 4, 1});
+        }
+        {
+            constexpr uint32_t kValidRowsPerImage = 4;
+            TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
+                             {blockWidth, blockHeight * 4, 1});
+        }
+
+        // rowsPerImage is smaller than the copy height in block rows (Invalid).
+        {
+            constexpr uint32_t kInvalidRowsPerImage = 3;
+            ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, 256, kInvalidRowsPerImage, texture, 0,
+                                                 {0, 0, 0}, {blockWidth, blockWidth * 4, 1}));
+        }
+    }
+}
+
+// Tests to verify that ImageOffset.x must be a multiple of the compressed texture block width
+// and ImageOffset.y must be a multiple of the compressed texture block height
+TEST_F(WriteTextureTest_CompressedTextureFormats, ImageOffset) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::Texture texture = Create2DTexture(format);
+        wgpu::Texture texture2 = Create2DTexture(format);
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        wgpu::Origin3D smallestValidOrigin3D = {blockWidth, blockHeight, 0};
+
+        // Valid usages of ImageOffset in WriteTexture with compressed texture formats.
+        {
+            TestWriteTexture(512, 0, 256, 4, texture, 0, smallestValidOrigin3D,
+                             {blockWidth, blockHeight, 1});
+        }
+
+        // Failures on invalid ImageOffset.x.
+        {
+            wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x - 1, smallestValidOrigin3D.y,
+                                              0};
+            ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
+                                                 {blockWidth, blockHeight, 1}));
+        }
+
+        // Failures on invalid ImageOffset.y.
+        {
+            wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x, smallestValidOrigin3D.y - 1,
+                                              0};
+            ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
+                                                 {blockWidth, blockHeight, 1}));
+        }
+    }
+}
+
+// Tests to verify that ImageExtent.x must be a multiple of the compressed texture block width
+// and ImageExtent.y must be a multiple of the compressed texture block height
+TEST_F(WriteTextureTest_CompressedTextureFormats, ImageExtent) {
+    constexpr uint32_t kMipmapLevels = 3;
+    // We choose a prime that is greater than the current max texel dimension size as a
+    // multiplier to compute the test texture size so that we can be certain that its level 2
+    // mipmap (x4) cannot be a multiple of the dimension. This is useful for testing padding at
+    // the edges of the mipmaps.
+    constexpr uint32_t kBlockPerDim = 13;
+
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t testWidth = blockWidth * kBlockPerDim;
+        uint32_t testHeight = blockHeight * kBlockPerDim;
+        wgpu::Texture texture = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+        wgpu::Texture texture2 = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
+
+        wgpu::Extent3D smallestValidExtent3D = {blockWidth, blockHeight, 1};
+
+        // Valid usages of ImageExtent in WriteTexture with compressed texture formats.
+        { TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, smallestValidExtent3D); }
+
+        // Valid usages of ImageExtent in WriteTexture with compressed texture formats
+        // and non-zero mipmap levels.
+        {
+            constexpr uint32_t kTestMipmapLevel = 2;
+            wgpu::Origin3D testOrigin = {
+                ((testWidth >> kTestMipmapLevel) / blockWidth) * blockWidth,
+                ((testHeight >> kTestMipmapLevel) / blockHeight) * blockHeight, 0};
+
+            TestWriteTexture(512, 0, 256, 4, texture, kTestMipmapLevel, testOrigin,
+                             smallestValidExtent3D);
+        }
+
+        // Failures on invalid ImageExtent.x.
+        {
+            wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width - 1,
+                                              smallestValidExtent3D.height, 1};
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
+        }
+
+        // Failures on invalid ImageExtent.y.
+        {
+            wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width,
+                                              smallestValidExtent3D.height - 1, 1};
+            ASSERT_DEVICE_ERROR(
+                TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
+        }
+    }
+}
+
+// Test writes to multiple array layers of a compressed texture
+TEST_F(WriteTextureTest_CompressedTextureFormats, WriteToMultipleArrayLayers) {
+    constexpr uint32_t kWidthMultiplier = 3;
+    constexpr uint32_t kHeightMultiplier = 4;
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        uint32_t testWidth = kWidthMultiplier * blockWidth;
+        uint32_t testHeight = kHeightMultiplier * blockHeight;
+        wgpu::Texture texture = QueueWriteTextureValidationTest::Create2DTexture(
+            {testWidth, testHeight, 20}, 1, format,
             wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
 
         // Write to all array layers
-        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 0}, {4, 2, 5});
+        TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 0},
+                                      {testWidth, testHeight, 20});
 
         // Write to the highest array layer
-        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 4}, {4, 2, 1});
+        TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 19},
+                                      {testWidth, testHeight, 1});
 
         // Write to array layers in the middle
-        TestWriteTextureExactDataSize(256, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 1}, {4, 2, 3});
+        TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 1},
+                                      {testWidth, testHeight, 18});
 
-        // Copy with a non-packed rowsPerImage
-        TestWriteTextureExactDataSize(256, 3, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 0}, {4, 2, 5});
-
-        // Copy with bytesPerRow = 500
-        TestWriteTextureExactDataSize(500, 2, destination, wgpu::TextureFormat::RGBA8Unorm,
-                                      {0, 0, 1}, {4, 2, 3});
+        // Write touching the texture corners with a non-packed rowsPerImage
+        TestWriteTextureExactDataSize(256, 6, texture, format, {blockWidth, blockHeight, 4},
+                                      {testWidth - blockWidth, testHeight - blockHeight, 16});
     }
-
-    // Test it is invalid to write into a depth texture.
-    TEST_F(QueueWriteTextureValidationTest, WriteToDepthAspect) {
-        uint32_t bytesPerRow = sizeof(float) * 4;
-        const uint64_t dataSize = utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1},
-                                                             wgpu::TextureFormat::Depth32Float);
-
-        // Invalid to write into depth32float
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth32Float, wgpu::TextureUsage::CopyDst);
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All));
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::DepthOnly));
-        }
-
-        // Invalid to write into depth24plus
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::All));
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::DepthOnly));
-        }
-    }
-
-    // Test write texture to the stencil aspect
-    TEST_F(QueueWriteTextureValidationTest, WriteToStencilAspect) {
-        uint32_t bytesPerRow = 4;
-        const uint64_t dataSize =
-            utils::RequiredBytesInCopy(bytesPerRow, 0, {4, 4, 1}, wgpu::TextureFormat::R8Uint);
-
-        // It is valid to write into the stencil aspect of depth24plus-stencil8
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24PlusStencil8,
-                wgpu::TextureUsage::CopyDst);
-
-            TestWriteTexture(dataSize, 0, bytesPerRow, wgpu::kCopyStrideUndefined, destination, 0,
-                             {0, 0, 0}, {4, 4, 1}, wgpu::TextureAspect::StencilOnly);
-
-            // And that it fails if the buffer is one byte too small
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize - 1, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::StencilOnly));
-
-            // It is invalid to write just part of the subresource size
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 3, destination, 0,
-                                                 {0, 0, 0}, {3, 3, 1},
-                                                 wgpu::TextureAspect::StencilOnly));
-        }
-
-        // It is invalid to write into the stencil aspect of depth24plus (no stencil)
-        {
-            wgpu::Texture destination = QueueWriteTextureValidationTest::Create2DTexture(
-                {4, 4, 1}, 1, wgpu::TextureFormat::Depth24Plus, wgpu::TextureUsage::CopyDst);
-
-            ASSERT_DEVICE_ERROR(TestWriteTexture(dataSize, 0, bytesPerRow, 4, destination, 0,
-                                                 {0, 0, 0}, {4, 4, 1},
-                                                 wgpu::TextureAspect::StencilOnly));
-        }
-    }
-
-    class WriteTextureTest_CompressedTextureFormats : public QueueWriteTextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
-                                                     wgpu::FeatureName::TextureCompressionETC2,
-                                                     wgpu::FeatureName::TextureCompressionASTC};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 3;
-            return adapter.CreateDevice(&descriptor);
-        }
-
-        wgpu::Texture Create2DTexture(wgpu::TextureFormat format,
-                                      uint32_t mipmapLevels = 1,
-                                      uint32_t width = kWidth,
-                                      uint32_t height = kHeight) {
-            constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst;
-            constexpr uint32_t kArrayLayers = 1;
-            return QueueWriteTextureValidationTest::Create2DTexture(
-                {width, height, kArrayLayers}, mipmapLevels, format, kUsage, 1);
-        }
-
-        void TestWriteTexture(size_t dataSize,
-                              uint32_t dataOffset,
-                              uint32_t dataBytesPerRow,
-                              uint32_t dataRowsPerImage,
-                              wgpu::Texture texture,
-                              uint32_t textLevel,
-                              wgpu::Origin3D textOrigin,
-                              wgpu::Extent3D size) {
-            QueueWriteTextureValidationTest::TestWriteTexture(dataSize, dataOffset, dataBytesPerRow,
-                                                              dataRowsPerImage, texture, textLevel,
-                                                              textOrigin, size);
-        }
-
-        static constexpr uint32_t kWidth = 120;
-        static constexpr uint32_t kHeight = 120;
-    };
-
-    // Tests to verify that data offset may not be a multiple of the compressed texture block size
-    TEST_F(WriteTextureTest_CompressedTextureFormats, DataOffset) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::Texture texture = Create2DTexture(format);
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-
-            // Valid if aligned.
-            {
-                uint32_t kAlignedOffset = utils::GetTexelBlockSizeInBytes(format);
-                TestWriteTexture(1024, kAlignedOffset, 256, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-
-            // Still valid if not aligned.
-            {
-                uint32_t kUnalignedOffset = utils::GetTexelBlockSizeInBytes(format) - 1;
-                TestWriteTexture(1024, kUnalignedOffset, 256, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-        }
-    }
-
-    // Tests to verify that bytesPerRow must not be less than (width / blockWidth) *
-    // blockSizeInBytes and that it doesn't have to be a multiple of the compressed
-    // texture block width.
-    TEST_F(WriteTextureTest_CompressedTextureFormats, BytesPerRow) {
-        // Used to compute test width and height.
-        constexpr uint32_t kTestBytesPerRow = 320;
-
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-            uint32_t blockByteSize = utils::GetTexelBlockSizeInBytes(format);
-            uint32_t testWidth = kTestBytesPerRow * blockWidth / blockByteSize;
-            uint32_t testHeight = kTestBytesPerRow * blockHeight / blockByteSize;
-            wgpu::Texture texture = Create2DTexture(format, 1, testWidth, testHeight);
-
-            // Failures on the BytesPerRow that is not large enough.
-            {
-                uint32_t kSmallBytesPerRow = kTestBytesPerRow - blockByteSize;
-                ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, kSmallBytesPerRow, 4, texture, 0,
-                                                     {0, 0, 0}, {testWidth, blockHeight, 1}));
-            }
-
-            // Test it is valid to use a BytesPerRow that is not a multiple of 256.
-            {
-                TestWriteTexture(1024, 0, kTestBytesPerRow, 4, texture, 0, {0, 0, 0},
-                                 {testWidth, blockHeight, 1});
-            }
-
-            // Valid usage of bytesPerRow in WriteTexture with compressed texture formats.
-            {
-                TestWriteTexture(512, 0, blockByteSize, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-
-            // Valid usage of bytesPerRow in WriteTexture with compressed texture formats. Note that
-            // BytesPerRow is not a multiple of the blockByteSize (but is greater than it).
-            {
-                TestWriteTexture(512, 0, blockByteSize + 1, 4, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight, 1});
-            }
-        }
-    }
-
-    // rowsPerImage must be >= heightInBlocks.
-    TEST_F(WriteTextureTest_CompressedTextureFormats, RowsPerImage) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::Texture texture = Create2DTexture(format);
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-
-            // Valid usages of rowsPerImage in WriteTexture with compressed texture formats.
-            {
-                constexpr uint32_t kValidRowsPerImage = 5;
-                TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight * 4, 1});
-            }
-            {
-                constexpr uint32_t kValidRowsPerImage = 4;
-                TestWriteTexture(1024, 0, 256, kValidRowsPerImage, texture, 0, {0, 0, 0},
-                                 {blockWidth, blockHeight * 4, 1});
-            }
-
-            // rowsPerImage is smaller than height.
-            {
-                constexpr uint32_t kInvalidRowsPerImage = 3;
-                ASSERT_DEVICE_ERROR(TestWriteTexture(1024, 0, 256, kInvalidRowsPerImage, texture, 0,
-                                                     {0, 0, 0}, {blockWidth, blockWidth * 4, 1}));
-            }
-        }
-    }
-
-    // Tests to verify that ImageOffset.x must be a multiple of the compressed texture block width
-    // and ImageOffset.y must be a multiple of the compressed texture block height
-    TEST_F(WriteTextureTest_CompressedTextureFormats, ImageOffset) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::Texture texture = Create2DTexture(format);
-            wgpu::Texture texture2 = Create2DTexture(format);
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-
-            wgpu::Origin3D smallestValidOrigin3D = {blockWidth, blockHeight, 0};
-
-            // Valid usages of ImageOffset in WriteTexture with compressed texture formats.
-            {
-                TestWriteTexture(512, 0, 256, 4, texture, 0, smallestValidOrigin3D,
-                                 {blockWidth, blockHeight, 1});
-            }
-
-            // Failures on invalid ImageOffset.x.
-            {
-                wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x - 1,
-                                                  smallestValidOrigin3D.y, 0};
-                ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
-                                                     {blockWidth, blockHeight, 1}));
-            }
-
-            // Failures on invalid ImageOffset.y.
-            {
-                wgpu::Origin3D invalidOrigin3D = {smallestValidOrigin3D.x,
-                                                  smallestValidOrigin3D.y - 1, 0};
-                ASSERT_DEVICE_ERROR(TestWriteTexture(512, 0, 256, 4, texture, 0, invalidOrigin3D,
-                                                     {blockWidth, blockHeight, 1}));
-            }
-        }
-    }
-
-    // Tests to verify that ImageExtent.x must be a multiple of the compressed texture block width
-    // and ImageExtent.y must be a multiple of the compressed texture block height
-    TEST_F(WriteTextureTest_CompressedTextureFormats, ImageExtent) {
-        constexpr uint32_t kMipmapLevels = 3;
-        // We choose a prime that is greater than the current max texel dimension size as a
-        // multiplier to compute the test texture size so that we can be certain that its level 2
-        // mipmap (x4) cannot be a multiple of the dimension. This is useful for testing padding at
-        // the edges of the mipmaps.
-        constexpr uint32_t kBlockPerDim = 13;
-
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-            uint32_t testWidth = blockWidth * kBlockPerDim;
-            uint32_t testHeight = blockHeight * kBlockPerDim;
-            wgpu::Texture texture = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
-            wgpu::Texture texture2 = Create2DTexture(format, kMipmapLevels, testWidth, testHeight);
-
-            wgpu::Extent3D smallestValidExtent3D = {blockWidth, blockHeight, 1};
-
-            // Valid usages of ImageExtent in WriteTexture with compressed texture formats.
-            { TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, smallestValidExtent3D); }
-
-            // Valid usages of ImageExtent in WriteTexture with compressed texture formats
-            // and non-zero mipmap levels.
-            {
-                constexpr uint32_t kTestMipmapLevel = 2;
-                wgpu::Origin3D testOrigin = {
-                    ((testWidth >> kTestMipmapLevel) / blockWidth) * blockWidth,
-                    ((testHeight >> kTestMipmapLevel) / blockHeight) * blockHeight, 0};
-
-                TestWriteTexture(512, 0, 256, 4, texture, kTestMipmapLevel, testOrigin,
-                                 smallestValidExtent3D);
-            }
-
-            // Failures on invalid ImageExtent.x.
-            {
-                wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width - 1,
-                                                  smallestValidExtent3D.height, 1};
-                ASSERT_DEVICE_ERROR(
-                    TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
-            }
-
-            // Failures on invalid ImageExtent.y.
-            {
-                wgpu::Extent3D inValidExtent3D = {smallestValidExtent3D.width,
-                                                  smallestValidExtent3D.height - 1, 1};
-                ASSERT_DEVICE_ERROR(
-                    TestWriteTexture(512, 0, 256, 4, texture, 0, {0, 0, 0}, inValidExtent3D));
-            }
-        }
-    }
-
-    // Test writes to multiple array layers of a compressed texture
-    TEST_F(WriteTextureTest_CompressedTextureFormats, WriteToMultipleArrayLayers) {
-        constexpr uint32_t kWidthMultiplier = 3;
-        constexpr uint32_t kHeightMultiplier = 4;
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
-            uint32_t testWidth = kWidthMultiplier * blockWidth;
-            uint32_t testHeight = kHeightMultiplier * blockHeight;
-            wgpu::Texture texture = QueueWriteTextureValidationTest::Create2DTexture(
-                {testWidth, testHeight, 20}, 1, format,
-                wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc);
-
-            // Write to all array layers
-            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 0},
-                                          {testWidth, testHeight, 20});
-
-            // Write to the highest array layer
-            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 19},
-                                          {testWidth, testHeight, 1});
-
-            // Write to array layers in the middle
-            TestWriteTextureExactDataSize(256, 4, texture, format, {0, 0, 1},
-                                          {testWidth, testHeight, 18});
-
-            // Write touching the texture corners with a non-packed rowsPerImage
-            TestWriteTextureExactDataSize(256, 6, texture, format, {blockWidth, blockHeight, 4},
-                                          {testWidth - blockWidth, testHeight - blockHeight, 16});
-        }
-    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
index db4d28f..817f302 100644
--- a/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/RenderBundleValidationTests.cpp
@@ -22,12 +22,12 @@
 
 namespace {
 
-    class RenderBundleValidationTest : public ValidationTest {
-      protected:
-        void SetUp() override {
-            ValidationTest::SetUp();
+class RenderBundleValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            vsModule = utils::CreateShaderModule(device, R"(
+        vsModule = utils::CreateShaderModule(device, R"(
                 struct S {
                     transform : mat2x2<f32>
                 }
@@ -37,7 +37,7 @@
                     return vec4<f32>();
                 })");
 
-            fsModule = utils::CreateShaderModule(device, R"(
+        fsModule = utils::CreateShaderModule(device, R"(
                 struct Uniforms {
                     color : vec4<f32>
                 }
@@ -51,75 +51,75 @@
                 @stage(fragment) fn main() {
                 })");
 
-            wgpu::BindGroupLayout bgls[] = {
-                utils::MakeBindGroupLayout(
-                    device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}),
-                utils::MakeBindGroupLayout(
-                    device, {
-                                {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform},
-                                {1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage},
-                            })};
+        wgpu::BindGroupLayout bgls[] = {
+            utils::MakeBindGroupLayout(
+                device, {{0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform}}),
+            utils::MakeBindGroupLayout(
+                device, {
+                            {0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Uniform},
+                            {1, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage},
+                        })};
 
-            wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
-            pipelineLayoutDesc.bindGroupLayoutCount = 2;
-            pipelineLayoutDesc.bindGroupLayouts = bgls;
+        wgpu::PipelineLayoutDescriptor pipelineLayoutDesc = {};
+        pipelineLayoutDesc.bindGroupLayoutCount = 2;
+        pipelineLayoutDesc.bindGroupLayouts = bgls;
 
-            pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
+        pipelineLayout = device.CreatePipelineLayout(&pipelineLayoutDesc);
 
-            utils::ComboRenderPipelineDescriptor descriptor;
-            InitializeRenderPipelineDescriptor(&descriptor);
-            pipeline = device.CreateRenderPipeline(&descriptor);
+        utils::ComboRenderPipelineDescriptor descriptor;
+        InitializeRenderPipelineDescriptor(&descriptor);
+        pipeline = device.CreateRenderPipeline(&descriptor);
 
-            float data[8];
-            wgpu::Buffer buffer = utils::CreateBufferFromData(device, data, 8 * sizeof(float),
-                                                              wgpu::BufferUsage::Uniform);
+        float data[8];
+        wgpu::Buffer buffer = utils::CreateBufferFromData(device, data, 8 * sizeof(float),
+                                                          wgpu::BufferUsage::Uniform);
 
-            constexpr static float kVertices[] = {-1.f, 1.f, 1.f, -1.f, -1.f, 1.f};
+        constexpr static float kVertices[] = {-1.f, 1.f, 1.f, -1.f, -1.f, 1.f};
 
-            vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
-                                                       wgpu::BufferUsage::Vertex);
+        vertexBuffer = utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
+                                                   wgpu::BufferUsage::Vertex);
 
-            // Placeholder storage buffer.
-            wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
-                device, kVertices, sizeof(kVertices), wgpu::BufferUsage::Storage);
+        // Placeholder storage buffer.
+        wgpu::Buffer storageBuffer = utils::CreateBufferFromData(
+            device, kVertices, sizeof(kVertices), wgpu::BufferUsage::Storage);
 
-            // Vertex buffer with storage usage for testing read+write error usage.
-            vertexStorageBuffer =
-                utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
-                                            wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage);
+        // Vertex buffer with storage usage for testing read+write error usage.
+        vertexStorageBuffer =
+            utils::CreateBufferFromData(device, kVertices, sizeof(kVertices),
+                                        wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Storage);
 
-            bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer, 0, 8 * sizeof(float)}});
-            bg1 = utils::MakeBindGroup(
-                device, bgls[1],
-                {{0, buffer, 0, 4 * sizeof(float)}, {1, storageBuffer, 0, sizeof(kVertices)}});
+        bg0 = utils::MakeBindGroup(device, bgls[0], {{0, buffer, 0, 8 * sizeof(float)}});
+        bg1 = utils::MakeBindGroup(
+            device, bgls[1],
+            {{0, buffer, 0, 4 * sizeof(float)}, {1, storageBuffer, 0, sizeof(kVertices)}});
 
-            bg1Vertex = utils::MakeBindGroup(device, bgls[1],
-                                             {{0, buffer, 0, 8 * sizeof(float)},
-                                              {1, vertexStorageBuffer, 0, sizeof(kVertices)}});
-        }
+        bg1Vertex = utils::MakeBindGroup(
+            device, bgls[1],
+            {{0, buffer, 0, 8 * sizeof(float)}, {1, vertexStorageBuffer, 0, sizeof(kVertices)}});
+    }
 
-        void InitializeRenderPipelineDescriptor(utils::ComboRenderPipelineDescriptor* descriptor) {
-            descriptor->layout = pipelineLayout;
-            descriptor->vertex.module = vsModule;
-            descriptor->cFragment.module = fsModule;
-            descriptor->cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            descriptor->vertex.bufferCount = 1;
-            descriptor->cBuffers[0].arrayStride = 2 * sizeof(float);
-            descriptor->cBuffers[0].attributeCount = 1;
-            descriptor->cAttributes[0].format = wgpu::VertexFormat::Float32x2;
-            descriptor->cAttributes[0].shaderLocation = 0;
-        }
+    void InitializeRenderPipelineDescriptor(utils::ComboRenderPipelineDescriptor* descriptor) {
+        descriptor->layout = pipelineLayout;
+        descriptor->vertex.module = vsModule;
+        descriptor->cFragment.module = fsModule;
+        descriptor->cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        descriptor->vertex.bufferCount = 1;
+        descriptor->cBuffers[0].arrayStride = 2 * sizeof(float);
+        descriptor->cBuffers[0].attributeCount = 1;
+        descriptor->cAttributes[0].format = wgpu::VertexFormat::Float32x2;
+        descriptor->cAttributes[0].shaderLocation = 0;
+    }
 
-        wgpu::ShaderModule vsModule;
-        wgpu::ShaderModule fsModule;
-        wgpu::PipelineLayout pipelineLayout;
-        wgpu::RenderPipeline pipeline;
-        wgpu::Buffer vertexBuffer;
-        wgpu::Buffer vertexStorageBuffer;
-        wgpu::BindGroup bg0;
-        wgpu::BindGroup bg1;
-        wgpu::BindGroup bg1Vertex;
-    };
+    wgpu::ShaderModule vsModule;
+    wgpu::ShaderModule fsModule;
+    wgpu::PipelineLayout pipelineLayout;
+    wgpu::RenderPipeline pipeline;
+    wgpu::Buffer vertexBuffer;
+    wgpu::Buffer vertexStorageBuffer;
+    wgpu::BindGroup bg0;
+    wgpu::BindGroup bg1;
+    wgpu::BindGroup bg1Vertex;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
index 4ef9884..50f7c51 100644
--- a/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/RenderPassDescriptorValidationTests.cpp
@@ -22,1103 +22,1079 @@
 
 namespace {
 
-    class RenderPassDescriptorValidationTest : public ValidationTest {
-      public:
-        void AssertBeginRenderPassSuccess(const wgpu::RenderPassDescriptor* descriptor) {
-            wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
-            commandEncoder.Finish();
-        }
-        void AssertBeginRenderPassError(const wgpu::RenderPassDescriptor* descriptor) {
-            wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
-            ASSERT_DEVICE_ERROR(commandEncoder.Finish());
-        }
-
-      private:
-        wgpu::CommandEncoder TestBeginRenderPass(const wgpu::RenderPassDescriptor* descriptor) {
-            wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(descriptor);
-            renderPassEncoder.End();
-            return commandEncoder;
-        }
-    };
-
-    wgpu::Texture CreateTexture(wgpu::Device& device,
-                                wgpu::TextureDimension dimension,
-                                wgpu::TextureFormat format,
-                                uint32_t width,
-                                uint32_t height,
-                                uint32_t arrayLayerCount,
-                                uint32_t mipLevelCount,
-                                uint32_t sampleCount = 1,
-                                wgpu::TextureUsage usage = wgpu::TextureUsage::RenderAttachment) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = dimension;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = arrayLayerCount;
-        descriptor.sampleCount = sampleCount;
-        descriptor.format = format;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = usage;
-
-        return device.CreateTexture(&descriptor);
+class RenderPassDescriptorValidationTest : public ValidationTest {
+  public:
+    void AssertBeginRenderPassSuccess(const wgpu::RenderPassDescriptor* descriptor) {
+        wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
+        commandEncoder.Finish();
+    }
+    void AssertBeginRenderPassError(const wgpu::RenderPassDescriptor* descriptor) {
+        wgpu::CommandEncoder commandEncoder = TestBeginRenderPass(descriptor);
+        ASSERT_DEVICE_ERROR(commandEncoder.Finish());
     }
 
-    wgpu::TextureView Create2DAttachment(wgpu::Device& device,
-                                         uint32_t width,
-                                         uint32_t height,
-                                         wgpu::TextureFormat format) {
-        wgpu::Texture texture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, format, width, height, 1, 1);
-        return texture.CreateView();
+  private:
+    wgpu::CommandEncoder TestBeginRenderPass(const wgpu::RenderPassDescriptor* descriptor) {
+        wgpu::CommandEncoder commandEncoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder renderPassEncoder = commandEncoder.BeginRenderPass(descriptor);
+        renderPassEncoder.End();
+        return commandEncoder;
+    }
+};
+
+wgpu::Texture CreateTexture(wgpu::Device& device,
+                            wgpu::TextureDimension dimension,
+                            wgpu::TextureFormat format,
+                            uint32_t width,
+                            uint32_t height,
+                            uint32_t arrayLayerCount,
+                            uint32_t mipLevelCount,
+                            uint32_t sampleCount = 1,
+                            wgpu::TextureUsage usage = wgpu::TextureUsage::RenderAttachment) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = dimension;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = arrayLayerCount;
+    descriptor.sampleCount = sampleCount;
+    descriptor.format = format;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = usage;
+
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::TextureView Create2DAttachment(wgpu::Device& device,
+                                     uint32_t width,
+                                     uint32_t height,
+                                     wgpu::TextureFormat format) {
+    wgpu::Texture texture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, format, width, height, 1, 1);
+    return texture.CreateView();
+}
+
+// Using BeginRenderPass with no attachments isn't valid
+TEST_F(RenderPassDescriptorValidationTest, Empty) {
+    utils::ComboRenderPassDescriptor renderPass({}, nullptr);
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// A render pass with only one color or one depth attachment is ok
+TEST_F(RenderPassDescriptorValidationTest, OneAttachment) {
+    // One color attachment
+    {
+        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        utils::ComboRenderPassDescriptor renderPass({color});
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+    // One depth-stencil attachment
+    {
+        wgpu::TextureView depthStencil =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencil);
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Test OOB color attachment indices are handled
+TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentOutOfBounds) {
+    std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments + 1> colorAttachments;
+    for (uint32_t i = 0; i < colorAttachments.size(); i++) {
+        colorAttachments[i].view =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        colorAttachments[i].resolveTarget = nullptr;
+        colorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
+        colorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+        colorAttachments[i].storeOp = wgpu::StoreOp::Store;
     }
 
-    // Using BeginRenderPass with no attachments isn't valid
-    TEST_F(RenderPassDescriptorValidationTest, Empty) {
-        utils::ComboRenderPassDescriptor renderPass({}, nullptr);
+    // Control case: kMaxColorAttachments is valid.
+    {
+        wgpu::RenderPassDescriptor renderPass;
+        renderPass.colorAttachmentCount = kMaxColorAttachments;
+        renderPass.colorAttachments = colorAttachments.data();
+        renderPass.depthStencilAttachment = nullptr;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Error case: kMaxColorAttachments + 1 is an error.
+    {
+        wgpu::RenderPassDescriptor renderPass;
+        renderPass.colorAttachmentCount = kMaxColorAttachments + 1;
+        renderPass.colorAttachments = colorAttachments.data();
+        renderPass.depthStencilAttachment = nullptr;
         AssertBeginRenderPassError(&renderPass);
     }
+}
 
-    // A render pass with only one color or one depth attachment is ok
-    TEST_F(RenderPassDescriptorValidationTest, OneAttachment) {
-        // One color attachment
-        {
-            wgpu::TextureView color =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            utils::ComboRenderPassDescriptor renderPass({color});
+// Test sparse color attachment validations
+TEST_F(RenderPassDescriptorValidationTest, SparseColorAttachment) {
+    // Having sparse color attachment is valid.
+    {
+        std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
+        colorAttachments[0].view = nullptr;
 
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-        // One depth-stencil attachment
+        colorAttachments[1].view =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+        colorAttachments[1].loadOp = wgpu::LoadOp::Load;
+        colorAttachments[1].storeOp = wgpu::StoreOp::Store;
+
+        wgpu::RenderPassDescriptor renderPass;
+        renderPass.colorAttachmentCount = colorAttachments.size();
+        renderPass.colorAttachments = colorAttachments.data();
+        renderPass.depthStencilAttachment = nullptr;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // When all color attachments are null
+    {
+        std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
+        colorAttachments[0].view = nullptr;
+        colorAttachments[1].view = nullptr;
+
+        // Control case: depth stencil attachment is not null is valid.
         {
-            wgpu::TextureView depthStencil =
+            wgpu::TextureView depthStencilView =
                 Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencil);
-
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // Test OOB color attachment indices are handled
-    TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentOutOfBounds) {
-        std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments + 1> colorAttachments;
-        for (uint32_t i = 0; i < colorAttachments.size(); i++) {
-            colorAttachments[i].view =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            colorAttachments[i].resolveTarget = nullptr;
-            colorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
-            colorAttachments[i].loadOp = wgpu::LoadOp::Clear;
-            colorAttachments[i].storeOp = wgpu::StoreOp::Store;
-        }
-
-        // Control case: kMaxColorAttachments is valid.
-        {
-            wgpu::RenderPassDescriptor renderPass;
-            renderPass.colorAttachmentCount = kMaxColorAttachments;
-            renderPass.colorAttachments = colorAttachments.data();
-            renderPass.depthStencilAttachment = nullptr;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Error case: kMaxColorAttachments + 1 is an error.
-        {
-            wgpu::RenderPassDescriptor renderPass;
-            renderPass.colorAttachmentCount = kMaxColorAttachments + 1;
-            renderPass.colorAttachments = colorAttachments.data();
-            renderPass.depthStencilAttachment = nullptr;
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Test sparse color attachment validations
-    TEST_F(RenderPassDescriptorValidationTest, SparseColorAttachment) {
-        // Having sparse color attachment is valid.
-        {
-            std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
-            colorAttachments[0].view = nullptr;
-
-            colorAttachments[1].view =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            colorAttachments[1].loadOp = wgpu::LoadOp::Load;
-            colorAttachments[1].storeOp = wgpu::StoreOp::Store;
+            wgpu::RenderPassDepthStencilAttachment depthStencilAttachment;
+            depthStencilAttachment.view = depthStencilView;
+            depthStencilAttachment.depthClearValue = 1.0f;
+            depthStencilAttachment.stencilClearValue = 0;
+            depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Clear;
+            depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
+            depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Clear;
+            depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
 
             wgpu::RenderPassDescriptor renderPass;
             renderPass.colorAttachmentCount = colorAttachments.size();
             renderPass.colorAttachments = colorAttachments.data();
+            renderPass.depthStencilAttachment = &depthStencilAttachment;
+            AssertBeginRenderPassSuccess(&renderPass);
+        }
+
+        // Error case: depth stencil attachment being null is invalid.
+        {
+            wgpu::RenderPassDescriptor renderPass;
+            renderPass.colorAttachmentCount = colorAttachments.size();
+            renderPass.colorAttachments = colorAttachments.data();
             renderPass.depthStencilAttachment = nullptr;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // When all color attachments are null
-        {
-            std::array<wgpu::RenderPassColorAttachment, 2> colorAttachments;
-            colorAttachments[0].view = nullptr;
-            colorAttachments[1].view = nullptr;
-
-            // Control case: depth stencil attachment is not null is valid.
-            {
-                wgpu::TextureView depthStencilView =
-                    Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-                wgpu::RenderPassDepthStencilAttachment depthStencilAttachment;
-                depthStencilAttachment.view = depthStencilView;
-                depthStencilAttachment.depthClearValue = 1.0f;
-                depthStencilAttachment.stencilClearValue = 0;
-                depthStencilAttachment.depthLoadOp = wgpu::LoadOp::Clear;
-                depthStencilAttachment.depthStoreOp = wgpu::StoreOp::Store;
-                depthStencilAttachment.stencilLoadOp = wgpu::LoadOp::Clear;
-                depthStencilAttachment.stencilStoreOp = wgpu::StoreOp::Store;
-
-                wgpu::RenderPassDescriptor renderPass;
-                renderPass.colorAttachmentCount = colorAttachments.size();
-                renderPass.colorAttachments = colorAttachments.data();
-                renderPass.depthStencilAttachment = &depthStencilAttachment;
-                AssertBeginRenderPassSuccess(&renderPass);
-            }
-
-            // Error case: depth stencil attachment being null is invalid.
-            {
-                wgpu::RenderPassDescriptor renderPass;
-                renderPass.colorAttachmentCount = colorAttachments.size();
-                renderPass.colorAttachments = colorAttachments.data();
-                renderPass.depthStencilAttachment = nullptr;
-                AssertBeginRenderPassError(&renderPass);
-            }
-        }
-    }
-
-    // Check that the render pass color attachment must have the RenderAttachment usage.
-    TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentInvalidUsage) {
-        // Control case: using a texture with RenderAttachment is valid.
-        {
-            wgpu::TextureView renderView =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-            utils::ComboRenderPassDescriptor renderPass({renderView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Error case: using a texture with Sampled is invalid.
-        {
-            wgpu::TextureDescriptor texDesc;
-            texDesc.usage = wgpu::TextureUsage::TextureBinding;
-            texDesc.size = {1, 1, 1};
-            texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
-
-            utils::ComboRenderPassDescriptor renderPass({sampledTex.CreateView()});
             AssertBeginRenderPassError(&renderPass);
         }
     }
+}
 
-    // Attachments must have the same size
-    TEST_F(RenderPassDescriptorValidationTest, SizeMustMatch) {
-        wgpu::TextureView color1x1A =
+// Check that the render pass color attachment must have the RenderAttachment usage.
+TEST_F(RenderPassDescriptorValidationTest, ColorAttachmentInvalidUsage) {
+    // Control case: using a texture with RenderAttachment is valid.
+    {
+        wgpu::TextureView renderView =
             Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView color1x1B =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView color2x2 =
-            Create2DAttachment(device, 2, 2, wgpu::TextureFormat::RGBA8Unorm);
-
-        wgpu::TextureView depthStencil1x1 =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-        wgpu::TextureView depthStencil2x2 =
-            Create2DAttachment(device, 2, 2, wgpu::TextureFormat::Depth24PlusStencil8);
-
-        // Control case: all the same size (1x1)
-        {
-            utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil1x1);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // One of the color attachments has a different size
-        {
-            utils::ComboRenderPassDescriptor renderPass({color1x1A, color2x2});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // The depth stencil attachment has a different size
-        {
-            utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil2x2);
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Attachments formats must match whether they are used for color or depth-stencil
-    TEST_F(RenderPassDescriptorValidationTest, FormatMismatch) {
-        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView depthStencil =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-
-        // Using depth-stencil for color
-        {
-            utils::ComboRenderPassDescriptor renderPass({depthStencil});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using color for depth-stencil
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, color);
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Depth and stencil storeOps can be different
-    TEST_F(RenderPassDescriptorValidationTest, DepthStencilStoreOpMismatch) {
-        constexpr uint32_t kArrayLayers = 1;
-        constexpr uint32_t kLevelCount = 1;
-        constexpr uint32_t kSize = 32;
-        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::Texture depthStencilTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-
-        wgpu::TextureViewDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureViewDimension::e2D;
-        descriptor.baseArrayLayer = 0;
-        descriptor.arrayLayerCount = kArrayLayers;
-        descriptor.baseMipLevel = 0;
-        descriptor.mipLevelCount = kLevelCount;
-        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-
-        // Base case: StoreOps match so render pass is a success
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Base case: StoreOps match so render pass is a success
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // StoreOps mismatch still is a success
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // Currently only texture views with arrayLayerCount == 1 are allowed to be color and depth
-    // stencil attachments
-    TEST_F(RenderPassDescriptorValidationTest, TextureViewLayerCountForColorAndDepthStencil) {
-        constexpr uint32_t kLevelCount = 1;
-        constexpr uint32_t kSize = 32;
-        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-
-        constexpr uint32_t kArrayLayers = 10;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::Texture depthStencilTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-
-        wgpu::TextureViewDescriptor baseDescriptor;
-        baseDescriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-        baseDescriptor.baseArrayLayer = 0;
-        baseDescriptor.arrayLayerCount = kArrayLayers;
-        baseDescriptor.baseMipLevel = 0;
-        baseDescriptor.mipLevelCount = kLevelCount;
-
-        // Using 2D array texture view with arrayLayerCount > 1 is not allowed for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.arrayLayerCount = 5;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D array texture view with arrayLayerCount > 1 is not allowed for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.arrayLayerCount = 5;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the first layer of the texture is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the first layer is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the last layer is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseArrayLayer = kArrayLayers - 1;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D array texture view that covers the last layer is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseArrayLayer = kArrayLayers - 1;
-            descriptor.arrayLayerCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // Check that the render pass depth attachment must have the RenderAttachment usage.
-    TEST_F(RenderPassDescriptorValidationTest, DepthAttachmentInvalidUsage) {
-        // Control case: using a texture with RenderAttachment is valid.
-        {
-            wgpu::TextureView renderView =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth32Float);
-            utils::ComboRenderPassDescriptor renderPass({}, renderView);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Error case: using a texture with Sampled is invalid.
-        {
-            wgpu::TextureDescriptor texDesc;
-            texDesc.usage = wgpu::TextureUsage::TextureBinding;
-            texDesc.size = {1, 1, 1};
-            texDesc.format = wgpu::TextureFormat::Depth32Float;
-            wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
-            wgpu::TextureView sampledView = sampledTex.CreateView();
-
-            utils::ComboRenderPassDescriptor renderPass({}, sampledView);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // Only 2D texture views with mipLevelCount == 1 are allowed to be color attachments
-    TEST_F(RenderPassDescriptorValidationTest, TextureViewLevelCountForColorAndDepthStencil) {
-        constexpr uint32_t kArrayLayers = 1;
-        constexpr uint32_t kSize = 32;
-        constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-
-        constexpr uint32_t kLevelCount = 4;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::Texture depthStencilTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-
-        wgpu::TextureViewDescriptor baseDescriptor;
-        baseDescriptor.dimension = wgpu::TextureViewDimension::e2D;
-        baseDescriptor.baseArrayLayer = 0;
-        baseDescriptor.arrayLayerCount = kArrayLayers;
-        baseDescriptor.baseMipLevel = 0;
-        baseDescriptor.mipLevelCount = kLevelCount;
-
-        // Using 2D texture view with mipLevelCount > 1 is not allowed for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.mipLevelCount = 2;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D texture view with mipLevelCount > 1 is not allowed for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.mipLevelCount = 2;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using 2D texture view that covers the first level of the texture is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D texture view that covers the first level is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D texture view that covers the last level is OK for color
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kColorFormat;
-            descriptor.baseMipLevel = kLevelCount - 1;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using 2D texture view that covers the last level is OK for depth stencil
-        {
-            wgpu::TextureViewDescriptor descriptor = baseDescriptor;
-            descriptor.format = kDepthStencilFormat;
-            descriptor.baseMipLevel = kLevelCount - 1;
-            descriptor.mipLevelCount = 1;
-
-            wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
-            utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-    }
-
-    // It is not allowed to set resolve target when the color attachment is non-multisampled.
-    TEST_F(RenderPassDescriptorValidationTest, NonMultisampledColorWithResolveTarget) {
-        static constexpr uint32_t kArrayLayers = 1;
-        static constexpr uint32_t kLevelCount = 1;
-        static constexpr uint32_t kSize = 32;
-        static constexpr uint32_t kSampleCount = 1;
-        static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-
-        wgpu::Texture colorTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, kSampleCount);
-        wgpu::Texture resolveTargetTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, kSampleCount);
-        wgpu::TextureView colorTextureView = colorTexture.CreateView();
-        wgpu::TextureView resolveTargetTextureView = resolveTargetTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
-        renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    class MultisampledRenderPassDescriptorValidationTest
-        : public RenderPassDescriptorValidationTest {
-      public:
-        utils::ComboRenderPassDescriptor CreateMultisampledRenderPass() {
-            return utils::ComboRenderPassDescriptor({CreateMultisampledColorTextureView()});
-        }
-
-        wgpu::TextureView CreateMultisampledColorTextureView() {
-            return CreateColorTextureView(kSampleCount);
-        }
-
-        wgpu::TextureView CreateNonMultisampledColorTextureView() {
-            return CreateColorTextureView(1);
-        }
-
-        static constexpr uint32_t kArrayLayers = 1;
-        static constexpr uint32_t kLevelCount = 1;
-        static constexpr uint32_t kSize = 32;
-        static constexpr uint32_t kSampleCount = 4;
-        static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
-
-      private:
-        wgpu::TextureView CreateColorTextureView(uint32_t sampleCount) {
-            wgpu::Texture colorTexture =
-                CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                              kArrayLayers, kLevelCount, sampleCount);
-
-            return colorTexture.CreateView();
-        }
-    };
-
-    // Tests on the use of multisampled textures as color attachments
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorAttachments) {
-        wgpu::TextureView colorTextureView = CreateNonMultisampledColorTextureView();
-        wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
-        wgpu::TextureView multisampledColorTextureView = CreateMultisampledColorTextureView();
-
-        // It is allowed to use a multisampled color attachment without setting resolve target.
-        {
-            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // It is not allowed to use multiple color attachments with different sample counts.
-        {
-            utils::ComboRenderPassDescriptor renderPass(
-                {multisampledColorTextureView, colorTextureView});
-            AssertBeginRenderPassError(&renderPass);
-        }
-    }
-
-    // It is not allowed to use a multisampled resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledResolveTarget) {
-        wgpu::TextureView multisampledResolveTargetView = CreateMultisampledColorTextureView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = multisampledResolveTargetView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target with array layer count > 1.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetArrayLayerMoreThanOne) {
-        constexpr uint32_t kArrayLayers2 = 2;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers2, kLevelCount);
-        wgpu::TextureViewDescriptor viewDesc;
-        viewDesc.dimension = wgpu::TextureViewDimension::e2DArray;
-        wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&viewDesc);
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target with mipmap level count > 1.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetMipmapLevelMoreThanOne) {
-        constexpr uint32_t kLevelCount2 = 2;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount2);
-        wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target which is created from a texture whose usage does
-    // not include wgpu::TextureUsage::RenderAttachment.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetUsageNoRenderAttachment) {
-        constexpr wgpu::TextureUsage kUsage =
-            wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
-        wgpu::Texture nonColorUsageResolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, 1, kUsage);
-        wgpu::TextureView nonColorUsageResolveTextureView =
-            nonColorUsageResolveTexture.CreateView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = nonColorUsageResolveTextureView;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is not allowed to use a resolve target which is in error state.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetInErrorState) {
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::TextureViewDescriptor errorTextureView;
-        errorTextureView.dimension = wgpu::TextureViewDimension::e2D;
-        errorTextureView.format = kColorFormat;
-        errorTextureView.baseArrayLayer = kArrayLayers + 1;
-        ASSERT_DEVICE_ERROR(wgpu::TextureView errorResolveTarget =
-                                resolveTexture.CreateView(&errorTextureView));
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = errorResolveTarget;
-        AssertBeginRenderPassError(&renderPass);
-    }
-
-    // It is allowed to use a multisampled color attachment and a non-multisampled resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorWithResolveTarget) {
-        wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
-
-        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-        renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+        utils::ComboRenderPassDescriptor renderPass({renderView});
         AssertBeginRenderPassSuccess(&renderPass);
     }
 
-    // It is not allowed to use a resolve target in a format different from the color attachment.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetDifferentFormat) {
-        constexpr wgpu::TextureFormat kColorFormat2 = wgpu::TextureFormat::BGRA8Unorm;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat2, kSize, kSize,
-                          kArrayLayers, kLevelCount);
-        wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+    // Error case: using a texture with Sampled is invalid.
+    {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.usage = wgpu::TextureUsage::TextureBinding;
+        texDesc.size = {1, 1, 1};
+        texDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
+
+        utils::ComboRenderPassDescriptor renderPass({sampledTex.CreateView()});
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Attachments must have the same size
+TEST_F(RenderPassDescriptorValidationTest, SizeMustMatch) {
+    wgpu::TextureView color1x1A = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView color1x1B = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView color2x2 = Create2DAttachment(device, 2, 2, wgpu::TextureFormat::RGBA8Unorm);
+
+    wgpu::TextureView depthStencil1x1 =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+    wgpu::TextureView depthStencil2x2 =
+        Create2DAttachment(device, 2, 2, wgpu::TextureFormat::Depth24PlusStencil8);
+
+    // Control case: all the same size (1x1)
+    {
+        utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil1x1);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // One of the color attachments has a different size
+    {
+        utils::ComboRenderPassDescriptor renderPass({color1x1A, color2x2});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // The depth stencil attachment has a different size
+    {
+        utils::ComboRenderPassDescriptor renderPass({color1x1A, color1x1B}, depthStencil2x2);
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Attachments formats must match whether they are used for color or depth-stencil
+TEST_F(RenderPassDescriptorValidationTest, FormatMismatch) {
+    wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView depthStencil =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+
+    // Using depth-stencil for color
+    {
+        utils::ComboRenderPassDescriptor renderPass({depthStencil});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using color for depth-stencil
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, color);
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Depth and stencil storeOps can be different
+TEST_F(RenderPassDescriptorValidationTest, DepthStencilStoreOpMismatch) {
+    constexpr uint32_t kArrayLayers = 1;
+    constexpr uint32_t kLevelCount = 1;
+    constexpr uint32_t kSize = 32;
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    wgpu::Texture colorTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                               kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::Texture depthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount);
+
+    wgpu::TextureViewDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureViewDimension::e2D;
+    descriptor.baseArrayLayer = 0;
+    descriptor.arrayLayerCount = kArrayLayers;
+    descriptor.baseMipLevel = 0;
+    descriptor.mipLevelCount = kLevelCount;
+    wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+    wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+
+    // Base case: StoreOps match so render pass is a success
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Base case: StoreOps match (both Discard) so render pass is a success
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // StoreOps mismatch is still a success
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Currently only texture views with arrayLayerCount == 1 are allowed to be color and depth
+// stencil attachments
+TEST_F(RenderPassDescriptorValidationTest, TextureViewLayerCountForColorAndDepthStencil) {
+    constexpr uint32_t kLevelCount = 1;
+    constexpr uint32_t kSize = 32;
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    constexpr uint32_t kArrayLayers = 10;
+
+    wgpu::Texture colorTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                               kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::Texture depthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount);
+
+    wgpu::TextureViewDescriptor baseDescriptor;
+    baseDescriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+    baseDescriptor.baseArrayLayer = 0;
+    baseDescriptor.arrayLayerCount = kArrayLayers;
+    baseDescriptor.baseMipLevel = 0;
+    baseDescriptor.mipLevelCount = kLevelCount;
+
+    // Using 2D array texture view with arrayLayerCount > 1 is not allowed for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.arrayLayerCount = 5;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D array texture view with arrayLayerCount > 1 is not allowed for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.arrayLayerCount = 5;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the first layer of the texture is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the first layer is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the last layer is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseArrayLayer = kArrayLayers - 1;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D array texture view that covers the last layer is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseArrayLayer = kArrayLayers - 1;
+        descriptor.arrayLayerCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Check that the render pass depth attachment must have the RenderAttachment usage.
+TEST_F(RenderPassDescriptorValidationTest, DepthAttachmentInvalidUsage) {
+    // Control case: using a texture with RenderAttachment is valid.
+    {
+        wgpu::TextureView renderView =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth32Float);
+        utils::ComboRenderPassDescriptor renderPass({}, renderView);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Error case: using a texture with Sampled is invalid.
+    {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.usage = wgpu::TextureUsage::TextureBinding;
+        texDesc.size = {1, 1, 1};
+        texDesc.format = wgpu::TextureFormat::Depth32Float;
+        wgpu::Texture sampledTex = device.CreateTexture(&texDesc);
+        wgpu::TextureView sampledView = sampledTex.CreateView();
+
+        utils::ComboRenderPassDescriptor renderPass({}, sampledView);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Only 2D texture views with mipLevelCount == 1 are allowed as color or depth-stencil attachments
+TEST_F(RenderPassDescriptorValidationTest, TextureViewLevelCountForColorAndDepthStencil) {
+    constexpr uint32_t kArrayLayers = 1;
+    constexpr uint32_t kSize = 32;
+    constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+
+    constexpr uint32_t kLevelCount = 4;
+
+    wgpu::Texture colorTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                               kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::Texture depthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount);
+
+    wgpu::TextureViewDescriptor baseDescriptor;
+    baseDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+    baseDescriptor.baseArrayLayer = 0;
+    baseDescriptor.arrayLayerCount = kArrayLayers;
+    baseDescriptor.baseMipLevel = 0;
+    baseDescriptor.mipLevelCount = kLevelCount;
+
+    // Using 2D texture view with mipLevelCount > 1 is not allowed for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.mipLevelCount = 2;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D texture view with mipLevelCount > 1 is not allowed for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.mipLevelCount = 2;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using 2D texture view that covers the first level of the texture is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D texture view that covers the first level is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D texture view that covers the last level is OK for color
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kColorFormat;
+        descriptor.baseMipLevel = kLevelCount - 1;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView colorTextureView = colorTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using 2D texture view that covers the last level is OK for depth stencil
+    {
+        wgpu::TextureViewDescriptor descriptor = baseDescriptor;
+        descriptor.format = kDepthStencilFormat;
+        descriptor.baseMipLevel = kLevelCount - 1;
+        descriptor.mipLevelCount = 1;
+
+        wgpu::TextureView depthStencilView = depthStencilTexture.CreateView(&descriptor);
+        utils::ComboRenderPassDescriptor renderPass({}, depthStencilView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// It is not allowed to set resolve target when the color attachment is non-multisampled.
+TEST_F(RenderPassDescriptorValidationTest, NonMultisampledColorWithResolveTarget) {
+    static constexpr uint32_t kArrayLayers = 1;
+    static constexpr uint32_t kLevelCount = 1;
+    static constexpr uint32_t kSize = 32;
+    static constexpr uint32_t kSampleCount = 1;
+    static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+    wgpu::Texture colorTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize, kArrayLayers,
+                      kLevelCount, kSampleCount);
+    wgpu::Texture resolveTargetTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize, kArrayLayers,
+                      kLevelCount, kSampleCount);
+    wgpu::TextureView colorTextureView = colorTexture.CreateView();
+    wgpu::TextureView resolveTargetTextureView = resolveTargetTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({colorTextureView});
+    renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+class MultisampledRenderPassDescriptorValidationTest : public RenderPassDescriptorValidationTest {
+  public:
+    utils::ComboRenderPassDescriptor CreateMultisampledRenderPass() {
+        return utils::ComboRenderPassDescriptor({CreateMultisampledColorTextureView()});
+    }
+
+    wgpu::TextureView CreateMultisampledColorTextureView() {
+        return CreateColorTextureView(kSampleCount);
+    }
+
+    wgpu::TextureView CreateNonMultisampledColorTextureView() { return CreateColorTextureView(1); }
+
+    static constexpr uint32_t kArrayLayers = 1;
+    static constexpr uint32_t kLevelCount = 1;
+    static constexpr uint32_t kSize = 32;
+    static constexpr uint32_t kSampleCount = 4;
+    static constexpr wgpu::TextureFormat kColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+
+  private:
+    wgpu::TextureView CreateColorTextureView(uint32_t sampleCount) {
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize,
+                          kArrayLayers, kLevelCount, sampleCount);
+
+        return colorTexture.CreateView();
+    }
+};
+
+// Tests on the use of multisampled textures as color attachments
+TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorAttachments) {
+    wgpu::TextureView colorTextureView = CreateNonMultisampledColorTextureView();
+    wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
+    wgpu::TextureView multisampledColorTextureView = CreateMultisampledColorTextureView();
+
+    // It is allowed to use a multisampled color attachment without setting resolve target.
+    {
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // It is not allowed to use multiple color attachments with different sample counts.
+    {
+        utils::ComboRenderPassDescriptor renderPass(
+            {multisampledColorTextureView, colorTextureView});
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// It is not allowed to use a multisampled resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledResolveTarget) {
+    wgpu::TextureView multisampledResolveTargetView = CreateMultisampledColorTextureView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = multisampledResolveTargetView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target with array layer count > 1.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetArrayLayerMoreThanOne) {
+    constexpr uint32_t kArrayLayers2 = 2;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize, kSize, kArrayLayers2, kLevelCount);
+    wgpu::TextureViewDescriptor viewDesc;
+    viewDesc.dimension = wgpu::TextureViewDimension::e2DArray;
+    wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&viewDesc);
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target with mipmap level count > 1.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetMipmapLevelMoreThanOne) {
+    constexpr uint32_t kLevelCount2 = 2;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize, kSize, kArrayLayers, kLevelCount2);
+    wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target which is created from a texture whose usage does
+// not include wgpu::TextureUsage::RenderAttachment.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetUsageNoRenderAttachment) {
+    constexpr wgpu::TextureUsage kUsage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture nonColorUsageResolveTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize, kSize, kArrayLayers,
+                      kLevelCount, 1, kUsage);
+    wgpu::TextureView nonColorUsageResolveTextureView = nonColorUsageResolveTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = nonColorUsageResolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is not allowed to use a resolve target which is in error state.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetInErrorState) {
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::TextureViewDescriptor errorTextureView;
+    errorTextureView.dimension = wgpu::TextureViewDimension::e2D;
+    errorTextureView.format = kColorFormat;
+    errorTextureView.baseArrayLayer = kArrayLayers + 1;
+    ASSERT_DEVICE_ERROR(wgpu::TextureView errorResolveTarget =
+                            resolveTexture.CreateView(&errorTextureView));
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = errorResolveTarget;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// It is allowed to use a multisampled color attachment and a non-multisampled resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, MultisampledColorWithResolveTarget) {
+    wgpu::TextureView resolveTargetTextureView = CreateNonMultisampledColorTextureView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTargetTextureView;
+    AssertBeginRenderPassSuccess(&renderPass);
+}
+
+// It is not allowed to use a resolve target in a format different from the color attachment.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetDifferentFormat) {
+    constexpr wgpu::TextureFormat kColorFormat2 = wgpu::TextureFormat::BGRA8Unorm;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat2,
+                                                 kSize, kSize, kArrayLayers, kLevelCount);
+    wgpu::TextureView resolveTextureView = resolveTexture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+    renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+    AssertBeginRenderPassError(&renderPass);
+}
+
+// Tests on the size of the resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest,
+       ColorAttachmentResolveTargetDimensionMismatch) {
+    constexpr uint32_t kSize2 = kSize * 2;
+    wgpu::Texture resolveTexture = CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat,
+                                                 kSize2, kSize2, kArrayLayers, kLevelCount + 1);
+
+    wgpu::TextureViewDescriptor textureViewDescriptor;
+    textureViewDescriptor.nextInChain = nullptr;
+    textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
+    textureViewDescriptor.format = kColorFormat;
+    textureViewDescriptor.mipLevelCount = 1;
+    textureViewDescriptor.baseArrayLayer = 0;
+    textureViewDescriptor.arrayLayerCount = 1;
+
+    {
+        wgpu::TextureViewDescriptor firstMipLevelDescriptor = textureViewDescriptor;
+        firstMipLevelDescriptor.baseMipLevel = 0;
+
+        wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&firstMipLevelDescriptor);
 
         utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
         renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
         AssertBeginRenderPassError(&renderPass);
     }
 
-    // Tests on the size of the resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest,
-           ColorAttachmentResolveTargetDimensionMismatch) {
-        constexpr uint32_t kSize2 = kSize * 2;
-        wgpu::Texture resolveTexture =
-            CreateTexture(device, wgpu::TextureDimension::e2D, kColorFormat, kSize2, kSize2,
-                          kArrayLayers, kLevelCount + 1);
+    {
+        wgpu::TextureViewDescriptor secondMipLevelDescriptor = textureViewDescriptor;
+        secondMipLevelDescriptor.baseMipLevel = 1;
 
-        wgpu::TextureViewDescriptor textureViewDescriptor;
-        textureViewDescriptor.nextInChain = nullptr;
-        textureViewDescriptor.dimension = wgpu::TextureViewDimension::e2D;
-        textureViewDescriptor.format = kColorFormat;
-        textureViewDescriptor.mipLevelCount = 1;
-        textureViewDescriptor.baseArrayLayer = 0;
-        textureViewDescriptor.arrayLayerCount = 1;
+        wgpu::TextureView resolveTextureView = resolveTexture.CreateView(&secondMipLevelDescriptor);
 
-        {
-            wgpu::TextureViewDescriptor firstMipLevelDescriptor = textureViewDescriptor;
-            firstMipLevelDescriptor.baseMipLevel = 0;
+        utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
+        renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
 
-            wgpu::TextureView resolveTextureView =
-                resolveTexture.CreateView(&firstMipLevelDescriptor);
+// Tests the texture format of the resolve target must support being used as resolve target.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetFormat) {
+    for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+        if (!utils::TextureFormatSupportsMultisampling(format) ||
+            !utils::TextureFormatSupportsRendering(format)) {
+            continue;
+        }
 
-            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-            renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
+        wgpu::Texture colorTexture =
+            CreateTexture(device, wgpu::TextureDimension::e2D, format, kSize, kSize, kArrayLayers,
+                          kLevelCount, kSampleCount);
+        wgpu::Texture resolveTarget = CreateTexture(device, wgpu::TextureDimension::e2D, format,
+                                                    kSize, kSize, kArrayLayers, kLevelCount, 1);
+
+        utils::ComboRenderPassDescriptor renderPass({colorTexture.CreateView()});
+        renderPass.cColorAttachments[0].resolveTarget = resolveTarget.CreateView();
+        if (utils::TextureFormatSupportsResolveTarget(format)) {
+            AssertBeginRenderPassSuccess(&renderPass);
+        } else {
             AssertBeginRenderPassError(&renderPass);
         }
-
-        {
-            wgpu::TextureViewDescriptor secondMipLevelDescriptor = textureViewDescriptor;
-            secondMipLevelDescriptor.baseMipLevel = 1;
-
-            wgpu::TextureView resolveTextureView =
-                resolveTexture.CreateView(&secondMipLevelDescriptor);
-
-            utils::ComboRenderPassDescriptor renderPass = CreateMultisampledRenderPass();
-            renderPass.cColorAttachments[0].resolveTarget = resolveTextureView;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
     }
+}
 
-    // Tests the texture format of the resolve target must support being used as resolve target.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, ResolveTargetFormat) {
-        for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
-            if (!utils::TextureFormatSupportsMultisampling(format) ||
-                !utils::TextureFormatSupportsRendering(format)) {
-                continue;
-            }
+// Tests on the sample count of depth stencil attachment.
+TEST_F(MultisampledRenderPassDescriptorValidationTest, DepthStencilAttachmentSampleCount) {
+    constexpr wgpu::TextureFormat kDepthStencilFormat = wgpu::TextureFormat::Depth24PlusStencil8;
+    wgpu::Texture multisampledDepthStencilTexture =
+        CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
+                      kArrayLayers, kLevelCount, kSampleCount);
+    wgpu::TextureView multisampledDepthStencilTextureView =
+        multisampledDepthStencilTexture.CreateView();
 
-            wgpu::Texture colorTexture =
-                CreateTexture(device, wgpu::TextureDimension::e2D, format, kSize, kSize,
-                              kArrayLayers, kLevelCount, kSampleCount);
-            wgpu::Texture resolveTarget = CreateTexture(device, wgpu::TextureDimension::e2D, format,
-                                                        kSize, kSize, kArrayLayers, kLevelCount, 1);
-
-            utils::ComboRenderPassDescriptor renderPass({colorTexture.CreateView()});
-            renderPass.cColorAttachments[0].resolveTarget = resolveTarget.CreateView();
-            if (utils::TextureFormatSupportsResolveTarget(format)) {
-                AssertBeginRenderPassSuccess(&renderPass);
-            } else {
-                AssertBeginRenderPassError(&renderPass);
-            }
-        }
-    }
-
-    // Tests on the sample count of depth stencil attachment.
-    TEST_F(MultisampledRenderPassDescriptorValidationTest, DepthStencilAttachmentSampleCount) {
-        constexpr wgpu::TextureFormat kDepthStencilFormat =
-            wgpu::TextureFormat::Depth24PlusStencil8;
-        wgpu::Texture multisampledDepthStencilTexture =
+    // It is not allowed to use a depth stencil attachment whose sample count is different from
+    // the one of the color attachment.
+    {
+        wgpu::Texture depthStencilTexture =
             CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize, kSize,
-                          kArrayLayers, kLevelCount, kSampleCount);
-        wgpu::TextureView multisampledDepthStencilTextureView =
-            multisampledDepthStencilTexture.CreateView();
+                          kArrayLayers, kLevelCount);
+        wgpu::TextureView depthStencilTextureView = depthStencilTexture.CreateView();
 
-        // It is not allowed to use a depth stencil attachment whose sample count is different from
-        // the one of the color attachment.
-        {
-            wgpu::Texture depthStencilTexture =
-                CreateTexture(device, wgpu::TextureDimension::e2D, kDepthStencilFormat, kSize,
-                              kSize, kArrayLayers, kLevelCount);
-            wgpu::TextureView depthStencilTextureView = depthStencilTexture.CreateView();
-
-            utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
-                                                        depthStencilTextureView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({CreateNonMultisampledColorTextureView()},
-                                                        multisampledDepthStencilTextureView);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // It is allowed to use a multisampled depth stencil attachment whose sample count is equal
-        // to the one of the color attachment.
-        {
-            utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
-                                                        multisampledDepthStencilTextureView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // It is allowed to use a multisampled depth stencil attachment while there is no color
-        // attachment.
-        {
-            utils::ComboRenderPassDescriptor renderPass({}, multisampledDepthStencilTextureView);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
+        utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
+                                                    depthStencilTextureView);
+        AssertBeginRenderPassError(&renderPass);
     }
 
-    // Tests that NaN cannot be accepted as a valid color or depth clear value and INFINITY is valid
-    // in both color and depth clear values.
-    TEST_F(RenderPassDescriptorValidationTest, UseNaNOrINFINITYAsColorOrDepthClearValue) {
-        wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-
-        // Tests that NaN cannot be used in clearColor.
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.r = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.g = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.b = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.a = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that INFINITY can be used in clearColor.
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.r = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.g = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.b = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        {
-            utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
-            renderPass.cColorAttachments[0].clearValue.a = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Tests that NaN cannot be used in depthClearValue.
-        {
-            wgpu::TextureView depth =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
-            utils::ComboRenderPassDescriptor renderPass({color}, depth);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthClearValue = NAN;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that INFINITY can be used in depthClearValue.
-        {
-            wgpu::TextureView depth =
-                Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
-            utils::ComboRenderPassDescriptor renderPass({color}, depth);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthClearValue = INFINITY;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // TODO(https://crbug.com/dawn/666): Add a test case for clearStencil for stencilOnly
-        // once stencil8 is supported.
+    {
+        utils::ComboRenderPassDescriptor renderPass({CreateNonMultisampledColorTextureView()},
+                                                    multisampledDepthStencilTextureView);
+        AssertBeginRenderPassError(&renderPass);
     }
 
-    TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilReadOnly) {
-        wgpu::TextureView colorView =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
-        wgpu::TextureView depthStencilView =
-            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
-        wgpu::TextureView depthStencilViewNoStencil =
+    // It is allowed to use a multisampled depth stencil attachment whose sample count is equal
+    // to the one of the color attachment.
+    {
+        utils::ComboRenderPassDescriptor renderPass({CreateMultisampledColorTextureView()},
+                                                    multisampledDepthStencilTextureView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // It is allowed to use a multisampled depth stencil attachment while there is no color
+    // attachment.
+    {
+        utils::ComboRenderPassDescriptor renderPass({}, multisampledDepthStencilTextureView);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+}
+
+// Tests that NaN cannot be accepted as a valid color or depth clear value and INFINITY is valid
+// in both color and depth clear values.
+TEST_F(RenderPassDescriptorValidationTest, UseNaNOrINFINITYAsColorOrDepthClearValue) {
+    wgpu::TextureView color = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+
+    // Tests that NaN cannot be used in clearColor.
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.r = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.g = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.b = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.a = NAN;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that INFINITY can be used in clearColor.
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.r = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.g = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.b = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    {
+        utils::ComboRenderPassDescriptor renderPass({color}, nullptr);
+        renderPass.cColorAttachments[0].clearValue.a = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Tests that NaN cannot be used in depthClearValue.
+    {
+        wgpu::TextureView depth =
             Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
-
-        // Tests that a read-only pass with depthReadOnly set to true succeeds.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values passes when
-        // there is no stencil component in the format (deprecated).
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            EXPECT_DEPRECATION_WARNING(AssertBeginRenderPassSuccess(&renderPass));
-        }
-
-        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
-        // there there is no stencil component in the format and stencil loadOp/storeOp are passed.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            AssertBeginRenderPassError(&renderPass);
-
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            AssertBeginRenderPassError(&renderPass);
-
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with depthReadOnly=true and stencilReadOnly=true can pass
-        // when there is only depth component in the format. We actually enable readonly
-        // depth/stencil attachment in this case.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Tests that a pass with depthReadOnly=false and stencilReadOnly=true can pass
-        // when there is only depth component in the format. We actually don't enable readonly
-        // depth/stencil attachment in this case.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = false;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only once stencil8 is
-        // supported (depthReadOnly and stencilReadOnly mismatch but no depth component).
-
-        // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
-        // both depth and stencil components exist.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with loadOp set to clear and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with storeOp set to discard and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only depthLoadOp set to load and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only depthStoreOp set to store and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only stencilLoadOp set to load and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Tests that a pass with only stencilStoreOp set to store and readOnly set to true fails.
-        {
-            utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
-            renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
-            renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
-            AssertBeginRenderPassError(&renderPass);
-        }
+        utils::ComboRenderPassDescriptor renderPass({color}, depth);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthClearValue = NAN;
+        AssertBeginRenderPassError(&renderPass);
     }
 
-    // Check that the depth stencil attachment must use all aspects.
-    TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilAllAspects) {
-        wgpu::TextureDescriptor texDesc;
-        texDesc.usage = wgpu::TextureUsage::RenderAttachment;
-        texDesc.size = {1, 1, 1};
-
-        wgpu::TextureViewDescriptor viewDesc;
-        viewDesc.baseMipLevel = 0;
-        viewDesc.mipLevelCount = 1;
-        viewDesc.baseArrayLayer = 0;
-        viewDesc.arrayLayerCount = 1;
-
-        // Using all aspects of a depth+stencil texture is allowed.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::All;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // Using only depth of a depth+stencil texture is an error.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using only stencil of a depth+stencil texture is an error.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            AssertBeginRenderPassError(&renderPass);
-        }
-
-        // Using DepthOnly of a depth only texture is allowed.
-        {
-            texDesc.format = wgpu::TextureFormat::Depth24Plus;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-
-            wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
-            utils::ComboRenderPassDescriptor renderPass({}, view);
-            renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-            renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-
-            AssertBeginRenderPassSuccess(&renderPass);
-        }
-
-        // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only on stencil8 once this
-        // format is supported.
+    // Tests that INFINITY can be used in depthClearValue.
+    {
+        wgpu::TextureView depth =
+            Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
+        utils::ComboRenderPassDescriptor renderPass({color}, depth);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthClearValue = INFINITY;
+        AssertBeginRenderPassSuccess(&renderPass);
     }
 
-    // TODO(cwallez@chromium.org): Constraints on attachment aliasing?
+    // TODO(https://crbug.com/dawn/666): Add a test case for clearStencil for stencilOnly
+    // once stencil8 is supported.
+}
+
+TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilReadOnly) {
+    wgpu::TextureView colorView = Create2DAttachment(device, 1, 1, wgpu::TextureFormat::RGBA8Unorm);
+    wgpu::TextureView depthStencilView =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24PlusStencil8);
+    wgpu::TextureView depthStencilViewNoStencil =
+        Create2DAttachment(device, 1, 1, wgpu::TextureFormat::Depth24Plus);
+
+    // Tests that a read-only pass with depthReadOnly set to true succeeds.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values passes when
+    // there is no stencil component in the format (deprecated).
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        EXPECT_DEPRECATION_WARNING(AssertBeginRenderPassSuccess(&renderPass));
+    }
+
+    // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
+    // there there is no stencil component in the format and stencil loadOp/storeOp are passed.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        AssertBeginRenderPassError(&renderPass);
+
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        AssertBeginRenderPassError(&renderPass);
+
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with depthReadOnly=true and stencilReadOnly=true can pass
+    // when there is only depth component in the format. We actually enable readonly
+    // depth/stencil attachment in this case.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Tests that a pass with depthReadOnly=false and stencilReadOnly=true can pass
+    // when there is only depth component in the format. We actually don't enable readonly
+    // depth/stencil attachment in this case.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilViewNoStencil);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = false;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only once stencil8 is
+    // supported (depthReadOnly and stencilReadOnly mismatch but no depth component).
+
+    // Tests that a pass with mismatched depthReadOnly and stencilReadOnly values fails when
+    // both depth and stencil components exist.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = false;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with loadOp set to clear and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with storeOp set to discard and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Discard;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Discard;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only depthLoadOp set to load and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only depthStoreOp set to store and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only stencilLoadOp set to load and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Load;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Tests that a pass with only stencilStoreOp set to store and readOnly set to true fails.
+    {
+        utils::ComboRenderPassDescriptor renderPass({colorView}, depthStencilView);
+        renderPass.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+        renderPass.cDepthStencilAttachmentInfo.stencilReadOnly = true;
+        AssertBeginRenderPassError(&renderPass);
+    }
+}
+
+// Check that the depth stencil attachment must use all aspects.
+TEST_F(RenderPassDescriptorValidationTest, ValidateDepthStencilAllAspects) {
+    wgpu::TextureDescriptor texDesc;
+    texDesc.usage = wgpu::TextureUsage::RenderAttachment;
+    texDesc.size = {1, 1, 1};
+
+    wgpu::TextureViewDescriptor viewDesc;
+    viewDesc.baseMipLevel = 0;
+    viewDesc.mipLevelCount = 1;
+    viewDesc.baseArrayLayer = 0;
+    viewDesc.arrayLayerCount = 1;
+
+    // Using all aspects of a depth+stencil texture is allowed.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::All;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // Using only depth of a depth+stencil texture is an error.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using only stencil of a depth+stencil texture is an error.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        AssertBeginRenderPassError(&renderPass);
+    }
+
+    // Using DepthOnly of a depth only texture is allowed.
+    {
+        texDesc.format = wgpu::TextureFormat::Depth24Plus;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+
+        wgpu::TextureView view = device.CreateTexture(&texDesc).CreateView(&viewDesc);
+        utils::ComboRenderPassDescriptor renderPass({}, view);
+        renderPass.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+        renderPass.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+        AssertBeginRenderPassSuccess(&renderPass);
+    }
+
+    // TODO(https://crbug.com/dawn/666): Add a test case for stencil-only on stencil8 once this
+    // format is supported.
+}
+
+// TODO(cwallez@chromium.org): Constraints on attachment aliasing?
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp b/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
index 8a936c9..079f9b3 100644
--- a/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/RenderPipelineValidationTests.cpp
@@ -17,8 +17,8 @@
 #include <string>
 #include <vector>
 
-#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
@@ -49,11 +49,11 @@
 };
 
 namespace {
-    bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
-        return blendFactor == wgpu::BlendFactor::SrcAlpha ||
-               blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
-               blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
-    }
+bool BlendFactorContainsSrcAlpha(const wgpu::BlendFactor& blendFactor) {
+    return blendFactor == wgpu::BlendFactor::SrcAlpha ||
+           blendFactor == wgpu::BlendFactor::OneMinusSrcAlpha ||
+           blendFactor == wgpu::BlendFactor::SrcAlphaSaturated;
+}
 }  // namespace
 
 // Test cases where creation should succeed
diff --git a/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp b/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
index c2902c4..8e842b3 100644
--- a/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
+++ b/src/dawn/tests/unittests/validation/ResourceUsageTrackingTests.cpp
@@ -21,1510 +21,927 @@
 
 namespace {
 
-    class ResourceUsageTrackingTest : public ValidationTest {
-      protected:
-        wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage) {
-            wgpu::BufferDescriptor descriptor;
-            descriptor.size = size;
-            descriptor.usage = usage;
+class ResourceUsageTrackingTest : public ValidationTest {
+  protected:
+    wgpu::Buffer CreateBuffer(uint64_t size, wgpu::BufferUsage usage) {
+        wgpu::BufferDescriptor descriptor;
+        descriptor.size = size;
+        descriptor.usage = usage;
 
-            return device.CreateBuffer(&descriptor);
-        }
+        return device.CreateBuffer(&descriptor);
+    }
 
-        wgpu::Texture CreateTexture(wgpu::TextureUsage usage,
-                                    wgpu::TextureFormat format = wgpu::TextureFormat::RGBA8Unorm) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size = {1, 1, 1};
-            descriptor.sampleCount = 1;
-            descriptor.mipLevelCount = 1;
-            descriptor.usage = usage;
-            descriptor.format = format;
+    wgpu::Texture CreateTexture(wgpu::TextureUsage usage,
+                                wgpu::TextureFormat format = wgpu::TextureFormat::RGBA8Unorm) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size = {1, 1, 1};
+        descriptor.sampleCount = 1;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = usage;
+        descriptor.format = format;
 
-            return device.CreateTexture(&descriptor);
-        }
+        return device.CreateTexture(&descriptor);
+    }
 
-        // Note that it is valid to bind any bind groups for indices that the pipeline doesn't use.
-        // We create a no-op render or compute pipeline without any bindings, and set bind groups
-        // in the caller, so it is always correct for binding validation between bind groups and
-        // pipeline. But those bind groups in caller can be used for validation for other purposes.
-        wgpu::RenderPipeline CreateNoOpRenderPipeline() {
-            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+    // Note that it is valid to bind any bind groups for indices that the pipeline doesn't use.
+    // We create a no-op render or compute pipeline without any bindings, and set bind groups
+    // in the caller, so it is always correct for binding validation between bind groups and
+    // pipeline. But those bind groups in caller can be used for validation for other purposes.
+    wgpu::RenderPipeline CreateNoOpRenderPipeline() {
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
 
-            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
                 @stage(fragment) fn main() {
                 })");
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = vsModule;
-            pipelineDescriptor.cFragment.module = fsModule;
-            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, nullptr);
-            return device.CreateRenderPipeline(&pipelineDescriptor);
-        }
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, nullptr);
+        return device.CreateRenderPipeline(&pipelineDescriptor);
+    }
 
-        wgpu::ComputePipeline CreateNoOpComputePipeline(std::vector<wgpu::BindGroupLayout> bgls) {
-            wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
+    wgpu::ComputePipeline CreateNoOpComputePipeline(std::vector<wgpu::BindGroupLayout> bgls) {
+        wgpu::ShaderModule csModule = utils::CreateShaderModule(device, R"(
                 @stage(compute) @workgroup_size(1) fn main() {
                 })");
-            wgpu::ComputePipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.layout = utils::MakePipelineLayout(device, std::move(bgls));
-            pipelineDescriptor.compute.module = csModule;
-            pipelineDescriptor.compute.entryPoint = "main";
-            return device.CreateComputePipeline(&pipelineDescriptor);
-        }
-
-        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
-    };
-
-    // Test that using a single buffer in multiple read usages in the same pass is allowed.
-    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleReadUsage) {
-        // Test render pass
-        {
-            // Create a buffer, and use the buffer as both vertex and index buffer.
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetVertexBuffer(0, buffer);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Test compute pass
-        {
-            // Create buffer and bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
-                 {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // Use the buffer as both uniform and readonly storage buffer in compute pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            encoder.Finish();
-        }
+        wgpu::ComputePipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.layout = utils::MakePipelineLayout(device, std::move(bgls));
+        pipelineDescriptor.compute.module = csModule;
+        pipelineDescriptor.compute.entryPoint = "main";
+        return device.CreateComputePipeline(&pipelineDescriptor);
     }
 
-    // Test that it is invalid to use the same buffer as both readable and writable in the same
-    // render pass. It is invalid in the same dispatch in compute pass.
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsage) {
-        // test render pass
-        {
-            // Create buffer and bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+    static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+};
 
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+// Test that using a single buffer in multiple read usages in the same pass is allowed.
+TEST_F(ResourceUsageTrackingTest, BufferWithMultipleReadUsage) {
+    // Test render pass
+    {
+        // Create a buffer, and use the buffer as both vertex and index buffer.
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
 
-            // It is invalid to use the buffer as both index and storage in render pass
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // test compute pass
-        {
-            // Create buffer and bind group
-            wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
-                 {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg =
-                utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
-
-            // Create a no-op compute pipeline
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // It is valid to use the buffer as both storage and readonly storage in a single
-            // compute pass if dispatch command is not called.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, bg);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // It is invalid to use the buffer as both storage and readonly storage in a single
-            // dispatch.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetPipeline(cp);
-                pass.SetBindGroup(0, bg);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetVertexBuffer(0, buffer);
+        pass.End();
+        encoder.Finish();
     }
 
-    // Test the use of a buffer as a storage buffer multiple times in the same synchronization
-    // scope.
-    TEST_F(ResourceUsageTrackingTest, BufferUsedAsStorageMultipleTimes) {
+    // Test compute pass
+    {
+        // Create buffer and bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Uniform | wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform},
+                     {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+        // Use the buffer as both uniform and readonly storage buffer in compute pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same buffer as both readable and writable in the same
+// render pass. It is invalid in the same dispatch in compute pass.
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsage) {
+    // test render pass
+    {
+        // Create buffer and bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+        // It is invalid to use the buffer as both index and storage in render pass
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // test compute pass
+    {
         // Create buffer and bind group
         wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
 
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::BufferBindingType::Storage},
-                     {1, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::BufferBindingType::Storage}});
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                     {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
         wgpu::BindGroup bg =
             utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
 
-        // test render pass
+        // Create a no-op compute pipeline
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // It is valid to use the buffer as both storage and readonly storage in a single
+        // compute pass if dispatch command is not called.
         {
-            // It is valid to use multiple storage usages on the same buffer in render pass
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
             pass.SetBindGroup(0, bg);
             pass.End();
             encoder.Finish();
         }
 
-        // test compute pass
+        // It is invalid to use the buffer as both storage and readonly storage in a single
+        // dispatch.
         {
-            // It is valid to use multiple storage usages on the same buffer in a dispatch
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
             pass.SetPipeline(cp);
             pass.SetBindGroup(0, bg);
             pass.DispatchWorkgroups(1);
             pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that using the same buffer as both readable and writable in different passes is allowed
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentPasses) {
-        // Test render pass
-        {
-            // Create buffers that will be used as index and storage buffers
-            wgpu::Buffer buffer0 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::Buffer buffer1 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-
-            // Create bind groups to use the buffer as storage
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
-
-            // Use these two buffers as both index and storage in different render passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-
-            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass0.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-            pass0.SetBindGroup(0, bg1);
-            pass0.End();
-
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass1.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
-            pass1.SetBindGroup(0, bg0);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test compute pass
-        {
-            // Create buffer and bind groups that will be used as storage and uniform bindings
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
-
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Use the buffer as both storage and uniform in different compute passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, bg0);
-            pass0.End();
-
-            wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
-            pass1.SetBindGroup(1, bg1);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test render pass and compute pass mixed together with resource dependency.
-        {
-            // Create buffer and bind groups that will be used as storage and uniform bindings
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Use the buffer as storage and uniform in render pass and compute pass respectively
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, bg0);
-            pass0.End();
-
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass1.SetBindGroup(1, bg1);
-            pass1.End();
-
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same buffer as both readable and writable in different
-    // draws in a single render pass. But it is valid in different dispatches in a single compute
-    // pass.
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentDrawsOrDispatches) {
-        // Test render pass
-        {
-            // Create a buffer and a bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is not allowed to use the same buffer as both readable and writable in different
-            // draws within the same render pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.Draw(3);
-
-            pass.SetBindGroup(0, bg);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // test compute pass
-        {
-            // Create a buffer and bind groups
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp0 = CreateNoOpComputePipeline({bgl0});
-            wgpu::ComputePipeline cp1 = CreateNoOpComputePipeline({bgl1});
-
-            // It is valid to use the same buffer as both readable and writable in different
-            // dispatches within the same compute pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-            pass.SetPipeline(cp0);
-            pass.SetBindGroup(0, bg0);
-            pass.DispatchWorkgroups(1);
-
-            pass.SetPipeline(cp1);
-            pass.SetBindGroup(0, bg1);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same buffer as both readable and writable in a single
-    // draw or dispatch.
-    TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInSingleDrawOrDispatch) {
-        // Test render pass
-        {
-            // Create a buffer and a bind group
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is invalid to use the same buffer as both readable and writable usages in a single
-            // draw
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, writeBG);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // test compute pass
-        {
-            // Create a buffer and bind groups
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
-
-            // It is invalid to use the same buffer as both readable and writable usages in a single
-            // dispatch
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
+}
 
-    // Test that using the same buffer as copy src/dst and writable/readable usage is allowed.
-    TEST_F(ResourceUsageTrackingTest, BufferCopyAndBufferUsageInPass) {
-        // Create buffers that will be used as both a copy src/dst buffer and a storage buffer
-        wgpu::Buffer bufferSrc =
-            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
-        wgpu::Buffer bufferDst =
-            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+// Test the use of a buffer as a storage buffer multiple times in the same synchronization
+// scope.
+TEST_F(ResourceUsageTrackingTest, BufferUsedAsStorageMultipleTimes) {
+    // Create buffer and bind group
+    wgpu::Buffer buffer = CreateBuffer(512, wgpu::BufferUsage::Storage);
 
-        // Create the bind group to use the buffer as storage
-        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, bufferSrc}});
-        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, bufferDst}});
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::BufferBindingType::Storage},
+                 {1, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroup bg =
+        utils::MakeBindGroup(device, bgl, {{0, buffer, 0, 4}, {1, buffer, 256, 4}});
 
-        // Use the buffer as both copy src and storage in render pass
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg0);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Use the buffer as both copy dst and readonly storage in compute pass
-        {
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl1});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
-
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg1);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-
-            encoder.Finish();
-        }
+    // test render pass
+    {
+        // It is valid to use multiple storage usages on the same buffer in render pass
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        encoder.Finish();
     }
 
-    // Test that all index buffers and vertex buffers take effect even though some buffers are
-    // not used because they are overwritten by another consecutive call.
-    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetIndexOrVertexBuffer) {
-        // Create buffers that will be used as both vertex and index buffer.
-        wgpu::Buffer buffer0 = CreateBuffer(
-            4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage);
+    // test compute pass
+    {
+        // It is valid to use multiple storage usages on the same buffer in a dispatch
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that using the same buffer as both readable and writable in different passes is allowed
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentPasses) {
+    // Test render pass
+    {
+        // Create buffers that will be used as index and storage buffers
+        wgpu::Buffer buffer0 =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
         wgpu::Buffer buffer1 =
-            CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
 
+        // Create bind groups to use the buffer as storage
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
             device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
+
+        // Use these two buffers as both index and storage in different render passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass0.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+        pass0.SetBindGroup(0, bg1);
+        pass0.End();
+
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass1.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+        pass1.SetBindGroup(0, bg0);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test compute pass
+    {
+        // Create buffer and bind groups that will be used as storage and uniform bindings
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Uniform);
+
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Uniform}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Use the buffer as both storage and uniform in different compute passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, bg0);
+        pass0.End();
+
+        wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+        pass1.SetBindGroup(1, bg1);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test render pass and compute pass mixed together with resource dependency.
+    {
+        // Create buffer and bind groups used as storage and read-only storage bindings
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Use the buffer as storage (compute pass) and read-only storage (render pass)
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, bg0);
+        pass0.End();
+
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass1.SetBindGroup(1, bg1);
+        pass1.End();
+
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same buffer as both readable and writable in different
+// draws in a single render pass. But it is valid in different dispatches in a single compute
+// pass.
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInDifferentDrawsOrDispatches) {
+    // Test render pass
+    {
+        // Create a buffer and a bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is not allowed to use the same buffer as both readable and writable in different
+        // draws within the same render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.Draw(3);
+
+        pass.SetBindGroup(0, bg);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a buffer and bind groups
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp0 = CreateNoOpComputePipeline({bgl0});
+        wgpu::ComputePipeline cp1 = CreateNoOpComputePipeline({bgl1});
+
+        // It is valid to use the same buffer as both readable and writable in different
+        // dispatches within the same compute pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+        pass.SetPipeline(cp0);
+        pass.SetBindGroup(0, bg0);
+        pass.DispatchWorkgroups(1);
+
+        pass.SetPipeline(cp1);
+        pass.SetBindGroup(0, bg1);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same buffer as both readable and writable in a single
+// draw or dispatch.
+TEST_F(ResourceUsageTrackingTest, BufferWithReadAndWriteUsageInSingleDrawOrDispatch) {
+    // Test render pass
+    {
+        // Create a buffer and a bind group
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is invalid to use the same buffer as both readable and writable usages in a single
+        // draw
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, writeBG);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a buffer and bind groups
+        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
+
+        // It is invalid to use the same buffer as both readable and writable usages in a single
+        // dispatch
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that using the same buffer as copy src/dst and writable/readable usage is allowed.
+TEST_F(ResourceUsageTrackingTest, BufferCopyAndBufferUsageInPass) {
+    // Create buffers that will be used as both a copy src/dst buffer and a storage buffer
+    wgpu::Buffer bufferSrc =
+        CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer bufferDst =
+        CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::CopyDst);
+
+    // Create the bind group to use the buffer as storage
+    wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, bufferSrc}});
+    wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, bufferDst}});
+
+    // Use the buffer as both copy src and storage in render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Use the buffer as both copy dst and readonly storage in compute pass
+    {
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl1});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToBuffer(bufferSrc, 0, bufferDst, 0, 4);
+
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg1);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+
+        encoder.Finish();
+    }
+}
+
+// Test that all index buffers and vertex buffers take effect even though some buffers are
+// not used because they are overwritten by another consecutive call.
+TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetIndexOrVertexBuffer) {
+    // Create buffers that will be used as both vertex and index buffer.
+    wgpu::Buffer buffer0 = CreateBuffer(
+        4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index | wgpu::BufferUsage::Storage);
+    wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Vertex | wgpu::BufferUsage::Index);
+
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+    wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+
+    PlaceholderRenderPass PlaceholderRenderPass(device);
+
+    // Set index buffer twice. The second one overwrites the first one. No buffer is used as
+    // both read and write in the same pass. But the overwritten index buffer (buffer0) still
+    // takes effect during resource tracking.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+        pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Set index buffer twice. The second one overwrites the first one. buffer0 is used as both
+    // read and write in the same pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
+        pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Set vertex buffer on the same index twice. The second one overwrites the first one. No
+    // buffer is used as both read and write in the same pass. But the overwritten vertex buffer
+    // (buffer0) still takes effect during resource tracking.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetVertexBuffer(0, buffer0);
+        pass.SetVertexBuffer(0, buffer1);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Set vertex buffer on the same index twice. The second one overwrites the first one.
+    // buffer0 is used as both read and write in the same pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetVertexBuffer(0, buffer1);
+        pass.SetVertexBuffer(0, buffer0);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
+// used because they are overwritten by a consecutive call.
+TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetBindGroupsOnSameIndex) {
+    // Test render pass
+    {
+        // Create buffers that will be used as index and storage buffers
+        wgpu::Buffer buffer0 =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::Buffer buffer1 =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+
+        // Create the bind group to use the buffer as storage
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
 
         PlaceholderRenderPass PlaceholderRenderPass(device);
 
-        // Set index buffer twice. The second one overwrites the first one. No buffer is used as
-        // both read and write in the same pass. But the overwritten index buffer (buffer0) still
-        // take effect during resource tracking.
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // No buffer is used as both read and write in the same pass. But the overwritten
+        // bind group still takes effect during resource tracking.
         {
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
             pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-            pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
+            pass.SetBindGroup(0, bg0);
+            pass.SetBindGroup(0, bg1);
             pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
 
-        // Set index buffer twice. The second one overwrites the first one. buffer0 is used as both
-        // read and write in the same pass
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer1, wgpu::IndexFormat::Uint32);
-            pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Set vertex buffer on the same index twice. The second one overwrites the first one. No
-        // buffer is used as both read and write in the same pass. But the overwritten vertex buffer
-        // (buffer0) still take effect during resource tracking.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetVertexBuffer(0, buffer0);
-            pass.SetVertexBuffer(0, buffer1);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Set vertex buffer on the same index twice. The second one overwrites the first one.
+        // Set bind group on the same index twice. The second one overwrites the first one.
         // buffer0 is used as both read and write in the same pass
         {
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetVertexBuffer(0, buffer1);
-            pass.SetVertexBuffer(0, buffer0);
-            pass.SetBindGroup(0, bg);
+            pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
+            pass.SetBindGroup(0, bg1);
+            pass.SetBindGroup(0, bg0);
             pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
 
-    // Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
-    // used because they are overwritten by a consecutive call.
-    TEST_F(ResourceUsageTrackingTest, BufferWithMultipleSetBindGroupsOnSameIndex) {
-        // test render pass
+    // Test compute pass
+    {
+        // Create buffers that will be used as readonly and writable storage buffers
+        wgpu::Buffer buffer0 = CreateBuffer(512, wgpu::BufferUsage::Storage);
+        wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+        // Create the bind group to use the buffer as storage
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, buffer0, 0, 4}});
+        wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, buffer0, 256, 4}});
+        wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, buffer1, 0, 4}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
+
+        // Set bind group against the same index twice. The second one overwrites the first one.
+        // Then no buffer is used as both read and write in the same dispatch. But the
+        // overwritten bind group still takes effect.
         {
-            // Create buffers that will be used as index and storage buffers
-            wgpu::Buffer buffer0 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::Buffer buffer1 =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-
-            // Create the bind group to use the buffer as storage
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, buffer0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, buffer1}});
-
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // No buffer is used as both read and write in the same pass. But the overwritten
-            // bind group still take effect during resource tracking.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-                pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-                pass.SetBindGroup(0, bg0);
-                pass.SetBindGroup(0, bg1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // buffer0 is used as both read and write in the same pass
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-                pass.SetIndexBuffer(buffer0, wgpu::IndexFormat::Uint32);
-                pass.SetBindGroup(0, bg1);
-                pass.SetBindGroup(0, bg0);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-
-        // test compute pass
-        {
-            // Create buffers that will be used as readonly and writable storage buffers
-            wgpu::Buffer buffer0 = CreateBuffer(512, wgpu::BufferUsage::Storage);
-            wgpu::Buffer buffer1 = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-            // Create the bind group to use the buffer as storage
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, buffer0, 0, 4}});
-            wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, buffer0, 256, 4}});
-            wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, buffer1, 0, 4}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
-
-            // Set bind group against the same index twice. The second one overwrites the first one.
-            // Then no buffer is used as both read and write in the same dispatch. But the
-            // overwritten bind group still take effect.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // Set bind group against the same index twice. The second one overwrites the first one.
-            // Then buffer0 is used as both read and write in the same dispatch
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-    }
-
-    // Test that it is invalid to have resource usage conflicts even when all bindings are not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictBetweenInvisibleStagesInBindGroup) {
-        wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-
-        // Test render pass for bind group. The conflict of readonly storage and storage usage
-        // doesn't reside in render related stages at all
-        {
-            // Create a bind group whose bindings are not visible in render pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
-                         {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // These two bindings are invisible in render pass. But we still track these bindings.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass for bind group. The conflict of readonly storage and storage usage
-        // doesn't reside in compute related stage at all
-        {
-            // Create a bind group whose bindings are not visible in compute pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
-                         {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // These two bindings are invisible in the dispatch. But we still track these bindings.
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetBindGroup(1, readBG1);
             pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
+            pass.DispatchWorkgroups(1);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Set bind group against the same index twice. The second one overwrites the first one.
+        // Then buffer0 is used as both read and write in the same dispatch
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG1);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetPipeline(cp);
             pass.DispatchWorkgroups(1);
             pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
     }
+}
 
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithInvisibleStageInBindGroup) {
-        // Test render pass for bind group and index buffer. The conflict of storage and index
-        // buffer usage resides between fragment stage and compute stage. But the compute stage
-        // binding is not visible in render pass.
-        {
-            wgpu::Buffer buffer =
-                CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+// Test that it is invalid to have resource usage conflicts even when all bindings are not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, BufferUsageConflictBetweenInvisibleStagesInBindGroup) {
+    wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
 
-            // Buffer usage in compute stage in bind group conflicts with index buffer. And binding
-            // for compute stage is not visible in render pass. But we still track this binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    // Test render pass for bind group. The conflict of readonly storage and storage usage
+    // doesn't reside in render related stages at all
+    {
+        // Create a bind group whose bindings are not visible in render pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage},
+                     {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
 
-        // Test compute pass for bind group. The conflict of readonly storage and storage buffer
-        // usage resides between compute stage and fragment stage. But the fragment stage binding is
-        // not visible in the dispatch.
-        {
-            wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
-                         {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // Buffer usage in compute stage conflicts with buffer usage in fragment stage. And
-            // binding for fragment stage is not visible in the dispatch. But we still track this
-            // invisible binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+        // These two bindings are invisible in render pass. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // used in the pipeline.
-    TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithUnusedPipelineBindings) {
+    // Test compute pass for bind group. The conflict of readonly storage and storage usage
+    // doesn't reside in any compute-related stage at all.
+    {
+        // Create a bind group whose bindings are not visible in compute pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
+                     {1, wgpu::ShaderStage::None, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // These two bindings are invisible in the dispatch. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithInvisibleStageInBindGroup) {
+    // Test render pass for bind group and index buffer. The conflict of storage and index
+    // buffer usage resides between fragment stage and compute stage. But the compute stage
+    // binding is not visible in render pass.
+    {
+        wgpu::Buffer buffer =
+            CreateBuffer(4, wgpu::BufferUsage::Storage | wgpu::BufferUsage::Index);
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}});
+
+        // Buffer usage in the compute stage in the bind group conflicts with index buffer. And
+        // the binding for the compute stage is not visible in render pass. But we still track it.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetIndexBuffer(buffer, wgpu::IndexFormat::Uint32);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass for bind group. The conflict of readonly storage and storage buffer
+    // usage resides between compute stage and fragment stage. But the fragment stage binding is
+    // not visible in the dispatch.
+    {
         wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage},
+                     {1, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, buffer}, {1, buffer}});
 
-        // Test render pass for bind groups with unused bindings. The conflict of readonly storage
-        // and storage usages resides in different bind groups, although some bindings may not be
-        // used because its bind group layout is not designated in pipeline layout.
-        {
-            // Create bind groups. The bindings are visible for render pass.
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
 
-            // Create a passthrough render pipeline with a readonly buffer
-            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+        // Buffer usage in compute stage conflicts with buffer usage in fragment stage. And
+        // binding for fragment stage is not visible in the dispatch. But we still track this
+        // invisible binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// used in the pipeline.
+TEST_F(ResourceUsageTrackingTest, BufferUsageConflictWithUnusedPipelineBindings) {
+    wgpu::Buffer buffer = CreateBuffer(4, wgpu::BufferUsage::Storage);
+
+    // Test render pass for bind groups with unused bindings. The conflict of readonly storage
+    // and storage usages resides in different bind groups, although some bindings may not be
+    // used because its bind group layout is not designated in pipeline layout.
+    {
+        // Create bind groups. The bindings are visible for render pass.
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
+
+        // Create a passthrough render pipeline with a readonly buffer
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
 
-            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
                 struct RBuffer {
                     value : f32
                 }
                 @group(0) @binding(0) var<storage, read> rBuffer : RBuffer;
                 @stage(fragment) fn main() {
                 })");
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = vsModule;
-            pipelineDescriptor.cFragment.module = fsModule;
-            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl0);
-            wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &bgl0);
+        wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
 
-            // Resource in bg1 conflicts with resources used in bg0. However, bindings in bg1 is
-            // not used in pipeline. But we still track this binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg0);
-            pass.SetBindGroup(1, bg1);
-            pass.SetPipeline(rp);
-            pass.Draw(3);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test that an unused bind group is not used to detect conflicts between bindings in
-        // compute passes.
-        {
-            // Create bind groups. The bindings are visible for compute pass.
-            wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
-
-            // Create a compute pipeline with only one of the two BGLs.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl0});
-
-            // Resource in bg1 conflicts with resources used in bg0. However, the binding in bg1 is
-            // not used in pipeline so no error is produced in the dispatch.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg0);
-            pass.SetBindGroup(1, bg1);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
+        // Resource in bg1 conflicts with resources used in bg0. However, the binding in bg1
+        // is not used in the pipeline. But we still track this binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetPipeline(rp);
+        pass.Draw(3);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test that it is invalid to use the same texture as both readable and writable in the same
-    // render pass. It is invalid in the same dispatch in compute pass.
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsage) {
-        // Test render pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view = texture.CreateView();
+    // Test that an unused bind group is not used to detect conflicts between bindings in
+    // compute passes.
+    {
+        // Create bind groups. The bindings are visible for compute pass.
+        wgpu::BindGroupLayout bgl0 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+        wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl0, {{0, buffer}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl1, {{0, buffer}});
 
-            // Create a bind group to use the texture as sampled binding
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+        // Create a compute pipeline with only one of the two BGLs.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl0});
 
-            // Create a render pass to use the texture as a render target
-            utils::ComboRenderPassDescriptor renderPass({view});
-
-            // It is invalid to use the texture as both sampled and render target in the same pass
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create a bind group to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // Create a no-op compute pipeline
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // It is valid to use the texture as both sampled and writeonly storage in a single
-            // compute pass if dispatch command is not called.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, bg);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // It is invalid to use the texture as both sampled and writeonly storage in a single
-            // dispatch
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetPipeline(cp);
-                pass.SetBindGroup(0, bg);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
+        // Resource in bg1 conflicts with resources used in bg0. However, the binding in bg1 is
+        // not used in pipeline so no error is produced in the dispatch.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg0);
+        pass.SetBindGroup(1, bg1);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
     }
+}
 
-    // Test that it is invalid to use the same texture as both readable and writable depth/stencil
-    // attachment in the same render pass. But it is valid to use it as both readable and readonly
-    // depth/stencil attachment in the same render pass.
-    // Note that depth/stencil attachment is a special render attachment, it can be readonly.
-    TEST_F(ResourceUsageTrackingTest, TextureWithSamplingAndDepthStencilAttachment) {
+// Test that it is invalid to use the same texture as both readable and writable in the same
+// render pass. It is invalid in the same dispatch in compute pass.
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsage) {
+    // Test render pass
+    {
         // Create a texture
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment,
-                          wgpu::TextureFormat::Depth32Float);
+        wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                              wgpu::TextureUsage::RenderAttachment);
         wgpu::TextureView view = texture.CreateView();
 
         // Create a bind group to use the texture as sampled binding
         wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
         wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
 
         // Create a render pass to use the texture as a render target
-        utils::ComboRenderPassDescriptor passDescriptor({}, view);
-        passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
-        passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-        passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-        passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+        utils::ComboRenderPassDescriptor renderPass({view});
 
-        // It is invalid to use the texture as both sampled and writeable depth/stencil attachment
-        // in the same pass
+        // It is invalid to use the texture as both sampled and render target in the same pass
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a texture
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create a bind group to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
+             {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // It is valid to use the texture as both sampled and writeonly storage in a single
+        // compute pass if dispatch command is not called.
         {
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
             pass.SetBindGroup(0, bg);
             pass.End();
+            encoder.Finish();
+        }
+
+        // It is invalid to use the texture as both sampled and writeonly storage in a single
+        // dispatch
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetPipeline(cp);
+            pass.SetBindGroup(0, bg);
+            pass.DispatchWorkgroups(1);
+            pass.End();
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
+    }
+}
 
-        // It is valid to use the texture as both sampled and readonly depth/stencil attachment in
+// Test that it is invalid to use the same texture as both readable and writable depth/stencil
+// attachment in the same render pass. But it is valid to use it as both readable and readonly
+// depth/stencil attachment in the same render pass.
+// Note that depth/stencil attachment is a special render attachment, it can be readonly.
+TEST_F(ResourceUsageTrackingTest, TextureWithSamplingAndDepthStencilAttachment) {
+    // Create a texture
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment,
+                      wgpu::TextureFormat::Depth32Float);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Create a bind group to use the texture as sampled binding
+    wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Depth}});
+    wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+    // Create a render pass to use the texture as a render target
+    utils::ComboRenderPassDescriptor passDescriptor({}, view);
+    passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Load;
+    passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+    passDescriptor.cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+    passDescriptor.cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+
+    // It is invalid to use the texture as both sampled and writeable depth/stencil attachment
+    // in the same pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // It is valid to use the texture as both sampled and readonly depth/stencil attachment in
+    // the same pass
+    {
+        passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
+        passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+        passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test using multiple writable usages on the same texture in a single pass/dispatch
+TEST_F(ResourceUsageTrackingTest, TextureWithMultipleWriteUsage) {
+    // Test render pass
+    {
+        // Create a texture
+        wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding |
+                                              wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create a bind group to use the texture as writeonly storage binding
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+        // It is invalid to use the texture as both writeonly storage and render target in
         // the same pass
         {
-            passDescriptor.cDepthStencilAttachmentInfo.depthReadOnly = true;
-            passDescriptor.cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-            passDescriptor.cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&passDescriptor);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test using multiple writable usages on the same texture in a single pass/dispatch
-    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleWriteUsage) {
-        // Test render pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding |
-                                                  wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create a bind group to use the texture as writeonly storage binding
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
-
-            // It is invalid to use the texture as both writeonly storage and render target in
-            // the same pass
-            {
-                utils::ComboRenderPassDescriptor renderPass({view});
-
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-                pass.SetBindGroup(0, bg);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // It is valid to use multiple writeonly storage usages on the same texture in render
-            // pass
-            {
-                wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view}});
-
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                PlaceholderRenderPass PlaceholderRenderPass(device);
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-                pass.SetBindGroup(0, bg);
-                pass.SetBindGroup(1, bg1);
-                pass.End();
-                encoder.Finish();
-            }
-        }
-
-        // Test compute pass
-        {
-            // Create a texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create a bind group to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat},
-                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // Create a no-op compute pipeline
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // It is valid to use the texture as multiple writeonly storage usages in a single
-            // dispatch
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that a single subresource of a texture cannot be used as a render attachment more than
-    // once in the same pass.
-    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleRenderAttachmentUsage) {
-        // Create a texture with two array layers
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size = {1, 1, 2};
-        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
-        descriptor.format = kFormat;
-
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-        viewDesc.arrayLayerCount = 1;
-
-        wgpu::TextureView viewLayer0 = texture.CreateView(&viewDesc);
-
-        viewDesc.baseArrayLayer = 1;
-        wgpu::TextureView viewLayer1 = texture.CreateView(&viewDesc);
-
-        // Control: It is valid to use layer0 as a render target for one attachment, and
-        // layer1 as the second attachment in the same pass
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer1});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Control: It is valid to use layer0 as a render target in separate passes.
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer0});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass);
-            pass0.End();
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass);
-            pass1.End();
-            encoder.Finish();
-        }
-
-        // It is invalid to use layer0 as a render target for both attachments in the same pass
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer0});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // It is invalid to use layer1 as a render target for both attachments in the same pass
-        {
-            utils::ComboRenderPassDescriptor renderPass({viewLayer1, viewLayer1});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-    }
-
-    // Test that using the same texture as both readable and writable in different passes is
-    // allowed
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInDifferentPasses) {
-        // Test render pass
-        {
-            // Create textures that will be used as both a sampled texture and a render target
-            wgpu::Texture t0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                             wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView v0 = t0.CreateView();
-            wgpu::Texture t1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                             wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView v1 = t1.CreateView();
-
-            // Create bind groups to use the texture as sampled
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, v0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, v1}});
-
-            // Create render passes that will use the textures as render attachments
-            utils::ComboRenderPassDescriptor renderPass0({v1});
-            utils::ComboRenderPassDescriptor renderPass1({v0});
-
-            // Use the textures as both sampled and render attachments in different passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass0);
-            pass0.SetBindGroup(0, bg0);
-            pass0.End();
-
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass1);
-            pass1.SetBindGroup(0, bg1);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test compute pass
-        {
-            // Create a texture that will be used storage texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create bind groups to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Use the textures as both sampled and writeonly storages in different passes
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, readBG);
-            pass0.End();
-
-            wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
-            pass1.SetBindGroup(0, writeBG);
-            pass1.End();
-
-            encoder.Finish();
-        }
-
-        // Test compute pass and render pass mixed together with resource dependency
-        {
-            // Create a texture that will be used a storage texture
-            wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view = texture.CreateView();
-
-            // Create bind groups to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-
-            // Use the texture as writeonly and sampled storage in compute pass and render
-            // pass respectively
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-
-            wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
-            pass0.SetBindGroup(0, writeBG);
-            pass0.End();
-
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass1.SetBindGroup(0, readBG);
-            pass1.End();
-
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same texture as both readable and writable in different
-    // draws in a single render pass. But it is valid in different dispatches in a single compute
-    // pass.
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageOnDifferentDrawsOrDispatches) {
-        // Create a texture that will be used both as a sampled texture and a storage texture
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass
-        {
-            // Create bind groups to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is not allowed to use the same texture as both readable and writable in different
-            // draws within the same render pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetBindGroup(0, sampledBG);
-            pass.Draw(3);
-
-            pass.SetBindGroup(0, writeBG);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            // Create bind groups to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
-            wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
-
-            // It is valid to use the same texture as both readable and writable in different
-            // dispatches within the same compute pass.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-
-            pass.SetPipeline(readCp);
-            pass.SetBindGroup(0, readBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.SetPipeline(writeCp);
-            pass.SetBindGroup(0, writeBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that it is invalid to use the same texture as both readable and writable in a single
-    // draw or dispatch.
-    TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInSingleDrawOrDispatch) {
-        // Create a texture that will be used both as a sampled texture and a storage texture
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass
-        {
-            // Create the bind group to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op render pipeline.
-            wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-
-            // It is invalid to use the same texture as both readable and writable usages in a
-            // single draw
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-
-            pass.SetBindGroup(0, sampledBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.Draw(3);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            // Create the bind group to use the texture as sampled and writeonly storage bindings
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
-            wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
-
-            // It is invalid to use the same texture as both readable and writable usages in a
-            // single dispatch
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.DispatchWorkgroups(1);
-
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-    }
-
-    // Test that using a single texture as copy src/dst and writable/readable usage in pass is
-    // allowed.
-    TEST_F(ResourceUsageTrackingTest, TextureCopyAndTextureUsageInPass) {
-        // Create textures that will be used as both a sampled texture and a render target
-        wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::CopySrc);
-        wgpu::Texture texture1 =
-            CreateTexture(wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
-                          wgpu::TextureUsage::RenderAttachment);
-        wgpu::TextureView view0 = texture0.CreateView();
-        wgpu::TextureView view1 = texture1.CreateView();
-
-        wgpu::ImageCopyTexture srcView = utils::CreateImageCopyTexture(texture0, 0, {0, 0, 0});
-        wgpu::ImageCopyTexture dstView = utils::CreateImageCopyTexture(texture1, 0, {0, 0, 0});
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        // Use the texture as both copy dst and render attachment in render pass
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
-            utils::ComboRenderPassDescriptor renderPass({view1});
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Use the texture as both copy dst and readable usage in compute pass
-        {
-            // Create the bind group to use the texture as sampled
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view1}});
-
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, bg);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
-    }
-
-    // Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
-    // used because they are overwritten by a consecutive call.
-    TEST_F(ResourceUsageTrackingTest, TextureWithMultipleSetBindGroupsOnSameIndex) {
-        // Test render pass
-        {
-            // Create textures that will be used as both a sampled texture and a render target
-            wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                   wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view0 = texture0.CreateView();
-            wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                   wgpu::TextureUsage::RenderAttachment);
-            wgpu::TextureView view1 = texture1.CreateView();
-
-            // Create the bind group to use the texture as sampled
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, view0}});
-            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view1}});
-
-            // Create the render pass that will use the texture as an render attachment
-            utils::ComboRenderPassDescriptor renderPass({view0});
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // No texture is used as both sampled and render attachment in the same pass. But the
-            // overwritten texture still take effect during resource tracking.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-                pass.SetBindGroup(0, bg0);
-                pass.SetBindGroup(0, bg1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // texture0 is used as both sampled and render attachment in the same pass
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-                pass.SetBindGroup(0, bg1);
-                pass.SetBindGroup(0, bg0);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-
-        // Test compute pass
-        {
-            // Create a texture that will be used both as storage texture
-            wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
-                                                   wgpu::TextureUsage::StorageBinding);
-            wgpu::TextureView view0 = texture0.CreateView();
-            wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding);
-            wgpu::TextureView view1 = texture1.CreateView();
-
-            // Create the bind group to use the texture as sampled and writeonly bindings
-            wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-
-            wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-
-            wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, view0}});
-            wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, view0}});
-            wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, view1}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // No texture is used as both sampled and writeonly storage in the same dispatch so
-            // there are no errors.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // Set bind group on the same index twice. The second one overwrites the first one.
-            // texture0 is used as both writeonly and sampled storage in the same dispatch, which
-            // is an error.
-            {
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-                pass.SetBindGroup(0, writeBG0);
-                pass.SetBindGroup(1, readBG1);
-                pass.SetBindGroup(1, readBG0);
-                pass.SetPipeline(cp);
-                pass.DispatchWorkgroups(1);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-        }
-    }
-
-    // Test that it is invalid to have resource usage conflicts even when all bindings are not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictBetweenInvisibleStagesInBindGroup) {
-        // Create texture and texture view
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass for bind group. The conflict of sampled storage and writeonly storage
-        // usage doesn't reside in render related stages at all
-        {
-            // Create a bind group whose bindings are not visible in render pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // These two bindings are invisible in render pass. But we still track these bindings.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, bg);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass for bind group. The conflict of sampled storage and writeonly storage
-        // usage doesn't reside in compute related stage at all
-        {
-            // Create a bind group whose bindings are not visible in compute pass
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
-
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // These two bindings are invisible in compute pass. But we still track these bindings.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
-            pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-    }
-
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // visible to the programmable pass where it is used.
-    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithInvisibleStageInBindGroup) {
-        // Create texture and texture view
-        wgpu::Texture texture =
-            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding |
-                          wgpu::TextureUsage::RenderAttachment);
-        wgpu::TextureView view = texture.CreateView();
-
-        // Test render pass
-        {
-            // Create the render pass that will use the texture as an render attachment
             utils::ComboRenderPassDescriptor renderPass({view});
 
-            // Create a bind group which use the texture as sampled storage in compute stage
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
-
-            // Texture usage in compute stage in bind group conflicts with render target. And
-            // binding for compute stage is not visible in render pass. But we still track this
-            // binding.
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
             pass.SetBindGroup(0, bg);
@@ -1532,162 +949,733 @@
             ASSERT_DEVICE_ERROR(encoder.Finish());
         }
 
-        // Test compute pass
+        // It is valid to use multiple writeonly storage usages on the same texture in render
+        // pass
         {
-            // Create a bind group which contains both fragment and compute stages
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device,
-                {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
-                 {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
-            wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+            wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view}});
 
-            // Create a no-op compute pipeline.
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
-
-            // Texture usage in compute stage conflicts with texture usage in fragment stage. And
-            // binding for fragment stage is not visible in compute pass. But we still track this
-            // invisible binding.
             wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(cp);
+            PlaceholderRenderPass PlaceholderRenderPass(device);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
             pass.SetBindGroup(0, bg);
-            pass.DispatchWorkgroups(1);
+            pass.SetBindGroup(1, bg1);
             pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
+            encoder.Finish();
         }
     }
 
-    // Test that it is invalid to have resource usage conflicts even when one of the bindings is not
-    // used in the pipeline.
-    TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithUnusedPipelineBindings) {
-        // Create texture and texture view
+    // Test compute pass
+    {
+        // Create a texture
+        wgpu::Texture texture = CreateTexture(wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create a bind group to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat},
+             {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // It is valid to use the texture as multiple writeonly storage usages in a single
+        // dispatch
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that a single subresource of a texture cannot be used as a render attachment more than
+// once in the same pass.
+TEST_F(ResourceUsageTrackingTest, TextureWithMultipleRenderAttachmentUsage) {
+    // Create a texture with two array layers
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size = {1, 1, 2};
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    descriptor.format = kFormat;
+
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.arrayLayerCount = 1;
+
+    wgpu::TextureView viewLayer0 = texture.CreateView(&viewDesc);
+
+    viewDesc.baseArrayLayer = 1;
+    wgpu::TextureView viewLayer1 = texture.CreateView(&viewDesc);
+
+    // Control: It is valid to use layer0 as a render target for one attachment, and
+    // layer1 as the second attachment in the same pass
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer1});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Control: It is valid to use layer0 as a render target in separate passes.
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer0});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass);
+        pass0.End();
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass);
+        pass1.End();
+        encoder.Finish();
+    }
+
+    // It is invalid to use layer0 as a render target for both attachments in the same pass
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer0, viewLayer0});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // It is invalid to use layer1 as a render target for both attachments in the same pass
+    {
+        utils::ComboRenderPassDescriptor renderPass({viewLayer1, viewLayer1});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that using the same texture as both readable and writable in different passes is
+// allowed
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInDifferentPasses) {
+    // Test render pass
+    {
+        // Create textures that will be used as both a sampled texture and a render target
+        wgpu::Texture t0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                         wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView v0 = t0.CreateView();
+        wgpu::Texture t1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                         wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView v1 = t1.CreateView();
+
+        // Create bind groups to use the texture as sampled
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, v0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, v1}});
+
+        // Create render passes that will use the textures as render attachments
+        utils::ComboRenderPassDescriptor renderPass0({v1});
+        utils::ComboRenderPassDescriptor renderPass1({v0});
+
+        // Use the textures as both sampled and render attachments in different passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::RenderPassEncoder pass0 = encoder.BeginRenderPass(&renderPass0);
+        pass0.SetBindGroup(0, bg0);
+        pass0.End();
+
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPass1);
+        pass1.SetBindGroup(0, bg1);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test compute pass
+    {
+        // Create a texture that will be used storage texture
         wgpu::Texture texture =
             CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
         wgpu::TextureView view = texture.CreateView();
 
-        // Create bind groups.
+        // Create bind groups to use the texture as sampled and writeonly bindings
         wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::TextureSampleType::Float}});
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
         wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
-                      wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
         wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
         wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
 
-        // Test render pass
+        // Use the textures as both sampled and writeonly storages in different passes
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, readBG);
+        pass0.End();
+
+        wgpu::ComputePassEncoder pass1 = encoder.BeginComputePass();
+        pass1.SetBindGroup(0, writeBG);
+        pass1.End();
+
+        encoder.Finish();
+    }
+
+    // Test compute pass and render pass mixed together with resource dependency
+    {
+        // Create a texture that will be used a storage texture
+        wgpu::Texture texture =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view = texture.CreateView();
+
+        // Create bind groups to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+
+        // Use the texture as writeonly and sampled storage in compute pass and render
+        // pass respectively
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+
+        wgpu::ComputePassEncoder pass0 = encoder.BeginComputePass();
+        pass0.SetBindGroup(0, writeBG);
+        pass0.End();
+
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass1.SetBindGroup(0, readBG);
+        pass1.End();
+
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same texture as both readable and writable in different
+// draws in a single render pass. But it is valid in different dispatches in a single compute
+// pass.
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageOnDifferentDrawsOrDispatches) {
+    // Create a texture that will be used both as a sampled texture and a storage texture
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass
+    {
+        // Create bind groups to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is not allowed to use the same texture as both readable and writable in different
+        // draws within the same render pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetBindGroup(0, sampledBG);
+        pass.Draw(3);
+
+        pass.SetBindGroup(0, writeBG);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create bind groups to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+        wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
+
+        // It is valid to use the same texture as both readable and writable in different
+        // dispatches within the same compute pass.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+
+        pass.SetPipeline(readCp);
+        pass.SetBindGroup(0, readBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.SetPipeline(writeCp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that it is invalid to use the same texture as both readable and writable in a single
+// draw or dispatch.
+TEST_F(ResourceUsageTrackingTest, TextureWithReadAndWriteUsageInSingleDrawOrDispatch) {
+    // Create a texture that will be used both as a sampled texture and a storage texture
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass
+    {
+        // Create the bind group to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout sampledBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup sampledBG = utils::MakeBindGroup(device, sampledBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op render pipeline.
+        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+
+        // It is invalid to use the same texture as both readable and writable usages in a
+        // single draw
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+
+        pass.SetBindGroup(0, sampledBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.Draw(3);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create the bind group to use the texture as sampled and writeonly storage bindings
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL, writeBGL});
+
+        // It is invalid to use the same texture as both readable and writable usages in a
+        // single dispatch
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.DispatchWorkgroups(1);
+
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that using a single texture as copy src/dst and writable/readable usage in pass is
+// allowed.
+TEST_F(ResourceUsageTrackingTest, TextureCopyAndTextureUsageInPass) {
+    // Create textures that will be used as copy src/dst, sampled texture and render attachment
+    wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::CopySrc);
+    wgpu::Texture texture1 =
+        CreateTexture(wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding |
+                      wgpu::TextureUsage::RenderAttachment);
+    wgpu::TextureView view0 = texture0.CreateView();
+    wgpu::TextureView view1 = texture1.CreateView();
+
+    wgpu::ImageCopyTexture srcView = utils::CreateImageCopyTexture(texture0, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture dstView = utils::CreateImageCopyTexture(texture1, 0, {0, 0, 0});
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    // Use the texture as both copy dst and render attachment in render pass
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
+        utils::ComboRenderPassDescriptor renderPass({view1});
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Use the texture as both copy dst and readable usage in compute pass
+    {
+        // Create the bind group to use the texture as sampled
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view1}});
+
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&srcView, &dstView, &copySize);
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, bg);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
+    }
+}
+
+// Test that all consecutive SetBindGroup()s take effect even though some bind groups are not
+// used because they are overwritten by a consecutive call.
+TEST_F(ResourceUsageTrackingTest, TextureWithMultipleSetBindGroupsOnSameIndex) {
+    // Test render pass
+    {
+        // Create textures that will be used as both a sampled texture and a render target
+        wgpu::Texture texture0 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                               wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view0 = texture0.CreateView();
+        wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding |
+                                               wgpu::TextureUsage::RenderAttachment);
+        wgpu::TextureView view1 = texture1.CreateView();
+
+        // Create the bind group to use the texture as sampled
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg0 = utils::MakeBindGroup(device, bgl, {{0, view0}});
+        wgpu::BindGroup bg1 = utils::MakeBindGroup(device, bgl, {{0, view1}});
+
+        // Create the render pass that will use the texture as a render attachment
+        utils::ComboRenderPassDescriptor renderPass({view0});
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // No texture is used as both sampled and render attachment in the same pass. But the
+        // overwritten texture still takes effect during resource tracking.
         {
-            // Create a passthrough render pipeline with a sampled storage texture
-            wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetBindGroup(0, bg0);
+            pass.SetBindGroup(0, bg1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // texture0 is used as both sampled and render attachment in the same pass
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+            pass.SetBindGroup(0, bg1);
+            pass.SetBindGroup(0, bg0);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+
+    // Test compute pass
+    {
+        // Create a texture that will be used as both a sampled and a storage texture
+        wgpu::Texture texture0 =
+            CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+        wgpu::TextureView view0 = texture0.CreateView();
+        wgpu::Texture texture1 = CreateTexture(wgpu::TextureUsage::TextureBinding);
+        wgpu::TextureView view1 = texture1.CreateView();
+
+        // Create the bind group to use the texture as sampled and writeonly bindings
+        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+
+        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+
+        wgpu::BindGroup writeBG0 = utils::MakeBindGroup(device, writeBGL, {{0, view0}});
+        wgpu::BindGroup readBG0 = utils::MakeBindGroup(device, readBGL, {{0, view0}});
+        wgpu::BindGroup readBG1 = utils::MakeBindGroup(device, readBGL, {{0, view1}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({writeBGL, readBGL});
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // No texture is used as both sampled and writeonly storage in the same dispatch so
+        // there are no errors.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetBindGroup(1, readBG1);
+            pass.SetPipeline(cp);
+            pass.DispatchWorkgroups(1);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // Set bind group on the same index twice. The second one overwrites the first one.
+        // texture0 is used as both writeonly and sampled storage in the same dispatch, which
+        // is an error.
+        {
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+            pass.SetBindGroup(0, writeBG0);
+            pass.SetBindGroup(1, readBG1);
+            pass.SetBindGroup(1, readBG0);
+            pass.SetPipeline(cp);
+            pass.DispatchWorkgroups(1);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when all bindings are not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, TextureUsageConflictBetweenInvisibleStagesInBindGroup) {
+    // Create texture and texture view
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass for bind group. The conflict of sampled storage and writeonly storage
+    // usage doesn't reside in render related stages at all
+    {
+        // Create a bind group whose bindings are not visible in render pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float},
+                     {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // These two bindings are invisible in render pass. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass for bind group. The conflict of sampled storage and writeonly storage
+    // usage doesn't reside in compute related stage at all
+    {
+        // Create a bind group whose bindings are not visible in compute pass
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
+                     {1, wgpu::ShaderStage::None, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // These two bindings are invisible in compute pass. But we still track these bindings.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// visible to the programmable pass where it is used.
+TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithInvisibleStageInBindGroup) {
+    // Create texture and texture view
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding |
+                      wgpu::TextureUsage::RenderAttachment);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Test render pass
+    {
+        // Create the render pass that will use the texture as a render attachment
+        utils::ComboRenderPassDescriptor renderPass({view});
+
+        // Create a bind group which use the texture as sampled storage in compute stage
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Compute, wgpu::TextureSampleType::Float}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}});
+
+        // Texture usage in compute stage in bind group conflicts with render target. And
+        // binding for compute stage is not visible in render pass. But we still track this
+        // binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.SetBindGroup(0, bg);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test compute pass
+    {
+        // Create a bind group which contains both fragment and compute stages
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device,
+            {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float},
+             {1, wgpu::ShaderStage::Compute, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+        wgpu::BindGroup bg = utils::MakeBindGroup(device, bgl, {{0, view}, {1, view}});
+
+        // Create a no-op compute pipeline.
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({bgl});
+
+        // Texture usage in compute stage conflicts with texture usage in fragment stage. And
+        // binding for fragment stage is not visible in compute pass. But we still track this
+        // invisible binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(cp);
+        pass.SetBindGroup(0, bg);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
+
+// Test that it is invalid to have resource usage conflicts even when one of the bindings is not
+// used in the pipeline.
+TEST_F(ResourceUsageTrackingTest, TextureUsageConflictWithUnusedPipelineBindings) {
+    // Create texture and texture view
+    wgpu::Texture texture =
+        CreateTexture(wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::StorageBinding);
+    wgpu::TextureView view = texture.CreateView();
+
+    // Create bind groups.
+    wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::TextureSampleType::Float}});
+    wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment | wgpu::ShaderStage::Compute,
+                  wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+    wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, view}});
+    wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, view}});
+
+    // Test render pass
+    {
+        // Create a passthrough render pipeline with a sampled storage texture
+        wgpu::ShaderModule vsModule = utils::CreateShaderModule(device, R"(
                 @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                     return vec4<f32>();
                 })");
 
-            wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
+        wgpu::ShaderModule fsModule = utils::CreateShaderModule(device, R"(
                 @group(0) @binding(0) var tex : texture_2d<f32>;
                 @stage(fragment) fn main() {
                 })");
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-            pipelineDescriptor.vertex.module = vsModule;
-            pipelineDescriptor.cFragment.module = fsModule;
-            pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
-            pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &readBGL);
-            wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
+        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+        pipelineDescriptor.vertex.module = vsModule;
+        pipelineDescriptor.cFragment.module = fsModule;
+        pipelineDescriptor.cTargets[0].writeMask = wgpu::ColorWriteMask::None;
+        pipelineDescriptor.layout = utils::MakeBasicPipelineLayout(device, &readBGL);
+        wgpu::RenderPipeline rp = device.CreateRenderPipeline(&pipelineDescriptor);
 
-            // Texture binding in readBG conflicts with texture binding in writeBG. The binding
-            // in writeBG is not used in pipeline. But we still track this binding.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.SetPipeline(rp);
-            pass.Draw(3);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test compute pass
-        {
-            wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL});
-
-            // Texture binding in readBG conflicts with texture binding in writeBG. The binding
-            // in writeBG is not used in pipeline's layout so it isn't an error.
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetBindGroup(0, readBG);
-            pass.SetBindGroup(1, writeBG);
-            pass.SetPipeline(cp);
-            pass.DispatchWorkgroups(1);
-            pass.End();
-            encoder.Finish();
-        }
+        // Texture binding in readBG conflicts with texture binding in writeBG. The binding
+        // in writeBG is not used in pipeline. But we still track this binding.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.SetPipeline(rp);
+        pass.Draw(3);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test that using an indirect buffer is disallowed with a writable usage (like storage) but
-    // allowed with a readable usage (like readonly storage).
-    TEST_F(ResourceUsageTrackingTest, IndirectBufferWithReadOrWriteStorage) {
-        wgpu::Buffer buffer =
-            CreateBuffer(20, wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage);
+    // Test compute pass
+    {
+        wgpu::ComputePipeline cp = CreateNoOpComputePipeline({readBGL});
 
-        wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
-        wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
-
-        wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
-        wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
-
-        // Test pipelines
-        wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
-        wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
-        wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
-
-        // Test that indirect + readonly is allowed in the same render pass.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-            pass.SetBindGroup(0, readBG);
-            pass.DrawIndirect(buffer, 0);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Test that indirect + writable is disallowed in the same render pass.
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            PlaceholderRenderPass PlaceholderRenderPass(device);
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
-            pass.SetPipeline(rp);
-            pass.SetBindGroup(0, writeBG);
-            pass.DrawIndirect(buffer, 0);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        // Test that indirect + readonly is allowed in the same dispatch
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(readCp);
-            pass.SetBindGroup(0, readBG);
-            pass.DispatchWorkgroupsIndirect(buffer, 0);
-            pass.End();
-            encoder.Finish();
-        }
-
-        // Test that indirect + writable is disallowed in the same dispatch
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
-            pass.SetPipeline(writeCp);
-            pass.SetBindGroup(0, writeBG);
-            pass.DispatchWorkgroupsIndirect(buffer, 0);
-            pass.End();
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+        // Texture binding in readBG conflicts with texture binding in writeBG. The binding
+        // in writeBG is not used in pipeline's layout so it isn't an error.
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetBindGroup(0, readBG);
+        pass.SetBindGroup(1, writeBG);
+        pass.SetPipeline(cp);
+        pass.DispatchWorkgroups(1);
+        pass.End();
+        encoder.Finish();
     }
+}
+
+// Test that using an indirect buffer is disallowed with a writable usage (like storage) but
+// allowed with a readable usage (like readonly storage).
+TEST_F(ResourceUsageTrackingTest, IndirectBufferWithReadOrWriteStorage) {
+    wgpu::Buffer buffer =
+        CreateBuffer(20, wgpu::BufferUsage::Indirect | wgpu::BufferUsage::Storage);
+
+    wgpu::BindGroupLayout readBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::ReadOnlyStorage}});
+    wgpu::BindGroupLayout writeBGL = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Compute, wgpu::BufferBindingType::Storage}});
+
+    wgpu::BindGroup readBG = utils::MakeBindGroup(device, readBGL, {{0, buffer}});
+    wgpu::BindGroup writeBG = utils::MakeBindGroup(device, writeBGL, {{0, buffer}});
+
+    // Test pipelines
+    wgpu::RenderPipeline rp = CreateNoOpRenderPipeline();
+    wgpu::ComputePipeline readCp = CreateNoOpComputePipeline({readBGL});
+    wgpu::ComputePipeline writeCp = CreateNoOpComputePipeline({writeBGL});
+
+    // Test that indirect + readonly is allowed in the same render pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+        pass.SetBindGroup(0, readBG);
+        pass.DrawIndirect(buffer, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Test that indirect + writable is disallowed in the same render pass.
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        PlaceholderRenderPass PlaceholderRenderPass(device);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&PlaceholderRenderPass);
+        pass.SetPipeline(rp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DrawIndirect(buffer, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+
+    // Test that indirect + readonly is allowed in the same dispatch
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(readCp);
+        pass.SetBindGroup(0, readBG);
+        pass.DispatchWorkgroupsIndirect(buffer, 0);
+        pass.End();
+        encoder.Finish();
+    }
+
+    // Test that indirect + writable is disallowed in the same dispatch
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::ComputePassEncoder pass = encoder.BeginComputePass();
+        pass.SetPipeline(writeCp);
+        pass.SetBindGroup(0, writeBG);
+        pass.DispatchWorkgroupsIndirect(buffer, 0);
+        pass.End();
+        ASSERT_DEVICE_ERROR(encoder.Finish());
+    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp b/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
index b304d11..8fa577a 100644
--- a/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/SamplerValidationTests.cpp
@@ -20,105 +20,105 @@
 
 namespace {
 
-    class SamplerValidationTest : public ValidationTest {};
+class SamplerValidationTest : public ValidationTest {};
 
-    // Test NaN and INFINITY values are not allowed
-    TEST_F(SamplerValidationTest, InvalidLOD) {
-        { device.CreateSampler(); }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMinClamp = NAN;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMaxClamp = NAN;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMaxClamp = INFINITY;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc;
-            samplerDesc.lodMaxClamp = INFINITY;
-            samplerDesc.lodMinClamp = INFINITY;
-            device.CreateSampler(&samplerDesc);
-        }
+// Test NaN and INFINITY values are not allowed
+TEST_F(SamplerValidationTest, InvalidLOD) {
+    { device.CreateSampler(); }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMinClamp = NAN;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
     }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMaxClamp = NAN;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMaxClamp = INFINITY;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc;
+        samplerDesc.lodMaxClamp = INFINITY;
+        samplerDesc.lodMinClamp = INFINITY;
+        device.CreateSampler(&samplerDesc);
+    }
+}
 
-    TEST_F(SamplerValidationTest, InvalidFilterAnisotropic) {
-        wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
-        kValidAnisoSamplerDesc.maxAnisotropy = 2;
-        kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
-        {
-            // when maxAnisotropy > 1, min, mag, mipmap filter should be linear
-            device.CreateSampler(&kValidAnisoSamplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.minFilter = wgpu::FilterMode::Nearest;
-            samplerDesc.magFilter = wgpu::FilterMode::Nearest;
-            samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.minFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.magFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
-            ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
-        }
+TEST_F(SamplerValidationTest, InvalidFilterAnisotropic) {
+    wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
+    kValidAnisoSamplerDesc.maxAnisotropy = 2;
+    kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
+    {
+        // when maxAnisotropy > 1, min, mag, mipmap filter should be linear
+        device.CreateSampler(&kValidAnisoSamplerDesc);
     }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.minFilter = wgpu::FilterMode::Nearest;
+        samplerDesc.magFilter = wgpu::FilterMode::Nearest;
+        samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.minFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.magFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.mipmapFilter = wgpu::FilterMode::Nearest;
+        ASSERT_DEVICE_ERROR(device.CreateSampler(&samplerDesc));
+    }
+}
 
-    TEST_F(SamplerValidationTest, ValidFilterAnisotropic) {
-        wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
-        kValidAnisoSamplerDesc.maxAnisotropy = 2;
-        kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
-        kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
-        { device.CreateSampler(); }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 16;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 32;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0x7FFF;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0x8000;
-            device.CreateSampler(&samplerDesc);
-        }
-        {
-            wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
-            samplerDesc.maxAnisotropy = 0xFFFF;
-            device.CreateSampler(&samplerDesc);
-        }
+TEST_F(SamplerValidationTest, ValidFilterAnisotropic) {
+    wgpu::SamplerDescriptor kValidAnisoSamplerDesc = {};
+    kValidAnisoSamplerDesc.maxAnisotropy = 2;
+    kValidAnisoSamplerDesc.minFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.magFilter = wgpu::FilterMode::Linear;
+    kValidAnisoSamplerDesc.mipmapFilter = wgpu::FilterMode::Linear;
+    { device.CreateSampler(); }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 16;
+        device.CreateSampler(&samplerDesc);
     }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 32;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0x7FFF;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0x8000;
+        device.CreateSampler(&samplerDesc);
+    }
+    {
+        wgpu::SamplerDescriptor samplerDesc = kValidAnisoSamplerDesc;
+        samplerDesc.maxAnisotropy = 0xFFFF;
+        device.CreateSampler(&samplerDesc);
+    }
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp b/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
index 16e443a..02b7979 100644
--- a/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/ShaderModuleValidationTests.cpp
@@ -15,9 +15,9 @@
 #include <sstream>
 #include <string>
 
-#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/common/Constants.h"
 #include "dawn/native/ShaderModule.h"
+#include "dawn/tests/unittests/validation/ValidationTest.h"
 #include "dawn/utils/ComboRenderPipelineDescriptor.h"
 #include "dawn/utils/WGPUHelpers.h"
 
diff --git a/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp b/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
index 4986d98..78f5b66 100644
--- a/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
+++ b/src/dawn/tests/unittests/validation/TextureSubresourceTests.cpp
@@ -18,119 +18,118 @@
 
 namespace {
 
-    class TextureSubresourceTest : public ValidationTest {
-      public:
-        static constexpr uint32_t kSize = 32u;
-        static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
+class TextureSubresourceTest : public ValidationTest {
+  public:
+    static constexpr uint32_t kSize = 32u;
+    static constexpr wgpu::TextureFormat kFormat = wgpu::TextureFormat::RGBA8Unorm;
 
-        wgpu::Texture CreateTexture(uint32_t mipLevelCount,
-                                    uint32_t arrayLayerCount,
-                                    wgpu::TextureUsage usage) {
-            wgpu::TextureDescriptor texDesc;
-            texDesc.dimension = wgpu::TextureDimension::e2D;
-            texDesc.size = {kSize, kSize, arrayLayerCount};
-            texDesc.sampleCount = 1;
-            texDesc.mipLevelCount = mipLevelCount;
-            texDesc.usage = usage;
-            texDesc.format = kFormat;
-            return device.CreateTexture(&texDesc);
-        }
-
-        wgpu::TextureView CreateTextureView(wgpu::Texture texture,
-                                            uint32_t baseMipLevel,
-                                            uint32_t baseArrayLayer) {
-            wgpu::TextureViewDescriptor viewDesc;
-            viewDesc.format = kFormat;
-            viewDesc.baseArrayLayer = baseArrayLayer;
-            viewDesc.arrayLayerCount = 1;
-            viewDesc.baseMipLevel = baseMipLevel;
-            viewDesc.mipLevelCount = 1;
-            viewDesc.dimension = wgpu::TextureViewDimension::e2D;
-            return texture.CreateView(&viewDesc);
-        }
-
-        void TestRenderPass(const wgpu::TextureView& renderView,
-                            const wgpu::TextureView& samplerView) {
-            // Create bind group
-            wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
-                device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
-
-            utils::ComboRenderPassDescriptor renderPassDesc({renderView});
-
-            // It is valid to read from and write into different subresources of the same texture
-            {
-                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-                encoder.Finish();
-            }
-
-            // It is not currently possible to test that it is valid to have multiple reads from a
-            // subresource while there is a single write in another subresource.
-
-            // It is invalid to read and write into the same subresources
-            {
-                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, renderView}});
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-                ASSERT_DEVICE_ERROR(encoder.Finish());
-            }
-
-            // It is valid to write into and then read from the same level of a texture in different
-            // render passes
-            {
-                wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
-
-                wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
-                    device, {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly,
-                              kFormat}});
-                wgpu::BindGroup bindGroup1 = utils::MakeBindGroup(device, bgl1, {{0, samplerView}});
-
-                wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-                wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPassDesc);
-                pass1.SetBindGroup(0, bindGroup1);
-                pass1.End();
-
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
-                pass.SetBindGroup(0, bindGroup);
-                pass.End();
-
-                encoder.Finish();
-            }
-        }
-    };
-
-    // Test different mipmap levels
-    TEST_F(TextureSubresourceTest, MipmapLevelsTest) {
-        // Create texture with 2 mipmap levels and 1 layer
-        wgpu::Texture texture = CreateTexture(2, 1,
-                                              wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::RenderAttachment |
-                                                  wgpu::TextureUsage::StorageBinding);
-
-        // Create two views on different mipmap levels.
-        wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
-        wgpu::TextureView renderView = CreateTextureView(texture, 1, 0);
-        TestRenderPass(samplerView, renderView);
+    wgpu::Texture CreateTexture(uint32_t mipLevelCount,
+                                uint32_t arrayLayerCount,
+                                wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor texDesc;
+        texDesc.dimension = wgpu::TextureDimension::e2D;
+        texDesc.size = {kSize, kSize, arrayLayerCount};
+        texDesc.sampleCount = 1;
+        texDesc.mipLevelCount = mipLevelCount;
+        texDesc.usage = usage;
+        texDesc.format = kFormat;
+        return device.CreateTexture(&texDesc);
     }
 
-    // Test different array layers
-    TEST_F(TextureSubresourceTest, ArrayLayersTest) {
-        // Create texture with 1 mipmap level and 2 layers
-        wgpu::Texture texture = CreateTexture(1, 2,
-                                              wgpu::TextureUsage::TextureBinding |
-                                                  wgpu::TextureUsage::RenderAttachment |
-                                                  wgpu::TextureUsage::StorageBinding);
-
-        // Create two views on different layers.
-        wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
-        wgpu::TextureView renderView = CreateTextureView(texture, 0, 1);
-
-        TestRenderPass(samplerView, renderView);
+    wgpu::TextureView CreateTextureView(wgpu::Texture texture,
+                                        uint32_t baseMipLevel,
+                                        uint32_t baseArrayLayer) {
+        wgpu::TextureViewDescriptor viewDesc;
+        viewDesc.format = kFormat;
+        viewDesc.baseArrayLayer = baseArrayLayer;
+        viewDesc.arrayLayerCount = 1;
+        viewDesc.baseMipLevel = baseMipLevel;
+        viewDesc.mipLevelCount = 1;
+        viewDesc.dimension = wgpu::TextureViewDimension::e2D;
+        return texture.CreateView(&viewDesc);
     }
 
+    void TestRenderPass(const wgpu::TextureView& renderView, const wgpu::TextureView& samplerView) {
+        // Create bind group
+        wgpu::BindGroupLayout bgl = utils::MakeBindGroupLayout(
+            device, {{0, wgpu::ShaderStage::Vertex, wgpu::TextureSampleType::Float}});
+
+        utils::ComboRenderPassDescriptor renderPassDesc({renderView});
+
+        // It is valid to read from and write into different subresources of the same texture
+        {
+            wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+            encoder.Finish();
+        }
+
+        // It is not currently possible to test that it is valid to have multiple reads from a
+        // subresource while there is a single write in another subresource.
+
+        // It is invalid to read and write into the same subresources
+        {
+            wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, renderView}});
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+            ASSERT_DEVICE_ERROR(encoder.Finish());
+        }
+
+        // It is valid to write into and then read from the same level of a texture in different
+        // render passes
+        {
+            wgpu::BindGroup bindGroup = utils::MakeBindGroup(device, bgl, {{0, samplerView}});
+
+            wgpu::BindGroupLayout bgl1 = utils::MakeBindGroupLayout(
+                device,
+                {{0, wgpu::ShaderStage::Fragment, wgpu::StorageTextureAccess::WriteOnly, kFormat}});
+            wgpu::BindGroup bindGroup1 = utils::MakeBindGroup(device, bgl1, {{0, samplerView}});
+
+            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+            wgpu::RenderPassEncoder pass1 = encoder.BeginRenderPass(&renderPassDesc);
+            pass1.SetBindGroup(0, bindGroup1);
+            pass1.End();
+
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+            pass.SetBindGroup(0, bindGroup);
+            pass.End();
+
+            encoder.Finish();
+        }
+    }
+};
+
+// Test different mipmap levels
+TEST_F(TextureSubresourceTest, MipmapLevelsTest) {
+    // Create texture with 2 mipmap levels and 1 layer
+    wgpu::Texture texture =
+        CreateTexture(2, 1,
+                      wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                          wgpu::TextureUsage::StorageBinding);
+
+    // Create two views on different mipmap levels.
+    wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+    wgpu::TextureView renderView = CreateTextureView(texture, 1, 0);
+    TestRenderPass(samplerView, renderView);
+}
+
+// Test different array layers
+TEST_F(TextureSubresourceTest, ArrayLayersTest) {
+    // Create texture with 1 mipmap level and 2 layers
+    wgpu::Texture texture =
+        CreateTexture(1, 2,
+                      wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                          wgpu::TextureUsage::StorageBinding);
+
+    // Create two views on different layers.
+    wgpu::TextureView samplerView = CreateTextureView(texture, 0, 0);
+    wgpu::TextureView renderView = CreateTextureView(texture, 0, 1);
+
+    TestRenderPass(samplerView, renderView);
+}
+
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/TextureValidationTests.cpp b/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
index ac0192e..3de9c273 100644
--- a/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/TextureValidationTests.cpp
@@ -22,890 +22,888 @@
 
 namespace {
 
-    constexpr wgpu::TextureFormat kNonRenderableColorFormats[] = {
-        wgpu::TextureFormat::RG11B10Ufloat, wgpu::TextureFormat::RGB9E5Ufloat,
-        wgpu::TextureFormat::R8Snorm,       wgpu::TextureFormat::RG8Snorm,
-        wgpu::TextureFormat::RGBA8Snorm,
-    };
+constexpr wgpu::TextureFormat kNonRenderableColorFormats[] = {
+    wgpu::TextureFormat::RG11B10Ufloat, wgpu::TextureFormat::RGB9E5Ufloat,
+    wgpu::TextureFormat::R8Snorm,       wgpu::TextureFormat::RG8Snorm,
+    wgpu::TextureFormat::RGBA8Snorm,
+};
 
-    wgpu::TextureDimension kDimensions[] = {
-        wgpu::TextureDimension::e1D,
-        wgpu::TextureDimension::e3D,
-    };
+wgpu::TextureDimension kDimensions[] = {
+    wgpu::TextureDimension::e1D,
+    wgpu::TextureDimension::e3D,
+};
 
-    class TextureValidationTest : public ValidationTest {
-      protected:
-        void SetUp() override {
-            ValidationTest::SetUp();
+class TextureValidationTest : public ValidationTest {
+  protected:
+    void SetUp() override {
+        ValidationTest::SetUp();
 
-            queue = device.GetQueue();
-        }
-
-        wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.size.width = kWidth;
-            descriptor.size.height = kHeight;
-            descriptor.size.depthOrArrayLayers = kDefaultDepth;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            descriptor.sampleCount = kDefaultSampleCount;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.format = kDefaultTextureFormat;
-            descriptor.usage =
-                wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
-            return descriptor;
-        }
-
-        wgpu::Queue queue;
-
-      private:
-        // Choose the LCM of all current compressed texture format texel dimensions as the
-        // dimensions of the default texture.
-        static constexpr uint32_t kWidth = 120;
-        static constexpr uint32_t kHeight = 120;
-        static constexpr uint32_t kDefaultDepth = 1;
-        static constexpr uint32_t kDefaultMipLevels = 1;
-        static constexpr uint32_t kDefaultSampleCount = 1;
-
-        static constexpr wgpu::TextureFormat kDefaultTextureFormat =
-            wgpu::TextureFormat::RGBA8Unorm;
-    };
-
-    // Test the validation of non-zero texture usage
-    TEST_F(TextureValidationTest, UsageNonZero) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-
-        // Descriptor with proper usage is allowed
-        {
-            descriptor.usage = wgpu::TextureUsage::RenderAttachment;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // It is an error to create a texture with zero usage
-        {
-            descriptor.usage = wgpu::TextureUsage::None;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+        queue = device.GetQueue();
     }
 
-    // Test the validation of sample count
-    TEST_F(TextureValidationTest, SampleCount) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-
-        // sampleCount == 1 is allowed.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 1;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // sampleCount == 4 is allowed.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // It is an error to create a texture with an invalid sampleCount.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 3;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to create a multisampled texture with mipLevelCount > 1.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.mipLevelCount = 2;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to create a multisampled 1D or 3D texture.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-
-            descriptor.size.height = 1;
-            descriptor.dimension = wgpu::TextureDimension::e1D;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.dimension = wgpu::TextureDimension::e3D;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to create a multisample texture when the format cannot support
-        // multisample.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.usage = wgpu::TextureUsage::TextureBinding;
-
-            for (wgpu::TextureFormat format : utils::kFormatsInCoreSpec) {
-                descriptor.format = format;
-                if (utils::TextureFormatSupportsMultisampling(format)) {
-                    device.CreateTexture(&descriptor);
-                } else {
-                    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-                }
-            }
-        }
-
-        // Currently we do not support multisampled 2D textures with depth > 1.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.size.depthOrArrayLayers = 2;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // It is an error to set TextureUsage::StorageBinding when sampleCount > 1.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.sampleCount = 4;
-            descriptor.usage |= wgpu::TextureUsage::StorageBinding;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+    wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.size.width = kWidth;
+        descriptor.size.height = kHeight;
+        descriptor.size.depthOrArrayLayers = kDefaultDepth;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        descriptor.sampleCount = kDefaultSampleCount;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.format = kDefaultTextureFormat;
+        descriptor.usage =
+            wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::TextureBinding;
+        return descriptor;
     }
 
-    // Test the validation of the mip level count
-    TEST_F(TextureValidationTest, MipLevelCount) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+    wgpu::Queue queue;
 
-        // mipLevelCount == 1 is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            descriptor.mipLevelCount = 1;
+  private:
+    // Choose the LCM of all current compressed texture format texel dimensions as the
+    // dimensions of the default texture.
+    static constexpr uint32_t kWidth = 120;
+    static constexpr uint32_t kHeight = 120;
+    static constexpr uint32_t kDefaultDepth = 1;
+    static constexpr uint32_t kDefaultMipLevels = 1;
+    static constexpr uint32_t kDefaultSampleCount = 1;
 
-            device.CreateTexture(&descriptor);
-        }
+    static constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+};
 
-        // mipLevelCount == 0 is an error
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            descriptor.mipLevelCount = 0;
+// Test the validation of non-zero texture usage
+TEST_F(TextureValidationTest, UsageNonZero) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
 
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+    // Descriptor with proper usage is allowed
+    {
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
 
-        // Full mip chains are allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            // Mip level sizes: 32, 16, 8, 4, 2, 1
-            descriptor.mipLevelCount = 6;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // Test non-power-of-two width
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            // Mip level width: 31, 15, 7, 3, 1
-            descriptor.size.width = 31;
-            descriptor.size.height = 4;
-
-            // Full mip chains on non-power-of-two width are allowed
-            descriptor.mipLevelCount = 5;
-            device.CreateTexture(&descriptor);
-
-            // Too big mip chains on non-power-of-two width are disallowed
-            descriptor.mipLevelCount = 6;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Test non-power-of-two height
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 4;
-            // Mip level height: 31, 15, 7, 3, 1
-            descriptor.size.height = 31;
-
-            // Full mip chains on non-power-of-two height are allowed
-            descriptor.mipLevelCount = 5;
-            device.CreateTexture(&descriptor);
-
-            // Too big mip chains on non-power-of-two height are disallowed
-            descriptor.mipLevelCount = 6;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Undefined shift check if miplevel is bigger than the integer bit width.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 32;
-            descriptor.mipLevelCount = 100;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Non square mip map halves the resolution until a 1x1 dimension
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 8;
-            // Mip maps: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 * 1, 1 * 1
-            descriptor.mipLevelCount = 6;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // Non square mip map for a 3D textures
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 8;
-            descriptor.size.depthOrArrayLayers = 64;
-            descriptor.dimension = wgpu::TextureDimension::e3D;
-            // Non square mip map halves width, height and depth until a 1x1x1 dimension for a 3D
-            // texture. So there are 7 mipmaps at most: 32 * 8 * 64, 16 * 4 * 32, 8 * 2 * 16,
-            // 4 * 1 * 8, 2 * 1 * 4, 1 * 1 * 2, 1 * 1 * 1.
-            descriptor.mipLevelCount = 7;
-            device.CreateTexture(&descriptor);
-        }
-
-        // Non square mip map for 2D textures with depth > 1
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = 32;
-            descriptor.size.height = 8;
-            descriptor.size.depthOrArrayLayers = 64;
-            // Non square mip map halves width and height until a 1x1 dimension for a 2D texture,
-            // even its depth > 1. So there are 6 mipmaps at most: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 *
-            // 1, 1 * 1.
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.mipLevelCount = 7;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            descriptor.mipLevelCount = 6;
-            device.CreateTexture(&descriptor);
-        }
-
-        // Mip level equal to the maximum for a 2D texture is allowed
-        {
-            uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = maxTextureDimension2D;
-            descriptor.size.height = maxTextureDimension2D;
-            descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 1u;
-
-            device.CreateTexture(&descriptor);
-        }
-
-        // Mip level exceeding the maximum for a 2D texture not allowed
-        {
-            uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = maxTextureDimension2D;
-            descriptor.size.height = maxTextureDimension2D;
-            descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 2u;
-
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // 1D textures can only have a single mip level.
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.dimension = wgpu::TextureDimension::e1D;
-            descriptor.size.width = 32;
-            descriptor.size.height = 1;
-
-            // Having a single mip level is allowed.
-            descriptor.mipLevelCount = 1;
-            device.CreateTexture(&descriptor);
-
-            // Having more than 1 is an error.
-            descriptor.mipLevelCount = 2;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+        device.CreateTexture(&descriptor);
     }
 
-    // Test the validation of array layer count
-    TEST_F(TextureValidationTest, ArrayLayerCount) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // It is an error to create a texture with zero usage
+    {
+        descriptor.usage = wgpu::TextureUsage::None;
 
-        // Array layer count exceeding maxTextureArrayLayers is not allowed for 2D texture
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
 
-            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+// Test the validation of sample count
+TEST_F(TextureValidationTest, SampleCount) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
 
-        // Array layer count less than maxTextureArrayLayers is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers >> 1;
-            device.CreateTexture(&descriptor);
-        }
+    // sampleCount == 1 is allowed.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 1;
 
-        // Array layer count equal to maxTextureArrayLayers is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers;
-            device.CreateTexture(&descriptor);
-        }
+        device.CreateTexture(&descriptor);
     }
 
-    // Test the validation of 1D texture size
-    TEST_F(TextureValidationTest, 1DTextureSize) {
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // sampleCount == 4 is allowed.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
 
-        wgpu::TextureDescriptor defaultDescriptor;
-        defaultDescriptor.size = {4, 1, 1};
-        defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
-        defaultDescriptor.usage = wgpu::TextureUsage::CopySrc;
-        defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-
-        // Width must be in [1, kMaxTextureDimension1D]
-        {
-            wgpu::TextureDescriptor desc = defaultDescriptor;
-            desc.size.width = 0;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-            desc.size.width = 1;
-            device.CreateTexture(&desc);
-
-            desc.size.width = supportedLimits.maxTextureDimension1D;
-            device.CreateTexture(&desc);
-            desc.size.width = supportedLimits.maxTextureDimension1D + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-        }
-
-        // Height must be 1
-        {
-            wgpu::TextureDescriptor desc = defaultDescriptor;
-            desc.size.height = 2;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-
-            desc.size.height = 0;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-        }
-
-        // DepthOrArrayLayers must be 1
-        {
-            wgpu::TextureDescriptor desc = defaultDescriptor;
-            desc.size.depthOrArrayLayers = 2;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-
-            desc.size.depthOrArrayLayers = 0;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
-        }
+        device.CreateTexture(&descriptor);
     }
 
-    // Test the validation of 2D texture size
-    TEST_F(TextureValidationTest, 2DTextureSize) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // It is an error to create a texture with an invalid sampleCount.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 3;
 
-        // Out-of-bound texture dimension is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = supportedLimits.maxTextureDimension2D + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size.width = 1;
-            descriptor.size.height = supportedLimits.maxTextureDimension2D + 1u;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Zero-sized texture is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size = {0, 1, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 0, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 1, 0};
-            // 2D texture with depth=0 is not allowed
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Texture size less than max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = supportedLimits.maxTextureDimension2D >> 1;
-            descriptor.size.height = supportedLimits.maxTextureDimension2D >> 1;
-            device.CreateTexture(&descriptor);
-        }
-
-        // Texture size equal to max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-            descriptor.size.width = supportedLimits.maxTextureDimension2D;
-            descriptor.size.height = supportedLimits.maxTextureDimension2D;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            device.CreateTexture(&descriptor);
-        }
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test the validation of 3D texture size
-    TEST_F(TextureValidationTest, 3DTextureSize) {
-        wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
-        defaultDescriptor.dimension = wgpu::TextureDimension::e3D;
-        defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
-        wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+    // It is an error to create a multisampled texture with mipLevelCount > 1.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.mipLevelCount = 2;
 
-        // Out-of-bound texture dimension is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {supportedLimits.maxTextureDimension3D + 1u, 1, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, supportedLimits.maxTextureDimension3D + 1u, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 1, supportedLimits.maxTextureDimension3D + 1u};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Zero-sized texture is not allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {0, 1, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 0, 1};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-
-            descriptor.size = {1, 1, 0};
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-
-        // Texture size less than max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {supportedLimits.maxTextureDimension3D >> 1,
-                               supportedLimits.maxTextureDimension3D >> 1,
-                               supportedLimits.maxTextureDimension3D >> 1};
-            device.CreateTexture(&descriptor);
-        }
-
-        // Texture size equal to max dimension is allowed
-        {
-            wgpu::TextureDescriptor descriptor = defaultDescriptor;
-
-            descriptor.size = {supportedLimits.maxTextureDimension3D,
-                               supportedLimits.maxTextureDimension3D,
-                               supportedLimits.maxTextureDimension3D};
-            device.CreateTexture(&descriptor);
-        }
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that depth/stencil formats are invalid for 1D and 3D texture
-    TEST_F(TextureValidationTest, DepthStencilFormatsFor1DAnd3D) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    // It is an error to create a multisampled 1D or 3D texture.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
 
-        wgpu::TextureFormat depthStencilFormats[] = {
-            wgpu::TextureFormat::Stencil8,     wgpu::TextureFormat::Depth16Unorm,
-            wgpu::TextureFormat::Depth24Plus,  wgpu::TextureFormat::Depth24PlusStencil8,
-            wgpu::TextureFormat::Depth32Float,
-        };
+        descriptor.size.height = 1;
+        descriptor.dimension = wgpu::TextureDimension::e1D;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
 
-        for (wgpu::TextureDimension dimension : kDimensions) {
-            for (wgpu::TextureFormat format : depthStencilFormats) {
-                descriptor.format = format;
-                descriptor.dimension = dimension;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // It is an error to create a multisampled texture when the format does not support
+    // multisampling.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+        for (wgpu::TextureFormat format : utils::kFormatsInCoreSpec) {
+            descriptor.format = format;
+            if (utils::TextureFormatSupportsMultisampling(format)) {
+                device.CreateTexture(&descriptor);
+            } else {
                 ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
             }
         }
     }
 
-    // Test that it is valid to destroy a texture
-    TEST_F(TextureValidationTest, DestroyTexture) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        texture.Destroy();
+    // Currently we do not support multisampled 2D textures with depth > 1.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.size.depthOrArrayLayers = 2;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that it's valid to destroy a destroyed texture
-    TEST_F(TextureValidationTest, DestroyDestroyedTexture) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        texture.Destroy();
-        texture.Destroy();
+    // It is an error to set TextureUsage::StorageBinding when sampleCount > 1.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.sampleCount = 4;
+        descriptor.usage |= wgpu::TextureUsage::StorageBinding;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test the validation of the mip level count
+TEST_F(TextureValidationTest, MipLevelCount) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+    // mipLevelCount == 1 is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        descriptor.mipLevelCount = 1;
+
+        device.CreateTexture(&descriptor);
     }
 
-    // Test that it's invalid to submit a destroyed texture in a queue
-    // in the case of destroy, encode, submit
-    TEST_F(TextureValidationTest, DestroyEncodeSubmit) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        wgpu::TextureView textureView = texture.CreateView();
+    // mipLevelCount == 0 is an error
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        descriptor.mipLevelCount = 0;
 
-        utils::ComboRenderPassDescriptor renderPass({textureView});
-
-        // Destroy the texture
-        texture.Destroy();
-
-        wgpu::CommandEncoder encoder_post_destroy = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder_post_destroy.BeginRenderPass(&renderPass);
-            pass.End();
-        }
-        wgpu::CommandBuffer commands = encoder_post_destroy.Finish();
-
-        // Submit should fail due to destroyed texture
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that it's invalid to submit a destroyed texture in a queue
-    // in the case of encode, destroy, submit
-    TEST_F(TextureValidationTest, EncodeDestroySubmit) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        wgpu::TextureView textureView = texture.CreateView();
+    // Full mip chains are allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        // Mip level sizes: 32, 16, 8, 4, 2, 1
+        descriptor.mipLevelCount = 6;
 
-        utils::ComboRenderPassDescriptor renderPass({textureView});
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
-            pass.End();
-        }
-        wgpu::CommandBuffer commands = encoder.Finish();
-
-        // Destroy the texture
-        texture.Destroy();
-
-        // Submit should fail due to destroyed texture
-        ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+        device.CreateTexture(&descriptor);
     }
 
-    // Test it is an error to create an RenderAttachment texture with a non-renderable format.
-    TEST_F(TextureValidationTest, NonRenderableAndRenderAttachment) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.size = {1, 1, 1};
-        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+    // Test non-power-of-two width
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        // Mip level width: 31, 15, 7, 3, 1
+        descriptor.size.width = 31;
+        descriptor.size.height = 4;
 
-        // Succeeds because RGBA8Unorm is renderable
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        // Full mip chains on non-power-of-two width are allowed
+        descriptor.mipLevelCount = 5;
         device.CreateTexture(&descriptor);
 
-        for (wgpu::TextureFormat format : kNonRenderableColorFormats) {
-            // Fails because `format` is non-renderable
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
-
-    // Test it is an error to create a Storage texture with any format that doesn't support
-    // TextureUsage::StorageBinding texture usages.
-    TEST_F(TextureValidationTest, TextureFormatNotSupportTextureUsageStorage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.size = {1, 1, 1};
-        descriptor.usage = wgpu::TextureUsage::StorageBinding;
-
-        for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
-            descriptor.format = format;
-            if (utils::TextureFormatSupportsStorageTexture(format)) {
-                device.CreateTexture(&descriptor);
-            } else {
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-        }
-    }
-
-    // Test it is an error to create a RenderAttachment texture with the texture dimensions that
-    // doesn't support TextureUsage::RenderAttachment texture usages.
-    TEST_F(TextureValidationTest, TextureDimensionNotSupportRenderAttachment) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.size = {1, 1, 1};
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-        descriptor.usage = wgpu::TextureUsage::RenderAttachment;
-
-        constexpr std::array<wgpu::TextureDimension, 3> kTextureDimensions = {
-            {wgpu::TextureDimension::e1D, wgpu::TextureDimension::e2D,
-             wgpu::TextureDimension::e3D}};
-        for (wgpu::TextureDimension dimension : kTextureDimensions) {
-            descriptor.dimension = dimension;
-            if (dimension == wgpu::TextureDimension::e2D) {
-                device.CreateTexture(&descriptor);
-            } else {
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-        }
-    }
-
-    // Test it is an error to create a texture with format "Undefined".
-    TEST_F(TextureValidationTest, TextureFormatUndefined) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-        descriptor.format = wgpu::TextureFormat::Undefined;
+        // Too big mip chains on non-power-of-two width are disallowed
+        descriptor.mipLevelCount = 6;
         ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
 
-    // Test that the creation of a texture with depth24unorm-stencil8 will fail when the feature
-    // Depth24UnormStencil8 is not enabled.
-    TEST_F(TextureValidationTest, UseD24S8FormatWithoutEnablingFeature) {
+    // Test non-power-of-two height
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 4;
+        // Mip level height: 31, 15, 7, 3, 1
+        descriptor.size.height = 31;
+
+        // Full mip chains on non-power-of-two height are allowed
+        descriptor.mipLevelCount = 5;
+        device.CreateTexture(&descriptor);
+
+        // Too big mip chains on non-power-of-two height are disallowed
+        descriptor.mipLevelCount = 6;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Undefined shift check if miplevel is bigger than the integer bit width.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 32;
+        descriptor.mipLevelCount = 100;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Non square mip map halves the resolution until a 1x1 dimension
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 8;
+        // Mip maps: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 * 1, 1 * 1
+        descriptor.mipLevelCount = 6;
+
+        device.CreateTexture(&descriptor);
+    }
+
+    // Non square mip map for a 3D texture
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 8;
+        descriptor.size.depthOrArrayLayers = 64;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        // Non square mip map halves width, height and depth until a 1x1x1 dimension for a 3D
+        // texture. So there are 7 mipmaps at most: 32 * 8 * 64, 16 * 4 * 32, 8 * 2 * 16,
+        // 4 * 1 * 8, 2 * 1 * 4, 1 * 1 * 2, 1 * 1 * 1.
+        descriptor.mipLevelCount = 7;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Non square mip map for 2D textures with depth > 1
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = 32;
+        descriptor.size.height = 8;
+        descriptor.size.depthOrArrayLayers = 64;
+        // Non square mip map halves width and height until a 1x1 dimension for a 2D texture,
+        // even if its depth > 1. So there are 6 mipmaps at most: 32 * 8, 16 * 4, 8 * 2, 4 * 1, 2 *
+        // 1, 1 * 1.
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.mipLevelCount = 7;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        descriptor.mipLevelCount = 6;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Mip level equal to the maximum for a 2D texture is allowed
+    {
+        uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = maxTextureDimension2D;
+        descriptor.size.height = maxTextureDimension2D;
+        descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 1u;
+
+        device.CreateTexture(&descriptor);
+    }
+
+    // Mip level exceeding the maximum for a 2D texture not allowed
+    {
+        uint32_t maxTextureDimension2D = GetSupportedLimits().limits.maxTextureDimension2D;
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = maxTextureDimension2D;
+        descriptor.size.height = maxTextureDimension2D;
+        descriptor.mipLevelCount = Log2(maxTextureDimension2D) + 2u;
+
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // 1D textures can only have a single mip level.
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.dimension = wgpu::TextureDimension::e1D;
+        descriptor.size.width = 32;
+        descriptor.size.height = 1;
+
+        // Having a single mip level is allowed.
+        descriptor.mipLevelCount = 1;
+        device.CreateTexture(&descriptor);
+
+        // Having more than 1 is an error.
+        descriptor.mipLevelCount = 2;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test the validation of array layer count
+TEST_F(TextureValidationTest, ArrayLayerCount) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    // Array layer count exceeding maxTextureArrayLayers is not allowed for 2D texture
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Array layer count less than maxTextureArrayLayers is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers >> 1;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Array layer count equal to maxTextureArrayLayers is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.depthOrArrayLayers = supportedLimits.maxTextureArrayLayers;
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test the validation of 1D texture size
+TEST_F(TextureValidationTest, 1DTextureSize) {
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    wgpu::TextureDescriptor defaultDescriptor;
+    defaultDescriptor.size = {4, 1, 1};
+    defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
+    defaultDescriptor.usage = wgpu::TextureUsage::CopySrc;
+    defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+
+    // Width must be in [1, kMaxTextureDimension1D]
+    {
+        wgpu::TextureDescriptor desc = defaultDescriptor;
+        desc.size.width = 0;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+        desc.size.width = 1;
+        device.CreateTexture(&desc);
+
+        desc.size.width = supportedLimits.maxTextureDimension1D;
+        device.CreateTexture(&desc);
+        desc.size.width = supportedLimits.maxTextureDimension1D + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+    }
+
+    // Height must be 1
+    {
+        wgpu::TextureDescriptor desc = defaultDescriptor;
+        desc.size.height = 2;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+
+        desc.size.height = 0;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+    }
+
+    // DepthOrArrayLayers must be 1
+    {
+        wgpu::TextureDescriptor desc = defaultDescriptor;
+        desc.size.depthOrArrayLayers = 2;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+
+        desc.size.depthOrArrayLayers = 0;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&desc));
+    }
+}
+
+// Test the validation of 2D texture size
+TEST_F(TextureValidationTest, 2DTextureSize) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    // Out-of-bound texture dimension is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = supportedLimits.maxTextureDimension2D + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size.width = 1;
+        descriptor.size.height = supportedLimits.maxTextureDimension2D + 1u;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Zero-sized texture is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size = {0, 1, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 0, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 1, 0};
+        // 2D texture with depth=0 is not allowed
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Texture size less than max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = supportedLimits.maxTextureDimension2D >> 1;
+        descriptor.size.height = supportedLimits.maxTextureDimension2D >> 1;
+        device.CreateTexture(&descriptor);
+    }
+
+    // Texture size equal to max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+        descriptor.size.width = supportedLimits.maxTextureDimension2D;
+        descriptor.size.height = supportedLimits.maxTextureDimension2D;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test the validation of 3D texture size
+TEST_F(TextureValidationTest, 3DTextureSize) {
+    wgpu::TextureDescriptor defaultDescriptor = CreateDefaultTextureDescriptor();
+    defaultDescriptor.dimension = wgpu::TextureDimension::e3D;
+    defaultDescriptor.usage = wgpu::TextureUsage::TextureBinding;
+    wgpu::Limits supportedLimits = GetSupportedLimits().limits;
+
+    // Out-of-bound texture dimension is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {supportedLimits.maxTextureDimension3D + 1u, 1, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, supportedLimits.maxTextureDimension3D + 1u, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 1, supportedLimits.maxTextureDimension3D + 1u};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Zero-sized texture is not allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {0, 1, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 0, 1};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+
+        descriptor.size = {1, 1, 0};
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+
+    // Texture size less than max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {supportedLimits.maxTextureDimension3D >> 1,
+                           supportedLimits.maxTextureDimension3D >> 1,
+                           supportedLimits.maxTextureDimension3D >> 1};
+        device.CreateTexture(&descriptor);
+    }
+
+    // Texture size equal to max dimension is allowed
+    {
+        wgpu::TextureDescriptor descriptor = defaultDescriptor;
+
+        descriptor.size = {supportedLimits.maxTextureDimension3D,
+                           supportedLimits.maxTextureDimension3D,
+                           supportedLimits.maxTextureDimension3D};
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test that depth/stencil formats are invalid for 1D and 3D texture
+TEST_F(TextureValidationTest, DepthStencilFormatsFor1DAnd3D) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+    wgpu::TextureFormat depthStencilFormats[] = {
+        wgpu::TextureFormat::Stencil8,     wgpu::TextureFormat::Depth16Unorm,
+        wgpu::TextureFormat::Depth24Plus,  wgpu::TextureFormat::Depth24PlusStencil8,
+        wgpu::TextureFormat::Depth32Float,
+    };
+
+    for (wgpu::TextureDimension dimension : kDimensions) {
+        for (wgpu::TextureFormat format : depthStencilFormats) {
+            descriptor.format = format;
+            descriptor.dimension = dimension;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+}
+
+// Test that it is valid to destroy a texture
+TEST_F(TextureValidationTest, DestroyTexture) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    texture.Destroy();
+}
+
+// Test that it's valid to destroy a destroyed texture
+TEST_F(TextureValidationTest, DestroyDestroyedTexture) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    texture.Destroy();
+    texture.Destroy();
+}
+
+// Test that it's invalid to submit a destroyed texture in a queue
+// in the case of destroy, encode, submit
+TEST_F(TextureValidationTest, DestroyEncodeSubmit) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({textureView});
+
+    // Destroy the texture
+    texture.Destroy();
+
+    wgpu::CommandEncoder encoder_post_destroy = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder_post_destroy.BeginRenderPass(&renderPass);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder_post_destroy.Finish();
+
+    // Submit should fail due to destroyed texture
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Test that it's invalid to submit a destroyed texture in a queue
+// in the case of encode, destroy, submit
+TEST_F(TextureValidationTest, EncodeDestroySubmit) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    utils::ComboRenderPassDescriptor renderPass({textureView});
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass);
+        pass.End();
+    }
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    // Destroy the texture
+    texture.Destroy();
+
+    // Submit should fail due to destroyed texture
+    ASSERT_DEVICE_ERROR(queue.Submit(1, &commands));
+}
+
+// Test it is an error to create an RenderAttachment texture with a non-renderable format.
+TEST_F(TextureValidationTest, NonRenderableAndRenderAttachment) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {1, 1, 1};
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+    // Succeeds because RGBA8Unorm is renderable
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    device.CreateTexture(&descriptor);
+
+    for (wgpu::TextureFormat format : kNonRenderableColorFormats) {
+        // Fails because `format` is non-renderable
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test it is an error to create a Storage texture with any format that doesn't support
+// TextureUsage::StorageBinding texture usages.
+TEST_F(TextureValidationTest, TextureFormatNotSupportTextureUsageStorage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {1, 1, 1};
+    descriptor.usage = wgpu::TextureUsage::StorageBinding;
+
+    for (wgpu::TextureFormat format : utils::kAllTextureFormats) {
+        descriptor.format = format;
+        if (utils::TextureFormatSupportsStorageTexture(format)) {
+            device.CreateTexture(&descriptor);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+}
+
+// Test it is an error to create a RenderAttachment texture with the texture dimensions that
+// doesn't support TextureUsage::RenderAttachment texture usages.
+TEST_F(TextureValidationTest, TextureDimensionNotSupportRenderAttachment) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.size = {1, 1, 1};
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment;
+
+    constexpr std::array<wgpu::TextureDimension, 3> kTextureDimensions = {
+        {wgpu::TextureDimension::e1D, wgpu::TextureDimension::e2D, wgpu::TextureDimension::e3D}};
+    for (wgpu::TextureDimension dimension : kTextureDimensions) {
+        descriptor.dimension = dimension;
+        if (dimension == wgpu::TextureDimension::e2D) {
+            device.CreateTexture(&descriptor);
+        } else {
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
+    }
+}
+
+// Test it is an error to create a texture with format "Undefined".
+TEST_F(TextureValidationTest, TextureFormatUndefined) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    descriptor.format = wgpu::TextureFormat::Undefined;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+}
+
+// Test that the creation of a texture with depth24unorm-stencil8 will fail when the feature
+// Depth24UnormStencil8 is not enabled.
+TEST_F(TextureValidationTest, UseD24S8FormatWithoutEnablingFeature) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+}
+
+// Test that the creation of a texture with depth32float-stencil8 will fail when the feature
+// Depth32FloatStencil8 is not enabled.
+TEST_F(TextureValidationTest, UseD32S8FormatWithoutEnablingFeature) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+    descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
+    ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+}
+
+// Test that the creation of a texture with BC format will fail when the feature
+// textureCompressionBC is not enabled.
+TEST_F(TextureValidationTest, UseBCFormatWithoutEnablingFeature) {
+    for (wgpu::TextureFormat format : utils::kBCFormats) {
         wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test that the creation of a texture with ETC2 format will fail when the feature
+// textureCompressionETC2 is not enabled.
+TEST_F(TextureValidationTest, UseETC2FormatWithoutEnablingFeature) {
+    for (wgpu::TextureFormat format : utils::kETC2Formats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test that the creation of a texture with ASTC format will fail when the feature
+// textureCompressionASTC is not enabled.
+TEST_F(TextureValidationTest, UseASTCFormatWithoutEnablingFeature) {
+    for (wgpu::TextureFormat format : utils::kASTCFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+class D24S8TextureFormatsValidationTests : public TextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that depth24unorm-stencil8 format is invalid for 3D texture
+TEST_F(D24S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+    for (wgpu::TextureDimension dimension : kDimensions) {
         descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
+        descriptor.dimension = dimension;
         ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
+}
 
-    // Test that the creation of a texture with depth32float-stencil8 will fail when the feature
-    // Depth32FloatStencil8 is not enabled.
-    TEST_F(TextureValidationTest, UseD32S8FormatWithoutEnablingFeature) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+class D32S8TextureFormatsValidationTests : public TextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that depth32float-stencil8 format is invalid for 3D texture
+TEST_F(D32S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
+    wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+
+    for (wgpu::TextureDimension dimension : kDimensions) {
         descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
+        descriptor.dimension = dimension;
         ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
+}
 
-    // Test that the creation of a texture with BC format will fail when the feature
-    // textureCompressionBC is not enabled.
-    TEST_F(TextureValidationTest, UseBCFormatWithoutEnablingFeature) {
-        for (wgpu::TextureFormat format : utils::kBCFormats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+class CompressedTextureFormatsValidationTests : public TextureValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
+                                                 wgpu::FeatureName::TextureCompressionETC2,
+                                                 wgpu::FeatureName::TextureCompressionASTC};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 3;
+
+        // TODO(dawn:814): Remove when 1D texture support is complete.
+        const char* kDisallowUnsafeApis = "disallow_unsafe_apis";
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        togglesDesc.forceDisabledToggles = &kDisallowUnsafeApis;
+        togglesDesc.forceDisabledTogglesCount = 1;
+
+        descriptor.nextInChain = &togglesDesc;
+
+        return adapter.CreateDevice(&descriptor);
     }
 
-    // Test that the creation of a texture with ETC2 format will fail when the feature
-    // textureCompressionETC2 is not enabled.
-    TEST_F(TextureValidationTest, UseETC2FormatWithoutEnablingFeature) {
-        for (wgpu::TextureFormat format : utils::kETC2Formats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
+    wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
+        wgpu::TextureDescriptor descriptor =
+            TextureValidationTest::CreateDefaultTextureDescriptor();
+        descriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
+                           wgpu::TextureUsage::TextureBinding;
+        descriptor.size.width = kWidth;
+        descriptor.size.height = kHeight;
+        return descriptor;
     }
 
-    // Test that the creation of a texture with ASTC format will fail when the feature
-    // textureCompressionASTC is not enabled.
-    TEST_F(TextureValidationTest, UseASTCFormatWithoutEnablingFeature) {
-        for (wgpu::TextureFormat format : utils::kASTCFormats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
+  private:
+    // Choose the LCM of all current compressed texture format texel dimensions as the
+    // dimensions of the default texture.
+    static constexpr uint32_t kWidth = 120;
+    static constexpr uint32_t kHeight = 120;
+};
 
-    class D24S8TextureFormatsValidationTests : public TextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
+// Test that only CopySrc, CopyDst and Sampled are accepted as usage in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, TextureUsage) {
+    wgpu::TextureUsage invalidUsages[] = {
+        wgpu::TextureUsage::RenderAttachment,
+        wgpu::TextureUsage::StorageBinding,
+        wgpu::TextureUsage::Present,
     };
-
-    // Test that depth24unorm-stencil8 format is invalid for 3D texture
-    TEST_F(D24S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-
-        for (wgpu::TextureDimension dimension : kDimensions) {
-            descriptor.format = wgpu::TextureFormat::Depth24UnormStencil8;
-            descriptor.dimension = dimension;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
-
-    class D32S8TextureFormatsValidationTests : public TextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-    };
-
-    // Test that depth32float-stencil8 format is invalid for 3D texture
-    TEST_F(D32S8TextureFormatsValidationTests, DepthStencilFormatsFor3D) {
-        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-
-        for (wgpu::TextureDimension dimension : kDimensions) {
-            descriptor.format = wgpu::TextureFormat::Depth32FloatStencil8;
-            descriptor.dimension = dimension;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-        }
-    }
-
-    class CompressedTextureFormatsValidationTests : public TextureValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[3] = {wgpu::FeatureName::TextureCompressionBC,
-                                                     wgpu::FeatureName::TextureCompressionETC2,
-                                                     wgpu::FeatureName::TextureCompressionASTC};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 3;
-
-            // TODO(dawn:814): Remove when 1D texture support is complete.
-            const char* kDisallowUnsafeApis = "disallow_unsafe_apis";
-            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
-            togglesDesc.forceDisabledToggles = &kDisallowUnsafeApis;
-            togglesDesc.forceDisabledTogglesCount = 1;
-
-            descriptor.nextInChain = &togglesDesc;
-
-            return adapter.CreateDevice(&descriptor);
-        }
-
-        wgpu::TextureDescriptor CreateDefaultTextureDescriptor() {
-            wgpu::TextureDescriptor descriptor =
-                TextureValidationTest::CreateDefaultTextureDescriptor();
-            descriptor.usage = wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst |
-                               wgpu::TextureUsage::TextureBinding;
-            descriptor.size.width = kWidth;
-            descriptor.size.height = kHeight;
-            return descriptor;
-        }
-
-      private:
-        // Choose the LCM of all current compressed texture format texel dimensions as the
-        // dimensions of the default texture.
-        static constexpr uint32_t kWidth = 120;
-        static constexpr uint32_t kHeight = 120;
-    };
-
-    // Test that only CopySrc, CopyDst and Sampled are accepted as usage in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, TextureUsage) {
-        wgpu::TextureUsage invalidUsages[] = {
-            wgpu::TextureUsage::RenderAttachment,
-            wgpu::TextureUsage::StorageBinding,
-            wgpu::TextureUsage::Present,
-        };
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            for (wgpu::TextureUsage usage : invalidUsages) {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.usage = usage;
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-        }
-    }
-
-    // Test that using various MipLevelCount is allowed for compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, MipLevelCount) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            for (uint32_t mipLevels : {1, 3, 6}) {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.mipLevelCount = mipLevels;
-                device.CreateTexture(&descriptor);
-            }
-        }
-    }
-
-    // Test that it is invalid to specify SampleCount>1 in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, SampleCount) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        for (wgpu::TextureUsage usage : invalidUsages) {
             wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
             descriptor.format = format;
-            descriptor.sampleCount = 4;
+            descriptor.usage = usage;
             ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
         }
     }
+}
 
-    // Test that it is allowed to create a 2D texture with depth>1 in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, 2DArrayTexture) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+// Test that using various MipLevelCount is allowed for compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, MipLevelCount) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        for (uint32_t mipLevels : {1, 3, 6}) {
             wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
             descriptor.format = format;
-            descriptor.size.depthOrArrayLayers = 6;
+            descriptor.mipLevelCount = mipLevels;
             device.CreateTexture(&descriptor);
         }
     }
+}
 
-    // Test that it is not allowed to create a 1D texture in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, 1DTexture) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-            descriptor.format = format;
-            // Unfortunately we can't use the block height here otherwise validation for the max
-            // texture 1D size will trigger. We check the error message below to make sure the
-            // correct code path is covered.
-            descriptor.size.height = 1;
-            descriptor.size.depthOrArrayLayers = 1;
-            descriptor.dimension = wgpu::TextureDimension::e1D;
-            ASSERT_DEVICE_ERROR(
-                device.CreateTexture(&descriptor),
-                testing::HasSubstr(
-                    "The dimension (TextureDimension::e1D) of a texture with a compressed format"));
-        }
+// Test that it is invalid to specify SampleCount>1 in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, SampleCount) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        descriptor.sampleCount = 4;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
     }
+}
 
-    // Test that it is not allowed to create a 3D texture in compressed formats.
-    TEST_F(CompressedTextureFormatsValidationTests, 3DTexture) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+// Test that it is allowed to create a 2D texture with depth>1 in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, 2DArrayTexture) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        descriptor.size.depthOrArrayLayers = 6;
+        device.CreateTexture(&descriptor);
+    }
+}
+
+// Test that it is not allowed to create a 1D texture in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, 1DTexture) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        // Unfortunately we can't use the block height here otherwise validation for the max
+        // texture 1D size will trigger. We check the error message below to make sure the
+        // correct code path is covered.
+        descriptor.size.height = 1;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.dimension = wgpu::TextureDimension::e1D;
+        ASSERT_DEVICE_ERROR(
+            device.CreateTexture(&descriptor),
+            testing::HasSubstr(
+                "The dimension (TextureDimension::e1D) of a texture with a compressed format"));
+    }
+}
+
+// Test that it is not allowed to create a 3D texture in compressed formats.
+TEST_F(CompressedTextureFormatsValidationTests, 3DTexture) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+        descriptor.format = format;
+        descriptor.size.depthOrArrayLayers = 4;
+        descriptor.dimension = wgpu::TextureDimension::e3D;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+    }
+}
+
+// Test that it is invalid to use numbers for a texture's width/height that are not multiples
+// of the compressed block sizes.
+TEST_F(CompressedTextureFormatsValidationTests, TextureSize) {
+    for (wgpu::TextureFormat format : utils::kCompressedFormats) {
+        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+
+        // Test that the default size (120 x 120) is valid for all formats.
+        {
             wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
             descriptor.format = format;
-            descriptor.size.depthOrArrayLayers = 4;
-            descriptor.dimension = wgpu::TextureDimension::e3D;
+            ASSERT_TRUE(descriptor.size.width % blockWidth == 0 &&
+                        descriptor.size.height % blockHeight == 0);
+            device.CreateTexture(&descriptor);
+        }
+
+        // Test that invalid width should cause an error. Note that if the block width of the
+        // compression type is even, we test that alignment to half the width is not sufficient.
+        {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.width =
+                blockWidth % 2 == 0 ? blockWidth - (blockWidth / 2) : blockWidth - 1;
             ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
         }
-    }
 
-    // Test that it is invalid to use numbers for a texture's width/height that are not multiples
-    // of the compressed block sizes.
-    TEST_F(CompressedTextureFormatsValidationTests, TextureSize) {
-        for (wgpu::TextureFormat format : utils::kCompressedFormats) {
-            uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-            uint32_t blockHeight = utils::GetTextureFormatBlockHeight(format);
+        // Test that invalid width should cause an error. Note that if the block height of the
+        // compression type is even, we test that alignment to half the height is not
+        // sufficient.
+        {
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.height =
+                blockHeight % 2 == 0 ? blockHeight - (blockHeight / 2) : blockHeight - 1;
+            ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
+        }
 
-            // Test that the default size (120 x 120) is valid for all formats.
-            {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                ASSERT_TRUE(descriptor.size.width % blockWidth == 0 &&
-                            descriptor.size.height % blockHeight == 0);
-                device.CreateTexture(&descriptor);
-            }
-
-            // Test that invalid width should cause an error. Note that if the block width of the
-            // compression type is even, we test that alignment to half the width is not sufficient.
-            {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.size.width =
-                    blockWidth % 2 == 0 ? blockWidth - (blockWidth / 2) : blockWidth - 1;
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-
-            // Test that invalid width should cause an error. Note that if the block height of the
-            // compression type is even, we test that alignment to half the height is not
-            // sufficient.
-            {
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.size.height =
-                    blockHeight % 2 == 0 ? blockHeight - (blockHeight / 2) : blockHeight - 1;
-                ASSERT_DEVICE_ERROR(device.CreateTexture(&descriptor));
-            }
-
-            // Test a working dimension based on some constant multipliers to the dimensions.
-            {
-                constexpr uint32_t kWidthMultiplier = 3;
-                constexpr uint32_t kHeightMultiplier = 8;
-                wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
-                descriptor.format = format;
-                descriptor.size.width = kWidthMultiplier * blockWidth;
-                descriptor.size.height = kHeightMultiplier * blockHeight;
-                device.CreateTexture(&descriptor);
-            }
+        // Test a working dimension based on some constant multipliers to the dimensions.
+        {
+            constexpr uint32_t kWidthMultiplier = 3;
+            constexpr uint32_t kHeightMultiplier = 8;
+            wgpu::TextureDescriptor descriptor = CreateDefaultTextureDescriptor();
+            descriptor.format = format;
+            descriptor.size.width = kWidthMultiplier * blockWidth;
+            descriptor.size.height = kHeightMultiplier * blockHeight;
+            device.CreateTexture(&descriptor);
         }
     }
+}
 
 }  // namespace
diff --git a/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp b/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
index 51d1c06..d56647b 100644
--- a/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/TextureViewValidationTests.cpp
@@ -18,999 +18,996 @@
 
 namespace {
 
-    class TextureViewValidationTest : public ValidationTest {};
+class TextureViewValidationTest : public ValidationTest {};
 
-    constexpr uint32_t kWidth = 32u;
-    constexpr uint32_t kHeight = 32u;
-    constexpr uint32_t kDepth = 6u;
-    constexpr uint32_t kDefaultMipLevels = 6u;
+constexpr uint32_t kWidth = 32u;
+constexpr uint32_t kHeight = 32u;
+constexpr uint32_t kDepth = 6u;
+constexpr uint32_t kDefaultMipLevels = 6u;
 
-    constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
+constexpr wgpu::TextureFormat kDefaultTextureFormat = wgpu::TextureFormat::RGBA8Unorm;
 
-    wgpu::Texture Create2DArrayTexture(wgpu::Device& device,
-                                       uint32_t arrayLayerCount,
-                                       uint32_t width = kWidth,
-                                       uint32_t height = kHeight,
-                                       uint32_t mipLevelCount = kDefaultMipLevels,
-                                       uint32_t sampleCount = 1) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = arrayLayerCount;
-        descriptor.sampleCount = sampleCount;
-        descriptor.format = kDefaultTextureFormat;
-        descriptor.mipLevelCount = mipLevelCount;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-        return device.CreateTexture(&descriptor);
-    }
+wgpu::Texture Create2DArrayTexture(wgpu::Device& device,
+                                   uint32_t arrayLayerCount,
+                                   uint32_t width = kWidth,
+                                   uint32_t height = kHeight,
+                                   uint32_t mipLevelCount = kDefaultMipLevels,
+                                   uint32_t sampleCount = 1) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = arrayLayerCount;
+    descriptor.sampleCount = sampleCount;
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.mipLevelCount = mipLevelCount;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+    return device.CreateTexture(&descriptor);
+}
 
-    wgpu::Texture Create3DTexture(wgpu::Device& device) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e3D;
-        descriptor.size = {kWidth, kHeight, kDepth};
-        descriptor.sampleCount = 1;
-        descriptor.format = kDefaultTextureFormat;
+wgpu::Texture Create3DTexture(wgpu::Device& device) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e3D;
+    descriptor.size = {kWidth, kHeight, kDepth};
+    descriptor.sampleCount = 1;
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.mipLevelCount = kDefaultMipLevels;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::Texture Create1DTexture(wgpu::Device& device) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e1D;
+    descriptor.size = {kWidth, 1, 1};
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::Texture CreateDepthStencilTexture(wgpu::Device& device, wgpu::TextureFormat format) {
+    wgpu::TextureDescriptor descriptor = {};
+    descriptor.size = {kWidth, kHeight, kDepth};
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+    descriptor.mipLevelCount = kDefaultMipLevels;
+    descriptor.format = format;
+    return device.CreateTexture(&descriptor);
+}
+
+wgpu::TextureViewDescriptor CreateDefaultViewDescriptor(wgpu::TextureViewDimension dimension) {
+    wgpu::TextureViewDescriptor descriptor;
+    descriptor.format = kDefaultTextureFormat;
+    descriptor.dimension = dimension;
+    descriptor.baseMipLevel = 0;
+    if (dimension != wgpu::TextureViewDimension::e1D) {
         descriptor.mipLevelCount = kDefaultMipLevels;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-        return device.CreateTexture(&descriptor);
+    }
+    descriptor.baseArrayLayer = 0;
+    descriptor.arrayLayerCount = 1;
+    return descriptor;
+}
+
+// Test creating texture view on a 2D non-array texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2D) {
+    wgpu::Texture texture = Create2DArrayTexture(device, 1);
+
+    wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    wgpu::Texture Create1DTexture(wgpu::Device& device) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e1D;
-        descriptor.size = {kWidth, 1, 1};
-        descriptor.format = kDefaultTextureFormat;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-        return device.CreateTexture(&descriptor);
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    wgpu::Texture CreateDepthStencilTexture(wgpu::Device& device, wgpu::TextureFormat format) {
-        wgpu::TextureDescriptor descriptor = {};
-        descriptor.size = {kWidth, kHeight, kDepth};
-        descriptor.usage =
-            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
-        descriptor.mipLevelCount = kDefaultMipLevels;
-        descriptor.format = format;
-        return device.CreateTexture(&descriptor);
-    }
-
-    wgpu::TextureViewDescriptor CreateDefaultViewDescriptor(wgpu::TextureViewDimension dimension) {
-        wgpu::TextureViewDescriptor descriptor;
-        descriptor.format = kDefaultTextureFormat;
-        descriptor.dimension = dimension;
-        descriptor.baseMipLevel = 0;
-        if (dimension != wgpu::TextureViewDimension::e1D) {
-            descriptor.mipLevelCount = kDefaultMipLevels;
-        }
-        descriptor.baseArrayLayer = 0;
+    // It is OK to create a 2D texture view on a 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
         descriptor.arrayLayerCount = 1;
-        return descriptor;
-    }
-
-    // Test creating texture view on a 2D non-array texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2D) {
-        wgpu::Texture texture = Create2DArrayTexture(device, 1);
-
-        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 2D texture view on a 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to view a layer past the end of the texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 1-layer 2D array texture view on a 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 3D texture view on a 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
-        // k..end.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
-
-            descriptor.baseMipLevel = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to make the mip level out of range.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = kDefaultMipLevels + 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = 1;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            descriptor.mipLevelCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            descriptor.mipLevelCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Test creating texture view on a 2D array texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2DArray) {
-        constexpr uint32_t kDefaultArrayLayers = 6;
-
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 2D texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is OK to create a 2D array texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 3D texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a 1D texture view on a 2D array texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::e1D;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
-        // layers k..end.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-
-            descriptor.baseArrayLayer = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = kDefaultArrayLayers;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error for the array layer range of the view to exceed that of the texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = kDefaultArrayLayers + 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = 1;
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = kDefaultArrayLayers;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Test creating texture view on a 3D texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture3D) {
-        wgpu::Texture texture = Create3DTexture(device);
-
-        wgpu::TextureViewDescriptor base3DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e3D);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 3D texture view on a 3D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 1D/2D/2DArray/Cube/CubeArray texture view on a 3D texture.
-        {
-            wgpu::TextureViewDimension invalidDimensions[] = {
-                wgpu::TextureViewDimension::e1D,       wgpu::TextureViewDimension::e2D,
-                wgpu::TextureViewDimension::e2DArray,  wgpu::TextureViewDimension::Cube,
-                wgpu::TextureViewDimension::CubeArray,
-            };
-            for (wgpu::TextureViewDimension dimension : invalidDimensions) {
-                wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-                descriptor.dimension = dimension;
-                if (dimension == wgpu::TextureViewDimension::Cube ||
-                    dimension == wgpu::TextureViewDimension::CubeArray) {
-                    descriptor.arrayLayerCount = 6;
-                }
-                ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            }
-        }
-
-        // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
-        // k..end.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
-
-            descriptor.baseMipLevel = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            texture.CreateView(&descriptor);
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to make the mip level out of range.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.baseMipLevel = 0;
-            descriptor.mipLevelCount = kDefaultMipLevels + 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = 1;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels - 1;
-            descriptor.mipLevelCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseMipLevel = kDefaultMipLevels;
-            descriptor.mipLevelCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
-        // layers k..end. But baseArrayLayer must be 0, and arrayLayerCount must be 1 at most for 3D
-        // texture view.
-        {
-            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-            descriptor.baseArrayLayer = 0;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-
-            descriptor.baseArrayLayer = 0;
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.arrayLayerCount = kDepth;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Test creating texture view on a 1D texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture1D) {
-        wgpu::Texture texture = Create1DTexture(device);
-
-        wgpu::TextureViewDescriptor base1DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e1D);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a 1D texture view on a 1D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 2D/2DArray/Cube/CubeArray/3D texture view on a 1D texture.
-        {
-            wgpu::TextureViewDimension invalidDimensions[] = {
-                wgpu::TextureViewDimension::e2D,  wgpu::TextureViewDimension::e2DArray,
-                wgpu::TextureViewDimension::Cube, wgpu::TextureViewDimension::CubeArray,
-                wgpu::TextureViewDimension::e3D,
-            };
-            for (wgpu::TextureViewDimension dimension : invalidDimensions) {
-                wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
-                descriptor.dimension = dimension;
-                ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            }
-        }
-
-        // No tests for setting mip levels / array layer ranges because 1D textures can only have
-        // a single mip and layer.
-    }
-
-    // Test creating texture view on a multisampled 2D texture
-    TEST_F(TextureViewValidationTest, CreateTextureViewOnMultisampledTexture2D) {
-        wgpu::Texture texture =
-            Create2DArrayTexture(device, /* arrayLayerCount */ 1, kWidth, kHeight,
-                                 /* mipLevelCount */ 1, /* sampleCount */ 4);
-
-        // It is OK to create a 2D texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a 1-layer 2D array texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            descriptor.arrayLayerCount = 1;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a 1D texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            descriptor.dimension = wgpu::TextureViewDimension::e1D;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a 3D texture view on a multisampled 2D texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = {};
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Using the "none" ("default") values validates the same as explicitly
-    // specifying the values they're supposed to default to.
-    // Variant for a 2D texture with more than 1 array layer.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DArray) {
-        constexpr uint32_t kDefaultArrayLayers = 8;
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        { texture.CreateView(); }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.format = wgpu::TextureFormat::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::R8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            texture.CreateView(&descriptor);
-            // Setting view dimension to 2D, its arrayLayer will default to 1. And view creation
-            // will success.
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            texture.CreateView(&descriptor);
-            // Setting view dimension to Cube, its arrayLayer will default to 6.
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 2;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 3;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            // Setting view dimension to CubeArray, its arrayLayer will default to
-            // size.depthOrArrayLayers (kDefaultArrayLayers) - baseArrayLayer.
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.baseArrayLayer = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.baseArrayLayer = 2;
-            texture.CreateView(&descriptor);
-            descriptor.baseArrayLayer = 3;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-
-            // Setting array layers to non-0 means the dimensionality will
-            // default to 2D so by itself it causes an error.
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            texture.CreateView(&descriptor);
-
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            texture.CreateView(&descriptor);
-        }
-    }
-
-    // Using the "none" ("default") values validates the same as explicitly
-    // specifying the values they're supposed to default to.
-    // Variant for a 2D texture with only 1 array layer.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DNonArray) {
-        constexpr uint32_t kDefaultArrayLayers = 1;
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        { texture.CreateView(); }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.format = wgpu::TextureFormat::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::R8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            texture.CreateView(&descriptor);
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = kDefaultArrayLayers;
-            texture.CreateView(&descriptor);
-        }
-    }
-
-    // Using the "none" ("default") values validates the same as explicitly
-    // specifying the values they're supposed to default to.
-    // Variant for a 3D texture.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults3D) {
-        wgpu::Texture texture = Create3DTexture(device);
-
-        { texture.CreateView(); }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.format = wgpu::TextureFormat::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&descriptor);
-            descriptor.format = wgpu::TextureFormat::R8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Undefined;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e3D;
-            texture.CreateView(&descriptor);
-            descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-            descriptor.dimension = wgpu::TextureViewDimension::e2D;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 1;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = 2;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-        {
-            wgpu::TextureViewDescriptor descriptor;
-            descriptor.mipLevelCount = kDefaultMipLevels;
-            texture.CreateView(&descriptor);
-            descriptor.arrayLayerCount = kDepth;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-    }
-
-    // Regression test for crbug.com/1314049. Format default depends on the aspect.
-    // Test that computing the default does not crash if the aspect is invalid.
-    TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaultsInvalidAspect) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24PlusStencil8);
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-        viewDesc.aspect = static_cast<wgpu::TextureAspect>(-1);
-
-        // Validation should catch the invalid aspect.
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc),
-                            testing::HasSubstr("Invalid value for WGPUTextureAspect"));
-    }
-
-    // Test creating cube map texture view
-    TEST_F(TextureViewValidationTest, CreateCubeMapTextureView) {
-        constexpr uint32_t kDefaultArrayLayers = 16;
-
-        wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
-
-        wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
-
-        // It is an error to create a view with zero 'arrayLayerCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a view with zero 'mipLevelCount'.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.mipLevelCount = 0;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a cube map texture view with arrayLayerCount == 6.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 6;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a cube map texture view with arrayLayerCount != 6.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 3;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is OK to create a cube map array texture view with arrayLayerCount % 6 == 0.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.arrayLayerCount = 12;
-            texture.CreateView(&descriptor);
-        }
-
-        // It is an error to create a cube map array texture view with arrayLayerCount % 6 != 0.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.arrayLayerCount = 11;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a cube map texture view with width != height.
-        {
-            wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
-
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::Cube;
-            descriptor.arrayLayerCount = 6;
-            ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
-        }
-
-        // It is an error to create a cube map array texture view with width != height.
-        {
-            wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
-
-            wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
-            descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
-            descriptor.arrayLayerCount = 12;
-            ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
-        }
-    }
-
-    // Test the format compatibility rules when creating a texture view.
-    TEST_F(TextureViewValidationTest, TextureViewFormatCompatibility) {
-        wgpu::TextureDescriptor textureDesc = {};
-        textureDesc.size.width = 4;
-        textureDesc.size.height = 4;
-        textureDesc.usage = wgpu::TextureUsage::TextureBinding;
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-
-        // It is an error to create an sRGB texture view from an RGB texture, without viewFormats.
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is an error to create an RGB texture view from an sRGB texture, without viewFormats.
-        {
-            textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is an error to create a texture view with a depth-stencil format of an RGBA texture.
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is an error to create a texture view with a depth format of a depth-stencil texture.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24Plus;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is invalid to create a texture view with a combined depth-stencil format if only
-        // the depth aspect is selected.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // It is invalid to create a texture view with a combined depth-stencil format if only
-        // the stencil aspect is selected.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Regression test for crbug.com/1312780.
-        // viewFormat is not supported (Null backend does not support any optional features).
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24UnormStencil8;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc), testing::HasSubstr("Unsupported"));
-        }
-
-        // It is valid to create a texture view with a depth format of a depth-stencil texture
-        // if the depth only aspect is selected.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewDesc.format = wgpu::TextureFormat::Depth24Plus;
-            viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-            texture.CreateView(&viewDesc);
-
-            viewDesc = {};
-        }
-
-        // Prep for testing a single view format in viewFormats.
-        wgpu::TextureFormat viewFormat;
-        textureDesc.viewFormats = &viewFormat;
-        textureDesc.viewFormatCount = 1;
-
-        // An aspect format is not a valid view format of a depth-stencil texture.
-        {
-            textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            viewFormat = wgpu::TextureFormat::Depth24Plus;
-            ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
-        }
-
-        // Test that a RGBA texture can be viewed as both RGBA and RGBASrgb, but not BGRA or
-        // BGRASrgb
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewFormat = wgpu::TextureFormat::RGBA8UnormSrgb;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Test that a BGRASrgb texture can be viewed as both BGRA and BGRASrgb, but not RGBA or
-        // RGBASrgb
-        {
-            textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            viewFormat = wgpu::TextureFormat::BGRA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Test an RGBA format may be viewed as RGBA (same)
-        {
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            viewFormat = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-
-        // Test that duplicate, and multiple view formats are allowed.
-        {
-            std::array<wgpu::TextureFormat, 5> viewFormats = {
-                wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Unorm,
-                wgpu::TextureFormat::RGBA8Unorm,     wgpu::TextureFormat::RGBA8UnormSrgb,
-                wgpu::TextureFormat::RGBA8Unorm,
-            };
-            textureDesc.viewFormats = viewFormats.data();
-            textureDesc.viewFormatCount = viewFormats.size();
-
-            textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&textureDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-            texture.CreateView(&viewDesc);
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-            viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-        }
-    }
-
-    // Test that it's valid to create a texture view from a destroyed texture
-    TEST_F(TextureViewValidationTest, DestroyCreateTextureView) {
-        wgpu::Texture texture = Create2DArrayTexture(device, 1);
-        wgpu::TextureViewDescriptor descriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
-        texture.Destroy();
         texture.CreateView(&descriptor);
     }
 
-    // Test that the selected TextureAspects must exist in the texture format
-    TEST_F(TextureViewValidationTest, AspectMustExist) {
-        wgpu::TextureDescriptor descriptor = {};
-        descriptor.size = {1, 1, 1};
-        descriptor.usage =
-            wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
-
-        // Can select: All and DepthOnly from Depth32Float, but not StencilOnly
-        {
-            descriptor.format = wgpu::TextureFormat::Depth32Float;
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
-        }
-
-        // Can select: All, DepthOnly, and StencilOnly from Depth24PlusStencil8
-        {
-            descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            texture.CreateView(&viewDescriptor);
-        }
-
-        // Can select: All from RGBA8Unorm
-        {
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-            wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
-        }
+    // It is an error to view a layer past the end of the texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    class D24S8TextureViewValidationTests : public ValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-    };
-
-    // Test that the selected TextureAspects must exist in the Depth24UnormStencil8 texture format
-    TEST_F(D24S8TextureViewValidationTests, AspectMustExist) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
-
-        // Can select: All, DepthOnly, and StencilOnly from Depth24UnormStencil8
-        {
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            texture.CreateView(&viewDescriptor);
-        }
+    // It is OK to create a 1-layer 2D array texture view on a 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
     }
 
-    // Test the format compatibility rules when creating a texture view.
-    TEST_F(D24S8TextureViewValidationTests, TextureViewFormatCompatibility) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+    // It is an error to create a 3D texture view on a 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
 
-        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+    // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
+    // k..end.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
 
-        // It is an error to create a texture view in color format on a depth-stencil texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.baseMipLevel = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to make the mip level out of range.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = kDefaultMipLevels + 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = 1;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        descriptor.mipLevelCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        descriptor.mipLevelCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Test creating texture view on a 2D array texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture2DArray) {
+    constexpr uint32_t kDefaultArrayLayers = 6;
+
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a 2D texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is OK to create a 2D array texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 3D texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a 1D texture view on a 2D array texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::e1D;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
+    // layers k..end.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+
+        descriptor.baseArrayLayer = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = kDefaultArrayLayers;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error for the array layer range of the view to exceed that of the texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = kDefaultArrayLayers + 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = 1;
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = kDefaultArrayLayers - 1;
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = kDefaultArrayLayers;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Test creating texture view on a 3D texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture3D) {
+    wgpu::Texture texture = Create3DTexture(device);
+
+    wgpu::TextureViewDescriptor base3DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e3D);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a 3D texture view on a 3D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 1D/2D/2DArray/Cube/CubeArray texture view on a 3D texture.
+    {
+        wgpu::TextureViewDimension invalidDimensions[] = {
+            wgpu::TextureViewDimension::e1D,       wgpu::TextureViewDimension::e2D,
+            wgpu::TextureViewDimension::e2DArray,  wgpu::TextureViewDimension::Cube,
+            wgpu::TextureViewDimension::CubeArray,
+        };
+        for (wgpu::TextureViewDimension dimension : invalidDimensions) {
+            wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+            descriptor.dimension = dimension;
+            if (dimension == wgpu::TextureViewDimension::Cube ||
+                dimension == wgpu::TextureViewDimension::CubeArray) {
+                descriptor.arrayLayerCount = 6;
+            }
             ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
         }
     }
 
-    class D32S8TextureViewValidationTests : public ValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-    };
+    // baseMipLevel == k && mipLevelCount == WGPU_MIP_LEVEL_COUNT_UNDEFINED means to use levels
+    // k..end.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.mipLevelCount = WGPU_MIP_LEVEL_COUNT_UNDEFINED;
 
-    // Test that the selected TextureAspects must exist in the Depth32FloatStencil8 texture format
-    TEST_F(D32S8TextureViewValidationTests, AspectMustExist) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
-
-        // Can select: All, DepthOnly, and StencilOnly from Depth32FloatStencil8
-        {
-            wgpu::TextureViewDescriptor viewDescriptor = {};
-            viewDescriptor.aspect = wgpu::TextureAspect::All;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
-            texture.CreateView(&viewDescriptor);
-
-            viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
-            texture.CreateView(&viewDescriptor);
-        }
+        descriptor.baseMipLevel = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        texture.CreateView(&descriptor);
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
     }
 
-    // Test the format compatibility rules when creating a texture view.
-    TEST_F(D32S8TextureViewValidationTests, TextureViewFormatCompatibility) {
-        wgpu::Texture texture =
-            CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+    // It is an error to make the mip level out of range.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.baseMipLevel = 0;
+        descriptor.mipLevelCount = kDefaultMipLevels + 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = 1;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels - 1;
+        descriptor.mipLevelCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseMipLevel = kDefaultMipLevels;
+        descriptor.mipLevelCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
 
-        wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
-            CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+    // baseArrayLayer == k && arrayLayerCount == wgpu::kArrayLayerCountUndefined means to use
+    // layers k..end. But baseArrayLayer must be 0, and arrayLayerCount must be 1 at most for 3D
+    // texture view.
+    {
+        wgpu::TextureViewDescriptor descriptor = base3DTextureViewDescriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+        descriptor.baseArrayLayer = 0;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
 
-        // It is an error to create a texture view in color format on a depth-stencil texture.
-        {
-            wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
-            descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        descriptor.baseArrayLayer = 0;
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.arrayLayerCount = kDepth;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Test creating texture view on a 1D texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnTexture1D) {
+    wgpu::Texture texture = Create1DTexture(device);
+
+    wgpu::TextureViewDescriptor base1DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e1D);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a 1D texture view on a 1D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 2D/2DArray/Cube/CubeArray/3D texture view on a 1D texture.
+    {
+        wgpu::TextureViewDimension invalidDimensions[] = {
+            wgpu::TextureViewDimension::e2D,  wgpu::TextureViewDimension::e2DArray,
+            wgpu::TextureViewDimension::Cube, wgpu::TextureViewDimension::CubeArray,
+            wgpu::TextureViewDimension::e3D,
+        };
+        for (wgpu::TextureViewDimension dimension : invalidDimensions) {
+            wgpu::TextureViewDescriptor descriptor = base1DTextureViewDescriptor;
+            descriptor.dimension = dimension;
             ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
         }
     }
 
+    // No tests for setting mip levels / array layer ranges because 1D textures can only have
+    // a single mip and layer.
+}
+
+// Test creating texture view on a multisampled 2D texture
+TEST_F(TextureViewValidationTest, CreateTextureViewOnMultisampledTexture2D) {
+    wgpu::Texture texture = Create2DArrayTexture(device, /* arrayLayerCount */ 1, kWidth, kHeight,
+                                                 /* mipLevelCount */ 1, /* sampleCount */ 4);
+
+    // It is OK to create a 2D texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a 1-layer 2D array texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        descriptor.arrayLayerCount = 1;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a 1D texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureViewDimension::e1D;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a 3D texture view on a multisampled 2D texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = {};
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Using the "none" ("default") values validates the same as explicitly
+// specifying the values they're supposed to default to.
+// Variant for a 2D texture with more than 1 array layer.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DArray) {
+    constexpr uint32_t kDefaultArrayLayers = 8;
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    { texture.CreateView(); }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = wgpu::TextureFormat::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::R8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        texture.CreateView(&descriptor);
+        // Setting view dimension to 2D, its arrayLayerCount will default to 1, and view
+        // creation will succeed.
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        texture.CreateView(&descriptor);
+        // Setting view dimension to Cube, its arrayLayer will default to 6.
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 2;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 3;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        // Setting view dimension to CubeArray, its arrayLayer will default to
+        // size.depthOrArrayLayers (kDefaultArrayLayers) - baseArrayLayer.
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.baseArrayLayer = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.baseArrayLayer = 2;
+        texture.CreateView(&descriptor);
+        descriptor.baseArrayLayer = 3;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+
+        // Setting array layers to non-0 means the dimensionality will
+    // default to 2D, so by itself it causes an error.
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        texture.CreateView(&descriptor);
+
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        texture.CreateView(&descriptor);
+    }
+}
+
+// Using the "none" ("default") values validates the same as explicitly
+// specifying the values they're supposed to default to.
+// Variant for a 2D texture with only 1 array layer.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults2DNonArray) {
+    constexpr uint32_t kDefaultArrayLayers = 1;
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    { texture.CreateView(); }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = wgpu::TextureFormat::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::R8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        texture.CreateView(&descriptor);
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = kDefaultArrayLayers;
+        texture.CreateView(&descriptor);
+    }
+}
+
+// Using the "none" ("default") values validates the same as explicitly
+// specifying the values they're supposed to default to.
+// Variant for a 3D texture.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaults3D) {
+    wgpu::Texture texture = Create3DTexture(device);
+
+    { texture.CreateView(); }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.format = wgpu::TextureFormat::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&descriptor);
+        descriptor.format = wgpu::TextureFormat::R8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Undefined;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e3D;
+        texture.CreateView(&descriptor);
+        descriptor.dimension = wgpu::TextureViewDimension::e2DArray;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+        descriptor.dimension = wgpu::TextureViewDimension::e2D;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.arrayLayerCount = wgpu::kArrayLayerCountUndefined;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 1;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = 2;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+    {
+        wgpu::TextureViewDescriptor descriptor;
+        descriptor.mipLevelCount = kDefaultMipLevels;
+        texture.CreateView(&descriptor);
+        descriptor.arrayLayerCount = kDepth;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+// Regression test for crbug.com/1314049. Format default depends on the aspect.
+// Test that computing the default does not crash if the aspect is invalid.
+TEST_F(TextureViewValidationTest, TextureViewDescriptorDefaultsInvalidAspect) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24PlusStencil8);
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.aspect = static_cast<wgpu::TextureAspect>(-1);
+
+    // Validation should catch the invalid aspect.
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc),
+                        testing::HasSubstr("Invalid value for WGPUTextureAspect"));
+}
+
+// Test creating cube map texture view
+TEST_F(TextureViewValidationTest, CreateCubeMapTextureView) {
+    constexpr uint32_t kDefaultArrayLayers = 16;
+
+    wgpu::Texture texture = Create2DArrayTexture(device, kDefaultArrayLayers);
+
+    wgpu::TextureViewDescriptor base2DArrayTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2DArray);
+
+    // It is an error to create a view with zero 'arrayLayerCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a view with zero 'mipLevelCount'.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.mipLevelCount = 0;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a cube map texture view with arrayLayerCount == 6.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 6;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a cube map texture view with arrayLayerCount != 6.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 3;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is OK to create a cube map array texture view with arrayLayerCount % 6 == 0.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.arrayLayerCount = 12;
+        texture.CreateView(&descriptor);
+    }
+
+    // It is an error to create a cube map array texture view with arrayLayerCount % 6 != 0.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.arrayLayerCount = 11;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a cube map texture view with width != height.
+    {
+        wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
+
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::Cube;
+        descriptor.arrayLayerCount = 6;
+        ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
+    }
+
+    // It is an error to create a cube map array texture view with width != height.
+    {
+        wgpu::Texture nonSquareTexture = Create2DArrayTexture(device, 18, 32, 16, 5);
+
+        wgpu::TextureViewDescriptor descriptor = base2DArrayTextureViewDescriptor;
+        descriptor.dimension = wgpu::TextureViewDimension::CubeArray;
+        descriptor.arrayLayerCount = 12;
+        ASSERT_DEVICE_ERROR(nonSquareTexture.CreateView(&descriptor));
+    }
+}
+
+// Test the format compatibility rules when creating a texture view.
+TEST_F(TextureViewValidationTest, TextureViewFormatCompatibility) {
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size.width = 4;
+    textureDesc.size.height = 4;
+    textureDesc.usage = wgpu::TextureUsage::TextureBinding;
+
+    wgpu::TextureViewDescriptor viewDesc = {};
+
+    // It is an error to create an sRGB texture view from an RGB texture, without viewFormats.
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is an error to create an RGB texture view from an sRGB texture, without viewFormats.
+    {
+        textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is an error to create a texture view with a depth-stencil format of an RGBA texture.
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is an error to create a texture view with a depth format of a depth-stencil texture.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24Plus;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is invalid to create a texture view with a combined depth-stencil format if only
+    // the depth aspect is selected.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // It is invalid to create a texture view with a combined depth-stencil format if only
+    // the stencil aspect is selected.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.aspect = wgpu::TextureAspect::StencilOnly;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Regression test for crbug.com/1312780.
+    // viewFormat is not supported (Null backend does not support any optional features).
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24UnormStencil8;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc), testing::HasSubstr("Unsupported"));
+    }
+
+    // It is valid to create a texture view with a depth format of a depth-stencil texture
+    // if the depth only aspect is selected.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewDesc.format = wgpu::TextureFormat::Depth24Plus;
+        viewDesc.aspect = wgpu::TextureAspect::DepthOnly;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+        texture.CreateView(&viewDesc);
+
+        viewDesc = {};
+    }
+
+    // Prep for testing a single view format in viewFormats.
+    wgpu::TextureFormat viewFormat;
+    textureDesc.viewFormats = &viewFormat;
+    textureDesc.viewFormatCount = 1;
+
+    // An aspect format is not a valid view format of a depth-stencil texture.
+    {
+        textureDesc.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        viewFormat = wgpu::TextureFormat::Depth24Plus;
+        ASSERT_DEVICE_ERROR(device.CreateTexture(&textureDesc));
+    }
+
+    // Test that an RGBA texture can be viewed as both RGBA and RGBASrgb, but not BGRA or
+    // BGRASrgb
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewFormat = wgpu::TextureFormat::RGBA8UnormSrgb;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Test that a BGRASrgb texture can be viewed as both BGRA and BGRASrgb, but not RGBA or
+    // RGBASrgb
+    {
+        textureDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        viewFormat = wgpu::TextureFormat::BGRA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Test an RGBA format may be viewed as RGBA (same)
+    {
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        viewFormat = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+
+    // Test that duplicate, and multiple view formats are allowed.
+    {
+        std::array<wgpu::TextureFormat, 5> viewFormats = {
+            wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Unorm,
+            wgpu::TextureFormat::RGBA8Unorm,     wgpu::TextureFormat::RGBA8UnormSrgb,
+            wgpu::TextureFormat::RGBA8Unorm,
+        };
+        textureDesc.viewFormats = viewFormats.data();
+        textureDesc.viewFormatCount = viewFormats.size();
+
+        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&textureDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+        texture.CreateView(&viewDesc);
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+        viewDesc.format = wgpu::TextureFormat::BGRA8UnormSrgb;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+    }
+}
+
+// Test that it's valid to create a texture view from a destroyed texture
+TEST_F(TextureViewValidationTest, DestroyCreateTextureView) {
+    wgpu::Texture texture = Create2DArrayTexture(device, 1);
+    wgpu::TextureViewDescriptor descriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+    texture.Destroy();
+    texture.CreateView(&descriptor);
+}
+
+// Test that the selected TextureAspects must exist in the texture format
+TEST_F(TextureViewValidationTest, AspectMustExist) {
+    wgpu::TextureDescriptor descriptor = {};
+    descriptor.size = {1, 1, 1};
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment;
+
+    // Can select: All and DepthOnly from Depth32Float, but not StencilOnly
+    {
+        descriptor.format = wgpu::TextureFormat::Depth32Float;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+    }
+
+    // Can select: All, DepthOnly, and StencilOnly from Depth24PlusStencil8
+    {
+        descriptor.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        texture.CreateView(&viewDescriptor);
+    }
+
+    // Can select: All from RGBA8Unorm
+    {
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDescriptor));
+    }
+}
+
+class D24S8TextureViewValidationTests : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth24UnormStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that the selected TextureAspects must exist in the Depth24UnormStencil8 texture format
+TEST_F(D24S8TextureViewValidationTests, AspectMustExist) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+
+    // Can select: All, DepthOnly, and StencilOnly from Depth24UnormStencil8
+    {
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        texture.CreateView(&viewDescriptor);
+    }
+}
+
+// Test the format compatibility rules when creating a texture view.
+TEST_F(D24S8TextureViewValidationTests, TextureViewFormatCompatibility) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth24UnormStencil8);
+
+    wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+    // It is an error to create a texture view in color format on a depth-stencil texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
+class D32S8TextureViewValidationTests : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::Depth32FloatStencil8};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
+    }
+};
+
+// Test that the selected TextureAspects must exist in the Depth32FloatStencil8 texture format
+TEST_F(D32S8TextureViewValidationTests, AspectMustExist) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+
+    // Can select: All, DepthOnly, and StencilOnly from Depth32FloatStencil8
+    {
+        wgpu::TextureViewDescriptor viewDescriptor = {};
+        viewDescriptor.aspect = wgpu::TextureAspect::All;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::DepthOnly;
+        texture.CreateView(&viewDescriptor);
+
+        viewDescriptor.aspect = wgpu::TextureAspect::StencilOnly;
+        texture.CreateView(&viewDescriptor);
+    }
+}
+
+// Test the format compatibility rules when creating a texture view.
+TEST_F(D32S8TextureViewValidationTests, TextureViewFormatCompatibility) {
+    wgpu::Texture texture =
+        CreateDepthStencilTexture(device, wgpu::TextureFormat::Depth32FloatStencil8);
+
+    wgpu::TextureViewDescriptor base2DTextureViewDescriptor =
+        CreateDefaultViewDescriptor(wgpu::TextureViewDimension::e2D);
+
+    // It is an error to create a texture view in color format on a depth-stencil texture.
+    {
+        wgpu::TextureViewDescriptor descriptor = base2DTextureViewDescriptor;
+        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        ASSERT_DEVICE_ERROR(texture.CreateView(&descriptor));
+    }
+}
+
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp b/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
index 34acac4..1c0d9be 100644
--- a/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/ToggleValidationTests.cpp
@@ -18,74 +18,33 @@
 
 namespace {
 
-    class ToggleValidationTest : public ValidationTest {};
+class ToggleValidationTest : public ValidationTest {};
 
-    // Tests querying the detail of a toggle from dawn::native::InstanceBase works correctly.
-    TEST_F(ToggleValidationTest, QueryToggleInfo) {
-        // Query with a valid toggle name
-        {
-            const char* kValidToggleName = "emulate_store_and_msaa_resolve";
-            const dawn::native::ToggleInfo* toggleInfo = instance->GetToggleInfo(kValidToggleName);
-            ASSERT_NE(nullptr, toggleInfo);
-            ASSERT_NE(nullptr, toggleInfo->name);
-            ASSERT_NE(nullptr, toggleInfo->description);
-            ASSERT_NE(nullptr, toggleInfo->url);
-        }
-
-        // Query with an invalid toggle name
-        {
-            const char* kInvalidToggleName = "!@#$%^&*";
-            const dawn::native::ToggleInfo* toggleInfo =
-                instance->GetToggleInfo(kInvalidToggleName);
-            ASSERT_EQ(nullptr, toggleInfo);
-        }
+// Tests querying the detail of a toggle from dawn::native::InstanceBase works correctly.
+TEST_F(ToggleValidationTest, QueryToggleInfo) {
+    // Query with a valid toggle name
+    {
+        const char* kValidToggleName = "emulate_store_and_msaa_resolve";
+        const dawn::native::ToggleInfo* toggleInfo = instance->GetToggleInfo(kValidToggleName);
+        ASSERT_NE(nullptr, toggleInfo);
+        ASSERT_NE(nullptr, toggleInfo->name);
+        ASSERT_NE(nullptr, toggleInfo->description);
+        ASSERT_NE(nullptr, toggleInfo->url);
     }
 
-    // Tests overriding toggles when creating a device works correctly.
-    TEST_F(ToggleValidationTest, OverrideToggleUsage) {
-        // Create device with a valid name of a toggle
-        {
-            const char* kValidToggleName = "emulate_store_and_msaa_resolve";
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
-            descriptor.nextInChain = &togglesDesc;
-            togglesDesc.forceEnabledToggles = &kValidToggleName;
-            togglesDesc.forceEnabledTogglesCount = 1;
-
-            WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
-            std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
-            bool validToggleExists = false;
-            for (const char* toggle : toggleNames) {
-                if (strcmp(toggle, kValidToggleName) == 0) {
-                    validToggleExists = true;
-                }
-            }
-            ASSERT_EQ(validToggleExists, true);
-        }
-
-        // Create device with an invalid toggle name
-        {
-            const char* kInvalidToggleName = "!@#$%^&*";
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::DawnTogglesDeviceDescriptor togglesDesc;
-            descriptor.nextInChain = &togglesDesc;
-            togglesDesc.forceEnabledToggles = &kInvalidToggleName;
-            togglesDesc.forceEnabledTogglesCount = 1;
-
-            WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
-            std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
-            bool InvalidToggleExists = false;
-            for (const char* toggle : toggleNames) {
-                if (strcmp(toggle, kInvalidToggleName) == 0) {
-                    InvalidToggleExists = true;
-                }
-            }
-            ASSERT_EQ(InvalidToggleExists, false);
-        }
+    // Query with an invalid toggle name
+    {
+        const char* kInvalidToggleName = "!@#$%^&*";
+        const dawn::native::ToggleInfo* toggleInfo = instance->GetToggleInfo(kInvalidToggleName);
+        ASSERT_EQ(nullptr, toggleInfo);
     }
+}
 
-    TEST_F(ToggleValidationTest, TurnOffVsyncWithToggle) {
-        const char* kValidToggleName = "turn_off_vsync";
+// Tests overriding toggles when creating a device works correctly.
+TEST_F(ToggleValidationTest, OverrideToggleUsage) {
+    // Create device with a valid name of a toggle
+    {
+        const char* kValidToggleName = "emulate_store_and_msaa_resolve";
         wgpu::DeviceDescriptor descriptor;
         wgpu::DawnTogglesDeviceDescriptor togglesDesc;
         descriptor.nextInChain = &togglesDesc;
@@ -102,4 +61,44 @@
         }
         ASSERT_EQ(validToggleExists, true);
     }
+
+    // Create device with an invalid toggle name
+    {
+        const char* kInvalidToggleName = "!@#$%^&*";
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+        descriptor.nextInChain = &togglesDesc;
+        togglesDesc.forceEnabledToggles = &kInvalidToggleName;
+        togglesDesc.forceEnabledTogglesCount = 1;
+
+        WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
+        std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
+        bool InvalidToggleExists = false;
+        for (const char* toggle : toggleNames) {
+            if (strcmp(toggle, kInvalidToggleName) == 0) {
+                InvalidToggleExists = true;
+            }
+        }
+        ASSERT_EQ(InvalidToggleExists, false);
+    }
+}
+
+TEST_F(ToggleValidationTest, TurnOffVsyncWithToggle) {
+    const char* kValidToggleName = "turn_off_vsync";
+    wgpu::DeviceDescriptor descriptor;
+    wgpu::DawnTogglesDeviceDescriptor togglesDesc;
+    descriptor.nextInChain = &togglesDesc;
+    togglesDesc.forceEnabledToggles = &kValidToggleName;
+    togglesDesc.forceEnabledTogglesCount = 1;
+
+    WGPUDevice deviceWithToggle = adapter.CreateDevice(&descriptor);
+    std::vector<const char*> toggleNames = dawn::native::GetTogglesUsed(deviceWithToggle);
+    bool validToggleExists = false;
+    for (const char* toggle : toggleNames) {
+        if (strcmp(toggle, kValidToggleName) == 0) {
+            validToggleExists = true;
+        }
+    }
+    ASSERT_EQ(validToggleExists, true);
+}
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp b/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
index 0fee9c6..95b5645 100644
--- a/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/UnsafeAPIValidationTests.cpp
@@ -21,7 +21,7 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace {
-    using testing::HasSubstr;
+using testing::HasSubstr;
 }  // anonymous namespace
 
 class UnsafeAPIValidationTest : public ValidationTest {
diff --git a/src/dawn/tests/unittests/validation/ValidationTest.cpp b/src/dawn/tests/unittests/validation/ValidationTest.cpp
index d027057..b7b23b2 100644
--- a/src/dawn/tests/unittests/validation/ValidationTest.cpp
+++ b/src/dawn/tests/unittests/validation/ValidationTest.cpp
@@ -26,9 +26,10 @@
 
 namespace {
 
-    bool gUseWire = false;
-    std::string gWireTraceDir = "";
-    std::unique_ptr<ToggleParser> gToggleParser = nullptr;
+bool gUseWire = false;
+// NOLINTNEXTLINE(runtime/string)
+std::string gWireTraceDir = "";
+std::unique_ptr<ToggleParser> gToggleParser = nullptr;
 
 }  // namespace
 
@@ -79,8 +80,7 @@
 }
 
 ValidationTest::ValidationTest()
-    : mWireHelper(utils::CreateWireHelper(gUseWire, gWireTraceDir.c_str())) {
-}
+    : mWireHelper(utils::CreateWireHelper(gUseWire, gWireTraceDir.c_str())) {}
 
 void ValidationTest::SetUp() {
     instance = std::make_unique<dawn::native::Instance>();
diff --git a/src/dawn/tests/unittests/validation/ValidationTest.h b/src/dawn/tests/unittests/validation/ValidationTest.h
index e202772..f373b28 100644
--- a/src/dawn/tests/unittests/validation/ValidationTest.h
+++ b/src/dawn/tests/unittests/validation/ValidationTest.h
@@ -18,11 +18,11 @@
 #include <memory>
 #include <string>
 
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
 #include "dawn/common/Log.h"
 #include "dawn/native/DawnNative.h"
 #include "dawn/webgpu_cpp.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
 
 // Argument helpers to allow macro overriding.
 #define UNIMPLEMENTED_MACRO(...) UNREACHABLE()
@@ -88,7 +88,7 @@
 #define EXPECT_DEPRECATION_WARNING(statement) EXPECT_DEPRECATION_WARNINGS(statement, 1)
 
 namespace utils {
-    class WireHelper;
+class WireHelper;
 }  // namespace utils
 
 void InitDawnValidationTestEnvironment(int argc, char** argv);
diff --git a/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp b/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
index 088ed60..6075b94 100644
--- a/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
+++ b/src/dawn/tests/unittests/validation/VideoViewsValidationTests.cpp
@@ -19,326 +19,322 @@
 
 namespace {
 
-    class VideoViewsValidation : public ValidationTest {
-      protected:
-        WGPUDevice CreateTestDevice() override {
-            wgpu::DeviceDescriptor descriptor;
-            wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::DawnMultiPlanarFormats};
-            descriptor.requiredFeatures = requiredFeatures;
-            descriptor.requiredFeaturesCount = 1;
-            return adapter.CreateDevice(&descriptor);
-        }
-
-        wgpu::Texture CreateVideoTextureForTest(wgpu::TextureFormat format,
-                                                wgpu::TextureUsage usage) {
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size.width = 1;
-            descriptor.size.height = 1;
-            descriptor.format = format;
-            descriptor.usage = usage;
-            return device.CreateTexture(&descriptor);
-        }
-    };
-
-    // Test texture views compatibility rules.
-    TEST_F(VideoViewsValidation, CreateViewFails) {
-        wgpu::Texture videoTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        wgpu::TextureViewDescriptor viewDesc = {};
-
-        // Correct plane index but incompatible view format.
-        viewDesc.format = wgpu::TextureFormat::R8Uint;
-        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
-
-        // Compatible view format but wrong plane index.
-        viewDesc.format = wgpu::TextureFormat::R8Unorm;
-        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
-
-        // Compatible view format but wrong aspect.
-        viewDesc.format = wgpu::TextureFormat::R8Unorm;
-        viewDesc.aspect = wgpu::TextureAspect::All;
-        ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
-
-        // Create a single plane texture.
-        wgpu::TextureDescriptor desc;
-        desc.format = wgpu::TextureFormat::RGBA8Unorm;
-        desc.dimension = wgpu::TextureDimension::e2D;
-        desc.usage = wgpu::TextureUsage::TextureBinding;
-        desc.size = {1, 1, 1};
-
-        wgpu::Texture texture = device.CreateTexture(&desc);
-
-        // Plane aspect specified with non-planar texture.
-        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-        // Planar views with non-planar texture.
-        viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        viewDesc.format = wgpu::TextureFormat::R8Unorm;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
-
-        viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        viewDesc.format = wgpu::TextureFormat::RG8Unorm;
-        ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+class VideoViewsValidation : public ValidationTest {
+  protected:
+    WGPUDevice CreateTestDevice() override {
+        wgpu::DeviceDescriptor descriptor;
+        wgpu::FeatureName requiredFeatures[1] = {wgpu::FeatureName::DawnMultiPlanarFormats};
+        descriptor.requiredFeatures = requiredFeatures;
+        descriptor.requiredFeaturesCount = 1;
+        return adapter.CreateDevice(&descriptor);
     }
 
-    // Test texture views compatibility rules.
-    TEST_F(VideoViewsValidation, CreateViewSucceeds) {
-        wgpu::Texture yuvTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        // Per plane view formats unspecified.
-        wgpu::TextureViewDescriptor planeViewDesc = {};
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        wgpu::TextureView plane0View = yuvTexture.CreateView(&planeViewDesc);
-
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        wgpu::TextureView plane1View = yuvTexture.CreateView(&planeViewDesc);
-
-        ASSERT_NE(plane0View.Get(), nullptr);
-        ASSERT_NE(plane1View.Get(), nullptr);
-
-        // Per plane view formats specified.
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
-        planeViewDesc.format = wgpu::TextureFormat::R8Unorm;
-        plane0View = yuvTexture.CreateView(&planeViewDesc);
-
-        planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
-        planeViewDesc.format = wgpu::TextureFormat::RG8Unorm;
-        plane1View = yuvTexture.CreateView(&planeViewDesc);
-
-        ASSERT_NE(plane0View.Get(), nullptr);
-        ASSERT_NE(plane1View.Get(), nullptr);
+    wgpu::Texture CreateVideoTextureForTest(wgpu::TextureFormat format, wgpu::TextureUsage usage) {
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = 1;
+        descriptor.size.height = 1;
+        descriptor.format = format;
+        descriptor.usage = usage;
+        return device.CreateTexture(&descriptor);
     }
+};
 
-    // Test copying from one multi-planar format into another fails.
-    TEST_F(VideoViewsValidation, T2TCopyAllAspectsFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Test texture views compatibility rules.
+TEST_F(VideoViewsValidation, CreateViewFails) {
+    wgpu::Texture videoTexture = CreateVideoTextureForTest(
+        wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
 
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    wgpu::TextureViewDescriptor viewDesc = {};
 
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+    // Correct plane index but incompatible view format.
+    viewDesc.format = wgpu::TextureFormat::R8Uint;
+    viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
 
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+    // Compatible view format but wrong plane index.
+    viewDesc.format = wgpu::TextureFormat::R8Unorm;
+    viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
 
-        wgpu::Extent3D copySize = {1, 1, 1};
+    // Compatible view format but wrong aspect.
+    viewDesc.format = wgpu::TextureFormat::R8Unorm;
+    viewDesc.aspect = wgpu::TextureAspect::All;
+    ASSERT_DEVICE_ERROR(videoTexture.CreateView(&viewDesc));
 
+    // Create a single plane texture.
+    wgpu::TextureDescriptor desc;
+    desc.format = wgpu::TextureFormat::RGBA8Unorm;
+    desc.dimension = wgpu::TextureDimension::e2D;
+    desc.usage = wgpu::TextureUsage::TextureBinding;
+    desc.size = {1, 1, 1};
+
+    wgpu::Texture texture = device.CreateTexture(&desc);
+
+    // Plane aspect specified with non-planar texture.
+    viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+    viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+    // Planar views with non-planar texture.
+    viewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    viewDesc.format = wgpu::TextureFormat::R8Unorm;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+
+    viewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    viewDesc.format = wgpu::TextureFormat::RG8Unorm;
+    ASSERT_DEVICE_ERROR(texture.CreateView(&viewDesc));
+}
+
+// Test texture views compatibility rules.
+TEST_F(VideoViewsValidation, CreateViewSucceeds) {
+    wgpu::Texture yuvTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    // Per plane view formats unspecified.
+    wgpu::TextureViewDescriptor planeViewDesc = {};
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    wgpu::TextureView plane0View = yuvTexture.CreateView(&planeViewDesc);
+
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    wgpu::TextureView plane1View = yuvTexture.CreateView(&planeViewDesc);
+
+    ASSERT_NE(plane0View.Get(), nullptr);
+    ASSERT_NE(plane1View.Get(), nullptr);
+
+    // Per plane view formats specified.
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane0Only;
+    planeViewDesc.format = wgpu::TextureFormat::R8Unorm;
+    plane0View = yuvTexture.CreateView(&planeViewDesc);
+
+    planeViewDesc.aspect = wgpu::TextureAspect::Plane1Only;
+    planeViewDesc.format = wgpu::TextureFormat::RG8Unorm;
+    plane1View = yuvTexture.CreateView(&planeViewDesc);
+
+    ASSERT_NE(plane0View.Get(), nullptr);
+    ASSERT_NE(plane1View.Get(), nullptr);
+}
+
+// Test copying from one multi-planar format into another fails.
+TEST_F(VideoViewsValidation, T2TCopyAllAspectsFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+
+    wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test copying from one multi-planar format into another per plane fails.
+TEST_F(VideoViewsValidation, T2TCopyPlaneAspectFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::ImageCopyTexture copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+    wgpu::ImageCopyTexture copyDst =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
         ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test copying from one multi-planar format into another per plane fails.
-    TEST_F(VideoViewsValidation, T2TCopyPlaneAspectFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
 
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(
-            srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
-
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(
-            dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0},
-                                                wgpu::TextureAspect::Plane1Only);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
+}
 
-    // Test copying from a multi-planar format to a buffer fails.
-    TEST_F(VideoViewsValidation, T2BCopyAllAspectsFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Test copying from a multi-planar format to a buffer fails.
+TEST_F(VideoViewsValidation, T2BCopyAllAspectsFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
 
-        wgpu::BufferDescriptor bufferDescriptor;
-        bufferDescriptor.size = 1;
-        bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 1;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
 
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0});
 
-        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
+    wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
 
-        wgpu::Extent3D copySize = {1, 1, 1};
+    wgpu::Extent3D copySize = {1, 1, 1};
 
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test copying from multi-planar format per plane to a buffer fails.
+TEST_F(VideoViewsValidation, T2BCopyPlaneAspectsFails) {
+    wgpu::Texture srcTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = 1;
+    bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::ImageCopyTexture copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+    wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
         ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test copying from multi-planar format per plane to a buffer fails.
-    TEST_F(VideoViewsValidation, T2BCopyPlaneAspectsFails) {
-        wgpu::Texture srcTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    copySrc =
+        utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
 
-        wgpu::BufferDescriptor bufferDescriptor;
-        bufferDescriptor.size = 1;
-        bufferDescriptor.usage = wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer dstBuffer = device.CreateBuffer(&bufferDescriptor);
-
-        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(
-            srcTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
-
-        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(dstBuffer, 0, 4);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        copySrc = utils::CreateImageCopyTexture(srcTexture, 0, {0, 0, 0},
-                                                wgpu::TextureAspect::Plane1Only);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
+}
 
-    // Test copying from a buffer to a multi-planar format fails.
-    TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) {
-        std::vector<uint8_t> placeholderData(4, 0);
+// Test copying from a buffer to a multi-planar format fails.
+TEST_F(VideoViewsValidation, B2TCopyAllAspectsFails) {
+    std::vector<uint8_t> placeholderData(4, 0);
 
-        wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
-            device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
+    wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
+        device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
 
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
 
-        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
+    wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
 
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0});
 
-        wgpu::Extent3D copySize = {1, 1, 1};
+    wgpu::Extent3D copySize = {1, 1, 1};
 
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+    ASSERT_DEVICE_ERROR(encoder.Finish());
+}
+
+// Test copying from a buffer to a multi-planar format per plane fails.
+TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) {
+    std::vector<uint8_t> placeholderData(4, 0);
+
+    wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
+        device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
+
+    wgpu::Texture dstTexture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                         wgpu::TextureUsage::TextureBinding);
+
+    wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
+
+    wgpu::ImageCopyTexture copyDst =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    {
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
         ASSERT_DEVICE_ERROR(encoder.Finish());
     }
 
-    // Test copying from a buffer to a multi-planar format per plane fails.
-    TEST_F(VideoViewsValidation, B2TCopyPlaneAspectsFails) {
-        std::vector<uint8_t> placeholderData(4, 0);
+    copyDst =
+        utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane1Only);
 
-        wgpu::Buffer srcBuffer = utils::CreateBufferFromData(
-            device, placeholderData.data(), placeholderData.size(), wgpu::BufferUsage::CopySrc);
-
-        wgpu::Texture dstTexture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
-
-        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(srcBuffer, 0, 12, 4);
-
-        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(
-            dstTexture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
-
-        copyDst = utils::CreateImageCopyTexture(dstTexture, 0, {0, 0, 0},
-                                                wgpu::TextureAspect::Plane1Only);
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
-            ASSERT_DEVICE_ERROR(encoder.Finish());
-        }
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+        ASSERT_DEVICE_ERROR(encoder.Finish());
     }
+}
 
-    // Tests which multi-planar formats are allowed to be sampled.
-    TEST_F(VideoViewsValidation, SamplingMultiPlanarTexture) {
-        wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
-            device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
+// Tests which multi-planar formats are allowed to be sampled.
+TEST_F(VideoViewsValidation, SamplingMultiPlanarTexture) {
+    wgpu::BindGroupLayout layout = utils::MakeBindGroupLayout(
+        device, {{0, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}});
 
-        // R8BG8Biplanar420Unorm is allowed to be sampled, if plane 0 or plane 1 is selected.
-        wgpu::Texture texture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+    // R8BG8Biplanar420Unorm is allowed to be sampled, if plane 0 or plane 1 is selected.
+    wgpu::Texture texture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                      wgpu::TextureUsage::TextureBinding);
 
-        wgpu::TextureViewDescriptor desc = {};
+    wgpu::TextureViewDescriptor desc = {};
 
-        desc.aspect = wgpu::TextureAspect::Plane0Only;
-        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
+    desc.aspect = wgpu::TextureAspect::Plane0Only;
+    utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
 
-        desc.aspect = wgpu::TextureAspect::Plane1Only;
-        utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
-    }
+    desc.aspect = wgpu::TextureAspect::Plane1Only;
+    utils::MakeBindGroup(device, layout, {{0, texture.CreateView(&desc)}});
+}
 
-    // Tests creating a texture with a multi-plane format.
-    TEST_F(VideoViewsValidation, CreateTextureFails) {
-        // multi-planar formats are NOT allowed to be renderable.
-        ASSERT_DEVICE_ERROR(CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
-                                                      wgpu::TextureUsage::RenderAttachment));
-    }
+// Tests creating a texture with a multi-plane format.
+TEST_F(VideoViewsValidation, CreateTextureFails) {
+    // multi-planar formats are NOT allowed to be renderable.
+    ASSERT_DEVICE_ERROR(CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                  wgpu::TextureUsage::RenderAttachment));
+}
 
-    // Tests writing into a multi-planar format fails.
-    TEST_F(VideoViewsValidation, WriteTextureAllAspectsFails) {
-        wgpu::Texture texture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Tests writing into a multi-planar format fails.
+TEST_F(VideoViewsValidation, WriteTextureAllAspectsFails) {
+    wgpu::Texture texture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                      wgpu::TextureUsage::TextureBinding);
 
-        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 4, 4);
+    wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 4, 4);
 
-        wgpu::ImageCopyTexture imageCopyTexture =
-            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
 
-        std::vector<uint8_t> placeholderData(4, 0);
-        wgpu::Extent3D writeSize = {1, 1, 1};
+    std::vector<uint8_t> placeholderData(4, 0);
+    wgpu::Extent3D writeSize = {1, 1, 1};
 
-        wgpu::Queue queue = device.GetQueue();
+    wgpu::Queue queue = device.GetQueue();
 
-        ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
-                                               placeholderData.size(), &textureDataLayout,
-                                               &writeSize));
-    }
+    ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
+                                           placeholderData.size(), &textureDataLayout, &writeSize));
+}
 
-    // Tests writing into a multi-planar format per plane fails.
-    TEST_F(VideoViewsValidation, WriteTexturePlaneAspectsFails) {
-        wgpu::Texture texture = CreateVideoTextureForTest(
-            wgpu::TextureFormat::R8BG8Biplanar420Unorm, wgpu::TextureUsage::TextureBinding);
+// Tests writing into a multi-planar format per plane fails.
+TEST_F(VideoViewsValidation, WriteTexturePlaneAspectsFails) {
+    wgpu::Texture texture = CreateVideoTextureForTest(wgpu::TextureFormat::R8BG8Biplanar420Unorm,
+                                                      wgpu::TextureUsage::TextureBinding);
 
-        wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 12, 4);
-        wgpu::ImageCopyTexture imageCopyTexture =
-            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
+    wgpu::TextureDataLayout textureDataLayout = utils::CreateTextureDataLayout(0, 12, 4);
+    wgpu::ImageCopyTexture imageCopyTexture =
+        utils::CreateImageCopyTexture(texture, 0, {0, 0, 0}, wgpu::TextureAspect::Plane0Only);
 
-        std::vector<uint8_t> placeholderData(4, 0);
-        wgpu::Extent3D writeSize = {1, 1, 1};
+    std::vector<uint8_t> placeholderData(4, 0);
+    wgpu::Extent3D writeSize = {1, 1, 1};
 
-        wgpu::Queue queue = device.GetQueue();
+    wgpu::Queue queue = device.GetQueue();
 
-        ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
-                                               placeholderData.size(), &textureDataLayout,
-                                               &writeSize));
-    }
+    ASSERT_DEVICE_ERROR(queue.WriteTexture(&imageCopyTexture, placeholderData.data(),
+                                           placeholderData.size(), &textureDataLayout, &writeSize));
+}
 
 }  // anonymous namespace
diff --git a/src/dawn/tests/unittests/validation/WriteBufferTests.cpp b/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
index a3a5f89..6876854 100644
--- a/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
+++ b/src/dawn/tests/unittests/validation/WriteBufferTests.cpp
@@ -20,86 +20,86 @@
 
 namespace {
 
-    class WriteBufferTest : public ValidationTest {
-      public:
-        wgpu::Buffer CreateWritableBuffer(uint64_t size) {
-            wgpu::BufferDescriptor desc;
-            desc.usage = wgpu::BufferUsage::CopyDst;
-            desc.size = size;
-            return device.CreateBuffer(&desc);
-        }
-
-        wgpu::CommandBuffer EncodeWriteBuffer(wgpu::Buffer buffer,
-                                              uint64_t bufferOffset,
-                                              uint64_t size) {
-            std::vector<uint8_t> data(size);
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.WriteBuffer(buffer, bufferOffset, data.data(), size);
-            return encoder.Finish();
-        }
-    };
-
-    // Tests that the buffer offset is validated to be a multiple of 4 bytes.
-    TEST_F(WriteBufferTest, OffsetAlignment) {
-        wgpu::Buffer buffer = CreateWritableBuffer(64);
-        EncodeWriteBuffer(buffer, 0, 4);
-        EncodeWriteBuffer(buffer, 4, 4);
-        EncodeWriteBuffer(buffer, 60, 4);
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 1, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 2, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 3, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 5, 4));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 11, 4));
-    }
-
-    // Tests that the buffer size is validated to be a multiple of 4 bytes.
-    TEST_F(WriteBufferTest, SizeAlignment) {
-        wgpu::Buffer buffer = CreateWritableBuffer(64);
-        EncodeWriteBuffer(buffer, 0, 64);
-        EncodeWriteBuffer(buffer, 4, 60);
-        EncodeWriteBuffer(buffer, 40, 24);
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 63));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 1));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 2));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 40, 23));
-    }
-
-    // Tests that the buffer size and offset are validated to fit within the bounds of the buffer.
-    TEST_F(WriteBufferTest, BufferBounds) {
-        wgpu::Buffer buffer = CreateWritableBuffer(64);
-        EncodeWriteBuffer(buffer, 0, 64);
-        EncodeWriteBuffer(buffer, 4, 60);
-        EncodeWriteBuffer(buffer, 40, 24);
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 68));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 64));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 60, 8));
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 64, 4));
-    }
-
-    // Tests that the destination buffer's usage is validated to contain CopyDst.
-    TEST_F(WriteBufferTest, RequireCopyDstUsage) {
-        wgpu::BufferDescriptor desc;
-        desc.usage = wgpu::BufferUsage::CopySrc;
-        desc.size = 64;
-        wgpu::Buffer buffer = device.CreateBuffer(&desc);
-
-        ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 64));
-    }
-
-    // Tests that the destination buffer's state is validated at submission.
-    TEST_F(WriteBufferTest, ValidBufferState) {
+class WriteBufferTest : public ValidationTest {
+  public:
+    wgpu::Buffer CreateWritableBuffer(uint64_t size) {
         wgpu::BufferDescriptor desc;
         desc.usage = wgpu::BufferUsage::CopyDst;
-        desc.size = 64;
-        desc.mappedAtCreation = true;
-        wgpu::Buffer buffer = device.CreateBuffer(&desc);
-
-        wgpu::CommandBuffer commands = EncodeWriteBuffer(buffer, 0, 64);
-        ASSERT_DEVICE_ERROR(device.GetQueue().Submit(1, &commands));
-
-        commands = EncodeWriteBuffer(buffer, 0, 64);
-        buffer.Unmap();
-        device.GetQueue().Submit(1, &commands);
+        desc.size = size;
+        return device.CreateBuffer(&desc);
     }
 
+    wgpu::CommandBuffer EncodeWriteBuffer(wgpu::Buffer buffer,
+                                          uint64_t bufferOffset,
+                                          uint64_t size) {
+        std::vector<uint8_t> data(size);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.WriteBuffer(buffer, bufferOffset, data.data(), size);
+        return encoder.Finish();
+    }
+};
+
+// Tests that the buffer offset is validated to be a multiple of 4 bytes.
+TEST_F(WriteBufferTest, OffsetAlignment) {
+    wgpu::Buffer buffer = CreateWritableBuffer(64);
+    EncodeWriteBuffer(buffer, 0, 4);
+    EncodeWriteBuffer(buffer, 4, 4);
+    EncodeWriteBuffer(buffer, 60, 4);
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 1, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 2, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 3, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 5, 4));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 11, 4));
+}
+
+// Tests that the buffer size is validated to be a multiple of 4 bytes.
+TEST_F(WriteBufferTest, SizeAlignment) {
+    wgpu::Buffer buffer = CreateWritableBuffer(64);
+    EncodeWriteBuffer(buffer, 0, 64);
+    EncodeWriteBuffer(buffer, 4, 60);
+    EncodeWriteBuffer(buffer, 40, 24);
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 63));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 1));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 2));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 40, 23));
+}
+
+// Tests that the buffer size and offset are validated to fit within the bounds of the buffer.
+TEST_F(WriteBufferTest, BufferBounds) {
+    wgpu::Buffer buffer = CreateWritableBuffer(64);
+    EncodeWriteBuffer(buffer, 0, 64);
+    EncodeWriteBuffer(buffer, 4, 60);
+    EncodeWriteBuffer(buffer, 40, 24);
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 68));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 4, 64));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 60, 8));
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 64, 4));
+}
+
+// Tests that the destination buffer's usage is validated to contain CopyDst.
+TEST_F(WriteBufferTest, RequireCopyDstUsage) {
+    wgpu::BufferDescriptor desc;
+    desc.usage = wgpu::BufferUsage::CopySrc;
+    desc.size = 64;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    ASSERT_DEVICE_ERROR(EncodeWriteBuffer(buffer, 0, 64));
+}
+
+// Tests that the destination buffer's state is validated at submission.
+TEST_F(WriteBufferTest, ValidBufferState) {
+    wgpu::BufferDescriptor desc;
+    desc.usage = wgpu::BufferUsage::CopyDst;
+    desc.size = 64;
+    desc.mappedAtCreation = true;
+    wgpu::Buffer buffer = device.CreateBuffer(&desc);
+
+    wgpu::CommandBuffer commands = EncodeWriteBuffer(buffer, 0, 64);
+    ASSERT_DEVICE_ERROR(device.GetQueue().Submit(1, &commands));
+
+    commands = EncodeWriteBuffer(buffer, 0, 64);
+    buffer.Unmap();
+    device.GetQueue().Submit(1, &commands);
+}
+
 }  // namespace
diff --git a/src/dawn/tests/unittests/wire/WireAdapterTests.cpp b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
index 72bda9f..aa56c39 100644
--- a/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireAdapterTests.cpp
@@ -23,318 +23,317 @@
 
 #include "webgpu/webgpu_cpp.h"
 
-namespace dawn::wire { namespace {
+namespace dawn::wire {
+namespace {
 
-    using testing::_;
-    using testing::Invoke;
-    using testing::InvokeWithoutArgs;
-    using testing::MockCallback;
-    using testing::NotNull;
-    using testing::Return;
-    using testing::SaveArg;
-    using testing::StrEq;
-    using testing::WithArg;
+using testing::_;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::MockCallback;
+using testing::NotNull;
+using testing::Return;
+using testing::SaveArg;
+using testing::StrEq;
+using testing::WithArg;
 
-    class WireAdapterTests : public WireTest {
-      protected:
-        // Bootstrap the tests and create a fake adapter.
-        void SetUp() override {
-            WireTest::SetUp();
+class WireAdapterTests : public WireTest {
+  protected:
+    // Bootstrap the tests and create a fake adapter.
+    void SetUp() override {
+        WireTest::SetUp();
 
-            auto reservation = GetWireClient()->ReserveInstance();
-            instance = wgpu::Instance::Acquire(reservation.instance);
+        auto reservation = GetWireClient()->ReserveInstance();
+        instance = wgpu::Instance::Acquire(reservation.instance);
 
-            WGPUInstance apiInstance = api.GetNewInstance();
-            EXPECT_CALL(api, InstanceReference(apiInstance));
-            EXPECT_TRUE(GetWireServer()->InjectInstance(apiInstance, reservation.id,
-                                                        reservation.generation));
+        WGPUInstance apiInstance = api.GetNewInstance();
+        EXPECT_CALL(api, InstanceReference(apiInstance));
+        EXPECT_TRUE(
+            GetWireServer()->InjectInstance(apiInstance, reservation.id, reservation.generation));
 
-            wgpu::RequestAdapterOptions options = {};
-            MockCallback<WGPURequestAdapterCallback> cb;
-            auto* userdata = cb.MakeUserdata(this);
-            instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-            // Expect the server to receive the message. Then, mock a fake reply.
-            apiAdapter = api.GetNewAdapter();
-            EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-                .WillOnce(InvokeWithoutArgs([&]() {
-                    EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
-                        .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
-                            *properties = {};
-                            properties->name = "";
-                            properties->driverDescription = "";
-                        })));
-
-                    EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
-                        .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                            *limits = {};
-                            return true;
-                        })));
-
-                    EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
-                        .WillOnce(Return(0))
-                        .WillOnce(Return(0));
-                    api.CallInstanceRequestAdapterCallback(
-                        apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
-                }));
-            FlushClient();
-
-            // Expect the callback in the client.
-            WGPUAdapter cAdapter;
-            EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
-                .WillOnce(SaveArg<1>(&cAdapter));
-            FlushServer();
-
-            EXPECT_NE(cAdapter, nullptr);
-            adapter = wgpu::Adapter::Acquire(cAdapter);
-        }
-
-        void TearDown() override {
-            adapter = nullptr;
-            instance = nullptr;
-            WireTest::TearDown();
-        }
-
-        WGPUAdapter apiAdapter;
-        wgpu::Instance instance;
-        wgpu::Adapter adapter;
-    };
-
-    // Test that the DeviceDescriptor is passed from the client to the server.
-    TEST_F(WireAdapterTests, RequestDevicePassesDescriptor) {
-        MockCallback<WGPURequestDeviceCallback> cb;
+        wgpu::RequestAdapterOptions options = {};
+        MockCallback<WGPURequestAdapterCallback> cb;
         auto* userdata = cb.MakeUserdata(this);
-
-        // Test an empty descriptor
-        {
-            wgpu::DeviceDescriptor desc = {};
-            adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-            EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-                .WillOnce(WithArg<1>(Invoke([](const WGPUDeviceDescriptor* apiDesc) {
-                    EXPECT_EQ(apiDesc->label, nullptr);
-                    EXPECT_EQ(apiDesc->requiredFeaturesCount, 0u);
-                    EXPECT_EQ(apiDesc->requiredLimits, nullptr);
-                })));
-            FlushClient();
-        }
-
-        // Test a non-empty descriptor
-        {
-            wgpu::RequiredLimits limits = {};
-            limits.limits.maxStorageTexturesPerShaderStage = 5;
-
-            std::vector<wgpu::FeatureName> features = {wgpu::FeatureName::TextureCompressionETC2,
-                                                       wgpu::FeatureName::TextureCompressionASTC};
-
-            wgpu::DeviceDescriptor desc = {};
-            desc.label = "hello device";
-            desc.requiredLimits = &limits;
-            desc.requiredFeaturesCount = features.size();
-            desc.requiredFeatures = features.data();
-
-            adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-            EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-                .WillOnce(WithArg<1>(Invoke([&](const WGPUDeviceDescriptor* apiDesc) {
-                    EXPECT_STREQ(apiDesc->label, desc.label);
-
-                    ASSERT_EQ(apiDesc->requiredFeaturesCount, features.size());
-                    for (uint32_t i = 0; i < features.size(); ++i) {
-                        EXPECT_EQ(apiDesc->requiredFeatures[i],
-                                  static_cast<WGPUFeatureName>(features[i]));
-                    }
-
-                    ASSERT_NE(apiDesc->requiredLimits, nullptr);
-                    EXPECT_EQ(apiDesc->requiredLimits->nextInChain, nullptr);
-                    EXPECT_EQ(apiDesc->requiredLimits->limits.maxStorageTexturesPerShaderStage,
-                              limits.limits.maxStorageTexturesPerShaderStage);
-                })));
-            FlushClient();
-        }
-
-        // Delete the adapter now, or it'll call the mock callback after it's deleted.
-        adapter = nullptr;
-    }
-
-    // Test that RequestDevice forwards the device information to the client.
-    TEST_F(WireAdapterTests, RequestDeviceSuccess) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        wgpu::SupportedLimits fakeLimits = {};
-        fakeLimits.limits.maxTextureDimension1D = 433;
-        fakeLimits.limits.maxVertexAttributes = 1243;
-
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            wgpu::FeatureName::Depth32FloatStencil8,
-            wgpu::FeatureName::TextureCompressionBC,
-        };
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
 
         // Expect the server to receive the message. Then, mock a fake reply.
-        WGPUDevice apiDevice = api.GetNewDevice();
-        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        apiAdapter = api.GetNewAdapter();
+        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
             .WillOnce(InvokeWithoutArgs([&]() {
-                // Set on device creation to forward callbacks to the client.
-                EXPECT_CALL(api,
-                            OnDeviceSetUncapturedErrorCallback(apiDevice, NotNull(), NotNull()))
-                    .Times(1);
-                EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, NotNull(), NotNull()))
-                    .Times(1);
-                EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, NotNull(), NotNull()))
-                    .Times(1);
+                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                    .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
+                        *properties = {};
+                        properties->name = "";
+                        properties->driverDescription = "";
+                    })));
 
-                EXPECT_CALL(api, DeviceGetLimits(apiDevice, NotNull()))
+                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
                     .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                        *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                        *limits = {};
                         return true;
                     })));
 
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
-
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-
-                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
-                                                     apiDevice, nullptr);
-            }));
-        FlushClient();
-
-        // Expect the callback in the client and all the device information to match.
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
-            .WillOnce(WithArg<1>(Invoke([&](WGPUDevice cDevice) {
-                wgpu::Device device = wgpu::Device::Acquire(cDevice);
-
-                wgpu::SupportedLimits limits;
-                EXPECT_TRUE(device.GetLimits(&limits));
-                EXPECT_EQ(limits.limits.maxTextureDimension1D,
-                          fakeLimits.limits.maxTextureDimension1D);
-                EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
-
-                std::vector<wgpu::FeatureName> features;
-                features.resize(device.EnumerateFeatures(nullptr));
-                ASSERT_EQ(features.size(), fakeFeatures.size());
-                EXPECT_EQ(device.EnumerateFeatures(&features[0]), features.size());
-
-                std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
-                for (wgpu::FeatureName feature : features) {
-                    EXPECT_EQ(featureSet.erase(feature), 1u);
-                }
-            })));
-        FlushServer();
-
-        // Cleared when the device is destroyed.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)).Times(1);
-    }
-
-    // Test that features requested that the implementation supports, but not the
-    // wire reject the callback.
-    TEST_F(WireAdapterTests, RequestFeatureUnsupportedByWire) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            // Some value that is not a valid feature
-            static_cast<wgpu::FeatureName>(-2),
-            wgpu::FeatureName::TextureCompressionASTC,
-        };
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-        // Expect the server to receive the message. Then, mock a fake reply.
-        // The reply contains features that the device implementation supports, but the
-        // wire does not.
-        WGPUDevice apiDevice = api.GetNewDevice();
-        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
-
-                EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-
-                // The device was actually created, but the wire didn't support its features.
-                // Expect it to be released.
-                EXPECT_CALL(api, DeviceRelease(apiDevice));
-
-                // Fake successful creation. The client still receives a failure due to
-                // unsupported features.
-                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
-                                                     apiDevice, nullptr);
-            }));
-        FlushClient();
-
-        // Expect an error callback since the feature is not supported.
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
-        FlushServer();
-    }
-
-    // Test that RequestDevice errors forward to the client.
-    TEST_F(WireAdapterTests, RequestDeviceError) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-        // Expect the server to receive the message. Then, mock an error.
-        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Error,
-                                                     nullptr, "Request device failed");
+                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                    .WillOnce(Return(0))
+                    .WillOnce(Return(0));
+                api.CallInstanceRequestAdapterCallback(
+                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
             }));
         FlushClient();
 
         // Expect the callback in the client.
-        EXPECT_CALL(
-            cb, Call(WGPURequestDeviceStatus_Error, nullptr, StrEq("Request device failed"), this))
-            .Times(1);
+        WGPUAdapter cAdapter;
+        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+            .WillOnce(SaveArg<1>(&cAdapter));
         FlushServer();
+
+        EXPECT_NE(cAdapter, nullptr);
+        adapter = wgpu::Adapter::Acquire(cAdapter);
     }
 
-    // Test that RequestDevice receives unknown status if the adapter is deleted
-    // before the callback happens.
-    TEST_F(WireAdapterTests, RequestDeviceAdapterDestroyedBeforeCallback) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-
-        wgpu::DeviceDescriptor desc = {};
-        adapter.RequestDevice(&desc, cb.Callback(), userdata);
-
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    void TearDown() override {
         adapter = nullptr;
+        instance = nullptr;
+        WireTest::TearDown();
     }
 
-    // Test that RequestDevice receives unknown status if the wire is disconnected
-    // before the callback happens.
-    TEST_F(WireAdapterTests, RequestDeviceWireDisconnectedBeforeCallback) {
-        MockCallback<WGPURequestDeviceCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
+    WGPUAdapter apiAdapter;
+    wgpu::Instance instance;
+    wgpu::Adapter adapter;
+};
 
+// Test that the DeviceDescriptor is passed from the client to the server.
+TEST_F(WireAdapterTests, RequestDevicePassesDescriptor) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    // Test an empty descriptor
+    {
         wgpu::DeviceDescriptor desc = {};
         adapter.RequestDevice(&desc, cb.Callback(), userdata);
 
-        EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
-        GetWireClient()->Disconnect();
+        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+            .WillOnce(WithArg<1>(Invoke([](const WGPUDeviceDescriptor* apiDesc) {
+                EXPECT_EQ(apiDesc->label, nullptr);
+                EXPECT_EQ(apiDesc->requiredFeaturesCount, 0u);
+                EXPECT_EQ(apiDesc->requiredLimits, nullptr);
+            })));
+        FlushClient();
     }
 
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::wire::
+    // Test a non-empty descriptor
+    {
+        wgpu::RequiredLimits limits = {};
+        limits.limits.maxStorageTexturesPerShaderStage = 5;
+
+        std::vector<wgpu::FeatureName> features = {wgpu::FeatureName::TextureCompressionETC2,
+                                                   wgpu::FeatureName::TextureCompressionASTC};
+
+        wgpu::DeviceDescriptor desc = {};
+        desc.label = "hello device";
+        desc.requiredLimits = &limits;
+        desc.requiredFeaturesCount = features.size();
+        desc.requiredFeatures = features.data();
+
+        adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+        EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+            .WillOnce(WithArg<1>(Invoke([&](const WGPUDeviceDescriptor* apiDesc) {
+                EXPECT_STREQ(apiDesc->label, desc.label);
+
+                ASSERT_EQ(apiDesc->requiredFeaturesCount, features.size());
+                for (uint32_t i = 0; i < features.size(); ++i) {
+                    EXPECT_EQ(apiDesc->requiredFeatures[i],
+                              static_cast<WGPUFeatureName>(features[i]));
+                }
+
+                ASSERT_NE(apiDesc->requiredLimits, nullptr);
+                EXPECT_EQ(apiDesc->requiredLimits->nextInChain, nullptr);
+                EXPECT_EQ(apiDesc->requiredLimits->limits.maxStorageTexturesPerShaderStage,
+                          limits.limits.maxStorageTexturesPerShaderStage);
+            })));
+        FlushClient();
+    }
+
+    // Delete the adapter now, or it'll call the mock callback after it's deleted.
+    adapter = nullptr;
+}
+
+// Test that RequestDevice forwards the device information to the client.
+TEST_F(WireAdapterTests, RequestDeviceSuccess) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::SupportedLimits fakeLimits = {};
+    fakeLimits.limits.maxTextureDimension1D = 433;
+    fakeLimits.limits.maxVertexAttributes = 1243;
+
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        wgpu::FeatureName::Depth32FloatStencil8,
+        wgpu::FeatureName::TextureCompressionBC,
+    };
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    // Expect the server to receive the message. Then, mock a fake reply.
+    WGPUDevice apiDevice = api.GetNewDevice();
+    EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            // Set on device creation to forward callbacks to the client.
+            EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, NotNull(), NotNull()))
+                .Times(1);
+            EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, NotNull(), NotNull())).Times(1);
+            EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, NotNull(), NotNull()))
+                .Times(1);
+
+            EXPECT_CALL(api, DeviceGetLimits(apiDevice, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                    *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                    return true;
+                })));
+
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
+
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+
+            api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
+                                                 apiDevice, nullptr);
+        }));
+    FlushClient();
+
+    // Expect the callback in the client and all the device information to match.
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Success, NotNull(), nullptr, this))
+        .WillOnce(WithArg<1>(Invoke([&](WGPUDevice cDevice) {
+            wgpu::Device device = wgpu::Device::Acquire(cDevice);
+
+            wgpu::SupportedLimits limits;
+            EXPECT_TRUE(device.GetLimits(&limits));
+            EXPECT_EQ(limits.limits.maxTextureDimension1D, fakeLimits.limits.maxTextureDimension1D);
+            EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
+
+            std::vector<wgpu::FeatureName> features;
+            features.resize(device.EnumerateFeatures(nullptr));
+            ASSERT_EQ(features.size(), fakeFeatures.size());
+            EXPECT_EQ(device.EnumerateFeatures(&features[0]), features.size());
+
+            std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
+            for (wgpu::FeatureName feature : features) {
+                EXPECT_EQ(featureSet.erase(feature), 1u);
+            }
+        })));
+    FlushServer();
+
+    // Cleared when the device is destroyed.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr)).Times(1);
+}
+
+// Test that features requested that the implementation supports, but not the
+// wire reject the callback.
+TEST_F(WireAdapterTests, RequestFeatureUnsupportedByWire) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        // Some value that is not a valid feature
+        static_cast<wgpu::FeatureName>(-2),
+        wgpu::FeatureName::TextureCompressionASTC,
+    };
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    // Expect the server to receive the message. Then, mock a fake reply.
+    // The reply contains features that the device implementation supports, but the
+    // wire does not.
+    WGPUDevice apiDevice = api.GetNewDevice();
+    EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
+
+            EXPECT_CALL(api, DeviceEnumerateFeatures(apiDevice, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+
+            // The device was actually created, but the wire didn't support its features.
+            // Expect it to be released.
+            EXPECT_CALL(api, DeviceRelease(apiDevice));
+
+            // Fake successful creation. The client still receives a failure due to
+            // unsupported features.
+            api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Success,
+                                                 apiDevice, nullptr);
+        }));
+    FlushClient();
+
+    // Expect an error callback since the feature is not supported.
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Error, nullptr, NotNull(), this)).Times(1);
+    FlushServer();
+}
+
+// Test that RequestDevice errors forward to the client.
+TEST_F(WireAdapterTests, RequestDeviceError) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    // Expect the server to receive the message. Then, mock an error.
+    EXPECT_CALL(api, OnAdapterRequestDevice(apiAdapter, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallAdapterRequestDeviceCallback(apiAdapter, WGPURequestDeviceStatus_Error, nullptr,
+                                                 "Request device failed");
+        }));
+    FlushClient();
+
+    // Expect the callback in the client.
+    EXPECT_CALL(cb,
+                Call(WGPURequestDeviceStatus_Error, nullptr, StrEq("Request device failed"), this))
+        .Times(1);
+    FlushServer();
+}
+
+// Test that RequestDevice receives unknown status if the adapter is deleted
+// before the callback happens.
+TEST_F(WireAdapterTests, RequestDeviceAdapterDestroyedBeforeCallback) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    adapter = nullptr;
+}
+
+// Test that RequestDevice receives unknown status if the wire is disconnected
+// before the callback happens.
+TEST_F(WireAdapterTests, RequestDeviceWireDisconnectedBeforeCallback) {
+    MockCallback<WGPURequestDeviceCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    wgpu::DeviceDescriptor desc = {};
+    adapter.RequestDevice(&desc, cb.Callback(), userdata);
+
+    EXPECT_CALL(cb, Call(WGPURequestDeviceStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireArgumentTests.cpp b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
index c208085..4f3be8b 100644
--- a/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireArgumentTests.cpp
@@ -15,256 +15,244 @@
 #include <array>
 #include <string>
 
-#include "dawn/tests/unittests/wire/WireTest.h"
 #include "dawn/common/Constants.h"
+#include "dawn/tests/unittests/wire/WireTest.h"
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Return;
-    using testing::Sequence;
+using testing::_;
+using testing::Return;
+using testing::Sequence;
 
-    class WireArgumentTests : public WireTest {
-      public:
-        WireArgumentTests() {
-        }
-        ~WireArgumentTests() override = default;
-    };
+class WireArgumentTests : public WireTest {
+  public:
+    WireArgumentTests() {}
+    ~WireArgumentTests() override = default;
+};
 
-    // Test that the wire is able to send numerical values
-    TEST_F(WireArgumentTests, ValueArgument) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
-        wgpuComputePassEncoderDispatch(pass, 1, 2, 3);
+// Test that the wire is able to send numerical values
+TEST_F(WireArgumentTests, ValueArgument) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+    wgpuComputePassEncoderDispatch(pass, 1, 2, 3);
 
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
 
-        WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
-        EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
-            .WillOnce(Return(apiPass));
+    WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
+    EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
 
-        EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
+    EXPECT_CALL(api, ComputePassEncoderDispatch(apiPass, 1, 2, 3)).Times(1);
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send arrays of numerical values
-    TEST_F(WireArgumentTests, ValueArrayArgument) {
-        // Create a bindgroup.
-        WGPUBindGroupLayoutDescriptor bglDescriptor = {};
-        bglDescriptor.entryCount = 0;
-        bglDescriptor.entries = nullptr;
+// Test that the wire is able to send arrays of numerical values
+TEST_F(WireArgumentTests, ValueArrayArgument) {
+    // Create a bindgroup.
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = 0;
+    bglDescriptor.entries = nullptr;
 
-        WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
-        WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
 
-        WGPUBindGroupDescriptor bindGroupDescriptor = {};
-        bindGroupDescriptor.layout = bgl;
-        bindGroupDescriptor.entryCount = 0;
-        bindGroupDescriptor.entries = nullptr;
+    WGPUBindGroupDescriptor bindGroupDescriptor = {};
+    bindGroupDescriptor.layout = bgl;
+    bindGroupDescriptor.entryCount = 0;
+    bindGroupDescriptor.entries = nullptr;
 
-        WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor);
-        WGPUBindGroup apiBindGroup = api.GetNewBindGroup();
-        EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup));
+    WGPUBindGroup bindGroup = wgpuDeviceCreateBindGroup(device, &bindGroupDescriptor);
+    WGPUBindGroup apiBindGroup = api.GetNewBindGroup();
+    EXPECT_CALL(api, DeviceCreateBindGroup(apiDevice, _)).WillOnce(Return(apiBindGroup));
 
-        // Use the bindgroup in SetBindGroup that takes an array of value offsets.
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
+    // Use the bindgroup in SetBindGroup that takes an array of value offsets.
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUComputePassEncoder pass = wgpuCommandEncoderBeginComputePass(encoder, nullptr);
 
-        std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
-        wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(),
-                                           testOffsets.data());
+    std::array<uint32_t, 4> testOffsets = {0, 42, 0xDEAD'BEEFu, 0xFFFF'FFFFu};
+    wgpuComputePassEncoderSetBindGroup(pass, 0, bindGroup, testOffsets.size(), testOffsets.data());
 
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
 
-        WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
-        EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr))
-            .WillOnce(Return(apiPass));
+    WGPUComputePassEncoder apiPass = api.GetNewComputePassEncoder();
+    EXPECT_CALL(api, CommandEncoderBeginComputePass(apiEncoder, nullptr)).WillOnce(Return(apiPass));
 
-        EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
-                             apiPass, 0, apiBindGroup, testOffsets.size(),
-                             MatchesLambda([testOffsets](const uint32_t* offsets) -> bool {
-                                 for (size_t i = 0; i < testOffsets.size(); i++) {
-                                     if (offsets[i] != testOffsets[i]) {
-                                         return false;
-                                     }
+    EXPECT_CALL(api, ComputePassEncoderSetBindGroup(
+                         apiPass, 0, apiBindGroup, testOffsets.size(),
+                         MatchesLambda([testOffsets](const uint32_t* offsets) -> bool {
+                             for (size_t i = 0; i < testOffsets.size(); i++) {
+                                 if (offsets[i] != testOffsets[i]) {
+                                     return false;
                                  }
-                                 return true;
-                             })));
+                             }
+                             return true;
+                         })));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send C strings
-    TEST_F(WireArgumentTests, CStringArgument) {
-        // Create shader module
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+// Test that the wire is able to send C strings
+TEST_F(WireArgumentTests, CStringArgument) {
+    // Create shader module
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        // Create the color state descriptor
-        WGPUBlendComponent blendComponent = {};
-        blendComponent.operation = WGPUBlendOperation_Add;
-        blendComponent.srcFactor = WGPUBlendFactor_One;
-        blendComponent.dstFactor = WGPUBlendFactor_One;
-        WGPUBlendState blendState = {};
-        blendState.alpha = blendComponent;
-        blendState.color = blendComponent;
-        WGPUColorTargetState colorTargetState = {};
-        colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
-        colorTargetState.blend = &blendState;
-        colorTargetState.writeMask = WGPUColorWriteMask_All;
+    // Create the color state descriptor
+    WGPUBlendComponent blendComponent = {};
+    blendComponent.operation = WGPUBlendOperation_Add;
+    blendComponent.srcFactor = WGPUBlendFactor_One;
+    blendComponent.dstFactor = WGPUBlendFactor_One;
+    WGPUBlendState blendState = {};
+    blendState.alpha = blendComponent;
+    blendState.color = blendComponent;
+    WGPUColorTargetState colorTargetState = {};
+    colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
+    colorTargetState.blend = &blendState;
+    colorTargetState.writeMask = WGPUColorWriteMask_All;
 
-        // Create the depth-stencil state
-        WGPUStencilFaceState stencilFace = {};
-        stencilFace.compare = WGPUCompareFunction_Always;
-        stencilFace.failOp = WGPUStencilOperation_Keep;
-        stencilFace.depthFailOp = WGPUStencilOperation_Keep;
-        stencilFace.passOp = WGPUStencilOperation_Keep;
+    // Create the depth-stencil state
+    WGPUStencilFaceState stencilFace = {};
+    stencilFace.compare = WGPUCompareFunction_Always;
+    stencilFace.failOp = WGPUStencilOperation_Keep;
+    stencilFace.depthFailOp = WGPUStencilOperation_Keep;
+    stencilFace.passOp = WGPUStencilOperation_Keep;
 
-        WGPUDepthStencilState depthStencilState = {};
-        depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
-        depthStencilState.depthWriteEnabled = false;
-        depthStencilState.depthCompare = WGPUCompareFunction_Always;
-        depthStencilState.stencilBack = stencilFace;
-        depthStencilState.stencilFront = stencilFace;
-        depthStencilState.stencilReadMask = 0xff;
-        depthStencilState.stencilWriteMask = 0xff;
-        depthStencilState.depthBias = 0;
-        depthStencilState.depthBiasSlopeScale = 0.0;
-        depthStencilState.depthBiasClamp = 0.0;
+    WGPUDepthStencilState depthStencilState = {};
+    depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
+    depthStencilState.depthWriteEnabled = false;
+    depthStencilState.depthCompare = WGPUCompareFunction_Always;
+    depthStencilState.stencilBack = stencilFace;
+    depthStencilState.stencilFront = stencilFace;
+    depthStencilState.stencilReadMask = 0xff;
+    depthStencilState.stencilWriteMask = 0xff;
+    depthStencilState.depthBias = 0;
+    depthStencilState.depthBiasSlopeScale = 0.0;
+    depthStencilState.depthBiasClamp = 0.0;
 
-        // Create the pipeline layout
-        WGPUPipelineLayoutDescriptor layoutDescriptor = {};
-        layoutDescriptor.bindGroupLayoutCount = 0;
-        layoutDescriptor.bindGroupLayouts = nullptr;
-        WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
-        WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
-        EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
+    // Create the pipeline layout
+    WGPUPipelineLayoutDescriptor layoutDescriptor = {};
+    layoutDescriptor.bindGroupLayoutCount = 0;
+    layoutDescriptor.bindGroupLayouts = nullptr;
+    WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
+    WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
 
-        // Create pipeline
-        WGPURenderPipelineDescriptor pipelineDescriptor = {};
+    // Create pipeline
+    WGPURenderPipelineDescriptor pipelineDescriptor = {};
 
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.vertex.bufferCount = 0;
-        pipelineDescriptor.vertex.buffers = nullptr;
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.vertex.bufferCount = 0;
+    pipelineDescriptor.vertex.buffers = nullptr;
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        fragment.targetCount = 1;
-        fragment.targets = &colorTargetState;
-        pipelineDescriptor.fragment = &fragment;
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    fragment.targetCount = 1;
+    fragment.targets = &colorTargetState;
+    pipelineDescriptor.fragment = &fragment;
 
-        pipelineDescriptor.multisample.count = 1;
-        pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
-        pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
-        pipelineDescriptor.layout = layout;
-        pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
-        pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
-        pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
-        pipelineDescriptor.depthStencil = &depthStencilState;
+    pipelineDescriptor.multisample.count = 1;
+    pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
+    pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
+    pipelineDescriptor.layout = layout;
+    pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+    pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
+    pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
+    pipelineDescriptor.depthStencil = &depthStencilState;
 
-        wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
 
-        WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
-        EXPECT_CALL(
-            api, DeviceCreateRenderPipeline(
-                     apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
-                         return desc->vertex.entryPoint == std::string("main");
-                     })))
-            .WillOnce(Return(apiPlaceholderPipeline));
+    WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
+    EXPECT_CALL(api,
+                DeviceCreateRenderPipeline(
+                    apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                        return desc->vertex.entryPoint == std::string("main");
+                    })))
+        .WillOnce(Return(apiPlaceholderPipeline));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send objects as value arguments
-    TEST_F(WireArgumentTests, ObjectAsValueArgument) {
+// Test that the wire is able to send objects as value arguments
+TEST_F(WireArgumentTests, ObjectAsValueArgument) {
+    WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 8;
+    descriptor.usage =
+        static_cast<WGPUBufferUsage>(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst);
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+        .WillOnce(Return(apiBuffer))
+        .RetiresOnSaturation();
+
+    wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
+    EXPECT_CALL(api, CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
+
+    FlushClient();
+}
+
+// Test that the wire is able to send array of objects
+TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
+    WGPUCommandBuffer cmdBufs[2];
+    WGPUCommandBuffer apiCmdBufs[2];
+
+    // Create two command buffers we need to use a GMock sequence otherwise the order of the
+    // CreateCommandEncoder might be swapped since they are equivalent in term of matchers
+    Sequence s;
+    for (int i = 0; i < 2; ++i) {
         WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+        cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr);
+
+        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
         EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+            .InSequence(s)
+            .WillOnce(Return(apiCmdBufEncoder));
 
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 8;
-        descriptor.usage =
-            static_cast<WGPUBufferUsage>(WGPUBufferUsage_CopySrc | WGPUBufferUsage_CopyDst);
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
-            .WillOnce(Return(apiBuffer))
-            .RetiresOnSaturation();
-
-        wgpuCommandEncoderCopyBufferToBuffer(cmdBufEncoder, buffer, 0, buffer, 4, 4);
-        EXPECT_CALL(api,
-                    CommandEncoderCopyBufferToBuffer(apiEncoder, apiBuffer, 0, apiBuffer, 4, 4));
-
-        FlushClient();
+        apiCmdBufs[i] = api.GetNewCommandBuffer();
+        EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
+            .WillOnce(Return(apiCmdBufs[i]));
     }
 
-    // Test that the wire is able to send array of objects
-    TEST_F(WireArgumentTests, ObjectsAsPointerArgument) {
-        WGPUCommandBuffer cmdBufs[2];
-        WGPUCommandBuffer apiCmdBufs[2];
+    // Submit command buffer and check we got a call with both API-side command buffers
+    wgpuQueueSubmit(queue, 2, cmdBufs);
 
-        // Create two command buffers we need to use a GMock sequence otherwise the order of the
-        // CreateCommandEncoder might be swapped since they are equivalent in term of matchers
-        Sequence s;
-        for (int i = 0; i < 2; ++i) {
-            WGPUCommandEncoder cmdBufEncoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-            cmdBufs[i] = wgpuCommandEncoderFinish(cmdBufEncoder, nullptr);
+    EXPECT_CALL(
+        api, QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
+                             return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1];
+                         })));
 
-            WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-            EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-                .InSequence(s)
-                .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
+}
 
-            apiCmdBufs[i] = api.GetNewCommandBuffer();
-            EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
-                .WillOnce(Return(apiCmdBufs[i]));
-        }
+// Test that the wire is able to send structures that contain pure values (non-objects)
+TEST_F(WireArgumentTests, StructureOfValuesArgument) {
+    WGPUSamplerDescriptor descriptor = {};
+    descriptor.magFilter = WGPUFilterMode_Linear;
+    descriptor.minFilter = WGPUFilterMode_Nearest;
+    descriptor.mipmapFilter = WGPUFilterMode_Linear;
+    descriptor.addressModeU = WGPUAddressMode_ClampToEdge;
+    descriptor.addressModeV = WGPUAddressMode_Repeat;
+    descriptor.addressModeW = WGPUAddressMode_MirrorRepeat;
+    descriptor.lodMinClamp = kLodMin;
+    descriptor.lodMaxClamp = kLodMax;
+    descriptor.compare = WGPUCompareFunction_Never;
 
-        // Submit command buffer and check we got a call with both API-side command buffers
-        wgpuQueueSubmit(queue, 2, cmdBufs);
+    wgpuDeviceCreateSampler(device, &descriptor);
 
-        EXPECT_CALL(
-            api,
-            QueueSubmit(apiQueue, 2, MatchesLambda([=](const WGPUCommandBuffer* cmdBufs) -> bool {
-                            return cmdBufs[0] == apiCmdBufs[0] && cmdBufs[1] == apiCmdBufs[1];
-                        })));
-
-        FlushClient();
-    }
-
-    // Test that the wire is able to send structures that contain pure values (non-objects)
-    TEST_F(WireArgumentTests, StructureOfValuesArgument) {
-        WGPUSamplerDescriptor descriptor = {};
-        descriptor.magFilter = WGPUFilterMode_Linear;
-        descriptor.minFilter = WGPUFilterMode_Nearest;
-        descriptor.mipmapFilter = WGPUFilterMode_Linear;
-        descriptor.addressModeU = WGPUAddressMode_ClampToEdge;
-        descriptor.addressModeV = WGPUAddressMode_Repeat;
-        descriptor.addressModeW = WGPUAddressMode_MirrorRepeat;
-        descriptor.lodMinClamp = kLodMin;
-        descriptor.lodMaxClamp = kLodMax;
-        descriptor.compare = WGPUCompareFunction_Never;
-
-        wgpuDeviceCreateSampler(device, &descriptor);
-
-        WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
-        EXPECT_CALL(
-            api, DeviceCreateSampler(
-                     apiDevice,
-                     MatchesLambda(
-                         [](const WGPUSamplerDescriptor* desc) -> bool {
+    WGPUSampler apiPlaceholderSampler = api.GetNewSampler();
+    EXPECT_CALL(api, DeviceCreateSampler(
+                         apiDevice, MatchesLambda([](const WGPUSamplerDescriptor* desc) -> bool {
                              return desc->nextInChain == nullptr &&
                                     desc->magFilter == WGPUFilterMode_Linear &&
                                     desc->minFilter == WGPUFilterMode_Nearest &&
@@ -275,111 +263,110 @@
                                     desc->compare == WGPUCompareFunction_Never &&
                                     desc->lodMinClamp == kLodMin && desc->lodMaxClamp == kLodMax;
                          })))
-            .WillOnce(Return(apiPlaceholderSampler));
+        .WillOnce(Return(apiPlaceholderSampler));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send structures that contain objects
-    TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
-        WGPUBindGroupLayoutDescriptor bglDescriptor = {};
-        bglDescriptor.entryCount = 0;
-        bglDescriptor.entries = nullptr;
+// Test that the wire is able to send structures that contain objects
+TEST_F(WireArgumentTests, StructureOfObjectArrayArgument) {
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = 0;
+    bglDescriptor.entries = nullptr;
 
-        WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
-        WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _)).WillOnce(Return(apiBgl));
 
-        WGPUPipelineLayoutDescriptor descriptor = {};
-        descriptor.bindGroupLayoutCount = 1;
-        descriptor.bindGroupLayouts = &bgl;
+    WGPUPipelineLayoutDescriptor descriptor = {};
+    descriptor.bindGroupLayoutCount = 1;
+    descriptor.bindGroupLayouts = &bgl;
 
-        wgpuDeviceCreatePipelineLayout(device, &descriptor);
+    wgpuDeviceCreatePipelineLayout(device, &descriptor);
 
-        WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
-        EXPECT_CALL(
-            api, DeviceCreatePipelineLayout(
-                     apiDevice,
-                     MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
-                         return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
-                                desc->bindGroupLayouts[0] == apiBgl;
-                     })))
-            .WillOnce(Return(apiPlaceholderLayout));
+    WGPUPipelineLayout apiPlaceholderLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(
+                         apiDevice,
+                         MatchesLambda([apiBgl](const WGPUPipelineLayoutDescriptor* desc) -> bool {
+                             return desc->nextInChain == nullptr &&
+                                    desc->bindGroupLayoutCount == 1 &&
+                                    desc->bindGroupLayouts[0] == apiBgl;
+                         })))
+        .WillOnce(Return(apiPlaceholderLayout));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send structures that contain objects
-    TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
-        static constexpr int NUM_BINDINGS = 3;
-        WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{
-            {nullptr,
-             0,
-             WGPUShaderStage_Vertex,
-             {},
-             {nullptr, WGPUSamplerBindingType_Filtering},
-             {},
-             {}},
-            {nullptr,
-             1,
-             WGPUShaderStage_Vertex,
-             {},
-             {},
-             {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false},
-             {}},
-            {nullptr,
-             2,
-             static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment),
-             {nullptr, WGPUBufferBindingType_Uniform, false, 0},
-             {},
-             {},
-             {}},
-        };
-        WGPUBindGroupLayoutDescriptor bglDescriptor = {};
-        bglDescriptor.entryCount = NUM_BINDINGS;
-        bglDescriptor.entries = entries;
+// Test that the wire is able to send structures that contain objects
+TEST_F(WireArgumentTests, StructureOfStructureArrayArgument) {
+    static constexpr int NUM_BINDINGS = 3;
+    WGPUBindGroupLayoutEntry entries[NUM_BINDINGS]{
+        {nullptr,
+         0,
+         WGPUShaderStage_Vertex,
+         {},
+         {nullptr, WGPUSamplerBindingType_Filtering},
+         {},
+         {}},
+        {nullptr,
+         1,
+         WGPUShaderStage_Vertex,
+         {},
+         {},
+         {nullptr, WGPUTextureSampleType_Float, WGPUTextureViewDimension_2D, false},
+         {}},
+        {nullptr,
+         2,
+         static_cast<WGPUShaderStage>(WGPUShaderStage_Vertex | WGPUShaderStage_Fragment),
+         {nullptr, WGPUBufferBindingType_Uniform, false, 0},
+         {},
+         {},
+         {}},
+    };
+    WGPUBindGroupLayoutDescriptor bglDescriptor = {};
+    bglDescriptor.entryCount = NUM_BINDINGS;
+    bglDescriptor.entries = entries;
 
-        wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
-        WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api,
-                    DeviceCreateBindGroupLayout(
-                        apiDevice,
-                        MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
-                            for (int i = 0; i < NUM_BINDINGS; ++i) {
-                                const auto& a = desc->entries[i];
-                                const auto& b = entries[i];
-                                if (a.binding != b.binding || a.visibility != b.visibility ||
-                                    a.buffer.type != b.buffer.type ||
-                                    a.sampler.type != b.sampler.type ||
-                                    a.texture.sampleType != b.texture.sampleType) {
-                                    return false;
-                                }
-                            }
-                            return desc->nextInChain == nullptr && desc->entryCount == 3;
-                        })))
-            .WillOnce(Return(apiBgl));
+    wgpuDeviceCreateBindGroupLayout(device, &bglDescriptor);
+    WGPUBindGroupLayout apiBgl = api.GetNewBindGroupLayout();
+    EXPECT_CALL(
+        api,
+        DeviceCreateBindGroupLayout(
+            apiDevice, MatchesLambda([entries](const WGPUBindGroupLayoutDescriptor* desc) -> bool {
+                for (int i = 0; i < NUM_BINDINGS; ++i) {
+                    const auto& a = desc->entries[i];
+                    const auto& b = entries[i];
+                    if (a.binding != b.binding || a.visibility != b.visibility ||
+                        a.buffer.type != b.buffer.type || a.sampler.type != b.sampler.type ||
+                        a.texture.sampleType != b.texture.sampleType) {
+                        return false;
+                    }
+                }
+                return desc->nextInChain == nullptr && desc->entryCount == 3;
+            })))
+        .WillOnce(Return(apiBgl));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test passing nullptr instead of objects - array of objects version
-    TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
-        WGPUBindGroupLayout nullBGL = nullptr;
+// Test passing nullptr instead of objects - array of objects version
+TEST_F(WireArgumentTests, DISABLED_NullptrInArray) {
+    WGPUBindGroupLayout nullBGL = nullptr;
 
-        WGPUPipelineLayoutDescriptor descriptor = {};
-        descriptor.bindGroupLayoutCount = 1;
-        descriptor.bindGroupLayouts = &nullBGL;
+    WGPUPipelineLayoutDescriptor descriptor = {};
+    descriptor.bindGroupLayoutCount = 1;
+    descriptor.bindGroupLayouts = &nullBGL;
 
-        wgpuDeviceCreatePipelineLayout(device, &descriptor);
-        EXPECT_CALL(
-            api, DeviceCreatePipelineLayout(
-                     apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool {
-                         return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
-                                desc->bindGroupLayouts[0] == nullptr;
-                     })))
-            .WillOnce(Return(nullptr));
+    wgpuDeviceCreatePipelineLayout(device, &descriptor);
+    EXPECT_CALL(api,
+                DeviceCreatePipelineLayout(
+                    apiDevice, MatchesLambda([](const WGPUPipelineLayoutDescriptor* desc) -> bool {
+                        return desc->nextInChain == nullptr && desc->bindGroupLayoutCount == 1 &&
+                               desc->bindGroupLayouts[0] == nullptr;
+                    })))
+        .WillOnce(Return(nullptr));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireBasicTests.cpp b/src/dawn/tests/unittests/wire/WireBasicTests.cpp
index 9a05acf..34a9c7f 100644
--- a/src/dawn/tests/unittests/wire/WireBasicTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireBasicTests.cpp
@@ -16,69 +16,67 @@
 
 namespace dawn::wire {
 
-    using testing::Return;
+using testing::Return;
 
-    class WireBasicTests : public WireTest {
-      public:
-        WireBasicTests() {
-        }
-        ~WireBasicTests() override = default;
-    };
+class WireBasicTests : public WireTest {
+  public:
+    WireBasicTests() {}
+    ~WireBasicTests() override = default;
+};
 
-    // One call gets forwarded correctly.
-    TEST_F(WireBasicTests, CallForwarded) {
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
+// One call gets forwarded correctly.
+TEST_F(WireBasicTests, CallForwarded) {
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that calling methods on a new object works as expected.
-    TEST_F(WireBasicTests, CreateThenCall) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
-        wgpuCommandEncoderFinish(encoder, nullptr);
+// Test that calling methods on a new object works as expected.
+TEST_F(WireBasicTests, CreateThenCall) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+    wgpuCommandEncoderFinish(encoder, nullptr);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer();
-        EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr))
-            .WillOnce(Return(apiCmdBuf));
+    WGPUCommandBuffer apiCmdBuf = api.GetNewCommandBuffer();
+    EXPECT_CALL(api, CommandEncoderFinish(apiCmdBufEncoder, nullptr)).WillOnce(Return(apiCmdBuf));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that client reference/release do not call the backend API.
-    TEST_F(WireBasicTests, RefCountKeptInClient) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that client reference/release do not call the backend API.
+TEST_F(WireBasicTests, RefCountKeptInClient) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        wgpuCommandEncoderReference(encoder);
-        wgpuCommandEncoderRelease(encoder);
+    wgpuCommandEncoderReference(encoder);
+    wgpuCommandEncoderRelease(encoder);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that client reference/release do not call the backend API.
-    TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that client reference/release do not call the backend API.
+TEST_F(WireBasicTests, ReleaseCalledOnRefCount0) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        wgpuCommandEncoderRelease(encoder);
+    wgpuCommandEncoderRelease(encoder);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
 
-        EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder));
+    EXPECT_CALL(api, CommandEncoderRelease(apiCmdBufEncoder));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
index c4fb948..e49ee7d 100644
--- a/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireBufferMappingTests.cpp
@@ -20,840 +20,801 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
-    using testing::Return;
-    using testing::StrictMock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Return;
+using testing::StrictMock;
 
-    namespace {
+namespace {
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockBufferMapCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockBufferMapCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
-        void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
-            mockBufferMapCallback->Call(status, userdata);
-        }
+std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
+void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+    mockBufferMapCallback->Call(status, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireBufferMappingTests : public WireTest {
-      public:
-        WireBufferMappingTests() {
-        }
-        ~WireBufferMappingTests() override = default;
+class WireBufferMappingTests : public WireTest {
+  public:
+    WireBufferMappingTests() {}
+    ~WireBufferMappingTests() override = default;
 
-        void SetUp() override {
-            WireTest::SetUp();
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
-            apiBuffer = api.GetNewBuffer();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockBufferMapCallback = nullptr;
-        }
-
-        void FlushClient() {
-            WireTest::FlushClient();
-            Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
-        }
-
-        void SetupBuffer(WGPUBufferUsageFlags usage) {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.size = kBufferSize;
-            descriptor.usage = usage;
-
-            buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-            EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
-                .WillOnce(Return(apiBuffer))
-                .RetiresOnSaturation();
-            FlushClient();
-        }
-
-      protected:
-        static constexpr uint64_t kBufferSize = sizeof(uint32_t);
-        // A successfully created buffer
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-    };
-
-    // Tests specific to mapping for reading
-    class WireBufferMappingReadTests : public WireBufferMappingTests {
-      public:
-        WireBufferMappingReadTests() {
-        }
-        ~WireBufferMappingReadTests() override = default;
-
-        void SetUp() override {
-            WireBufferMappingTests::SetUp();
-
-            SetupBuffer(WGPUBufferUsage_MapRead);
-        }
-    };
-
-    // Check mapping for reading a succesfully created buffer
-    TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(bufferContent, *static_cast<const uint32_t*>(
-                                     wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
+        mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
+        apiBuffer = api.GetNewBuffer();
     }
 
-    // Check that things work correctly when a validation error happens when mapping the buffer for
-    // reading
-    TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
+    void TearDown() override {
+        WireTest::TearDown();
 
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize));
+        // Delete mock so that expectations are checked
+        mockBufferMapCallback = nullptr;
     }
 
-    // Check that the map read callback is called with UNKNOWN when the buffer is destroyed before
-    // the request is finished
-    TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Return success
-        uint32_t bufferContent = 0;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Destroy before the client gets the success, so the callback is called with
-        // DestroyedBeforeCallback.
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
-            .Times(1);
-        wgpuBufferRelease(buffer);
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-        FlushServer();
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
     }
 
-    // Check the map read callback is called with "UnmappedBeforeCallback" when the map request
-    // would have worked, but Unmap was called
-    TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
-        // an answer from the server.
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer));
-
-        FlushClient();
-
-        // The callback shouldn't get called with success, even when the request succeeded on the
-        // server side
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
-            .Times(1);
-
-        FlushServer();
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
     }
 
-    // Check that even if Unmap() was called early client-side, we correctly surface server-side
-    // validation errors.
-    TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
-        // an answer from the server that the mapAsync call was an error.
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer));
-
-        FlushClient();
-
-        // The callback should be called with the server-side error and not the
-        // UnmappedBeforeCallback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-    }
-
-    // Check the map read callback is called with "DestroyedBeforeCallback" when the map request
-    // would have worked, but Destroy was called
-    TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
-        // an answer from the server.
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer));
-
-        FlushClient();
-
-        // The callback shouldn't get called with success, even when the request succeeded on the
-        // server side
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
-            .Times(1);
-
-        FlushServer();
-    }
-
-    // Check that even if Destroy() was called early client-side, we correctly surface server-side
-    // validation errors.
-    TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        // Oh no! We are calling Destroy too early! However the callback gets fired only after we
-        // get an answer from the server that the mapAsync call was an error.
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer));
-
-        FlushClient();
-
-        // The callback should be called with the server-side error and not the
-        // DestroyedBeforCallback..
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-    }
-
-    // Check that an error map read while a buffer is already mapped won't changed the result of get
-    // mapped range
-    TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) {
-        // Successful map
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // Map failure while the buffer is already mapped
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(bufferContent, *static_cast<const uint32_t*>(
-                                     wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-    }
-
-    // Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback
-    TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test that the MapReadCallback isn't fired twice the buffer external refcount reaches 0 in the
-    // callback
-    TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-    }
-
-    // Tests specific to mapping for writing
-    class WireBufferMappingWriteTests : public WireBufferMappingTests {
-      public:
-        WireBufferMappingWriteTests() {
-        }
-        ~WireBufferMappingWriteTests() override = default;
-
-        void SetUp() override {
-            WireBufferMappingTests::SetUp();
-
-            SetupBuffer(WGPUBufferUsage_MapWrite);
-        }
-    };
-
-    // Check mapping for writing a succesfully created buffer
-    TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t serverBufferContent = 31337;
-        uint32_t updatedContent = 4242;
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&serverBufferContent));
-
-        FlushClient();
-
-        // The map write callback always gets a buffer full of zeroes.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        uint32_t* lastMapWritePointer =
-            static_cast<uint32_t*>(wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
-        ASSERT_EQ(0u, *lastMapWritePointer);
-
-        // Write something to the mapped pointer
-        *lastMapWritePointer = updatedContent;
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // After the buffer is unmapped, the content of the buffer is updated on the server
-        ASSERT_EQ(serverBufferContent, updatedContent);
-    }
-
-    // Check that things work correctly when a validation error happens when mapping the buffer for
-    // writing
-    TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
-    }
-
-    // Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is
-    // destroyed before the request is finished
-    TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Return success
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        // Destroy before the client gets the success, so the callback is called with
-        // DestroyedBeforeCallback.
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
-            .Times(1);
-        wgpuBufferRelease(buffer);
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-        FlushServer();
-    }
-
-    // Check the map write callback is called with "UnmappedBeforeCallback" when the map request
-    // would have worked, but Unmap was called
-    TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        // Oh no! We are calling Unmap too early!
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
-            .Times(1);
-        wgpuBufferUnmap(buffer);
-
-        // The callback shouldn't get called, even when the request succeeded on the server side
-        FlushServer();
-    }
-
-    // Check that an error map write while a buffer is already mapped
-    TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
-        // Successful map
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // Map failure while the buffer is already mapped
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_NE(nullptr, static_cast<const uint32_t*>(
-                               wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-    }
-
-    // Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback
-    TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test that the MapWriteCallback isn't fired twice the buffer external refcount reaches 0 in
-    // the callback
-    TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) {
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        uint32_t bufferContent = 31337;
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&bufferContent));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
-            .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
-
-        FlushServer();
-
-        EXPECT_CALL(api, BufferRelease(apiBuffer));
-
-        FlushClient();
-    }
-
-    // Test successful buffer creation with mappedAtCreation=true
-    TEST_F(WireBufferMappingTests, MappedAtCreationSuccess) {
+    void SetupBuffer(WGPUBufferUsageFlags usage) {
         WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
+        descriptor.size = kBufferSize;
+        descriptor.usage = usage;
+
+        buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+            .WillOnce(Return(apiBuffer))
+            .RetiresOnSaturation();
+        FlushClient();
+    }
+
+  protected:
+    static constexpr uint64_t kBufferSize = sizeof(uint32_t);
+    // A successfully created buffer
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+};
+
+// Tests specific to mapping for reading
+class WireBufferMappingReadTests : public WireBufferMappingTests {
+  public:
+    WireBufferMappingReadTests() {}
+    ~WireBufferMappingReadTests() override = default;
+
+    void SetUp() override {
+        WireBufferMappingTests::SetUp();
+
+        SetupBuffer(WGPUBufferUsage_MapRead);
+    }
+};
+
+// Check mapping for reading a successfully created buffer
+TEST_F(WireBufferMappingReadTests, MappingForReadSuccessBuffer) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(bufferContent,
+              *static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Check that things work correctly when a validation error happens when mapping the buffer for
+// reading
+TEST_F(WireBufferMappingReadTests, ErrorWhileMappingForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(nullptr, wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize));
+}
+
+// Check that the map read callback is called with "DestroyedBeforeCallback" when the buffer is
+// destroyed before the request is finished
+TEST_F(WireBufferMappingReadTests, DestroyBeforeReadRequestEnd) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Return success
+    uint32_t bufferContent = 0;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Destroy before the client gets the success, so the callback is called with
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+    FlushServer();
+}
+
+// Check the map read callback is called with "UnmappedBeforeCallback" when the map request
+// would have worked, but Unmap was called
+TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
+    // an answer from the server.
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer));
+
+    FlushClient();
+
+    // The callback shouldn't get called with success, even when the request succeeded on the
+    // server side
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Check that even if Unmap() was called early client-side, we correctly surface server-side
+// validation errors.
+TEST_F(WireBufferMappingReadTests, UnmapCalledTooEarlyForReadButServerSideError) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    // Oh no! We are calling Unmap too early! However the callback gets fired only after we get
+    // an answer from the server that the mapAsync call was an error.
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer));
+
+    FlushClient();
+
+    // The callback should be called with the server-side error and not the
+    // UnmappedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+}
+
+// Check the map read callback is called with "DestroyedBeforeCallback" when the map request
+// would have worked, but Destroy was called
+TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForRead) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Oh no! We are calling Destroy too early! However the callback gets fired only after we get
+    // an answer from the server.
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer));
+
+    FlushClient();
+
+    // The callback shouldn't get called with success, even when the request succeeded on the
+    // server side
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Check that even if Destroy() was called early client-side, we correctly surface server-side
+// validation errors.
+TEST_F(WireBufferMappingReadTests, DestroyCalledTooEarlyForReadButServerSideError) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    // Oh no! We are calling Destroy too early! However the callback gets fired only after we
+    // get an answer from the server that the mapAsync call was an error.
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer));
+
+    FlushClient();
+
+    // The callback should be called with the server-side error and not the
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+}
+
+// Check that an error map read while a buffer is already mapped won't change the result of get
+// mapped range
+TEST_F(WireBufferMappingReadTests, MappingForReadingErrorWhileAlreadyMappedUnchangeMapData) {
+    // Successful map
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // Map failure while the buffer is already mapped
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(bufferContent,
+              *static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+}
+
+// Test that the MapReadCallback isn't fired twice when unmap() is called inside the callback
+TEST_F(WireBufferMappingReadTests, UnmapInsideMapReadCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that the MapReadCallback isn't fired twice when the buffer external refcount reaches 0 in
+// the callback
+TEST_F(WireBufferMappingReadTests, DestroyInsideMapReadCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+}
+
+// Tests specific to mapping for writing
+class WireBufferMappingWriteTests : public WireBufferMappingTests {
+  public:
+    WireBufferMappingWriteTests() {}
+    ~WireBufferMappingWriteTests() override = default;
+
+    void SetUp() override {
+        WireBufferMappingTests::SetUp();
+
+        SetupBuffer(WGPUBufferUsage_MapWrite);
+    }
+};
+
+// Check mapping for writing a successfully created buffer
+TEST_F(WireBufferMappingWriteTests, MappingForWriteSuccessBuffer) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t serverBufferContent = 31337;
+    uint32_t updatedContent = 4242;
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&serverBufferContent));
+
+    FlushClient();
+
+    // The map write callback always gets a buffer full of zeroes.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    uint32_t* lastMapWritePointer =
+        static_cast<uint32_t*>(wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
+    ASSERT_EQ(0u, *lastMapWritePointer);
+
+    // Write something to the mapped pointer
+    *lastMapWritePointer = updatedContent;
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // After the buffer is unmapped, the content of the buffer is updated on the server
+    ASSERT_EQ(serverBufferContent, updatedContent);
+}
+
+// Check that things work correctly when a validation error happens when mapping the buffer for
+// writing
+TEST_F(WireBufferMappingWriteTests, ErrorWhileMappingForWrite) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, kBufferSize));
+}
+
+// Check that the map write callback is called with "DestroyedBeforeCallback" when the buffer is
+// destroyed before the request is finished
+TEST_F(WireBufferMappingWriteTests, DestroyBeforeWriteRequestEnd) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Return success
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    // Destroy before the client gets the success, so the callback is called with
+    // DestroyedBeforeCallback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+    FlushServer();
+}
+
+// Check the map write callback is called with "UnmappedBeforeCallback" when the map request
+// would have worked, but Unmap was called
+TEST_F(WireBufferMappingWriteTests, UnmapCalledTooEarlyForWrite) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    // Oh no! We are calling Unmap too early!
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_UnmappedBeforeCallback, _))
+        .Times(1);
+    wgpuBufferUnmap(buffer);
+
+    // The callback shouldn't get called, even when the request succeeded on the server side
+    FlushServer();
+}
+
+// Check that an error map write while a buffer is already mapped
+TEST_F(WireBufferMappingWriteTests, MappingForWritingErrorWhileAlreadyMapped) {
+    // Successful map
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // Map failure while the buffer is already mapped
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_NE(nullptr,
+              static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+}
+
+// Test that the MapWriteCallback isn't fired twice when unmap() is called inside the callback
+TEST_F(WireBufferMappingWriteTests, UnmapInsideMapWriteCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferUnmap(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that the MapWriteCallback isn't fired twice when the buffer external refcount reaches 0
+// in the callback
+TEST_F(WireBufferMappingWriteTests, DestroyInsideMapWriteCallback) {
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    uint32_t bufferContent = 31337;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&bufferContent));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _))
+        .WillOnce(InvokeWithoutArgs([&]() { wgpuBufferRelease(buffer); }));
+
+    FlushServer();
+
+    EXPECT_CALL(api, BufferRelease(apiBuffer));
+
+    FlushClient();
+}
+
+// Test successful buffer creation with mappedAtCreation=true
+TEST_F(WireBufferMappingTests, MappedAtCreationSuccess) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that releasing a buffer mapped at creation does not call Unmap
+TEST_F(WireBufferMappingTests, MappedAtCreationReleaseBeforeUnmap) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferRelease(buffer);
+    EXPECT_CALL(api, BufferRelease(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test that it is valid to map a buffer after it is mapped at creation and unmapped
+TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.usage = WGPUMapMode_Write;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+}
+
+// Test that it is invalid to map a buffer after mappedAtCreation but before Unmap
+TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = 4;
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+    uint32_t apiBufferData = 1234;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    EXPECT_NE(nullptr,
+              static_cast<const uint32_t*>(wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Check that trying to create a buffer of size MAX_SIZE_T is an error handled in the client
+// and never gets to the server-side.
+TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) {
+    size_t kOOMSize = std::numeric_limits<size_t>::max();
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+
+    // Check for CreateBufferMapped.
+    {
+        WGPUBufferDescriptor descriptor = {};
+        descriptor.usage = WGPUBufferUsage_CopySrc;
+        descriptor.size = kOOMSize;
         descriptor.mappedAtCreation = true;
 
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
         FlushClient();
     }
 
-    // Test that releasing a buffer mapped at creation does not call Unmap
-    TEST_F(WireBufferMappingTests, MappedAtCreationReleaseBeforeUnmap) {
+    // Check for MapRead usage.
+    {
         WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
-        descriptor.mappedAtCreation = true;
+        descriptor.usage = WGPUBufferUsage_MapRead;
+        descriptor.size = kOOMSize;
 
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferRelease(buffer);
-        EXPECT_CALL(api, BufferRelease(apiBuffer)).Times(1);
-
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
         FlushClient();
     }
 
-    // Test that it is valid to map a buffer after it is mapped at creation and unmapped
-    TEST_F(WireBufferMappingTests, MappedAtCreationThenMapSuccess) {
+    // Check for MapWrite usage.
+    {
         WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
-        descriptor.usage = WGPUMapMode_Write;
-        descriptor.mappedAtCreation = true;
+        descriptor.usage = WGPUBufferUsage_MapWrite;
+        descriptor.size = kOOMSize;
 
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-    }
-
-    // Test that it is invalid to map a buffer after mappedAtCreation but before Unmap
-    TEST_F(WireBufferMappingTests, MappedAtCreationThenMapFailure) {
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = 4;
-        descriptor.mappedAtCreation = true;
-
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
-        uint32_t apiBufferData = 1234;
-
-        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, 4)).WillOnce(Return(&apiBufferData));
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        EXPECT_NE(nullptr, static_cast<const uint32_t*>(
-                               wgpuBufferGetConstMappedRange(buffer, 0, kBufferSize)));
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
+        wgpuDeviceCreateBuffer(device, &descriptor);
+        EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
+        EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
         FlushClient();
     }
+}
 
-    // Check that trying to create a buffer of size MAX_SIZE_T is an error handling in the client
-    // and never gets to the server-side.
-    TEST_F(WireBufferMappingTests, MaxSizeMappableBufferOOMDirectly) {
-        size_t kOOMSize = std::numeric_limits<size_t>::max();
-        WGPUBuffer apiBuffer = api.GetNewBuffer();
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireBufferMappingTests, MapThenDisconnect) {
+    SetupBuffer(WGPUMapMode_Write);
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, this);
 
-        // Check for CreateBufferMapped.
-        {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.usage = WGPUBufferUsage_CopySrc;
-            descriptor.size = kOOMSize;
-            descriptor.mappedAtCreation = true;
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
 
-            wgpuDeviceCreateBuffer(device, &descriptor);
-            EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
-            EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
-            FlushClient();
-        }
+    FlushClient();
 
-        // Check for MapRead usage.
-        {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.usage = WGPUBufferUsage_MapRead;
-            descriptor.size = kOOMSize;
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
+    GetWireClient()->Disconnect();
+}
 
-            wgpuDeviceCreateBuffer(device, &descriptor);
-            EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
-            EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
-            FlushClient();
-        }
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireBufferMappingTests, MapAfterDisconnect) {
+    SetupBuffer(WGPUMapMode_Read);
 
-        // Check for MapWrite usage.
-        {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.usage = WGPUBufferUsage_MapWrite;
-            descriptor.size = kOOMSize;
+    GetWireClient()->Disconnect();
 
-            wgpuDeviceCreateBuffer(device, &descriptor);
-            EXPECT_CALL(api, DeviceInjectError(apiDevice, WGPUErrorType_OutOfMemory, _));
-            EXPECT_CALL(api, DeviceCreateErrorBuffer(apiDevice)).WillOnce(Return(apiBuffer));
-            FlushClient();
-        }
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this)).Times(1);
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireBufferMappingTests* pTest;
+    WGPUBuffer* pTestBuffer;
+    size_t numRequests;
+};
+
+static void ToMockBufferMapCallbackWithNewRequests(WGPUBufferMapAsyncStatus status,
+                                                   void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback is sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestBuffer, nullptr);
+
+    mockBufferMapCallback->Call(status, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuBufferMapAsync(*(testData->pTestBuffer), WGPUMapMode_Write, 0, sizeof(uint32_t),
+                           ToMockBufferMapCallback, testData->pTest);
     }
+}
 
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireBufferMappingTests, MapThenDisconnect) {
-        SetupBuffer(WGPUMapMode_Write);
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           this);
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireBufferMappingTests, MapInsideCallbackBeforeDisconnect) {
+    SetupBuffer(WGPUMapMode_Write);
+    TestData testData = {this, &buffer, 10};
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
+                       ToMockBufferMapCallbackWithNewRequests, &testData);
 
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
 
-        FlushClient();
+    FlushClient();
 
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
 
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireBufferMappingTests, MapAfterDisconnect) {
-        SetupBuffer(WGPUMapMode_Read);
+// Test that requests inside user callbacks before object destruction are called
+TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) {
+    TestData testData = {this, &buffer, 10};
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
+                       ToMockBufferMapCallbackWithNewRequests, &testData);
 
-        GetWireClient()->Disconnect();
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
 
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
-            .Times(1);
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, this);
-    }
+    FlushClient();
 
-    // Hack to pass in test context into user callback
-    struct TestData {
-        WireBufferMappingTests* pTest;
-        WGPUBuffer* pTestBuffer;
-        size_t numRequests;
-    };
-
-    static void ToMockBufferMapCallbackWithNewRequests(WGPUBufferMapAsyncStatus status,
-                                                       void* userdata) {
-        TestData* testData = reinterpret_cast<TestData*>(userdata);
-        // Mimic the user callback is sending new requests
-        ASSERT_NE(testData, nullptr);
-        ASSERT_NE(testData->pTest, nullptr);
-        ASSERT_NE(testData->pTestBuffer, nullptr);
-
-        mockBufferMapCallback->Call(status, testData->pTest);
-
-        // Send the requests a number of times
-        for (size_t i = 0; i < testData->numRequests; i++) {
-            wgpuBufferMapAsync(*(testData->pTestBuffer), WGPUMapMode_Write, 0, sizeof(uint32_t),
-                               ToMockBufferMapCallback, testData->pTest);
-        }
-    }
-
-    // Test that requests inside user callbacks before disconnect are called
-    TEST_F(WireBufferMappingTests, MapInsideCallbackBeforeDisconnect) {
-        SetupBuffer(WGPUMapMode_Write);
-        TestData testData = {this, &buffer, 10};
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
-                           ToMockBufferMapCallbackWithNewRequests, &testData);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, this))
-            .Times(1 + testData.numRequests);
-        GetWireClient()->Disconnect();
-    }
-
-    // Test that requests inside user callbacks before object destruction are called
-    TEST_F(WireBufferMappingWriteTests, MapInsideCallbackBeforeDestruction) {
-        TestData testData = {this, &buffer, 10};
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize,
-                           ToMockBufferMapCallbackWithNewRequests, &testData);
-
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize)).Times(1);
-
-        FlushClient();
-
-        EXPECT_CALL(*mockBufferMapCallback,
-                    Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, this))
-            .Times(1 + testData.numRequests);
-        wgpuBufferRelease(buffer);
-    }
+    EXPECT_CALL(*mockBufferMapCallback,
+                Call(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback, this))
+        .Times(1 + testData.numRequests);
+    wgpuBufferRelease(buffer);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
index e031890..4d7f7cd 100644
--- a/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireCreatePipelineAsyncTests.cpp
@@ -18,368 +18,368 @@
 #include "dawn/wire/WireClient.h"
 
 namespace dawn::wire {
-    namespace {
+namespace {
 
-        using testing::_;
-        using testing::InvokeWithoutArgs;
-        using testing::Mock;
-        using testing::Return;
-        using testing::Sequence;
-        using testing::StrEq;
-        using testing::StrictMock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Return;
+using testing::Sequence;
+using testing::StrEq;
+using testing::StrictMock;
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockCreateComputePipelineAsyncCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUCreatePipelineAsyncStatus status,
-                         WGPUComputePipeline pipeline,
-                         const char* message,
-                         void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockCreateComputePipelineAsyncCallback {
+  public:
+    MOCK_METHOD(void,
+                Call,
+                (WGPUCreatePipelineAsyncStatus status,
+                 WGPUComputePipeline pipeline,
+                 const char* message,
+                 void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockCreateComputePipelineAsyncCallback>>
-            mockCreateComputePipelineAsyncCallback;
-        void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
-                                                      WGPUComputePipeline pipeline,
-                                                      const char* message,
-                                                      void* userdata) {
-            mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockCreateComputePipelineAsyncCallback>>
+    mockCreateComputePipelineAsyncCallback;
+void ToMockCreateComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
+                                              WGPUComputePipeline pipeline,
+                                              const char* message,
+                                              void* userdata) {
+    mockCreateComputePipelineAsyncCallback->Call(status, pipeline, message, userdata);
+}
 
-        class MockCreateRenderPipelineAsyncCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUCreatePipelineAsyncStatus status,
-                         WGPURenderPipeline pipeline,
-                         const char* message,
-                         void* userdata));
-        };
+class MockCreateRenderPipelineAsyncCallback {
+  public:
+    MOCK_METHOD(void,
+                Call,
+                (WGPUCreatePipelineAsyncStatus status,
+                 WGPURenderPipeline pipeline,
+                 const char* message,
+                 void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockCreateRenderPipelineAsyncCallback>>
-            mockCreateRenderPipelineAsyncCallback;
-        void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
-                                                     WGPURenderPipeline pipeline,
-                                                     const char* message,
-                                                     void* userdata) {
-            mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockCreateRenderPipelineAsyncCallback>>
+    mockCreateRenderPipelineAsyncCallback;
+void ToMockCreateRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus status,
+                                             WGPURenderPipeline pipeline,
+                                             const char* message,
+                                             void* userdata) {
+    mockCreateRenderPipelineAsyncCallback->Call(status, pipeline, message, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireCreatePipelineAsyncTest : public WireTest {
-      public:
-        void SetUp() override {
-            WireTest::SetUp();
+class WireCreatePipelineAsyncTest : public WireTest {
+  public:
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockCreateComputePipelineAsyncCallback =
-                std::make_unique<StrictMock<MockCreateComputePipelineAsyncCallback>>();
-            mockCreateRenderPipelineAsyncCallback =
-                std::make_unique<StrictMock<MockCreateRenderPipelineAsyncCallback>>();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockCreateComputePipelineAsyncCallback = nullptr;
-            mockCreateRenderPipelineAsyncCallback = nullptr;
-        }
-
-        void FlushClient() {
-            WireTest::FlushClient();
-            Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
-        }
-    };
-
-    // Test when creating a compute pipeline with CreateComputePipelineAsync() successfully.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
-
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
-
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-
-        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateComputePipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
-            .Times(1);
-
-        FlushServer();
+        mockCreateComputePipelineAsyncCallback =
+            std::make_unique<StrictMock<MockCreateComputePipelineAsyncCallback>>();
+        mockCreateRenderPipelineAsyncCallback =
+            std::make_unique<StrictMock<MockCreateRenderPipelineAsyncCallback>>();
     }
 
-    // Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+    void TearDown() override {
+        WireTest::TearDown();
 
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
-
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-
-        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateComputePipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
-            .Times(1);
-
-        FlushServer();
+        // Delete mock so that expectations are checked
+        mockCreateComputePipelineAsyncCallback = nullptr;
+        mockCreateRenderPipelineAsyncCallback = nullptr;
     }
 
-    // Test when creating a render pipeline with CreateRenderPipelineAsync() successfully.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
-
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
-
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateRenderPipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
-            .Times(1);
-
-        FlushServer();
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
     }
 
-    // Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
-
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
-
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateRenderPipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
-            }));
-
-        FlushClient();
-
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
-            .Times(1);
-
-        FlushServer();
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockCreateComputePipelineAsyncCallback);
     }
+};
 
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+// Test when creating a compute pipeline with CreateComputePipelineAsync() successfully.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncSuccess) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
 
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
 
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateRenderPipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
 
-        FlushClient();
+    FlushClient();
 
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
+        .Times(1);
 
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+    FlushServer();
+}
 
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
+// Test when creating a compute pipeline with CreateComputePipelineAsync() results in an error.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncError) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
 
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-        EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallDeviceCreateComputePipelineAsyncCallback(
-                    apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
-            }));
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
 
-        FlushClient();
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
 
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
+        }));
 
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+    FlushClient();
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
+        .Times(1);
 
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
+    FlushServer();
+}
 
-        FlushClient();
+// Test when creating a render pipeline with CreateRenderPipelineAsync() successfully.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncSuccess) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        GetWireClient()->Disconnect();
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
 
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
-            .Times(1);
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
-    }
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
 
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) {
-        WGPUShaderModuleDescriptor csDescriptor{};
-        WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
-        WGPUShaderModule apiCsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
 
-        WGPUComputePipelineDescriptor descriptor{};
-        descriptor.compute.module = csModule;
-        descriptor.compute.entryPoint = "main";
+    FlushClient();
 
-        FlushClient();
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Success, _, StrEq(""), this))
+        .Times(1);
 
-        GetWireClient()->Disconnect();
+    FlushServer();
+}
 
-        EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
-            .Times(1);
+// Test when creating a render pipeline with CreateRenderPipelineAsync() results in an error.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncError) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
-                                             ToMockCreateComputePipelineAsyncCallback, this);
-    }
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
 
-    TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) {
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule));
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
 
-        WGPURenderPipelineDescriptor pipelineDescriptor{};
-        pipelineDescriptor.vertex.module = module;
-        pipelineDescriptor.vertex.entryPoint = "main";
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Error, nullptr, "Some error message");
+        }));
 
-        WGPUFragmentState fragment = {};
-        fragment.module = module;
-        fragment.entryPoint = "main";
-        pipelineDescriptor.fragment = &fragment;
+    FlushClient();
 
-        wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
-                                            ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_Error, _, StrEq("Some error message"), this))
+        .Times(1);
 
-        EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _));
-        FlushClient();
+    FlushServer();
+}
 
-        EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
-                    Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this))
-            .Times(1);
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncThenDisconnect) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        wgpuDeviceRelease(device);
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
 
-        // Expect release on all objects created by the client.
-        Sequence s1, s2;
-        EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
-        EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2);
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
 
-        FlushClient();
-        DefaultApiDeviceWasReleased();
-    }
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateRenderPipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncThenDisconnect) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+    EXPECT_CALL(api, OnDeviceCreateComputePipelineAsync(apiDevice, _, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallDeviceCreateComputePipelineAsyncCallback(
+                apiDevice, WGPUCreatePipelineAsyncStatus_Success, nullptr, "");
+        }));
+
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, _, _, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateRenderPipelineAsyncAfterDisconnect) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
+        .Times(1);
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireCreatePipelineAsyncTest, CreateComputePipelineAsyncAfterDisconnect) {
+    WGPUShaderModuleDescriptor csDescriptor{};
+    WGPUShaderModule csModule = wgpuDeviceCreateShaderModule(device, &csDescriptor);
+    WGPUShaderModule apiCsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiCsModule));
+
+    WGPUComputePipelineDescriptor descriptor{};
+    descriptor.compute.module = csModule;
+    descriptor.compute.entryPoint = "main";
+
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockCreateComputePipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, _, this))
+        .Times(1);
+
+    wgpuDeviceCreateComputePipelineAsync(device, &descriptor,
+                                         ToMockCreateComputePipelineAsyncCallback, this);
+}
+
+TEST_F(WireCreatePipelineAsyncTest, DeviceDeletedBeforeCallback) {
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule module = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiModule));
+
+    WGPURenderPipelineDescriptor pipelineDescriptor{};
+    pipelineDescriptor.vertex.module = module;
+    pipelineDescriptor.vertex.entryPoint = "main";
+
+    WGPUFragmentState fragment = {};
+    fragment.module = module;
+    fragment.entryPoint = "main";
+    pipelineDescriptor.fragment = &fragment;
+
+    wgpuDeviceCreateRenderPipelineAsync(device, &pipelineDescriptor,
+                                        ToMockCreateRenderPipelineAsyncCallback, this);
+
+    EXPECT_CALL(api, OnDeviceCreateRenderPipelineAsync(apiDevice, _, _, _));
+    FlushClient();
+
+    EXPECT_CALL(*mockCreateRenderPipelineAsyncCallback,
+                Call(WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr, _, this))
+        .Times(1);
+
+    wgpuDeviceRelease(device);
+
+    // Expect release on all objects created by the client.
+    Sequence s1, s2;
+    EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
+    EXPECT_CALL(api, ShaderModuleRelease(apiModule)).Times(1).InSequence(s2);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2);
+
+    FlushClient();
+    DefaultApiDeviceWasReleased();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
index ad34939..4f2f947 100644
--- a/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireDestroyObjectTests.cpp
@@ -17,47 +17,46 @@
 
 namespace dawn::wire {
 
-    using testing::Return;
-    using testing::Sequence;
+using testing::Return;
+using testing::Sequence;
 
-    class WireDestroyObjectTests : public WireTest {};
+class WireDestroyObjectTests : public WireTest {};
 
-    // Test that destroying the device also destroys child objects.
-    TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
-        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that destroying the device also destroys child objects.
+TEST_F(WireDestroyObjectTests, DestroyDeviceDestroysChildren) {
+    WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiEncoder));
+    WGPUCommandEncoder apiEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr)).WillOnce(Return(apiEncoder));
 
-        FlushClient();
+    FlushClient();
 
-        // Release the device. It should cause the command encoder to be destroyed.
-        wgpuDeviceRelease(device);
+    // Release the device. It should cause the command encoder to be destroyed.
+    wgpuDeviceRelease(device);
 
-        Sequence s1, s2;
-        // The device and child objects should be released.
-        EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1);
-        EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2);
+    Sequence s1, s2;
+    // The device and child objects should be released.
+    EXPECT_CALL(api, CommandEncoderRelease(apiEncoder)).InSequence(s1);
+    EXPECT_CALL(api, QueueRelease(apiQueue)).InSequence(s2);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).InSequence(s1, s2);
 
-        FlushClient();
+    FlushClient();
 
-        // Signal that we already released and cleared callbacks for |apiDevice|
-        DefaultApiDeviceWasReleased();
+    // Signal that we already released and cleared callbacks for |apiDevice|
+    DefaultApiDeviceWasReleased();
 
-        // Using the command encoder should be an error.
-        wgpuCommandEncoderFinish(encoder, nullptr);
-        FlushClient(false);
-    }
+    // Using the command encoder should be an error.
+    wgpuCommandEncoderFinish(encoder, nullptr);
+    FlushClient(false);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
index d8f397c..2218194 100644
--- a/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireDisconnectTests.cpp
@@ -20,165 +20,165 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Exactly;
-    using testing::InvokeWithoutArgs;
-    using testing::MockCallback;
-    using testing::Return;
-    using testing::Sequence;
-    using testing::StrEq;
+using testing::_;
+using testing::Exactly;
+using testing::InvokeWithoutArgs;
+using testing::MockCallback;
+using testing::Return;
+using testing::Sequence;
+using testing::StrEq;
 
-    namespace {
+namespace {
 
-        class WireDisconnectTests : public WireTest {};
+class WireDisconnectTests : public WireTest {};
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    // Test that commands are not received if the client disconnects.
-    TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
-        // Check that commands work at all.
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that commands are not received if the client disconnects.
+TEST_F(WireDisconnectTests, CommandsAfterDisconnect) {
+    // Check that commands work at all.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
-        FlushClient();
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
 
-        // Disconnect.
-        GetWireClient()->Disconnect();
+    // Disconnect.
+    GetWireClient()->Disconnect();
 
-        // Command is not received because client disconnected.
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0));
-        FlushClient();
-    }
+    // Command is not received because client disconnected.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(_, _)).Times(Exactly(0));
+    FlushClient();
+}
 
-    // Test that commands that are serialized before a disconnect but flushed
-    // after are received.
-    TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
-        // Check that commands work at all.
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
+// Test that commands that are serialized before a disconnect but flushed
+// after are received.
+TEST_F(WireDisconnectTests, FlushAfterDisconnect) {
+    // Check that commands work at all.
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
 
-        // Disconnect.
-        GetWireClient()->Disconnect();
+    // Disconnect.
+    GetWireClient()->Disconnect();
 
-        // Already-serialized commmands are still received.
-        WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCmdBufEncoder));
-        FlushClient();
-    }
+    // Already-serialized commands are still received.
+    WGPUCommandEncoder apiCmdBufEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCmdBufEncoder));
+    FlushClient();
+}
 
-    // Check that disconnecting the wire client calls the device lost callback exacty once.
-    TEST_F(WireDisconnectTests, CallsDeviceLostCallback) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that disconnecting the wire client calls the device lost callback exactly once.
+TEST_F(WireDisconnectTests, CallsDeviceLostCallback) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        // Disconnect the wire client. We should receive device lost only once.
-        EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
-            .Times(Exactly(1));
-        GetWireClient()->Disconnect();
-        GetWireClient()->Disconnect();
-    }
+    // Disconnect the wire client. We should receive device lost only once.
+    EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
+        .Times(Exactly(1));
+    GetWireClient()->Disconnect();
+    GetWireClient()->Disconnect();
+}
 
-    // Check that disconnecting the wire client after a device loss does not trigger the callback
-    // again.
-    TEST_F(WireDisconnectTests, ServerLostThenDisconnect) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that disconnecting the wire client after a device loss does not trigger the callback
+// again.
+TEST_F(WireDisconnectTests, ServerLostThenDisconnect) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "some reason");
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "some reason");
 
-        // Flush the device lost return command.
-        EXPECT_CALL(mockDeviceLostCallback,
-                    Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this))
-            .Times(Exactly(1));
-        FlushServer();
+    // Flush the device lost return command.
+    EXPECT_CALL(mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("some reason"), this))
+        .Times(Exactly(1));
+    FlushServer();
 
-        // Disconnect the client. We shouldn't see the lost callback again.
-        EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
-        GetWireClient()->Disconnect();
-    }
+    // Disconnect the client. We shouldn't see the lost callback again.
+    EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+    GetWireClient()->Disconnect();
+}
 
-    // Check that disconnecting the wire client inside the device loss callback does not trigger the
-    // callback again.
-    TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that disconnecting the wire client inside the device loss callback does not trigger the
+// callback again.
+TEST_F(WireDisconnectTests, ServerLostThenDisconnectInCallback) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "lost reason");
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "lost reason");
 
-        // Disconnect the client inside the lost callback. We should see the callback
-        // only once.
-        EXPECT_CALL(mockDeviceLostCallback,
-                    Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
-                GetWireClient()->Disconnect();
-            }));
-        FlushServer();
-    }
+    // Disconnect the client inside the lost callback. We should see the callback
+    // only once.
+    EXPECT_CALL(mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("lost reason"), this))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+            GetWireClient()->Disconnect();
+        }));
+    FlushServer();
+}
 
-    // Check that a device loss after a disconnect does not trigger the callback again.
-    TEST_F(WireDisconnectTests, DisconnectThenServerLost) {
-        MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
-        wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
-                                        mockDeviceLostCallback.MakeUserdata(this));
+// Check that a device loss after a disconnect does not trigger the callback again.
+TEST_F(WireDisconnectTests, DisconnectThenServerLost) {
+    MockCallback<WGPUDeviceLostCallback> mockDeviceLostCallback;
+    wgpuDeviceSetDeviceLostCallback(device, mockDeviceLostCallback.Callback(),
+                                    mockDeviceLostCallback.MakeUserdata(this));
 
-        // Disconnect the client. We should see the callback once.
-        EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
-            .Times(Exactly(1));
-        GetWireClient()->Disconnect();
+    // Disconnect the client. We should see the callback once.
+    EXPECT_CALL(mockDeviceLostCallback, Call(WGPUDeviceLostReason_Undefined, _, this))
+        .Times(Exactly(1));
+    GetWireClient()->Disconnect();
 
-        // Lose the device on the server. The client callback shouldn't be
-        // called again.
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "lost reason");
-        EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
-        FlushServer();
-    }
+    // Lose the device on the server. The client callback shouldn't be
+    // called again.
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "lost reason");
+    EXPECT_CALL(mockDeviceLostCallback, Call(_, _, _)).Times(Exactly(0));
+    FlushServer();
+}
 
-    // Test that client objects are all destroyed if the WireClient is destroyed.
-    TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) {
-        WGPUSamplerDescriptor desc = {};
-        wgpuDeviceCreateCommandEncoder(device, nullptr);
-        wgpuDeviceCreateSampler(device, &desc);
+// Test that client objects are all destroyed if the WireClient is destroyed.
+TEST_F(WireDisconnectTests, DeleteClientDestroysObjects) {
+    WGPUSamplerDescriptor desc = {};
+    wgpuDeviceCreateCommandEncoder(device, nullptr);
+    wgpuDeviceCreateSampler(device, &desc);
 
-        WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
-            .WillOnce(Return(apiCommandEncoder));
+    WGPUCommandEncoder apiCommandEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(apiDevice, nullptr))
+        .WillOnce(Return(apiCommandEncoder));
 
-        WGPUSampler apiSampler = api.GetNewSampler();
-        EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler));
+    WGPUSampler apiSampler = api.GetNewSampler();
+    EXPECT_CALL(api, DeviceCreateSampler(apiDevice, _)).WillOnce(Return(apiSampler));
 
-        FlushClient();
+    FlushClient();
 
-        DeleteClient();
+    DeleteClient();
 
-        // Expect release on all objects created by the client.
-        Sequence s1, s2, s3;
-        EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
-        EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2);
-        EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
-            .Times(1)
-            .InSequence(s1, s2);
-        EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3);
-        FlushClient();
+    // Expect release on all objects created by the client.
+    Sequence s1, s2, s3;
+    EXPECT_CALL(api, QueueRelease(apiQueue)).Times(1).InSequence(s1);
+    EXPECT_CALL(api, CommandEncoderRelease(apiCommandEncoder)).Times(1).InSequence(s2);
+    EXPECT_CALL(api, SamplerRelease(apiSampler)).Times(1).InSequence(s3);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(apiDevice, nullptr, nullptr))
+        .Times(1)
+        .InSequence(s1, s2);
+    EXPECT_CALL(api, DeviceRelease(apiDevice)).Times(1).InSequence(s1, s2, s3);
+    FlushClient();
 
-        // Signal that we already released and cleared callbacks for |apiDevice|
-        DefaultApiDeviceWasReleased();
-    }
+    // Signal that we already released and cleared callbacks for |apiDevice|
+    DefaultApiDeviceWasReleased();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
index 045125d..06a5d88 100644
--- a/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireErrorCallbackTests.cpp
@@ -19,304 +19,293 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::DoAll;
-    using testing::Mock;
-    using testing::Return;
-    using testing::SaveArg;
-    using testing::StrEq;
-    using testing::StrictMock;
+using testing::_;
+using testing::DoAll;
+using testing::Mock;
+using testing::Return;
+using testing::SaveArg;
+using testing::StrEq;
+using testing::StrictMock;
 
-    namespace {
+namespace {
 
-        // Mock classes to add expectations on the wire calling callbacks
-        class MockDeviceErrorCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
-        };
+// Mock classes to add expectations on the wire calling callbacks
+class MockDeviceErrorCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDeviceErrorCallback>> mockDeviceErrorCallback;
-        void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) {
-            mockDeviceErrorCallback->Call(type, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDeviceErrorCallback>> mockDeviceErrorCallback;
+void ToMockDeviceErrorCallback(WGPUErrorType type, const char* message, void* userdata) {
+    mockDeviceErrorCallback->Call(type, message, userdata);
+}
 
-        class MockDevicePopErrorScopeCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
-        };
+class MockDevicePopErrorScopeCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUErrorType type, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>>
-            mockDevicePopErrorScopeCallback;
-        void ToMockDevicePopErrorScopeCallback(WGPUErrorType type,
-                                               const char* message,
-                                               void* userdata) {
-            mockDevicePopErrorScopeCallback->Call(type, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDevicePopErrorScopeCallback>> mockDevicePopErrorScopeCallback;
+void ToMockDevicePopErrorScopeCallback(WGPUErrorType type, const char* message, void* userdata) {
+    mockDevicePopErrorScopeCallback->Call(type, message, userdata);
+}
 
-        class MockDeviceLoggingCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata));
-        };
+class MockDeviceLoggingCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPULoggingType type, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDeviceLoggingCallback>> mockDeviceLoggingCallback;
-        void ToMockDeviceLoggingCallback(WGPULoggingType type,
-                                         const char* message,
-                                         void* userdata) {
-            mockDeviceLoggingCallback->Call(type, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDeviceLoggingCallback>> mockDeviceLoggingCallback;
+void ToMockDeviceLoggingCallback(WGPULoggingType type, const char* message, void* userdata) {
+    mockDeviceLoggingCallback->Call(type, message, userdata);
+}
 
-        class MockDeviceLostCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUDeviceLostReason reason, const char* message, void* userdata));
-        };
+class MockDeviceLostCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUDeviceLostReason reason, const char* message, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockDeviceLostCallback>> mockDeviceLostCallback;
-        void ToMockDeviceLostCallback(WGPUDeviceLostReason reason,
-                                      const char* message,
-                                      void* userdata) {
-            mockDeviceLostCallback->Call(reason, message, userdata);
-        }
+std::unique_ptr<StrictMock<MockDeviceLostCallback>> mockDeviceLostCallback;
+void ToMockDeviceLostCallback(WGPUDeviceLostReason reason, const char* message, void* userdata) {
+    mockDeviceLostCallback->Call(reason, message, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireErrorCallbackTests : public WireTest {
-      public:
-        WireErrorCallbackTests() {
-        }
-        ~WireErrorCallbackTests() override = default;
+class WireErrorCallbackTests : public WireTest {
+  public:
+    WireErrorCallbackTests() {}
+    ~WireErrorCallbackTests() override = default;
 
-        void SetUp() override {
-            WireTest::SetUp();
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockDeviceErrorCallback = std::make_unique<StrictMock<MockDeviceErrorCallback>>();
-            mockDeviceLoggingCallback = std::make_unique<StrictMock<MockDeviceLoggingCallback>>();
-            mockDevicePopErrorScopeCallback =
-                std::make_unique<StrictMock<MockDevicePopErrorScopeCallback>>();
-            mockDeviceLostCallback = std::make_unique<StrictMock<MockDeviceLostCallback>>();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            mockDeviceErrorCallback = nullptr;
-            mockDeviceLoggingCallback = nullptr;
-            mockDevicePopErrorScopeCallback = nullptr;
-            mockDeviceLostCallback = nullptr;
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-
-            Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback);
-            Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback);
-        }
-    };
-
-    // Test the return wire for device error callbacks
-    TEST_F(WireErrorCallbackTests, DeviceErrorCallback) {
-        wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this);
-
-        // Setting the error callback should stay on the client side and do nothing
-        FlushClient();
-
-        // Calling the callback on the server side will result in the callback being called on the
-        // client side
-        api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation,
-                                                         "Some error message");
-
-        EXPECT_CALL(*mockDeviceErrorCallback,
-                    Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
-            .Times(1);
-
-        FlushServer();
+        mockDeviceErrorCallback = std::make_unique<StrictMock<MockDeviceErrorCallback>>();
+        mockDeviceLoggingCallback = std::make_unique<StrictMock<MockDeviceLoggingCallback>>();
+        mockDevicePopErrorScopeCallback =
+            std::make_unique<StrictMock<MockDevicePopErrorScopeCallback>>();
+        mockDeviceLostCallback = std::make_unique<StrictMock<MockDeviceLostCallback>>();
     }
 
-    // Test the return wire for device user warning callbacks
-    TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) {
-        wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this);
+    void TearDown() override {
+        WireTest::TearDown();
 
-        // Setting the injected warning callback should stay on the client side and do nothing
-        FlushClient();
-
-        // Calling the callback on the server side will result in the callback being called on the
-        // client side
-        api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message");
-
-        EXPECT_CALL(*mockDeviceLoggingCallback,
-                    Call(WGPULoggingType_Info, StrEq("Some message"), this))
-            .Times(1);
-
-        FlushServer();
+        mockDeviceErrorCallback = nullptr;
+        mockDeviceLoggingCallback = nullptr;
+        mockDevicePopErrorScopeCallback = nullptr;
+        mockDeviceLostCallback = nullptr;
     }
 
-    // Test the return wire for error scopes.
-    TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    void FlushServer() {
+        WireTest::FlushServer();
+
+        Mock::VerifyAndClearExpectations(&mockDeviceErrorCallback);
+        Mock::VerifyAndClearExpectations(&mockDevicePopErrorScopeCallback);
+    }
+};
+
+// Test the return wire for device error callbacks
+TEST_F(WireErrorCallbackTests, DeviceErrorCallback) {
+    wgpuDeviceSetUncapturedErrorCallback(device, ToMockDeviceErrorCallback, this);
+
+    // Setting the error callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetUncapturedErrorCallbackCallback(apiDevice, WGPUErrorType_Validation,
+                                                     "Some error message");
+
+    EXPECT_CALL(*mockDeviceErrorCallback,
+                Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test the return wire for device user warning callbacks
+TEST_F(WireErrorCallbackTests, DeviceLoggingCallback) {
+    wgpuDeviceSetLoggingCallback(device, ToMockDeviceLoggingCallback, this);
+
+    // Setting the injected warning callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetLoggingCallbackCallback(apiDevice, WGPULoggingType_Info, "Some message");
+
+    EXPECT_CALL(*mockDeviceLoggingCallback, Call(WGPULoggingType_Info, StrEq("Some message"), this))
+        .Times(1);
+
+    FlushServer();
+}
+
+// Test the return wire for error scopes.
+TEST_F(WireErrorCallbackTests, PushPopErrorScopeCallback) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
+
+    WGPUErrorCallback callback;
+    void* userdata;
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+        .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+        .Times(1);
+    callback(WGPUErrorType_Validation, "Some error message", userdata);
+    FlushServer();
+}
+
+// Test the return wire for error scopes when callbacks return in a various orders.
+TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) {
+    // Two error scopes are popped, and the first one returns first.
+    {
+        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         FlushClient();
 
-        WGPUErrorCallback callback;
-        void* userdata;
+        WGPUErrorCallback callback1;
+        WGPUErrorCallback callback2;
+        void* userdata1;
+        void* userdata2;
         EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-            .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+            .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
+            .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
         wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
         FlushClient();
 
         EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_Validation, StrEq("Some error message"), this))
+                    Call(WGPUErrorType_Validation, StrEq("First error message"), this))
             .Times(1);
-        callback(WGPUErrorType_Validation, "Some error message", userdata);
+        callback1(WGPUErrorType_Validation, "First error message", userdata1);
+        FlushServer();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
+            .Times(1);
+        callback2(WGPUErrorType_Validation, "Second error message", userdata2);
         FlushServer();
     }
 
-    // Test the return wire for error scopes when callbacks return in a various orders.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeCallbackOrdering) {
-        // Two error scopes are popped, and the first one returns first.
-        {
-            EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            FlushClient();
-
-            WGPUErrorCallback callback1;
-            WGPUErrorCallback callback2;
-            void* userdata1;
-            void* userdata2;
-            EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-                .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
-                .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
-            FlushClient();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("First error message"), this))
-                .Times(1);
-            callback1(WGPUErrorType_Validation, "First error message", userdata1);
-            FlushServer();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
-                .Times(1);
-            callback2(WGPUErrorType_Validation, "Second error message", userdata2);
-            FlushServer();
-        }
-
-        // Two error scopes are popped, and the second one returns first.
-        {
-            EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-            FlushClient();
-
-            WGPUErrorCallback callback1;
-            WGPUErrorCallback callback2;
-            void* userdata1;
-            void* userdata2;
-            EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-                .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
-                .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-            wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
-            FlushClient();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
-                .Times(1);
-            callback2(WGPUErrorType_Validation, "Second error message", userdata2);
-            FlushServer();
-
-            EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                        Call(WGPUErrorType_Validation, StrEq("First error message"), this))
-                .Times(1);
-            callback1(WGPUErrorType_Validation, "First error message", userdata1);
-            FlushServer();
-        }
-    }
-
-    // Test the return wire for error scopes in flight when the device is destroyed.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    // Two error scopes are popped, and the second one returns first.
+    {
+        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(2);
+        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
         FlushClient();
 
-        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
-        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-        FlushClient();
-
-        // Incomplete callback called in Device destructor. This is resolved after the end of this
-        // test.
-        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_Unknown, ValidStringMessage(), this))
-            .Times(1);
-    }
-
-    // Test that registering a callback then wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
-        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-
-        EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
-        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-        FlushClient();
-
-        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
-            .Times(1);
-        GetWireClient()->Disconnect();
-    }
-
-    // Test that registering a callback after wire disconnect calls the callback with
-    // DeviceLost.
-    TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) {
-        EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
-        wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
-        FlushClient();
-
-        GetWireClient()->Disconnect();
-
-        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
-            .Times(1);
-        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
-    }
-
-    // Empty stack (We are emulating the errors that would be callback-ed from native).
-    TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) {
-        WGPUErrorCallback callback;
-        void* userdata;
+        WGPUErrorCallback callback1;
+        WGPUErrorCallback callback2;
+        void* userdata1;
+        void* userdata2;
         EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
-            .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+            .WillOnce(DoAll(SaveArg<1>(&callback1), SaveArg<2>(&userdata1), Return(true)))
+            .WillOnce(DoAll(SaveArg<1>(&callback2), SaveArg<2>(&userdata2), Return(true)));
         wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+        wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this + 1);
         FlushClient();
 
         EXPECT_CALL(*mockDevicePopErrorScopeCallback,
-                    Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this))
+                    Call(WGPUErrorType_Validation, StrEq("Second error message"), this + 1))
             .Times(1);
-        callback(WGPUErrorType_Validation, "No error scopes to pop", userdata);
+        callback2(WGPUErrorType_Validation, "Second error message", userdata2);
+        FlushServer();
+
+        EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                    Call(WGPUErrorType_Validation, StrEq("First error message"), this))
+            .Times(1);
+        callback1(WGPUErrorType_Validation, "First error message", userdata1);
         FlushServer();
     }
+}
 
-    // Test the return wire for device lost callback
-    TEST_F(WireErrorCallbackTests, DeviceLostCallback) {
-        wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this);
+// Test the return wire for error scopes in flight when the device is destroyed.
+TEST_F(WireErrorCallbackTests, PopErrorScopeDeviceInFlightDestroy) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
 
-        // Setting the error callback should stay on the client side and do nothing
-        FlushClient();
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
 
-        // Calling the callback on the server side will result in the callback being called on the
-        // client side
-        api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
-                                                    "Some error message");
+    // Incomplete callback called in Device destructor. This is resolved after the end of this
+    // test.
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Unknown, ValidStringMessage(), this))
+        .Times(1);
+}
 
-        EXPECT_CALL(*mockDeviceLostCallback,
-                    Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this))
-            .Times(1);
+// Test that registering a callback then wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireErrorCallbackTests, PopErrorScopeThenDisconnect) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
 
-        FlushServer();
-    }
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _)).WillOnce(Return(true));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test that registering a callback after wire disconnect calls the callback with
+// DeviceLost.
+TEST_F(WireErrorCallbackTests, PopErrorScopeAfterDisconnect) {
+    EXPECT_CALL(api, DevicePushErrorScope(apiDevice, WGPUErrorFilter_Validation)).Times(1);
+    wgpuDevicePushErrorScope(device, WGPUErrorFilter_Validation);
+    FlushClient();
+
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_DeviceLost, ValidStringMessage(), this))
+        .Times(1);
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+}
+
+// Empty stack (We are emulating the errors that would be callback-ed from native).
+TEST_F(WireErrorCallbackTests, PopErrorScopeEmptyStack) {
+    WGPUErrorCallback callback;
+    void* userdata;
+    EXPECT_CALL(api, OnDevicePopErrorScope(apiDevice, _, _))
+        .WillOnce(DoAll(SaveArg<1>(&callback), SaveArg<2>(&userdata), Return(true)));
+    wgpuDevicePopErrorScope(device, ToMockDevicePopErrorScopeCallback, this);
+    FlushClient();
+
+    EXPECT_CALL(*mockDevicePopErrorScopeCallback,
+                Call(WGPUErrorType_Validation, StrEq("No error scopes to pop"), this))
+        .Times(1);
+    callback(WGPUErrorType_Validation, "No error scopes to pop", userdata);
+    FlushServer();
+}
+
+// Test the return wire for device lost callback
+TEST_F(WireErrorCallbackTests, DeviceLostCallback) {
+    wgpuDeviceSetDeviceLostCallback(device, ToMockDeviceLostCallback, this);
+
+    // Setting the error callback should stay on the client side and do nothing
+    FlushClient();
+
+    // Calling the callback on the server side will result in the callback being called on the
+    // client side
+    api.CallDeviceSetDeviceLostCallbackCallback(apiDevice, WGPUDeviceLostReason_Undefined,
+                                                "Some error message");
+
+    EXPECT_CALL(*mockDeviceLostCallback,
+                Call(WGPUDeviceLostReason_Undefined, StrEq("Some error message"), this))
+        .Times(1);
+
+    FlushServer();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireExtensionTests.cpp b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
index 63a78c4..6060ce5 100644
--- a/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireExtensionTests.cpp
@@ -16,79 +16,78 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Invoke;
-    using testing::NotNull;
-    using testing::Return;
-    using testing::Unused;
+using testing::_;
+using testing::Invoke;
+using testing::NotNull;
+using testing::Return;
+using testing::Unused;
 
-    class WireExtensionTests : public WireTest {
-      public:
-        WireExtensionTests() {
-        }
-        ~WireExtensionTests() override = default;
-    };
+class WireExtensionTests : public WireTest {
+  public:
+    WireExtensionTests() {}
+    ~WireExtensionTests() override = default;
+};
 
-    // Serialize/Deserializes a chained struct correctly.
-    TEST_F(WireExtensionTests, ChainedStruct) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Serialize/Deserializes a chained struct correctly.
+TEST_F(WireExtensionTests, ChainedStruct) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt = {};
-        clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt.chain.next = nullptr;
-        clientExt.clampDepth = true;
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt.chain.next = nullptr;
+    clientExt.clampDepth = true;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
-                        serverDesc->primitive.nextInChain);
-                    EXPECT_EQ(ext->chain.sType, clientExt.chain.sType);
-                    EXPECT_EQ(ext->clampDepth, true);
-                    EXPECT_EQ(ext->chain.next, nullptr);
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext->chain.sType, clientExt.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
+                EXPECT_EQ(ext->chain.next, nullptr);
 
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
-    // Serialize/Deserializes multiple chained structs correctly.
-    TEST_F(WireExtensionTests, MutlipleChainedStructs) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Serialize/Deserializes multiple chained structs correctly.
+TEST_F(WireExtensionTests, MutlipleChainedStructs) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt2 = {};
-        clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt2.chain.next = nullptr;
-        clientExt2.clampDepth = false;
+    WGPUPrimitiveDepthClampingState clientExt2 = {};
+    clientExt2.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt2.chain.next = nullptr;
+    clientExt2.clampDepth = false;
 
-        WGPUPrimitiveDepthClampingState clientExt1 = {};
-        clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt1.chain.next = &clientExt2.chain;
-        clientExt1.clampDepth = true;
+    WGPUPrimitiveDepthClampingState clientExt1 = {};
+    clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt1.chain.next = &clientExt2.chain;
+    clientExt1.clampDepth = true;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
-                                 -> WGPURenderPipeline {
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                 const auto* ext1 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                     serverDesc->primitive.nextInChain);
                 EXPECT_EQ(ext1->chain.sType, clientExt1.chain.sType);
@@ -102,17 +101,17 @@
 
                 return api.GetNewRenderPipeline();
             }));
-        FlushClient();
+    FlushClient();
 
-        // Swap the order of the chained structs.
-        renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
-        clientExt2.chain.next = &clientExt1.chain;
-        clientExt1.chain.next = nullptr;
+    // Swap the order of the chained structs.
+    renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
+    clientExt2.chain.next = &clientExt1.chain;
+    clientExt1.chain.next = nullptr;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke([&](Unused, const WGPURenderPipelineDescriptor* serverDesc)
-                                 -> WGPURenderPipeline {
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
                 const auto* ext2 = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
                     serverDesc->primitive.nextInChain);
                 EXPECT_EQ(ext2->chain.sType, clientExt2.chain.sType);
@@ -126,123 +125,123 @@
 
                 return api.GetNewRenderPipeline();
             }));
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that a chained struct with Invalid sType passes through as Invalid.
-    TEST_F(WireExtensionTests, InvalidSType) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Test that a chained struct with Invalid sType passes through as Invalid.
+TEST_F(WireExtensionTests, InvalidSType) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt = {};
-        clientExt.chain.sType = WGPUSType_Invalid;
-        clientExt.chain.next = nullptr;
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = WGPUSType_Invalid;
+    clientExt.chain.next = nullptr;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+                EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
-    // Test that a chained struct with unknown sType passes through as Invalid.
-    TEST_F(WireExtensionTests, UnknownSType) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Test that a chained struct with unknown sType passes through as Invalid.
+TEST_F(WireExtensionTests, UnknownSType) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt = {};
-        clientExt.chain.sType = static_cast<WGPUSType>(-1);
-        clientExt.chain.next = nullptr;
+    WGPUPrimitiveDepthClampingState clientExt = {};
+    clientExt.chain.sType = static_cast<WGPUSType>(-1);
+    clientExt.chain.next = nullptr;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+                EXPECT_EQ(serverDesc->primitive.nextInChain->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
-    // Test that if both an invalid and valid stype are passed on the chain, only the invalid
-    // sType passes through as Invalid.
-    TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) {
-        WGPUShaderModuleDescriptor shaderModuleDesc = {};
-        WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
-        WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
-        FlushClient();
+// Test that if both an invalid and valid stype are passed on the chain, only the invalid
+// sType passes through as Invalid.
+TEST_F(WireExtensionTests, ValidAndInvalidSTypeInChain) {
+    WGPUShaderModuleDescriptor shaderModuleDesc = {};
+    WGPUShaderModule apiShaderModule = api.GetNewShaderModule();
+    WGPUShaderModule shaderModule = wgpuDeviceCreateShaderModule(device, &shaderModuleDesc);
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiShaderModule));
+    FlushClient();
 
-        WGPUPrimitiveDepthClampingState clientExt2 = {};
-        clientExt2.chain.sType = WGPUSType_Invalid;
-        clientExt2.chain.next = nullptr;
+    WGPUPrimitiveDepthClampingState clientExt2 = {};
+    clientExt2.chain.sType = WGPUSType_Invalid;
+    clientExt2.chain.next = nullptr;
 
-        WGPUPrimitiveDepthClampingState clientExt1 = {};
-        clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
-        clientExt1.chain.next = &clientExt2.chain;
-        clientExt1.clampDepth = true;
+    WGPUPrimitiveDepthClampingState clientExt1 = {};
+    clientExt1.chain.sType = WGPUSType_PrimitiveDepthClampingState;
+    clientExt1.chain.next = &clientExt2.chain;
+    clientExt1.clampDepth = true;
 
-        WGPURenderPipelineDescriptor renderPipelineDesc = {};
-        renderPipelineDesc.vertex.module = shaderModule;
-        renderPipelineDesc.vertex.entryPoint = "main";
-        renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
+    WGPURenderPipelineDescriptor renderPipelineDesc = {};
+    renderPipelineDesc.vertex.module = shaderModule;
+    renderPipelineDesc.vertex.entryPoint = "main";
+    renderPipelineDesc.primitive.nextInChain = &clientExt1.chain;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
-                        serverDesc->primitive.nextInChain);
-                    EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
-                    EXPECT_EQ(ext->clampDepth, true);
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain);
+                EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
 
-                    EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid);
-                    EXPECT_EQ(ext->chain.next->next, nullptr);
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
+                EXPECT_EQ(ext->chain.next->sType, WGPUSType_Invalid);
+                EXPECT_EQ(ext->chain.next->next, nullptr);
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
 
-        // Swap the order of the chained structs.
-        renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
-        clientExt2.chain.next = &clientExt1.chain;
-        clientExt1.chain.next = nullptr;
+    // Swap the order of the chained structs.
+    renderPipelineDesc.primitive.nextInChain = &clientExt2.chain;
+    clientExt2.chain.next = &clientExt1.chain;
+    clientExt1.chain.next = nullptr;
 
-        wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
-        EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
-            .WillOnce(Invoke(
-                [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
-                    EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
+    wgpuDeviceCreateRenderPipeline(device, &renderPipelineDesc);
+    EXPECT_CALL(api, DeviceCreateRenderPipeline(apiDevice, NotNull()))
+        .WillOnce(Invoke(
+            [&](Unused, const WGPURenderPipelineDescriptor* serverDesc) -> WGPURenderPipeline {
+                EXPECT_EQ(serverDesc->primitive.nextInChain->sType, WGPUSType_Invalid);
 
-                    const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
-                        serverDesc->primitive.nextInChain->next);
-                    EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
-                    EXPECT_EQ(ext->clampDepth, true);
-                    EXPECT_EQ(ext->chain.next, nullptr);
+                const auto* ext = reinterpret_cast<const WGPUPrimitiveDepthClampingState*>(
+                    serverDesc->primitive.nextInChain->next);
+                EXPECT_EQ(ext->chain.sType, clientExt1.chain.sType);
+                EXPECT_EQ(ext->clampDepth, true);
+                EXPECT_EQ(ext->chain.next, nullptr);
 
-                    return api.GetNewRenderPipeline();
-                }));
-        FlushClient();
-    }
+                return api.GetNewRenderPipeline();
+            }));
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
index e12bdd6..f2a50da 100644
--- a/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectDeviceTests.cpp
@@ -19,271 +19,259 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Exactly;
-    using testing::Mock;
-    using testing::Return;
+using testing::_;
+using testing::Exactly;
+using testing::Mock;
+using testing::Return;
 
-    class WireInjectDeviceTests : public WireTest {
-      public:
-        WireInjectDeviceTests() {
-        }
-        ~WireInjectDeviceTests() override = default;
-    };
+class WireInjectDeviceTests : public WireTest {
+  public:
+    WireInjectDeviceTests() {}
+    ~WireInjectDeviceTests() override = default;
+};
 
-    // Test that reserving and injecting a device makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectDeviceTests, CallAfterReserveInject) {
+// Test that reserving and injecting a device makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectDeviceTests, CallAfterReserveInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    WGPUBufferDescriptor bufferDesc = {};
+    wgpuDeviceCreateBuffer(reservation.device, &bufferDesc);
+    WGPUBuffer serverBuffer = api.GetNewBuffer();
+    EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) {
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.device, reservation2.device);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectDeviceTests, InjectExistingID) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that the server only borrows the device and does a single reference-release
+TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    // Injecting the device adds a reference
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    // Releasing the device removes a single reference and clears its error callbacks.
+    wgpuDeviceRelease(reservation.device);
+    EXPECT_CALL(api, DeviceRelease(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that it is an error to get the primary queue of a device before it has been
+// injected on the server.
+TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    wgpuDeviceGetQueue(reservation.device);
+    FlushClient(false);
+}
+
+// Test that it is valid to get the primary queue of a device after it has been
+// injected on the server.
+TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
+    ReservedDevice reservation = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
+
+    wgpuDeviceGetQueue(reservation.device);
+
+    WGPUQueue apiQueue = api.GetNewQueue();
+    EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(Exactly(1));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
+        .Times(Exactly(1));
+}
+
+// Test that the list of live devices can be reflected using GetDevice.
+TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
+    // Reserve two devices.
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    // Inject both devices.
+
+    WGPUDevice serverDevice1 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
+
+    WGPUDevice serverDevice2 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice2));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
+
+    // Test that both devices can be reflected.
+    ASSERT_EQ(serverDevice1, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
+    ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
+
+    // Release the first device
+    wgpuDeviceRelease(reservation1.device);
+    EXPECT_CALL(api, DeviceRelease(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    FlushClient();
+
+    // The first device should no longer reflect, but the second should
+    ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
+    ASSERT_EQ(serverDevice2, GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
+}
+
+// This is a regression test where a second device reservation invalidated pointers into the
+// KnownObjects std::vector of devices. The fix was to store pointers to heap allocated
+// objects instead.
+TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
+    // Reserve one device, inject it, and get the primary queue.
+    ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice1 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice1));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
+
+    WGPUCommandEncoder commandEncoder =
+        wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr);
+
+    WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder();
+    EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _))
+        .WillOnce(Return(serverCommandEncoder));
+    FlushClient();
+
+    // Reserve a second device, and inject it.
+    ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
+
+    WGPUDevice serverDevice2 = api.GetNewDevice();
+    EXPECT_CALL(api, DeviceReference(serverDevice2));
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
+    ASSERT_TRUE(
+        GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
+
+    // Release the encoder. This should work without error because it stores a stable
+    // pointer to its device's list of child objects. On destruction, it removes itself from the
+    // list.
+    wgpuCommandEncoderRelease(commandEncoder);
+    EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder));
+    FlushClient();
+
+    // Called on shutdown.
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
+    EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
+}
+
+// Test that a device reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        WGPUBufferDescriptor bufferDesc = {};
-        wgpuDeviceCreateBuffer(reservation.device, &bufferDesc);
-        WGPUBuffer serverBuffer = api.GetNewBuffer();
-        EXPECT_CALL(api, DeviceCreateBuffer(serverDevice, _)).WillOnce(Return(serverBuffer));
-        FlushClient();
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-    }
-
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectDeviceTests, ReserveDifferentIDs) {
-        ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
-        ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
-
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.device, reservation2.device);
-    }
-
-    // Test that injecting the same id without a destroy first fails.
-    TEST_F(WireInjectDeviceTests, InjectExistingID) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-    }
-
-    // Test that the server only borrows the device and does a single reference-release
-    TEST_F(WireInjectDeviceTests, InjectedDeviceLifetime) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        // Injecting the device adds a reference
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        // Releasing the device removes a single reference and clears its error callbacks.
         wgpuDeviceRelease(reservation.device);
-        EXPECT_CALL(api, DeviceRelease(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr)).Times(1);
-        FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
-    }
-
-    // Test that it is an error to get the primary queue of a device before it has been
-    // injected on the server.
-    TEST_F(WireInjectDeviceTests, GetQueueBeforeInject) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        wgpuDeviceGetQueue(reservation.device);
         FlushClient(false);
     }
 
-    // Test that it is valid to get the primary queue of a device after it has been
-    // injected on the server.
-    TEST_F(WireInjectDeviceTests, GetQueueAfterInject) {
-        ReservedDevice reservation = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice, reservation.id, reservation.generation));
-
-        wgpuDeviceGetQueue(reservation.device);
-
-        WGPUQueue apiQueue = api.GetNewQueue();
-        EXPECT_CALL(api, DeviceGetQueue(serverDevice)).WillOnce(Return(apiQueue));
-        FlushClient();
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice, nullptr, nullptr))
-            .Times(Exactly(1));
-    }
-
-    // Test that the list of live devices can be reflected using GetDevice.
-    TEST_F(WireInjectDeviceTests, ReflectLiveDevices) {
-        // Reserve two devices.
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
+        GetWireClient()->ReclaimDeviceReservation(reservation1);
+
         ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
 
-        // Inject both devices.
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-        WGPUDevice serverDevice1 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice1));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
-
-        WGPUDevice serverDevice2 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice2));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
-
-        // Test that both devices can be reflected.
-        ASSERT_EQ(serverDevice1,
-                  GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
-        ASSERT_EQ(serverDevice2,
-                  GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
-
-        // Release the first device
-        wgpuDeviceRelease(reservation1.device);
-        EXPECT_CALL(api, DeviceRelease(serverDevice1));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
+        // No errors should occur.
         FlushClient();
-
-        // The first device should no longer reflect, but the second should
-        ASSERT_EQ(nullptr, GetWireServer()->GetDevice(reservation1.id, reservation1.generation));
-        ASSERT_EQ(serverDevice2,
-                  GetWireServer()->GetDevice(reservation2.id, reservation2.generation));
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
     }
-
-    // This is a regression test where a second device reservation invalidated pointers into the
-    // KnownObjects std::vector of devices. The fix was to store pointers to heap allocated
-    // objects instead.
-    TEST_F(WireInjectDeviceTests, TrackChildObjectsWithTwoReservedDevices) {
-        // Reserve one device, inject it, and get the primary queue.
-        ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice1 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice1));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice1, reservation1.id, reservation1.generation));
-
-        WGPUCommandEncoder commandEncoder =
-            wgpuDeviceCreateCommandEncoder(reservation1.device, nullptr);
-
-        WGPUCommandEncoder serverCommandEncoder = api.GetNewCommandEncoder();
-        EXPECT_CALL(api, DeviceCreateCommandEncoder(serverDevice1, _))
-            .WillOnce(Return(serverCommandEncoder));
-        FlushClient();
-
-        // Reserve a second device, and inject it.
-        ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
-
-        WGPUDevice serverDevice2 = api.GetNewDevice();
-        EXPECT_CALL(api, DeviceReference(serverDevice2));
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, _, _));
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, _, _));
-        ASSERT_TRUE(
-            GetWireServer()->InjectDevice(serverDevice2, reservation2.id, reservation2.generation));
-
-        // Release the encoder. This should work without error because it stores a stable
-        // pointer to its device's list of child objects. On destruction, it removes itself from the
-        // list.
-        wgpuCommandEncoderRelease(commandEncoder);
-        EXPECT_CALL(api, CommandEncoderRelease(serverCommandEncoder));
-        FlushClient();
-
-        // Called on shutdown.
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice1, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice1, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice1, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetUncapturedErrorCallback(serverDevice2, nullptr, nullptr))
-            .Times(1);
-        EXPECT_CALL(api, OnDeviceSetLoggingCallback(serverDevice2, nullptr, nullptr)).Times(1);
-        EXPECT_CALL(api, OnDeviceSetDeviceLostCallback(serverDevice2, nullptr, nullptr)).Times(1);
-    }
-
-    // Test that a device reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectDeviceTests, ReclaimDeviceReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedDevice reservation = GetWireClient()->ReserveDevice();
-            wgpuDeviceRelease(reservation.device);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedDevice reservation1 = GetWireClient()->ReserveDevice();
-            GetWireClient()->ReclaimDeviceReservation(reservation1);
-
-            ReservedDevice reservation2 = GetWireClient()->ReserveDevice();
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
index 64c8b1b..f9fdc52 100644
--- a/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectInstanceTests.cpp
@@ -17,106 +17,107 @@
 #include "dawn/wire/WireClient.h"
 #include "dawn/wire/WireServer.h"
 
-namespace dawn::wire { namespace {
+namespace dawn::wire {
+namespace {
 
-    using testing::Mock;
-    using testing::NotNull;
-    using testing::Return;
+using testing::Mock;
+using testing::NotNull;
+using testing::Return;
 
-    class WireInjectInstanceTests : public WireTest {
-      public:
-        WireInjectInstanceTests() {
-        }
-        ~WireInjectInstanceTests() override = default;
-    };
+class WireInjectInstanceTests : public WireTest {
+  public:
+    WireInjectInstanceTests() {}
+    ~WireInjectInstanceTests() override = default;
+};
 
-    // Test that reserving and injecting an instance makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectInstanceTests, CallAfterReserveInject) {
+// Test that reserving and injecting an instance makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectInstanceTests, CallAfterReserveInject) {
+    ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+    WGPUInstance serverInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(serverInstance));
+    ASSERT_TRUE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+
+    WGPUSurfaceDescriptor surfaceDesc = {};
+    wgpuInstanceCreateSurface(reservation.instance, &surfaceDesc);
+    WGPUSurface serverSurface = api.GetNewSurface();
+    EXPECT_CALL(api, InstanceCreateSurface(serverInstance, NotNull()))
+        .WillOnce(Return(serverSurface));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectInstanceTests, ReserveDifferentIDs) {
+    ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
+    ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.instance, reservation2.instance);
+}
+
+// Test that injecting the same id fails.
+TEST_F(WireInjectInstanceTests, InjectExistingID) {
+    ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+    WGPUInstance serverInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(serverInstance));
+    ASSERT_TRUE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+}
+
+// Test that the server only borrows the instance and does a single reference-release
+TEST_F(WireInjectInstanceTests, InjectedInstanceLifetime) {
+    ReservedInstance reservation = GetWireClient()->ReserveInstance();
+
+    // Injecting the instance adds a reference
+    WGPUInstance serverInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(serverInstance));
+    ASSERT_TRUE(
+        GetWireServer()->InjectInstance(serverInstance, reservation.id, reservation.generation));
+
+    // Releasing the instance removes a single reference.
+    wgpuInstanceRelease(reservation.instance);
+    EXPECT_CALL(api, InstanceRelease(serverInstance));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that a device reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectInstanceTests, ReclaimInstanceReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedInstance reservation = GetWireClient()->ReserveInstance();
-
-        WGPUInstance serverInstance = api.GetNewInstance();
-        EXPECT_CALL(api, InstanceReference(serverInstance));
-        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                    reservation.generation));
-
-        WGPUSurfaceDescriptor surfaceDesc = {};
-        wgpuInstanceCreateSurface(reservation.instance, &surfaceDesc);
-        WGPUSurface serverSurface = api.GetNewSurface();
-        EXPECT_CALL(api, InstanceCreateSurface(serverInstance, NotNull()))
-            .WillOnce(Return(serverSurface));
-        FlushClient();
+        wgpuInstanceRelease(reservation.instance);
+        FlushClient(false);
     }
 
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectInstanceTests, ReserveDifferentIDs) {
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
+        GetWireClient()->ReclaimInstanceReservation(reservation1);
+
         ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
 
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.instance, reservation2.instance);
-    }
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-    // Test that injecting the same id fails.
-    TEST_F(WireInjectInstanceTests, InjectExistingID) {
-        ReservedInstance reservation = GetWireClient()->ReserveInstance();
-
-        WGPUInstance serverInstance = api.GetNewInstance();
-        EXPECT_CALL(api, InstanceReference(serverInstance));
-        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                    reservation.generation));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                     reservation.generation));
-    }
-
-    // Test that the server only borrows the instance and does a single reference-release
-    TEST_F(WireInjectInstanceTests, InjectedInstanceLifetime) {
-        ReservedInstance reservation = GetWireClient()->ReserveInstance();
-
-        // Injecting the instance adds a reference
-        WGPUInstance serverInstance = api.GetNewInstance();
-        EXPECT_CALL(api, InstanceReference(serverInstance));
-        ASSERT_TRUE(GetWireServer()->InjectInstance(serverInstance, reservation.id,
-                                                    reservation.generation));
-
-        // Releasing the instance removes a single reference.
-        wgpuInstanceRelease(reservation.instance);
-        EXPECT_CALL(api, InstanceRelease(serverInstance));
+        // No errors should occur.
         FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
     }
+}
 
-    // Test that a device reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectInstanceTests, ReclaimInstanceReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedInstance reservation = GetWireClient()->ReserveInstance();
-            wgpuInstanceRelease(reservation.instance);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedInstance reservation1 = GetWireClient()->ReserveInstance();
-            GetWireClient()->ReclaimInstanceReservation(reservation1);
-
-            ReservedInstance reservation2 = GetWireClient()->ReserveInstance();
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
-
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::wire::
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
index 6ba058b..f06d53e 100644
--- a/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectSwapChainTests.cpp
@@ -19,101 +19,100 @@
 
 namespace dawn::wire {
 
-    using testing::Mock;
+using testing::Mock;
 
-    class WireInjectSwapChainTests : public WireTest {
-      public:
-        WireInjectSwapChainTests() {
-        }
-        ~WireInjectSwapChainTests() override = default;
-    };
+class WireInjectSwapChainTests : public WireTest {
+  public:
+    WireInjectSwapChainTests() {}
+    ~WireInjectSwapChainTests() override = default;
+};
 
-    // Test that reserving and injecting a swapchain makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) {
+// Test that reserving and injecting a swapchain makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectSwapChainTests, CallAfterReserveInject) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    wgpuSwapChainPresent(reservation.swapchain);
+    EXPECT_CALL(api, SwapChainPresent(apiSwapchain));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) {
+    ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
+    ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.swapchain, reservation2.swapchain);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectSwapChainTests, InjectExistingID) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                  reservation.generation, reservation.deviceId,
+                                                  reservation.deviceGeneration));
+}
+
+// Test that the server only borrows the swapchain and does a single reference-release
+TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) {
+    ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
+
+    // Injecting the swapchain adds a reference
+    WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
+    EXPECT_CALL(api, SwapChainReference(apiSwapchain));
+    ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
+                                                 reservation.generation, reservation.deviceId,
+                                                 reservation.deviceGeneration));
+
+    // Releasing the swapchain removes a single reference.
+    wgpuSwapChainRelease(reservation.swapchain);
+    EXPECT_CALL(api, SwapChainRelease(apiSwapchain));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that a swapchain reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-
-        WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
-        EXPECT_CALL(api, SwapChainReference(apiSwapchain));
-        ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                     reservation.generation, reservation.deviceId,
-                                                     reservation.deviceGeneration));
-
-        wgpuSwapChainPresent(reservation.swapchain);
-        EXPECT_CALL(api, SwapChainPresent(apiSwapchain));
-        FlushClient();
+        wgpuSwapChainRelease(reservation.swapchain);
+        FlushClient(false);
     }
 
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectSwapChainTests, ReserveDifferentIDs) {
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
+        GetWireClient()->ReclaimSwapChainReservation(reservation1);
+
         ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
 
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.swapchain, reservation2.swapchain);
-    }
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-    // Test that injecting the same id without a destroy first fails.
-    TEST_F(WireInjectSwapChainTests, InjectExistingID) {
-        ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-
-        WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
-        EXPECT_CALL(api, SwapChainReference(apiSwapchain));
-        ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                     reservation.generation, reservation.deviceId,
-                                                     reservation.deviceGeneration));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                      reservation.generation, reservation.deviceId,
-                                                      reservation.deviceGeneration));
-    }
-
-    // Test that the server only borrows the swapchain and does a single reference-release
-    TEST_F(WireInjectSwapChainTests, InjectedSwapChainLifetime) {
-        ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-
-        // Injecting the swapchain adds a reference
-        WGPUSwapChain apiSwapchain = api.GetNewSwapChain();
-        EXPECT_CALL(api, SwapChainReference(apiSwapchain));
-        ASSERT_TRUE(GetWireServer()->InjectSwapChain(apiSwapchain, reservation.id,
-                                                     reservation.generation, reservation.deviceId,
-                                                     reservation.deviceGeneration));
-
-        // Releasing the swapchain removes a single reference.
-        wgpuSwapChainRelease(reservation.swapchain);
-        EXPECT_CALL(api, SwapChainRelease(apiSwapchain));
+        // No errors should occur.
         FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
     }
-
-    // Test that a swapchain reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectSwapChainTests, ReclaimSwapChainReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedSwapChain reservation = GetWireClient()->ReserveSwapChain(device);
-            wgpuSwapChainRelease(reservation.swapchain);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedSwapChain reservation1 = GetWireClient()->ReserveSwapChain(device);
-            GetWireClient()->ReclaimSwapChainReservation(reservation1);
-
-            ReservedSwapChain reservation2 = GetWireClient()->ReserveSwapChain(device);
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
index a15fd6c..baabaa5 100644
--- a/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInjectTextureTests.cpp
@@ -19,104 +19,99 @@
 
 namespace dawn::wire {
 
-    using testing::Mock;
-    using testing::Return;
+using testing::Mock;
+using testing::Return;
 
-    class WireInjectTextureTests : public WireTest {
-      public:
-        WireInjectTextureTests() {
-        }
-        ~WireInjectTextureTests() override = default;
-    };
+class WireInjectTextureTests : public WireTest {
+  public:
+    WireInjectTextureTests() {}
+    ~WireInjectTextureTests() override = default;
+};
 
-    // Test that reserving and injecting a texture makes calls on the client object forward to the
-    // server object correctly.
-    TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
+// Test that reserving and injecting a texture makes calls on the client object forward to the
+// server object correctly.
+TEST_F(WireInjectTextureTests, CallAfterReserveInject) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    wgpuTextureCreateView(reservation.texture, nullptr);
+    WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
+    EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr)).WillOnce(Return(apiPlaceholderView));
+    FlushClient();
+}
+
+// Test that reserve correctly returns different IDs each time.
+TEST_F(WireInjectTextureTests, ReserveDifferentIDs) {
+    ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
+    ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
+
+    ASSERT_NE(reservation1.id, reservation2.id);
+    ASSERT_NE(reservation1.texture, reservation2.texture);
+}
+
+// Test that injecting the same id without a destroy first fails.
+TEST_F(WireInjectTextureTests, InjectExistingID) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    // ID already in use, call fails.
+    ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                                reservation.deviceId,
+                                                reservation.deviceGeneration));
+}
+
+// Test that the server only borrows the texture and does a single reference-release
+TEST_F(WireInjectTextureTests, InjectedTextureLifetime) {
+    ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
+
+    // Injecting the texture adds a reference
+    WGPUTexture apiTexture = api.GetNewTexture();
+    EXPECT_CALL(api, TextureReference(apiTexture));
+    ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id, reservation.generation,
+                                               reservation.deviceId, reservation.deviceGeneration));
+
+    // Releasing the texture removes a single reference.
+    wgpuTextureRelease(reservation.texture);
+    EXPECT_CALL(api, TextureRelease(apiTexture));
+    FlushClient();
+
+    // Deleting the server doesn't release a second reference.
+    DeleteServer();
+    Mock::VerifyAndClearExpectations(&api);
+}
+
+// Test that a texture reservation can be reclaimed. This is necessary to
+// avoid leaking ObjectIDs for reservations that are never injected.
+TEST_F(WireInjectTextureTests, ReclaimTextureReservation) {
+    // Test that doing a reservation and full release is an error.
+    {
         ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-
-        WGPUTexture apiTexture = api.GetNewTexture();
-        EXPECT_CALL(api, TextureReference(apiTexture));
-        ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                   reservation.generation, reservation.deviceId,
-                                                   reservation.deviceGeneration));
-
-        wgpuTextureCreateView(reservation.texture, nullptr);
-        WGPUTextureView apiPlaceholderView = api.GetNewTextureView();
-        EXPECT_CALL(api, TextureCreateView(apiTexture, nullptr))
-            .WillOnce(Return(apiPlaceholderView));
-        FlushClient();
+        wgpuTextureRelease(reservation.texture);
+        FlushClient(false);
     }
 
-    // Test that reserve correctly returns different IDs each time.
-    TEST_F(WireInjectTextureTests, ReserveDifferentIDs) {
+    // Test that doing a reservation and then reclaiming it recycles the ID.
+    {
         ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
+        GetWireClient()->ReclaimTextureReservation(reservation1);
+
         ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
 
-        ASSERT_NE(reservation1.id, reservation2.id);
-        ASSERT_NE(reservation1.texture, reservation2.texture);
-    }
+        // The ID is the same, but the generation is still different.
+        ASSERT_EQ(reservation1.id, reservation2.id);
+        ASSERT_NE(reservation1.generation, reservation2.generation);
 
-    // Test that injecting the same id without a destroy first fails.
-    TEST_F(WireInjectTextureTests, InjectExistingID) {
-        ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-
-        WGPUTexture apiTexture = api.GetNewTexture();
-        EXPECT_CALL(api, TextureReference(apiTexture));
-        ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                   reservation.generation, reservation.deviceId,
-                                                   reservation.deviceGeneration));
-
-        // ID already in use, call fails.
-        ASSERT_FALSE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                    reservation.generation, reservation.deviceId,
-                                                    reservation.deviceGeneration));
-    }
-
-    // Test that the server only borrows the texture and does a single reference-release
-    TEST_F(WireInjectTextureTests, InjectedTextureLifetime) {
-        ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-
-        // Injecting the texture adds a reference
-        WGPUTexture apiTexture = api.GetNewTexture();
-        EXPECT_CALL(api, TextureReference(apiTexture));
-        ASSERT_TRUE(GetWireServer()->InjectTexture(apiTexture, reservation.id,
-                                                   reservation.generation, reservation.deviceId,
-                                                   reservation.deviceGeneration));
-
-        // Releasing the texture removes a single reference.
-        wgpuTextureRelease(reservation.texture);
-        EXPECT_CALL(api, TextureRelease(apiTexture));
+        // No errors should occur.
         FlushClient();
-
-        // Deleting the server doesn't release a second reference.
-        DeleteServer();
-        Mock::VerifyAndClearExpectations(&api);
     }
-
-    // Test that a texture reservation can be reclaimed. This is necessary to
-    // avoid leaking ObjectIDs for reservations that are never injected.
-    TEST_F(WireInjectTextureTests, ReclaimTextureReservation) {
-        // Test that doing a reservation and full release is an error.
-        {
-            ReservedTexture reservation = GetWireClient()->ReserveTexture(device);
-            wgpuTextureRelease(reservation.texture);
-            FlushClient(false);
-        }
-
-        // Test that doing a reservation and then reclaiming it recycles the ID.
-        {
-            ReservedTexture reservation1 = GetWireClient()->ReserveTexture(device);
-            GetWireClient()->ReclaimTextureReservation(reservation1);
-
-            ReservedTexture reservation2 = GetWireClient()->ReserveTexture(device);
-
-            // The ID is the same, but the generation is still different.
-            ASSERT_EQ(reservation1.id, reservation2.id);
-            ASSERT_NE(reservation1.generation, reservation2.generation);
-
-            // No errors should occur.
-            FlushClient();
-        }
-    }
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireInstanceTests.cpp b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
index 9ef1e29..16dea28 100644
--- a/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireInstanceTests.cpp
@@ -23,273 +23,274 @@
 
 #include "webgpu/webgpu_cpp.h"
 
-namespace dawn::wire { namespace {
+namespace dawn::wire {
+namespace {
 
-    using testing::Invoke;
-    using testing::InvokeWithoutArgs;
-    using testing::MockCallback;
-    using testing::NotNull;
-    using testing::Return;
-    using testing::SetArgPointee;
-    using testing::StrEq;
-    using testing::WithArg;
+using testing::Invoke;
+using testing::InvokeWithoutArgs;
+using testing::MockCallback;
+using testing::NotNull;
+using testing::Return;
+using testing::SetArgPointee;
+using testing::StrEq;
+using testing::WithArg;
 
-    class WireInstanceBasicTest : public WireTest {};
-    class WireInstanceTests : public WireTest {
-      protected:
-        void SetUp() override {
-            WireTest::SetUp();
+class WireInstanceBasicTest : public WireTest {};
+class WireInstanceTests : public WireTest {
+  protected:
+    void SetUp() override {
+        WireTest::SetUp();
 
-            auto reservation = GetWireClient()->ReserveInstance();
-            instance = wgpu::Instance::Acquire(reservation.instance);
-
-            apiInstance = api.GetNewInstance();
-            EXPECT_CALL(api, InstanceReference(apiInstance));
-            EXPECT_TRUE(GetWireServer()->InjectInstance(apiInstance, reservation.id,
-                                                        reservation.generation));
-        }
-
-        void TearDown() override {
-            instance = nullptr;
-            WireTest::TearDown();
-        }
-
-        wgpu::Instance instance;
-        WGPUInstance apiInstance;
-    };
-
-    // Test that an Instance can be reserved and injected into the wire.
-    TEST_F(WireInstanceBasicTest, ReserveAndInject) {
         auto reservation = GetWireClient()->ReserveInstance();
-        wgpu::Instance instance = wgpu::Instance::Acquire(reservation.instance);
+        instance = wgpu::Instance::Acquire(reservation.instance);
 
-        WGPUInstance apiInstance = api.GetNewInstance();
+        apiInstance = api.GetNewInstance();
         EXPECT_CALL(api, InstanceReference(apiInstance));
         EXPECT_TRUE(
             GetWireServer()->InjectInstance(apiInstance, reservation.id, reservation.generation));
+    }
 
+    void TearDown() override {
         instance = nullptr;
+        WireTest::TearDown();
+    }
 
-        EXPECT_CALL(api, InstanceRelease(apiInstance));
+    wgpu::Instance instance;
+    WGPUInstance apiInstance;
+};
+
+// Test that an Instance can be reserved and injected into the wire.
+TEST_F(WireInstanceBasicTest, ReserveAndInject) {
+    auto reservation = GetWireClient()->ReserveInstance();
+    wgpu::Instance instance = wgpu::Instance::Acquire(reservation.instance);
+
+    WGPUInstance apiInstance = api.GetNewInstance();
+    EXPECT_CALL(api, InstanceReference(apiInstance));
+    EXPECT_TRUE(
+        GetWireServer()->InjectInstance(apiInstance, reservation.id, reservation.generation));
+
+    instance = nullptr;
+
+    EXPECT_CALL(api, InstanceRelease(apiInstance));
+    FlushClient();
+}
+
+// Test that RequestAdapterOptions are passed from the client to the server.
+TEST_F(WireInstanceTests, RequestAdapterPassesOptions) {
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+
+    for (wgpu::PowerPreference powerPreference :
+         {wgpu::PowerPreference::LowPower, wgpu::PowerPreference::HighPerformance}) {
+        wgpu::RequestAdapterOptions options = {};
+        options.powerPreference = powerPreference;
+
+        instance.RequestAdapter(&options, cb.Callback(), userdata);
+
+        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+            .WillOnce(WithArg<1>(Invoke([&](const WGPURequestAdapterOptions* apiOptions) {
+                EXPECT_EQ(apiOptions->powerPreference,
+                          static_cast<WGPUPowerPreference>(options.powerPreference));
+                EXPECT_EQ(apiOptions->forceFallbackAdapter, options.forceFallbackAdapter);
+            })));
         FlushClient();
     }
 
-    // Test that RequestAdapterOptions are passed from the client to the server.
-    TEST_F(WireInstanceTests, RequestAdapterPassesOptions) {
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
+    // Delete the instance now, or it'll call the mock callback after it's deleted.
+    instance = nullptr;
+}
 
-        for (wgpu::PowerPreference powerPreference :
-             {wgpu::PowerPreference::LowPower, wgpu::PowerPreference::HighPerformance}) {
-            wgpu::RequestAdapterOptions options = {};
-            options.powerPreference = powerPreference;
+// Test that RequestAdapter forwards the adapter information to the client.
+TEST_F(WireInstanceTests, RequestAdapterSuccess) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-            instance.RequestAdapter(&options, cb.Callback(), userdata);
+    wgpu::AdapterProperties fakeProperties = {};
+    fakeProperties.vendorID = 0x134;
+    fakeProperties.deviceID = 0x918;
+    fakeProperties.name = "fake adapter";
+    fakeProperties.driverDescription = "hello world";
+    fakeProperties.backendType = wgpu::BackendType::D3D12;
+    fakeProperties.adapterType = wgpu::AdapterType::IntegratedGPU;
 
-            EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-                .WillOnce(WithArg<1>(Invoke([&](const WGPURequestAdapterOptions* apiOptions) {
-                    EXPECT_EQ(apiOptions->powerPreference,
-                              static_cast<WGPUPowerPreference>(options.powerPreference));
-                    EXPECT_EQ(apiOptions->forceFallbackAdapter, options.forceFallbackAdapter);
+    wgpu::SupportedLimits fakeLimits = {};
+    fakeLimits.limits.maxTextureDimension1D = 433;
+    fakeLimits.limits.maxVertexAttributes = 1243;
+
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        wgpu::FeatureName::Depth32FloatStencil8,
+        wgpu::FeatureName::TextureCompressionBC,
+    };
+
+    // Expect the server to receive the message. Then, mock a fake reply.
+    WGPUAdapter apiAdapter = api.GetNewAdapter();
+    EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                .WillOnce(
+                    SetArgPointee<1>(*reinterpret_cast<WGPUAdapterProperties*>(&fakeProperties)));
+
+            EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                    *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
+                    return true;
                 })));
-            FlushClient();
-        }
 
-        // Delete the instance now, or it'll call the mock callback after it's deleted.
-        instance = nullptr;
-    }
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
 
-    // Test that RequestAdapter forwards the adapter information to the client.
-    TEST_F(WireInstanceTests, RequestAdapterSuccess) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+            api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Success,
+                                                   apiAdapter, nullptr);
+        }));
+    FlushClient();
 
-        wgpu::AdapterProperties fakeProperties = {};
-        fakeProperties.vendorID = 0x134;
-        fakeProperties.deviceID = 0x918;
-        fakeProperties.name = "fake adapter";
-        fakeProperties.driverDescription = "hello world";
-        fakeProperties.backendType = wgpu::BackendType::D3D12;
-        fakeProperties.adapterType = wgpu::AdapterType::IntegratedGPU;
+    // Expect the callback in the client and all the adapter information to match.
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+        .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
+            wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
 
-        wgpu::SupportedLimits fakeLimits = {};
-        fakeLimits.limits.maxTextureDimension1D = 433;
-        fakeLimits.limits.maxVertexAttributes = 1243;
+            wgpu::AdapterProperties properties;
+            adapter.GetProperties(&properties);
+            EXPECT_EQ(properties.vendorID, fakeProperties.vendorID);
+            EXPECT_EQ(properties.deviceID, fakeProperties.deviceID);
+            EXPECT_STREQ(properties.name, fakeProperties.name);
+            EXPECT_STREQ(properties.driverDescription, fakeProperties.driverDescription);
+            EXPECT_EQ(properties.backendType, fakeProperties.backendType);
+            EXPECT_EQ(properties.adapterType, fakeProperties.adapterType);
 
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            wgpu::FeatureName::Depth32FloatStencil8,
-            wgpu::FeatureName::TextureCompressionBC,
-        };
+            wgpu::SupportedLimits limits;
+            EXPECT_TRUE(adapter.GetLimits(&limits));
+            EXPECT_EQ(limits.limits.maxTextureDimension1D, fakeLimits.limits.maxTextureDimension1D);
+            EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
 
-        // Expect the server to receive the message. Then, mock a fake reply.
-        WGPUAdapter apiAdapter = api.GetNewAdapter();
-        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
-                    .WillOnce(SetArgPointee<1>(
-                        *reinterpret_cast<WGPUAdapterProperties*>(&fakeProperties)));
+            std::vector<wgpu::FeatureName> features;
+            features.resize(adapter.EnumerateFeatures(nullptr));
+            ASSERT_EQ(features.size(), fakeFeatures.size());
+            EXPECT_EQ(adapter.EnumerateFeatures(&features[0]), features.size());
 
-                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                        *reinterpret_cast<wgpu::SupportedLimits*>(limits) = fakeLimits;
-                        return true;
-                    })));
+            std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
+            for (wgpu::FeatureName feature : features) {
+                EXPECT_EQ(featureSet.erase(feature), 1u);
+            }
+        })));
+    FlushServer();
+}
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
+// Test that features returned by the implementation that aren't supported
+// in the wire are not exposed.
+TEST_F(WireInstanceTests, RequestAdapterWireLacksFeatureSupport) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-                api.CallInstanceRequestAdapterCallback(
-                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
-            }));
-        FlushClient();
+    std::initializer_list<wgpu::FeatureName> fakeFeatures = {
+        wgpu::FeatureName::Depth24UnormStencil8,
+        // Some value that is not a valid feature
+        static_cast<wgpu::FeatureName>(-2),
+    };
 
-        // Expect the callback in the client and all the adapter information to match.
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
-            .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
-                wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    // Expect the server to receive the message. Then, mock a fake reply.
+    WGPUAdapter apiAdapter = api.GetNewAdapter();
+    EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
+                    *properties = {};
+                    properties->name = "";
+                    properties->driverDescription = "";
+                })));
 
-                wgpu::AdapterProperties properties;
-                adapter.GetProperties(&properties);
-                EXPECT_EQ(properties.vendorID, fakeProperties.vendorID);
-                EXPECT_EQ(properties.deviceID, fakeProperties.deviceID);
-                EXPECT_STREQ(properties.name, fakeProperties.name);
-                EXPECT_STREQ(properties.driverDescription, fakeProperties.driverDescription);
-                EXPECT_EQ(properties.backendType, fakeProperties.backendType);
-                EXPECT_EQ(properties.adapterType, fakeProperties.adapterType);
+            EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
+                    *limits = {};
+                    return true;
+                })));
 
-                wgpu::SupportedLimits limits;
-                EXPECT_TRUE(adapter.GetLimits(&limits));
-                EXPECT_EQ(limits.limits.maxTextureDimension1D,
-                          fakeLimits.limits.maxTextureDimension1D);
-                EXPECT_EQ(limits.limits.maxVertexAttributes, fakeLimits.limits.maxVertexAttributes);
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
+                .WillOnce(Return(fakeFeatures.size()));
 
-                std::vector<wgpu::FeatureName> features;
-                features.resize(adapter.EnumerateFeatures(nullptr));
-                ASSERT_EQ(features.size(), fakeFeatures.size());
-                EXPECT_EQ(adapter.EnumerateFeatures(&features[0]), features.size());
+            EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
+                .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
+                    for (wgpu::FeatureName feature : fakeFeatures) {
+                        *(features++) = static_cast<WGPUFeatureName>(feature);
+                    }
+                    return fakeFeatures.size();
+                })));
+            api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Success,
+                                                   apiAdapter, nullptr);
+        }));
+    FlushClient();
 
-                std::unordered_set<wgpu::FeatureName> featureSet(fakeFeatures);
-                for (wgpu::FeatureName feature : features) {
-                    EXPECT_EQ(featureSet.erase(feature), 1u);
-                }
-            })));
-        FlushServer();
-    }
+    // Expect the callback in the client and all the adapter information to match.
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
+        .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
+            wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
 
-    // Test that features returned by the implementation that aren't supported
-    // in the wire are not exposed.
-    TEST_F(WireInstanceTests, RequestAdapterWireLacksFeatureSupport) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
+            wgpu::FeatureName feature;
+            ASSERT_EQ(adapter.EnumerateFeatures(nullptr), 1u);
+            adapter.EnumerateFeatures(&feature);
 
-        std::initializer_list<wgpu::FeatureName> fakeFeatures = {
-            wgpu::FeatureName::Depth24UnormStencil8,
-            // Some value that is not a valid feature
-            static_cast<wgpu::FeatureName>(-2),
-        };
+            EXPECT_EQ(feature, wgpu::FeatureName::Depth24UnormStencil8);
+        })));
+    FlushServer();
+}
 
-        // Expect the server to receive the message. Then, mock a fake reply.
-        WGPUAdapter apiAdapter = api.GetNewAdapter();
-        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                EXPECT_CALL(api, AdapterGetProperties(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUAdapterProperties* properties) {
-                        *properties = {};
-                        properties->name = "";
-                        properties->driverDescription = "";
-                    })));
+// Test that RequestAdapter errors forward to the client.
+TEST_F(WireInstanceTests, RequestAdapterError) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-                EXPECT_CALL(api, AdapterGetLimits(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUSupportedLimits* limits) {
-                        *limits = {};
-                        return true;
-                    })));
+    // Expect the server to receive the message. Then, mock an error.
+    EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Error,
+                                                   nullptr, "Some error");
+        }));
+    FlushClient();
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, nullptr))
-                    .WillOnce(Return(fakeFeatures.size()));
+    // Expect the callback in the client.
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Error, nullptr, StrEq("Some error"), this))
+        .Times(1);
+    FlushServer();
+}
 
-                EXPECT_CALL(api, AdapterEnumerateFeatures(apiAdapter, NotNull()))
-                    .WillOnce(WithArg<1>(Invoke([&](WGPUFeatureName* features) {
-                        for (wgpu::FeatureName feature : fakeFeatures) {
-                            *(features++) = static_cast<WGPUFeatureName>(feature);
-                        }
-                        return fakeFeatures.size();
-                    })));
-                api.CallInstanceRequestAdapterCallback(
-                    apiInstance, WGPURequestAdapterStatus_Success, apiAdapter, nullptr);
-            }));
-        FlushClient();
+// Test that RequestAdapter receives unknown status if the instance is deleted
+// before the callback happens.
+TEST_F(WireInstanceTests, RequestAdapterInstanceDestroyedBeforeCallback) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-        // Expect the callback in the client and all the adapter information to match.
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Success, NotNull(), nullptr, this))
-            .WillOnce(WithArg<1>(Invoke([&](WGPUAdapter cAdapter) {
-                wgpu::Adapter adapter = wgpu::Adapter::Acquire(cAdapter);
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    instance = nullptr;
+}
 
-                wgpu::FeatureName feature;
-                ASSERT_EQ(adapter.EnumerateFeatures(nullptr), 1u);
-                adapter.EnumerateFeatures(&feature);
+// Test that RequestAdapter receives unknown status if the wire is disconnected
+// before the callback happens.
+TEST_F(WireInstanceTests, RequestAdapterWireDisconnectBeforeCallback) {
+    wgpu::RequestAdapterOptions options = {};
+    MockCallback<WGPURequestAdapterCallback> cb;
+    auto* userdata = cb.MakeUserdata(this);
+    instance.RequestAdapter(&options, cb.Callback(), userdata);
 
-                EXPECT_EQ(feature, wgpu::FeatureName::Depth24UnormStencil8);
-            })));
-        FlushServer();
-    }
+    EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
+    GetWireClient()->Disconnect();
+}
 
-    // Test that RequestAdapter errors forward to the client.
-    TEST_F(WireInstanceTests, RequestAdapterError) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-        // Expect the server to receive the message. Then, mock an error.
-        EXPECT_CALL(api, OnInstanceRequestAdapter(apiInstance, NotNull(), NotNull(), NotNull()))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallInstanceRequestAdapterCallback(apiInstance, WGPURequestAdapterStatus_Error,
-                                                       nullptr, "Some error");
-            }));
-        FlushClient();
-
-        // Expect the callback in the client.
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Error, nullptr, StrEq("Some error"), this))
-            .Times(1);
-        FlushServer();
-    }
-
-    // Test that RequestAdapter receives unknown status if the instance is deleted
-    // before the callback happens.
-    TEST_F(WireInstanceTests, RequestAdapterInstanceDestroyedBeforeCallback) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
-        instance = nullptr;
-    }
-
-    // Test that RequestAdapter receives unknown status if the wire is disconnected
-    // before the callback happens.
-    TEST_F(WireInstanceTests, RequestAdapterWireDisconnectBeforeCallback) {
-        wgpu::RequestAdapterOptions options = {};
-        MockCallback<WGPURequestAdapterCallback> cb;
-        auto* userdata = cb.MakeUserdata(this);
-        instance.RequestAdapter(&options, cb.Callback(), userdata);
-
-        EXPECT_CALL(cb, Call(WGPURequestAdapterStatus_Unknown, nullptr, NotNull(), this)).Times(1);
-        GetWireClient()->Disconnect();
-    }
-
-    // TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
-    // NOLINTNEXTLINE(readability/namespace)
-}}  // namespace dawn::wire::
+// TODO(https://crbug.com/dawn/1381) Remove when namespaces are not indented.
+// NOLINTNEXTLINE(readability/namespace)
+}  // namespace
+}  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
index 484c867..b2ce580 100644
--- a/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireMemoryTransferServiceTests.cpp
@@ -21,1061 +21,1046 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Eq;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
-    using testing::Pointee;
-    using testing::Return;
-    using testing::StrictMock;
-    using testing::WithArg;
+using testing::_;
+using testing::Eq;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Pointee;
+using testing::Return;
+using testing::StrictMock;
+using testing::WithArg;
 
-    namespace {
+namespace {
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockBufferMapCallback {
-          public:
-            MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockBufferMapCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUBufferMapAsyncStatus status, void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
-        void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
-            mockBufferMapCallback->Call(status, userdata);
-        }
+std::unique_ptr<StrictMock<MockBufferMapCallback>> mockBufferMapCallback;
+void ToMockBufferMapCallback(WGPUBufferMapAsyncStatus status, void* userdata) {
+    mockBufferMapCallback->Call(status, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    // WireMemoryTransferServiceTests test the MemoryTransferService with buffer mapping.
-    // They test the basic success and error cases for buffer mapping, and they test
-    // mocked failures of each fallible MemoryTransferService method that an embedder
-    // could implement.
-    // The test harness defines multiple helpers for expecting operations on Read/Write handles
-    // and for mocking failures. The helpers are designed such that for a given run of a test,
-    // a Serialization expection has a corresponding Deserialization expectation for which the
-    // serialized data must match.
-    // There are tests which check for Success for every mapping operation which mock an entire
-    // mapping operation from map to unmap, and add all MemoryTransferService expectations. Tests
-    // which check for errors perform the same mapping operations but insert mocked failures for
-    // various mapping or MemoryTransferService operations.
-    class WireMemoryTransferServiceTests : public WireTest {
-      public:
-        WireMemoryTransferServiceTests() {
-        }
-        ~WireMemoryTransferServiceTests() override = default;
+// WireMemoryTransferServiceTests test the MemoryTransferService with buffer mapping.
+// They test the basic success and error cases for buffer mapping, and they test
+// mocked failures of each fallible MemoryTransferService method that an embedder
+// could implement.
+// The test harness defines multiple helpers for expecting operations on Read/Write handles
+// and for mocking failures. The helpers are designed such that for a given run of a test,
+// a Serialization expectation has a corresponding Deserialization expectation
+// serialized data must match.
+// There are tests which check for Success for every mapping operation which mock an entire
+// mapping operation from map to unmap, and add all MemoryTransferService expectations. Tests
+// which check for errors perform the same mapping operations but insert mocked failures for
+// various mapping or MemoryTransferService operations.
+class WireMemoryTransferServiceTests : public WireTest {
+  public:
+    WireMemoryTransferServiceTests() {}
+    ~WireMemoryTransferServiceTests() override = default;
 
-        client::MemoryTransferService* GetClientMemoryTransferService() override {
-            return &clientMemoryTransferService;
-        }
-
-        server::MemoryTransferService* GetServerMemoryTransferService() override {
-            return &serverMemoryTransferService;
-        }
-
-        void SetUp() override {
-            WireTest::SetUp();
-
-            mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
-
-            // TODO(enga): Make this thread-safe.
-            mBufferContent++;
-            mMappedBufferContent = 0;
-            mUpdatedBufferContent++;
-            mSerializeCreateInfo++;
-            mReadHandleSerializeDataInfo++;
-            mWriteHandleSerializeDataInfo++;
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockBufferMapCallback = nullptr;
-        }
-
-        void FlushClient(bool success = true) {
-            WireTest::FlushClient(success);
-            Mock::VerifyAndClearExpectations(&serverMemoryTransferService);
-        }
-
-        void FlushServer(bool success = true) {
-            WireTest::FlushServer(success);
-
-            Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
-            Mock::VerifyAndClearExpectations(&clientMemoryTransferService);
-        }
-
-      protected:
-        using ClientReadHandle = client::MockMemoryTransferService::MockReadHandle;
-        using ServerReadHandle = server::MockMemoryTransferService::MockReadHandle;
-        using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle;
-        using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle;
-
-        std::pair<WGPUBuffer, WGPUBuffer> CreateBuffer(
-            WGPUBufferUsage usage = WGPUBufferUsage_None) {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.size = kBufferSize;
-            descriptor.usage = usage;
-
-            WGPUBuffer apiBuffer = api.GetNewBuffer();
-            WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-            EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
-                .WillOnce(Return(apiBuffer))
-                .RetiresOnSaturation();
-
-            return std::make_pair(apiBuffer, buffer);
-        }
-
-        std::pair<WGPUBuffer, WGPUBuffer> CreateBufferMapped(
-            WGPUBufferUsage usage = WGPUBufferUsage_None) {
-            WGPUBufferDescriptor descriptor = {};
-            descriptor.size = sizeof(mBufferContent);
-            descriptor.mappedAtCreation = true;
-            descriptor.usage = usage;
-
-            WGPUBuffer apiBuffer = api.GetNewBuffer();
-
-            WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-
-            EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-            EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, sizeof(mBufferContent)))
-                .WillOnce(Return(&mMappedBufferContent));
-
-            return std::make_pair(apiBuffer, buffer);
-        }
-
-        ClientReadHandle* ExpectReadHandleCreation() {
-            // Create the handle first so we can use it in later expectations.
-            ClientReadHandle* handle = clientMemoryTransferService.NewReadHandle();
-
-            EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
-
-            return handle;
-        }
-
-        void MockReadHandleCreationFailure() {
-            EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
-        }
-
-        void ExpectReadHandleSerialization(ClientReadHandle* handle) {
-            EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreateSize(handle))
-                .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
-            EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreate(handle, _))
-                .WillOnce(WithArg<1>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
-                    return sizeof(mSerializeCreateInfo);
-                }));
-        }
-
-        ServerReadHandle* ExpectServerReadHandleDeserialize() {
-            // Create the handle first so we can use it in later expectations.
-            ServerReadHandle* handle = serverMemoryTransferService.NewReadHandle();
-
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                sizeof(mSerializeCreateInfo), _))
-                .WillOnce(WithArg<2>([=](server::MemoryTransferService::ReadHandle** readHandle) {
-                    *readHandle = handle;
-                    return true;
-                }));
-
-            return handle;
-        }
-
-        void MockServerReadHandleDeserializeFailure() {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                sizeof(mSerializeCreateInfo), _))
-                .WillOnce(InvokeWithoutArgs([&]() { return false; }));
-        }
-
-        void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnReadHandleSizeOfSerializeDataUpdate(handle, _, _))
-                .WillOnce(
-                    InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); }));
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnReadHandleSerializeDataUpdate(handle, _, _, _, _))
-                .WillOnce(WithArg<4>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mReadHandleSerializeDataInfo,
-                           sizeof(mReadHandleSerializeDataInfo));
-                    return sizeof(mReadHandleSerializeDataInfo);
-                }));
-        }
-
-        void ExpectClientReadHandleDeserializeDataUpdate(ClientReadHandle* handle,
-                                                         uint32_t* mappedData) {
-            EXPECT_CALL(
-                clientMemoryTransferService,
-                OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
-                                                  sizeof(mReadHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(true));
-        }
-
-        void MockClientReadHandleDeserializeDataUpdateFailure(ClientReadHandle* handle) {
-            EXPECT_CALL(
-                clientMemoryTransferService,
-                OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
-                                                  sizeof(mReadHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(false));
-        }
-
-        ClientWriteHandle* ExpectWriteHandleCreation(bool mappedAtCreation) {
-            // Create the handle first so we can use it in later expectations.
-            ClientWriteHandle* handle = clientMemoryTransferService.NewWriteHandle();
-
-            EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
-            if (mappedAtCreation) {
-                EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(handle))
-                    .WillOnce(Return(&mBufferContent));
-            }
-
-            return handle;
-        }
-
-        void MockWriteHandleCreationFailure() {
-            EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
-                .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
-        }
-
-        void ExpectWriteHandleSerialization(ClientWriteHandle* handle) {
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreateSize(handle))
-                .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreate(handle, _))
-                .WillOnce(WithArg<1>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
-                    return sizeof(mSerializeCreateInfo);
-                }));
-        }
-
-        ServerWriteHandle* ExpectServerWriteHandleDeserialization() {
-            // Create the handle first so it can be used in later expectations.
-            ServerWriteHandle* handle = serverMemoryTransferService.NewWriteHandle();
-
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                 sizeof(mSerializeCreateInfo), _))
-                .WillOnce(WithArg<2>([=](server::MemoryTransferService::WriteHandle** writeHandle) {
-                    *writeHandle = handle;
-                    return true;
-                }));
-
-            return handle;
-        }
-
-        void MockServerWriteHandleDeserializeFailure() {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
-                                                 sizeof(mSerializeCreateInfo), _))
-                .WillOnce(Return(false));
-        }
-
-        void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) {
-            EXPECT_CALL(clientMemoryTransferService,
-                        OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _))
-                .WillOnce(
-                    InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); }));
-            EXPECT_CALL(clientMemoryTransferService,
-                        OnWriteHandleSerializeDataUpdate(handle, _, _, _))
-                .WillOnce(WithArg<1>([&](void* serializePointer) {
-                    memcpy(serializePointer, &mWriteHandleSerializeDataInfo,
-                           sizeof(mWriteHandleSerializeDataInfo));
-                    return sizeof(mWriteHandleSerializeDataInfo);
-                }));
-        }
-
-        void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle,
-                                                          uint32_t expectedData) {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnWriteHandleDeserializeDataUpdate(
-                            handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
-                            sizeof(mWriteHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(true));
-        }
-
-        void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) {
-            EXPECT_CALL(serverMemoryTransferService,
-                        OnWriteHandleDeserializeDataUpdate(
-                            handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
-                            sizeof(mWriteHandleSerializeDataInfo), _, _))
-                .WillOnce(Return(false));
-        }
-
-        // Arbitrary values used within tests to check if serialized data is correctly passed
-        // between the client and server. The static data changes between runs of the tests and
-        // test expectations will check that serialized values are passed to the respective
-        // deserialization function.
-        static uint32_t mSerializeCreateInfo;
-        static uint32_t mReadHandleSerializeDataInfo;
-        static uint32_t mWriteHandleSerializeDataInfo;
-
-        // Represents the buffer contents for the test.
-        static uint32_t mBufferContent;
-
-        static constexpr size_t kBufferSize = sizeof(mBufferContent);
-
-        // The client's zero-initialized buffer for writing.
-        uint32_t mMappedBufferContent = 0;
-
-        // |mMappedBufferContent| should be set equal to |mUpdatedBufferContent| when the client
-        // performs a write. Test expectations should check that |mBufferContent ==
-        // mUpdatedBufferContent| after all writes are flushed.
-        static uint32_t mUpdatedBufferContent;
-
-        StrictMock<dawn::wire::server::MockMemoryTransferService> serverMemoryTransferService;
-        StrictMock<dawn::wire::client::MockMemoryTransferService> clientMemoryTransferService;
-    };
-
-    uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337;
-    uint32_t WireMemoryTransferServiceTests::mUpdatedBufferContent = 2349;
-    uint32_t WireMemoryTransferServiceTests::mSerializeCreateInfo = 4242;
-    uint32_t WireMemoryTransferServiceTests::mReadHandleSerializeDataInfo = 1394;
-    uint32_t WireMemoryTransferServiceTests::mWriteHandleSerializeDataInfo = 1235;
-
-    // Test successful mapping for reading.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on creation.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // The handle serialize data update on mapAsync cmd
-        ExpectServerReadHandleSerializeDataUpdate(serverHandle);
-
-        // Mock a successful callback
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        // The client should receive the handle data update message from the server.
-        ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
-
-        FlushServer();
-
-        wgpuBufferUnmap(buffer);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+    client::MemoryTransferService* GetClientMemoryTransferService() override {
+        return &clientMemoryTransferService;
     }
 
-    // Test ReadHandle destroy behavior
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroy) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on creation.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-
-        FlushClient();
+    server::MemoryTransferService* GetServerMemoryTransferService() override {
+        return &serverMemoryTransferService;
     }
 
-    // Test unsuccessful mapping for reading.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
+    void SetUp() override {
+        WireTest::SetUp();
 
-        // The client should create and serialize a ReadHandle on creation.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
+        mockBufferMapCallback = std::make_unique<StrictMock<MockBufferMapCallback>>();
 
-        // The server should deserialize the ReadHandle from the client.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a failed callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        // The client receives an error callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        wgpuBufferUnmap(buffer);
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+        // TODO(enga): Make this thread-safe.
+        mBufferContent++;
+        mMappedBufferContent = 0;
+        mUpdatedBufferContent++;
+        mSerializeCreateInfo++;
+        mReadHandleSerializeDataInfo++;
+        mWriteHandleSerializeDataInfo++;
     }
 
-    // Test ReadHandle creation failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadHandleCreationFailure) {
-        // Mock a ReadHandle creation failure
-        MockReadHandleCreationFailure();
+    void TearDown() override {
+        WireTest::TearDown();
 
+        // Delete mock so that expectations are checked
+        mockBufferMapCallback = nullptr;
+    }
+
+    void FlushClient(bool success = true) {
+        WireTest::FlushClient(success);
+        Mock::VerifyAndClearExpectations(&serverMemoryTransferService);
+    }
+
+    void FlushServer(bool success = true) {
+        WireTest::FlushServer(success);
+
+        Mock::VerifyAndClearExpectations(&mockBufferMapCallback);
+        Mock::VerifyAndClearExpectations(&clientMemoryTransferService);
+    }
+
+  protected:
+    using ClientReadHandle = client::MockMemoryTransferService::MockReadHandle;
+    using ServerReadHandle = server::MockMemoryTransferService::MockReadHandle;
+    using ClientWriteHandle = client::MockMemoryTransferService::MockWriteHandle;
+    using ServerWriteHandle = server::MockMemoryTransferService::MockWriteHandle;
+
+    std::pair<WGPUBuffer, WGPUBuffer> CreateBuffer(WGPUBufferUsage usage = WGPUBufferUsage_None) {
         WGPUBufferDescriptor descriptor = {};
         descriptor.size = kBufferSize;
-        descriptor.usage = WGPUBufferUsage_MapRead;
+        descriptor.usage = usage;
 
-        wgpuDeviceCreateBuffer(device, &descriptor);
-    }
-
-    // Test MapRead DeserializeReadHandle failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeReadHandleFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on mapping for reading..
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-
-        // Mock a Deserialization failure.
-        MockServerReadHandleDeserializeFailure();
-
-        FlushClient(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-    }
-
-    // Test read handle DeserializeDataUpdate failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on mapping for reading.
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // The handle serialize data update on mapAsync cmd
-        ExpectServerReadHandleSerializeDataUpdate(serverHandle);
-
-        // Mock a successful callback
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mBufferContent));
-
-        FlushClient();
-
-        // The client should receive the handle data update message from the server.
-        // Mock a deserialization failure.
-        MockClientReadHandleDeserializeDataUpdateFailure(clientHandle);
-
-        // Failed deserialization is a fatal failure and the client synchronously receives a
-        // DEVICE_LOST callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, _)).Times(1);
-
-        FlushServer(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test mapping for reading destroying the buffer before unmapping on the client side.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a ReadHandle on mapping for reading..
-        ClientReadHandle* clientHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientHandle);
-
-        // The server should deserialize the read handle from the client and then serialize
-        // an initialization message.
-        ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // The handle serialize data update on mapAsync cmd
-        ExpectServerReadHandleSerializeDataUpdate(serverHandle);
-
-        // Mock a successful callback
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        // The client should receive the handle data update message from the server.
-        ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
-
-        FlushServer();
-
-        // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
-        // immediately, both in the client and server side.
-        {
-            EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
-            wgpuBufferDestroy(buffer);
-
-            EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
-            EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-            FlushClient();
-
-            // The handle is already destroyed so unmap only results in a server unmap call.
-            wgpuBufferUnmap(buffer);
-
-            EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-            FlushClient();
-        }
-    }
-
-    // Test successful mapping for writing.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a successful callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mMappedBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // The client writes to the handle contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // The client will then serialize data update and destroy the handle on Unmap()
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test WriteHandle destroy behavior
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroy) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        wgpuBufferDestroy(buffer);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test unsuccessful MapWrite.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a WriteHandle on buffer creation with MapWrite
-        // usage.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock an error callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error);
-            }));
-
-        FlushClient();
-
-        // The client receives an error callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
-
-        FlushServer();
-
-        wgpuBufferUnmap(buffer);
-
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test WriteHandle creation failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteHandleCreationFailure) {
-        // Mock a WriteHandle creation failure
-        MockWriteHandleCreationFailure();
-
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = kBufferSize;
-        descriptor.usage = WGPUBufferUsage_MapWrite;
-
-        wgpuDeviceCreateBuffer(device, &descriptor);
-    }
-
-    // Test MapWrite DeserializeWriteHandle failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        // The client should create and serialize a WriteHandle on buffer creation with MapWrite
-        // usage.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // Mock a deserialization failure.
-        MockServerWriteHandleDeserializeFailure();
-
-        FlushClient(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-    }
-
-    // Test MapWrite DeserializeDataUpdate failure.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailure) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a successful callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mMappedBufferContent));
-
-        FlushClient();
-
-        // The client receives a success callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // The client writes to the handle contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // The client will then serialize data update
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message. Mock a deserialization failure.
-        MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
-
-        FlushClient(false);
-
-        // The handle is destroyed once the buffer is destroyed.
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-    }
-
-    // Test MapWrite destroying the buffer before unmapping on the client side.
-    TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) {
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        FlushClient();
-
-        wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback,
-                           nullptr);
-
-        // Mock a successful callback.
-        EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
-            }));
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
-            .WillOnce(Return(&mBufferContent));
-        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
-            .WillOnce(Return(&mMappedBufferContent));
-
-        FlushClient();
-
-        // The client receives a successful callback.
-        EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
-
-        FlushServer();
-
-        // The client writes to the handle contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
-        // immediately, both in the client and server side.
-        {
-            // The handle is destroyed once the buffer is destroyed.
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-
-            wgpuBufferDestroy(buffer);
-
-            EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-            EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-            FlushClient();
-
-            // The handle is already destroyed so unmap only results in a server unmap call.
-            wgpuBufferUnmap(buffer);
-
-            EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-            FlushClient();
-        }
-    }
-
-    // Test successful buffer creation with mappedAtCreation = true.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationSuccess) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
-
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped();
-        FlushClient();
-
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle and
-        // destroy it.
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
-
-        // After the handle is updated it can be destroyed.
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
-        FlushClient();
-    }
-
-    // Test buffer creation with mappedAtCreation WriteHandle creation failure.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationWriteHandleCreationFailure) {
-        // Mock a WriteHandle creation failure
-        MockWriteHandleCreationFailure();
-
-        WGPUBufferDescriptor descriptor = {};
-        descriptor.size = sizeof(mBufferContent);
-        descriptor.mappedAtCreation = true;
-
+        WGPUBuffer apiBuffer = api.GetNewBuffer();
         WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
-        EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, sizeof(mBufferContent)));
+
+        EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _))
+            .WillOnce(Return(apiBuffer))
+            .RetiresOnSaturation();
+
+        return std::make_pair(apiBuffer, buffer);
     }
 
-    // Test buffer creation with mappedAtCreation DeserializeWriteHandle failure.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeWriteHandleFailure) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
-
-        // The server should then deserialize the WriteHandle from the client.
-        MockServerWriteHandleDeserializeFailure();
-
+    std::pair<WGPUBuffer, WGPUBuffer> CreateBufferMapped(
+        WGPUBufferUsage usage = WGPUBufferUsage_None) {
         WGPUBufferDescriptor descriptor = {};
         descriptor.size = sizeof(mBufferContent);
         descriptor.mappedAtCreation = true;
+        descriptor.usage = usage;
 
         WGPUBuffer apiBuffer = api.GetNewBuffer();
 
-        wgpuDeviceCreateBuffer(device, &descriptor);
+        WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
 
         EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
-        // Now bufferGetMappedRange won't be called if deserialize writeHandle fails
+        EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, sizeof(mBufferContent)))
+            .WillOnce(Return(&mMappedBufferContent));
 
-        FlushClient(false);
-
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+        return std::make_pair(apiBuffer, buffer);
     }
 
-    // Test buffer creation with mappedAtCreation = true DeserializeDataUpdate failure.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeDataUpdateFailure) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
+    ClientReadHandle* ExpectReadHandleCreation() {
+        // Create the handle first so we can use it in later expectations.
+        ClientReadHandle* handle = clientMemoryTransferService.NewReadHandle();
 
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+        EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
 
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped();
-        FlushClient();
-
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle and
-        // destroy it.
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message. Mock a deserialization failure.
-        MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
-
-        FlushClient(false);
-
-        // Failed BufferUpdateMappedData cmd will early return so BufferUnmap is not processed.
-        // The server side writeHandle is destructed at buffer destruction.
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        return handle;
     }
 
-    // Test mappedAtCreation=true destroying the buffer before unmapping on the client side.
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientHandle);
+    void MockReadHandleCreationFailure() {
+        EXPECT_CALL(clientMemoryTransferService, OnCreateReadHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
+    }
 
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+    void ExpectReadHandleSerialization(ClientReadHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreateSize(handle))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleSerializeCreate(handle, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
+                return sizeof(mSerializeCreateInfo);
+            }));
+    }
 
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped();
-        FlushClient();
+    ServerReadHandle* ExpectServerReadHandleDeserialize() {
+        // Create the handle first so we can use it in later expectations.
+        ServerReadHandle* handle = serverMemoryTransferService.NewReadHandle();
 
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                            sizeof(mSerializeCreateInfo), _))
+            .WillOnce(WithArg<2>([=](server::MemoryTransferService::ReadHandle** readHandle) {
+                *readHandle = handle;
+                return true;
+            }));
 
-        // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
-        // immediately, both in the client and server side.
-        {
-            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
-            wgpuBufferDestroy(buffer);
+        return handle;
+    }
 
-            EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
-            EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
-            FlushClient();
+    void MockServerReadHandleDeserializeFailure() {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeReadHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                            sizeof(mSerializeCreateInfo), _))
+            .WillOnce(InvokeWithoutArgs([&]() { return false; }));
+    }
 
-            // The handle is already destroyed so unmap only results in a server unmap call.
-            wgpuBufferUnmap(buffer);
+    void ExpectServerReadHandleSerializeDataUpdate(ServerReadHandle* handle) {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnReadHandleSizeOfSerializeDataUpdate(handle, _, _))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mReadHandleSerializeDataInfo); }));
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnReadHandleSerializeDataUpdate(handle, _, _, _, _))
+            .WillOnce(WithArg<4>([&](void* serializePointer) {
+                memcpy(serializePointer, &mReadHandleSerializeDataInfo,
+                       sizeof(mReadHandleSerializeDataInfo));
+                return sizeof(mReadHandleSerializeDataInfo);
+            }));
+    }
 
-            EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-            FlushClient();
+    void ExpectClientReadHandleDeserializeDataUpdate(ClientReadHandle* handle,
+                                                     uint32_t* mappedData) {
+        EXPECT_CALL(
+            clientMemoryTransferService,
+            OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
+                                              sizeof(mReadHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(true));
+    }
+
+    void MockClientReadHandleDeserializeDataUpdateFailure(ClientReadHandle* handle) {
+        EXPECT_CALL(
+            clientMemoryTransferService,
+            OnReadHandleDeserializeDataUpdate(handle, Pointee(Eq(mReadHandleSerializeDataInfo)),
+                                              sizeof(mReadHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(false));
+    }
+
+    ClientWriteHandle* ExpectWriteHandleCreation(bool mappedAtCreation) {
+        // Create the handle first so we can use it in later expectations.
+        ClientWriteHandle* handle = clientMemoryTransferService.NewWriteHandle();
+
+        EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return handle; }));
+        if (mappedAtCreation) {
+            EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(handle))
+                .WillOnce(Return(&mBufferContent));
         }
+
+        return handle;
     }
 
-    // Test a buffer with mappedAtCreation and MapRead usage destroy WriteHandle on unmap and switch
-    // data pointer to ReadHandle
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) {
-        // The client should create and serialize a ReadHandle and a WriteHandle on
-        // createBufferMapped.
-        ClientReadHandle* clientReadHandle = ExpectReadHandleCreation();
-        ExpectReadHandleSerialization(clientReadHandle);
-        ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true);
-        ExpectWriteHandleSerialization(clientWriteHandle);
-
-        // The server should then deserialize a ReadHandle and a WriteHandle from the client.
-        ServerReadHandle* serverReadHandle = ExpectServerReadHandleDeserialize();
-        ServerWriteHandle* serverWriteHandle = ExpectServerWriteHandleDeserialization();
-
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapRead);
-        FlushClient();
-
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle and
-        // destroy it.
-        ExpectClientWriteHandleSerializeDataUpdate(clientWriteHandle);
-        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientWriteHandle)).Times(1);
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientReadHandle))
-            .WillOnce(Return(&mBufferContent));
-        wgpuBufferUnmap(buffer);
-
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverWriteHandle, mUpdatedBufferContent);
-        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverWriteHandle)).Times(1);
-        FlushClient();
-
-        // The ReadHandle will be destoryed on buffer destroy.
-        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientReadHandle)).Times(1);
-        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverReadHandle)).Times(1);
+    void MockWriteHandleCreationFailure() {
+        EXPECT_CALL(clientMemoryTransferService, OnCreateWriteHandle(sizeof(mBufferContent)))
+            .WillOnce(InvokeWithoutArgs([=]() { return nullptr; }));
     }
 
-    // Test WriteHandle preserves after unmap for a buffer with mappedAtCreation and MapWrite usage
-    TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) {
-        // The client should create and serialize a WriteHandle on createBufferMapped.
-        ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    void ExpectWriteHandleSerialization(ClientWriteHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreateSize(handle))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mSerializeCreateInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeCreate(handle, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mSerializeCreateInfo, sizeof(mSerializeCreateInfo));
+                return sizeof(mSerializeCreateInfo);
+            }));
+    }
 
-        ExpectWriteHandleSerialization(clientHandle);
+    ServerWriteHandle* ExpectServerWriteHandleDeserialization() {
+        // Create the handle first so it can be used in later expectations.
+        ServerWriteHandle* handle = serverMemoryTransferService.NewWriteHandle();
 
-        // The server should then deserialize the WriteHandle from the client.
-        ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                             sizeof(mSerializeCreateInfo), _))
+            .WillOnce(WithArg<2>([=](server::MemoryTransferService::WriteHandle** writeHandle) {
+                *writeHandle = handle;
+                return true;
+            }));
 
-        WGPUBuffer buffer;
-        WGPUBuffer apiBuffer;
-        std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapWrite);
+        return handle;
+    }
+
+    void MockServerWriteHandleDeserializeFailure() {
+        EXPECT_CALL(serverMemoryTransferService,
+                    OnDeserializeWriteHandle(Pointee(Eq(mSerializeCreateInfo)),
+                                             sizeof(mSerializeCreateInfo), _))
+            .WillOnce(Return(false));
+    }
+
+    void ExpectClientWriteHandleSerializeDataUpdate(ClientWriteHandle* handle) {
+        EXPECT_CALL(clientMemoryTransferService,
+                    OnWriteHandleSizeOfSerializeDataUpdate(handle, _, _))
+            .WillOnce(InvokeWithoutArgs([&]() { return sizeof(mWriteHandleSerializeDataInfo); }));
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleSerializeDataUpdate(handle, _, _, _))
+            .WillOnce(WithArg<1>([&](void* serializePointer) {
+                memcpy(serializePointer, &mWriteHandleSerializeDataInfo,
+                       sizeof(mWriteHandleSerializeDataInfo));
+                return sizeof(mWriteHandleSerializeDataInfo);
+            }));
+    }
+
+    void ExpectServerWriteHandleDeserializeDataUpdate(ServerWriteHandle* handle,
+                                                      uint32_t expectedData) {
+        EXPECT_CALL(
+            serverMemoryTransferService,
+            OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
+                                               sizeof(mWriteHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(true));
+    }
+
+    void MockServerWriteHandleDeserializeDataUpdateFailure(ServerWriteHandle* handle) {
+        EXPECT_CALL(
+            serverMemoryTransferService,
+            OnWriteHandleDeserializeDataUpdate(handle, Pointee(Eq(mWriteHandleSerializeDataInfo)),
+                                               sizeof(mWriteHandleSerializeDataInfo), _, _))
+            .WillOnce(Return(false));
+    }
+
+    // Arbitrary values used within tests to check if serialized data is correctly passed
+    // between the client and server. The static data changes between runs of the tests and
+    // test expectations will check that serialized values are passed to the respective
+    // deserialization function.
+    static uint32_t mSerializeCreateInfo;
+    static uint32_t mReadHandleSerializeDataInfo;
+    static uint32_t mWriteHandleSerializeDataInfo;
+
+    // Represents the buffer contents for the test.
+    static uint32_t mBufferContent;
+
+    static constexpr size_t kBufferSize = sizeof(mBufferContent);
+
+    // The client's zero-initialized buffer for writing.
+    uint32_t mMappedBufferContent = 0;
+
+    // |mMappedBufferContent| should be set equal to |mUpdatedBufferContent| when the client
+    // performs a write. Test expectations should check that |mBufferContent ==
+    // mUpdatedBufferContent| after all writes are flushed.
+    static uint32_t mUpdatedBufferContent;
+
+    StrictMock<dawn::wire::server::MockMemoryTransferService> serverMemoryTransferService;
+    StrictMock<dawn::wire::client::MockMemoryTransferService> clientMemoryTransferService;
+};
+
+uint32_t WireMemoryTransferServiceTests::mBufferContent = 1337;
+uint32_t WireMemoryTransferServiceTests::mUpdatedBufferContent = 2349;
+uint32_t WireMemoryTransferServiceTests::mSerializeCreateInfo = 4242;
+uint32_t WireMemoryTransferServiceTests::mReadHandleSerializeDataInfo = 1394;
+uint32_t WireMemoryTransferServiceTests::mWriteHandleSerializeDataInfo = 1235;
+
+// Test successful mapping for reading.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadSuccess) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The handle serialize data update on mapAsync cmd
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    // The client should receive the handle data update message from the server.
+    ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test ReadHandle destroy behavior
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroy) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test unsuccessful mapping for reading.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadError) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on creation.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the ReadHandle from the client.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a failed callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    // The client receives an error callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test ReadHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadHandleCreationFailure) {
+    // Mock a ReadHandle creation failure
+    MockReadHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = kBufferSize;
+    descriptor.usage = WGPUBufferUsage_MapRead;
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+}
+
+// Test MapRead DeserializeReadHandle failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeReadHandleFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on mapping for reading.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+
+    // Mock a Deserialization failure.
+    MockServerReadHandleDeserializeFailure();
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test read handle DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDeserializeDataUpdateFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on mapping for reading.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The handle serialize data update on mapAsync cmd
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client should receive the handle data update message from the server.
+    // Mock a deserialization failure.
+    MockClientReadHandleDeserializeDataUpdateFailure(clientHandle);
+
+    // Failed deserialization is a fatal failure and the client synchronously receives a
+    // DEVICE_LOST callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_DeviceLost, _)).Times(1);
+
+    FlushServer(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test mapping for reading destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, BufferMapReadDestroyBeforeUnmap) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a ReadHandle on mapping for reading.
+    ClientReadHandle* clientHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientHandle);
+
+    // The server should deserialize the read handle from the client and then serialize
+    // an initialization message.
+    ServerReadHandle* serverHandle = ExpectServerReadHandleDeserialize();
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // The handle serialize data update on mapAsync cmd
+    ExpectServerReadHandleSerializeDataUpdate(serverHandle);
+
+    // Mock a successful callback
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Read, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetConstMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    // The client should receive the handle data update message from the server.
+    ExpectClientReadHandleDeserializeDataUpdate(clientHandle, &mBufferContent);
+
+    FlushServer();
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientHandle)).Times(1);
+        wgpuBufferDestroy(buffer);
+
+        EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
         FlushClient();
 
-        // Update the mapped contents.
-        mMappedBufferContent = mUpdatedBufferContent;
-
-        // When the client Unmaps the buffer, it will serialize data update writes to the handle.
-        ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
-
+        // The handle is already destroyed so unmap only results in a server unmap call.
         wgpuBufferUnmap(buffer);
 
-        // The server deserializes the data update message.
-        ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
         EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
-
         FlushClient();
+    }
+}
 
-        // The writeHandle is preserved after unmap and is destroyed once the buffer is destroyed.
+// Test successful mapping for writing.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteSuccess) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // The client will then serialize data update and destroy the handle on Unmap()
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test WriteHandle destroy behavior
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroy) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    wgpuBufferDestroy(buffer);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test unsuccessful MapWrite.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteError) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a WriteHandle on buffer creation with MapWrite
+    // usage.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock an error callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs(
+            [&]() { api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Error); }));
+
+    FlushClient();
+
+    // The client receives an error callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Error, _)).Times(1);
+
+    FlushServer();
+
+    wgpuBufferUnmap(buffer);
+
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test WriteHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteHandleCreationFailure) {
+    // Mock a WriteHandle creation failure
+    MockWriteHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = kBufferSize;
+    descriptor.usage = WGPUBufferUsage_MapWrite;
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+}
+
+// Test MapWrite DeserializeWriteHandle failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeWriteHandleFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    // The client should create and serialize a WriteHandle on buffer creation with MapWrite
+    // usage.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // Mock a deserialization failure.
+    MockServerWriteHandleDeserializeFailure();
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test MapWrite DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDeserializeDataUpdateFailure) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a success callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // The client will then serialize data update
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message. Mock a deserialization failure.
+    MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
+
+    FlushClient(false);
+
+    // The handle is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test MapWrite destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, BufferMapWriteDestroyBeforeUnmap) {
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(false);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    std::tie(apiBuffer, buffer) = CreateBuffer(WGPUBufferUsage_MapWrite);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    FlushClient();
+
+    wgpuBufferMapAsync(buffer, WGPUMapMode_Write, 0, kBufferSize, ToMockBufferMapCallback, nullptr);
+
+    // Mock a successful callback.
+    EXPECT_CALL(api, OnBufferMapAsync(apiBuffer, WGPUMapMode_Write, 0, kBufferSize, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallBufferMapAsyncCallback(apiBuffer, WGPUBufferMapAsyncStatus_Success);
+        }));
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleGetData(clientHandle))
+        .WillOnce(Return(&mBufferContent));
+    EXPECT_CALL(api, BufferGetMappedRange(apiBuffer, 0, kBufferSize))
+        .WillOnce(Return(&mMappedBufferContent));
+
+    FlushClient();
+
+    // The client receives a successful callback.
+    EXPECT_CALL(*mockBufferMapCallback, Call(WGPUBufferMapAsyncStatus_Success, _)).Times(1);
+
+    FlushServer();
+
+    // The client writes to the handle contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        // The handle is destroyed once the buffer is destroyed.
         EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+        wgpuBufferDestroy(buffer);
+
         EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+        FlushClient();
+
+        // The handle is already destroyed so unmap only results in a server unmap call.
+        wgpuBufferUnmap(buffer);
+
+        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+        FlushClient();
     }
+}
+
+// Test successful buffer creation with mappedAtCreation = true.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationSuccess) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+
+    // After the handle is updated it can be destroyed.
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+}
+
+// Test buffer creation with mappedAtCreation WriteHandle creation failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationWriteHandleCreationFailure) {
+    // Mock a WriteHandle creation failure
+    MockWriteHandleCreationFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = sizeof(mBufferContent);
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer buffer = wgpuDeviceCreateBuffer(device, &descriptor);
+    EXPECT_EQ(nullptr, wgpuBufferGetMappedRange(buffer, 0, sizeof(mBufferContent)));
+}
+
+// Test buffer creation with mappedAtCreation DeserializeWriteHandle failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeWriteHandleFailure) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    MockServerWriteHandleDeserializeFailure();
+
+    WGPUBufferDescriptor descriptor = {};
+    descriptor.size = sizeof(mBufferContent);
+    descriptor.mappedAtCreation = true;
+
+    WGPUBuffer apiBuffer = api.GetNewBuffer();
+
+    wgpuDeviceCreateBuffer(device, &descriptor);
+
+    EXPECT_CALL(api, DeviceCreateBuffer(apiDevice, _)).WillOnce(Return(apiBuffer));
+    // Now bufferGetMappedRange won't be called if deserialize writeHandle fails
+
+    FlushClient(false);
+
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+}
+
+// Test buffer creation with mappedAtCreation = true DeserializeDataUpdate failure.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDeserializeDataUpdateFailure) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message. Mock a deserialization failure.
+    MockServerWriteHandleDeserializeDataUpdateFailure(serverHandle);
+
+    FlushClient(false);
+
+    // A failed BufferUpdateMappedData cmd returns early, so BufferUnmap is not processed.
+    // The server side writeHandle is destructed at buffer destruction.
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
+
+// Test mappedAtCreation=true destroying the buffer before unmapping on the client side.
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationDestroyBeforeUnmap) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped();
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // THIS IS THE TEST: destroy the buffer before unmapping and check it destroyed the mapping
+    // immediately, both in the client and server side.
+    {
+        EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+        wgpuBufferDestroy(buffer);
+
+        EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+        EXPECT_CALL(api, BufferDestroy(apiBuffer)).Times(1);
+        FlushClient();
+
+        // The handle is already destroyed so unmap only results in a server unmap call.
+        wgpuBufferUnmap(buffer);
+
+        EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+        FlushClient();
+    }
+}
+
+// Test a buffer with mappedAtCreation and MapRead usage destroys WriteHandle on unmap and switches
+// data pointer to ReadHandle
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapReadSuccess) {
+    // The client should create and serialize a ReadHandle and a WriteHandle on
+    // createBufferMapped.
+    ClientReadHandle* clientReadHandle = ExpectReadHandleCreation();
+    ExpectReadHandleSerialization(clientReadHandle);
+    ClientWriteHandle* clientWriteHandle = ExpectWriteHandleCreation(true);
+    ExpectWriteHandleSerialization(clientWriteHandle);
+
+    // The server should then deserialize a ReadHandle and a WriteHandle from the client.
+    ServerReadHandle* serverReadHandle = ExpectServerReadHandleDeserialize();
+    ServerWriteHandle* serverWriteHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapRead);
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle and
+    // destroy it.
+    ExpectClientWriteHandleSerializeDataUpdate(clientWriteHandle);
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientWriteHandle)).Times(1);
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleGetData(clientReadHandle))
+        .WillOnce(Return(&mBufferContent));
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverWriteHandle, mUpdatedBufferContent);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverWriteHandle)).Times(1);
+    FlushClient();
+
+    // The ReadHandle will be destroyed on buffer destroy.
+    EXPECT_CALL(clientMemoryTransferService, OnReadHandleDestroy(clientReadHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnReadHandleDestroy(serverReadHandle)).Times(1);
+}
+
+// Test that the WriteHandle is preserved after unmap for a buffer with mappedAtCreation and MapWrite usage
+TEST_F(WireMemoryTransferServiceTests, MappedAtCreationAndMapWriteSuccess) {
+    // The client should create and serialize a WriteHandle on createBufferMapped.
+    ClientWriteHandle* clientHandle = ExpectWriteHandleCreation(true);
+
+    ExpectWriteHandleSerialization(clientHandle);
+
+    // The server should then deserialize the WriteHandle from the client.
+    ServerWriteHandle* serverHandle = ExpectServerWriteHandleDeserialization();
+
+    WGPUBuffer buffer;
+    WGPUBuffer apiBuffer;
+    std::tie(apiBuffer, buffer) = CreateBufferMapped(WGPUBufferUsage_MapWrite);
+    FlushClient();
+
+    // Update the mapped contents.
+    mMappedBufferContent = mUpdatedBufferContent;
+
+    // When the client Unmaps the buffer, it will serialize data update writes to the handle.
+    ExpectClientWriteHandleSerializeDataUpdate(clientHandle);
+
+    wgpuBufferUnmap(buffer);
+
+    // The server deserializes the data update message.
+    ExpectServerWriteHandleDeserializeDataUpdate(serverHandle, mUpdatedBufferContent);
+    EXPECT_CALL(api, BufferUnmap(apiBuffer)).Times(1);
+
+    FlushClient();
+
+    // The writeHandle is preserved after unmap and is destroyed once the buffer is destroyed.
+    EXPECT_CALL(clientMemoryTransferService, OnWriteHandleDestroy(clientHandle)).Times(1);
+    EXPECT_CALL(serverMemoryTransferService, OnWriteHandleDestroy(serverHandle)).Times(1);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireOptionalTests.cpp b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
index b95a588..2061021 100644
--- a/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireOptionalTests.cpp
@@ -16,171 +16,167 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::Return;
+using testing::_;
+using testing::Return;
 
-    class WireOptionalTests : public WireTest {
-      public:
-        WireOptionalTests() {
-        }
-        ~WireOptionalTests() override = default;
-    };
+class WireOptionalTests : public WireTest {
+  public:
+    WireOptionalTests() {}
+    ~WireOptionalTests() override = default;
+};
 
-    // Test passing nullptr instead of objects - object as value version
-    TEST_F(WireOptionalTests, OptionalObjectValue) {
-        WGPUBindGroupLayoutDescriptor bglDesc = {};
-        bglDesc.entryCount = 0;
-        WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc);
+// Test passing nullptr instead of objects - object as value version
+TEST_F(WireOptionalTests, OptionalObjectValue) {
+    WGPUBindGroupLayoutDescriptor bglDesc = {};
+    bglDesc.entryCount = 0;
+    WGPUBindGroupLayout bgl = wgpuDeviceCreateBindGroupLayout(device, &bglDesc);
 
-        WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout();
-        EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _))
-            .WillOnce(Return(apiBindGroupLayout));
+    WGPUBindGroupLayout apiBindGroupLayout = api.GetNewBindGroupLayout();
+    EXPECT_CALL(api, DeviceCreateBindGroupLayout(apiDevice, _))
+        .WillOnce(Return(apiBindGroupLayout));
 
-        // The `sampler`, `textureView` and `buffer` members of a binding are optional.
-        WGPUBindGroupEntry entry;
-        entry.binding = 0;
-        entry.sampler = nullptr;
-        entry.textureView = nullptr;
-        entry.buffer = nullptr;
-        entry.nextInChain = nullptr;
+    // The `sampler`, `textureView` and `buffer` members of a binding are optional.
+    WGPUBindGroupEntry entry;
+    entry.binding = 0;
+    entry.sampler = nullptr;
+    entry.textureView = nullptr;
+    entry.buffer = nullptr;
+    entry.nextInChain = nullptr;
 
-        WGPUBindGroupDescriptor bgDesc = {};
-        bgDesc.layout = bgl;
-        bgDesc.entryCount = 1;
-        bgDesc.entries = &entry;
+    WGPUBindGroupDescriptor bgDesc = {};
+    bgDesc.layout = bgl;
+    bgDesc.entryCount = 1;
+    bgDesc.entries = &entry;
 
-        wgpuDeviceCreateBindGroup(device, &bgDesc);
+    wgpuDeviceCreateBindGroup(device, &bgDesc);
 
-        WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
-        EXPECT_CALL(api,
-                    DeviceCreateBindGroup(
-                        apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
-                            return desc->nextInChain == nullptr && desc->entryCount == 1 &&
-                                   desc->entries[0].binding == 0 &&
-                                   desc->entries[0].sampler == nullptr &&
-                                   desc->entries[0].buffer == nullptr &&
-                                   desc->entries[0].textureView == nullptr;
-                        })))
-            .WillOnce(Return(apiPlaceholderBindGroup));
+    WGPUBindGroup apiPlaceholderBindGroup = api.GetNewBindGroup();
+    EXPECT_CALL(api, DeviceCreateBindGroup(
+                         apiDevice, MatchesLambda([](const WGPUBindGroupDescriptor* desc) -> bool {
+                             return desc->nextInChain == nullptr && desc->entryCount == 1 &&
+                                    desc->entries[0].binding == 0 &&
+                                    desc->entries[0].sampler == nullptr &&
+                                    desc->entries[0].buffer == nullptr &&
+                                    desc->entries[0].textureView == nullptr;
+                         })))
+        .WillOnce(Return(apiPlaceholderBindGroup));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
-    // Test that the wire is able to send optional pointers to structures
-    TEST_F(WireOptionalTests, OptionalStructPointer) {
-        // Create shader module
-        WGPUShaderModuleDescriptor vertexDescriptor = {};
-        WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
-        WGPUShaderModule apiVsModule = api.GetNewShaderModule();
-        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
+// Test that the wire is able to send optional pointers to structures
+TEST_F(WireOptionalTests, OptionalStructPointer) {
+    // Create shader module
+    WGPUShaderModuleDescriptor vertexDescriptor = {};
+    WGPUShaderModule vsModule = wgpuDeviceCreateShaderModule(device, &vertexDescriptor);
+    WGPUShaderModule apiVsModule = api.GetNewShaderModule();
+    EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _)).WillOnce(Return(apiVsModule));
 
-        // Create the color state descriptor
-        WGPUBlendComponent blendComponent = {};
-        blendComponent.operation = WGPUBlendOperation_Add;
-        blendComponent.srcFactor = WGPUBlendFactor_One;
-        blendComponent.dstFactor = WGPUBlendFactor_One;
-        WGPUBlendState blendState = {};
-        blendState.alpha = blendComponent;
-        blendState.color = blendComponent;
-        WGPUColorTargetState colorTargetState = {};
-        colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
-        colorTargetState.blend = &blendState;
-        colorTargetState.writeMask = WGPUColorWriteMask_All;
+    // Create the color state descriptor
+    WGPUBlendComponent blendComponent = {};
+    blendComponent.operation = WGPUBlendOperation_Add;
+    blendComponent.srcFactor = WGPUBlendFactor_One;
+    blendComponent.dstFactor = WGPUBlendFactor_One;
+    WGPUBlendState blendState = {};
+    blendState.alpha = blendComponent;
+    blendState.color = blendComponent;
+    WGPUColorTargetState colorTargetState = {};
+    colorTargetState.format = WGPUTextureFormat_RGBA8Unorm;
+    colorTargetState.blend = &blendState;
+    colorTargetState.writeMask = WGPUColorWriteMask_All;
 
-        // Create the depth-stencil state
-        WGPUStencilFaceState stencilFace = {};
-        stencilFace.compare = WGPUCompareFunction_Always;
-        stencilFace.failOp = WGPUStencilOperation_Keep;
-        stencilFace.depthFailOp = WGPUStencilOperation_Keep;
-        stencilFace.passOp = WGPUStencilOperation_Keep;
+    // Create the depth-stencil state
+    WGPUStencilFaceState stencilFace = {};
+    stencilFace.compare = WGPUCompareFunction_Always;
+    stencilFace.failOp = WGPUStencilOperation_Keep;
+    stencilFace.depthFailOp = WGPUStencilOperation_Keep;
+    stencilFace.passOp = WGPUStencilOperation_Keep;
 
-        WGPUDepthStencilState depthStencilState = {};
-        depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
-        depthStencilState.depthWriteEnabled = false;
-        depthStencilState.depthCompare = WGPUCompareFunction_Always;
-        depthStencilState.stencilBack = stencilFace;
-        depthStencilState.stencilFront = stencilFace;
-        depthStencilState.stencilReadMask = 0xff;
-        depthStencilState.stencilWriteMask = 0xff;
-        depthStencilState.depthBias = 0;
-        depthStencilState.depthBiasSlopeScale = 0.0;
-        depthStencilState.depthBiasClamp = 0.0;
+    WGPUDepthStencilState depthStencilState = {};
+    depthStencilState.format = WGPUTextureFormat_Depth24PlusStencil8;
+    depthStencilState.depthWriteEnabled = false;
+    depthStencilState.depthCompare = WGPUCompareFunction_Always;
+    depthStencilState.stencilBack = stencilFace;
+    depthStencilState.stencilFront = stencilFace;
+    depthStencilState.stencilReadMask = 0xff;
+    depthStencilState.stencilWriteMask = 0xff;
+    depthStencilState.depthBias = 0;
+    depthStencilState.depthBiasSlopeScale = 0.0;
+    depthStencilState.depthBiasClamp = 0.0;
 
-        // Create the pipeline layout
-        WGPUPipelineLayoutDescriptor layoutDescriptor = {};
-        layoutDescriptor.bindGroupLayoutCount = 0;
-        layoutDescriptor.bindGroupLayouts = nullptr;
-        WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
-        WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
-        EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
+    // Create the pipeline layout
+    WGPUPipelineLayoutDescriptor layoutDescriptor = {};
+    layoutDescriptor.bindGroupLayoutCount = 0;
+    layoutDescriptor.bindGroupLayouts = nullptr;
+    WGPUPipelineLayout layout = wgpuDeviceCreatePipelineLayout(device, &layoutDescriptor);
+    WGPUPipelineLayout apiLayout = api.GetNewPipelineLayout();
+    EXPECT_CALL(api, DeviceCreatePipelineLayout(apiDevice, _)).WillOnce(Return(apiLayout));
 
-        // Create pipeline
-        WGPURenderPipelineDescriptor pipelineDescriptor = {};
+    // Create pipeline
+    WGPURenderPipelineDescriptor pipelineDescriptor = {};
 
-        pipelineDescriptor.vertex.module = vsModule;
-        pipelineDescriptor.vertex.entryPoint = "main";
-        pipelineDescriptor.vertex.bufferCount = 0;
-        pipelineDescriptor.vertex.buffers = nullptr;
+    pipelineDescriptor.vertex.module = vsModule;
+    pipelineDescriptor.vertex.entryPoint = "main";
+    pipelineDescriptor.vertex.bufferCount = 0;
+    pipelineDescriptor.vertex.buffers = nullptr;
 
-        WGPUFragmentState fragment = {};
-        fragment.module = vsModule;
-        fragment.entryPoint = "main";
-        fragment.targetCount = 1;
-        fragment.targets = &colorTargetState;
-        pipelineDescriptor.fragment = &fragment;
+    WGPUFragmentState fragment = {};
+    fragment.module = vsModule;
+    fragment.entryPoint = "main";
+    fragment.targetCount = 1;
+    fragment.targets = &colorTargetState;
+    pipelineDescriptor.fragment = &fragment;
 
-        pipelineDescriptor.multisample.count = 1;
-        pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
-        pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
-        pipelineDescriptor.layout = layout;
-        pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
-        pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
-        pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
+    pipelineDescriptor.multisample.count = 1;
+    pipelineDescriptor.multisample.mask = 0xFFFFFFFF;
+    pipelineDescriptor.multisample.alphaToCoverageEnabled = false;
+    pipelineDescriptor.layout = layout;
+    pipelineDescriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
+    pipelineDescriptor.primitive.frontFace = WGPUFrontFace_CCW;
+    pipelineDescriptor.primitive.cullMode = WGPUCullMode_None;
 
-        // First case: depthStencil is not null.
-        pipelineDescriptor.depthStencil = &depthStencilState;
-        wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+    // First case: depthStencil is not null.
+    pipelineDescriptor.depthStencil = &depthStencilState;
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
 
-        WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
-        EXPECT_CALL(
-            api,
-            DeviceCreateRenderPipeline(
-                apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
-                    return desc->depthStencil != nullptr &&
-                           desc->depthStencil->nextInChain == nullptr &&
-                           desc->depthStencil->depthWriteEnabled == false &&
-                           desc->depthStencil->depthCompare == WGPUCompareFunction_Always &&
-                           desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always &&
-                           desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilBack.depthFailOp ==
-                               WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always &&
-                           desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilFront.depthFailOp ==
-                               WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep &&
-                           desc->depthStencil->stencilReadMask == 0xff &&
-                           desc->depthStencil->stencilWriteMask == 0xff &&
-                           desc->depthStencil->depthBias == 0 &&
-                           desc->depthStencil->depthBiasSlopeScale == 0.0 &&
-                           desc->depthStencil->depthBiasClamp == 0.0;
-                })))
-            .WillOnce(Return(apiPlaceholderPipeline));
+    WGPURenderPipeline apiPlaceholderPipeline = api.GetNewRenderPipeline();
+    EXPECT_CALL(
+        api,
+        DeviceCreateRenderPipeline(
+            apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                return desc->depthStencil != nullptr &&
+                       desc->depthStencil->nextInChain == nullptr &&
+                       desc->depthStencil->depthWriteEnabled == false &&
+                       desc->depthStencil->depthCompare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilBack.compare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilBack.failOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilBack.depthFailOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilBack.passOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.compare == WGPUCompareFunction_Always &&
+                       desc->depthStencil->stencilFront.failOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.depthFailOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilFront.passOp == WGPUStencilOperation_Keep &&
+                       desc->depthStencil->stencilReadMask == 0xff &&
+                       desc->depthStencil->stencilWriteMask == 0xff &&
+                       desc->depthStencil->depthBias == 0 &&
+                       desc->depthStencil->depthBiasSlopeScale == 0.0 &&
+                       desc->depthStencil->depthBiasClamp == 0.0;
+            })))
+        .WillOnce(Return(apiPlaceholderPipeline));
 
-        FlushClient();
+    FlushClient();
 
-        // Second case: depthStencil is null.
-        pipelineDescriptor.depthStencil = nullptr;
-        wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
-        EXPECT_CALL(
-            api, DeviceCreateRenderPipeline(
-                     apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
-                         return desc->depthStencil == nullptr;
-                     })))
-            .WillOnce(Return(apiPlaceholderPipeline));
+    // Second case: depthStencil is null.
+    pipelineDescriptor.depthStencil = nullptr;
+    wgpuDeviceCreateRenderPipeline(device, &pipelineDescriptor);
+    EXPECT_CALL(api,
+                DeviceCreateRenderPipeline(
+                    apiDevice, MatchesLambda([](const WGPURenderPipelineDescriptor* desc) -> bool {
+                        return desc->depthStencil == nullptr;
+                    })))
+        .WillOnce(Return(apiPlaceholderPipeline));
 
-        FlushClient();
-    }
+    FlushClient();
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireQueueTests.cpp b/src/dawn/tests/unittests/wire/WireQueueTests.cpp
index 536ad77..7e2d677 100644
--- a/src/dawn/tests/unittests/wire/WireQueueTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireQueueTests.cpp
@@ -19,129 +19,128 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
 
-    class MockQueueWorkDoneCallback {
-      public:
-        MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
-    };
+class MockQueueWorkDoneCallback {
+  public:
+    MOCK_METHOD(void, Call, (WGPUQueueWorkDoneStatus status, void* userdata));
+};
 
-    static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
-    static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
-        mockQueueWorkDoneCallback->Call(status, userdata);
+static std::unique_ptr<MockQueueWorkDoneCallback> mockQueueWorkDoneCallback;
+static void ToMockQueueWorkDone(WGPUQueueWorkDoneStatus status, void* userdata) {
+    mockQueueWorkDoneCallback->Call(status, userdata);
+}
+
+class WireQueueTests : public WireTest {
+  protected:
+    void SetUp() override {
+        WireTest::SetUp();
+        mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
     }
 
-    class WireQueueTests : public WireTest {
-      protected:
-        void SetUp() override {
-            WireTest::SetUp();
-            mockQueueWorkDoneCallback = std::make_unique<MockQueueWorkDoneCallback>();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-            mockQueueWorkDoneCallback = nullptr;
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback);
-        }
-    };
-
-    // Test that a successful OnSubmittedWorkDone call is forwarded to the client.
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) {
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this))
-            .Times(1);
-        FlushServer();
+    void TearDown() override {
+        WireTest::TearDown();
+        mockQueueWorkDoneCallback = nullptr;
     }
 
-    // Test that an error OnSubmittedWorkDone call is forwarded as an error to the client.
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneError) {
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1);
-        FlushServer();
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockQueueWorkDoneCallback);
     }
+};
 
-    // Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with
-    // device loss
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) {
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
-            }));
-        FlushClient();
+// Test that a successful OnSubmittedWorkDone call is forwarded to the client.
+TEST_F(WireQueueTests, OnSubmittedWorkDoneSuccess) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Success);
+        }));
+    FlushClient();
 
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
-            .Times(1);
-        GetWireClient()->Disconnect();
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Success, this)).Times(1);
+    FlushServer();
+}
+
+// Test that an error OnSubmittedWorkDone call is forwarded as an error to the client.
+TEST_F(WireQueueTests, OnSubmittedWorkDoneError) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_Error, this)).Times(1);
+    FlushServer();
+}
+
+// Test registering an OnSubmittedWorkDone then disconnecting the wire calls the callback with
+// device loss
+TEST_F(WireQueueTests, OnSubmittedWorkDoneBeforeDisconnect) {
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1);
+    GetWireClient()->Disconnect();
+}
+
+// Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with
+// device loss
+TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) {
+    GetWireClient()->Disconnect();
+
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1);
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireQueueTests* pTest;
+    WGPUQueue* pTestQueue;
+    size_t numRequests;
+};
+
+static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestQueue, nullptr);
+    mockQueueWorkDoneCallback->Call(status, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone,
+                                     testData->pTest);
     }
+}
 
-    // Test registering an OnSubmittedWorkDone after disconnecting the wire calls the callback with
-    // device loss
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneAfterDisconnect) {
-        GetWireClient()->Disconnect();
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) {
+    TestData testData = {this, &queue, 10};
+    wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData);
+    EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
+        }));
+    FlushClient();
 
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
-            .Times(1);
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDone, this);
-    }
+    EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
 
-    // Hack to pass in test context into user callback
-    struct TestData {
-        WireQueueTests* pTest;
-        WGPUQueue* pTestQueue;
-        size_t numRequests;
-    };
-
-    static void ToMockQueueWorkDoneWithNewRequests(WGPUQueueWorkDoneStatus status, void* userdata) {
-        TestData* testData = reinterpret_cast<TestData*>(userdata);
-        // Mimic the user callback is sending new requests
-        ASSERT_NE(testData, nullptr);
-        ASSERT_NE(testData->pTest, nullptr);
-        ASSERT_NE(testData->pTestQueue, nullptr);
-        mockQueueWorkDoneCallback->Call(status, testData->pTest);
-
-        // Send the requests a number of times
-        for (size_t i = 0; i < testData->numRequests; i++) {
-            wgpuQueueOnSubmittedWorkDone(*(testData->pTestQueue), 0u, ToMockQueueWorkDone,
-                                         testData->pTest);
-        }
-    }
-
-    // Test that requests inside user callbacks before disconnect are called
-    TEST_F(WireQueueTests, OnSubmittedWorkDoneInsideCallbackBeforeDisconnect) {
-        TestData testData = {this, &queue, 10};
-        wgpuQueueOnSubmittedWorkDone(queue, 0u, ToMockQueueWorkDoneWithNewRequests, &testData);
-        EXPECT_CALL(api, OnQueueOnSubmittedWorkDone(apiQueue, 0u, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallQueueOnSubmittedWorkDoneCallback(apiQueue, WGPUQueueWorkDoneStatus_Error);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockQueueWorkDoneCallback, Call(WGPUQueueWorkDoneStatus_DeviceLost, this))
-            .Times(1 + testData.numRequests);
-        GetWireClient()->Disconnect();
-    }
-
-    // Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks
-    // since it is always destructed after the test TearDown, and we cannot create a new queue obj
-    // with wgpuDeviceGetQueue
+// Only one default queue is supported now so we cannot test ~Queue triggering ClearAllCallbacks
+// since it is always destructed after the test TearDown, and we cannot create a new queue obj
+// with wgpuDeviceGetQueue
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
index 48c93d2..67d258b 100644
--- a/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
+++ b/src/dawn/tests/unittests/wire/WireShaderModuleTests.cpp
@@ -19,226 +19,224 @@
 
 namespace dawn::wire {
 
-    using testing::_;
-    using testing::InvokeWithoutArgs;
-    using testing::Mock;
-    using testing::Return;
-    using testing::StrictMock;
+using testing::_;
+using testing::InvokeWithoutArgs;
+using testing::Mock;
+using testing::Return;
+using testing::StrictMock;
 
-    namespace {
+namespace {
 
-        // Mock class to add expectations on the wire calling callbacks
-        class MockCompilationInfoCallback {
-          public:
-            MOCK_METHOD(void,
-                        Call,
-                        (WGPUCompilationInfoRequestStatus status,
-                         const WGPUCompilationInfo* info,
-                         void* userdata));
-        };
+// Mock class to add expectations on the wire calling callbacks
+class MockCompilationInfoCallback {
+  public:
+    MOCK_METHOD(void,
+                Call,
+                (WGPUCompilationInfoRequestStatus status,
+                 const WGPUCompilationInfo* info,
+                 void* userdata));
+};
 
-        std::unique_ptr<StrictMock<MockCompilationInfoCallback>> mockCompilationInfoCallback;
-        void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status,
-                                              const WGPUCompilationInfo* info,
-                                              void* userdata) {
-            mockCompilationInfoCallback->Call(status, info, userdata);
-        }
+std::unique_ptr<StrictMock<MockCompilationInfoCallback>> mockCompilationInfoCallback;
+void ToMockGetCompilationInfoCallback(WGPUCompilationInfoRequestStatus status,
+                                      const WGPUCompilationInfo* info,
+                                      void* userdata) {
+    mockCompilationInfoCallback->Call(status, info, userdata);
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class WireShaderModuleTests : public WireTest {
-      public:
-        WireShaderModuleTests() {
-        }
-        ~WireShaderModuleTests() override = default;
+class WireShaderModuleTests : public WireTest {
+  public:
+    WireShaderModuleTests() {}
+    ~WireShaderModuleTests() override = default;
 
-        void SetUp() override {
-            WireTest::SetUp();
+    void SetUp() override {
+        WireTest::SetUp();
 
-            mockCompilationInfoCallback =
-                std::make_unique<StrictMock<MockCompilationInfoCallback>>();
-            apiShaderModule = api.GetNewShaderModule();
+        mockCompilationInfoCallback = std::make_unique<StrictMock<MockCompilationInfoCallback>>();
+        apiShaderModule = api.GetNewShaderModule();
 
-            WGPUShaderModuleDescriptor descriptor = {};
-            shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor);
+        WGPUShaderModuleDescriptor descriptor = {};
+        shaderModule = wgpuDeviceCreateShaderModule(device, &descriptor);
 
-            EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _))
-                .WillOnce(Return(apiShaderModule))
-                .RetiresOnSaturation();
-            FlushClient();
-        }
-
-        void TearDown() override {
-            WireTest::TearDown();
-
-            // Delete mock so that expectations are checked
-            mockCompilationInfoCallback = nullptr;
-        }
-
-        void FlushClient() {
-            WireTest::FlushClient();
-            Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
-        }
-
-        void FlushServer() {
-            WireTest::FlushServer();
-            Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
-        }
-
-      protected:
-        WGPUShaderModule shaderModule;
-        WGPUShaderModule apiShaderModule;
-    };
-
-    // Check getting CompilationInfo for a successfully created shader module
-    TEST_F(WireShaderModuleTests, GetCompilationInfo) {
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
-
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
-
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-
+        EXPECT_CALL(api, DeviceCreateShaderModule(apiDevice, _))
+            .WillOnce(Return(apiShaderModule))
+            .RetiresOnSaturation();
         FlushClient();
-
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_Success,
-                         MatchesLambda([&](const WGPUCompilationInfo* info) -> bool {
-                             if (info->messageCount != compilationInfo.messageCount) {
-                                 return false;
-                             }
-                             const WGPUCompilationMessage* infoMessage = &info->messages[0];
-                             return strcmp(infoMessage->message, message.message) == 0 &&
-                                    infoMessage->nextInChain == message.nextInChain &&
-                                    infoMessage->type == message.type &&
-                                    infoMessage->lineNum == message.lineNum &&
-                                    infoMessage->linePos == message.linePos &&
-                                    infoMessage->offset == message.offset &&
-                                    infoMessage->length == message.length;
-                         }),
-                         _))
-            .Times(1);
-        FlushServer();
     }
 
-    // Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a
-    // device loss.
-    TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) {
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+    void TearDown() override {
+        WireTest::TearDown();
 
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
-
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-        FlushClient();
-
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
-        GetWireClient()->Disconnect();
+        // Delete mock so that expectations are checked
+        mockCompilationInfoCallback = nullptr;
     }
 
-    // Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a
-    // device loss.
-    TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) {
-        GetWireClient()->Disconnect();
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+    void FlushClient() {
+        WireTest::FlushClient();
+        Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
     }
 
-    // Hack to pass in test context into user callback
-    struct TestData {
-        WireShaderModuleTests* pTest;
-        WGPUShaderModule* pTestShaderModule;
-        size_t numRequests;
-    };
-
-    static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status,
-                                                       const WGPUCompilationInfo* info,
-                                                       void* userdata) {
-        TestData* testData = reinterpret_cast<TestData*>(userdata);
-        // Mimic the user callback is sending new requests
-        ASSERT_NE(testData, nullptr);
-        ASSERT_NE(testData->pTest, nullptr);
-        ASSERT_NE(testData->pTestShaderModule, nullptr);
-
-        mockCompilationInfoCallback->Call(status, info, testData->pTest);
-
-        // Send the requests a number of times
-        for (size_t i = 0; i < testData->numRequests; i++) {
-            wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule),
-                                               ToMockGetCompilationInfoCallback, nullptr);
-        }
+    void FlushServer() {
+        WireTest::FlushServer();
+        Mock::VerifyAndClearExpectations(&mockCompilationInfoCallback);
     }
 
-    // Test that requests inside user callbacks before disconnect are called
-    TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) {
-        TestData testData = {this, &shaderModule, 10};
+  protected:
+    WGPUShaderModule shaderModule;
+    WGPUShaderModule apiShaderModule;
+};
 
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
-                                           &testData);
+// Check getting CompilationInfo for a successfully created shader module
+TEST_F(WireShaderModuleTests, GetCompilationInfo) {
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
 
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
 
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-        FlushClient();
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
 
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _))
-            .Times(1 + testData.numRequests);
-        GetWireClient()->Disconnect();
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_Success,
+                     MatchesLambda([&](const WGPUCompilationInfo* info) -> bool {
+                         if (info->messageCount != compilationInfo.messageCount) {
+                             return false;
+                         }
+                         const WGPUCompilationMessage* infoMessage = &info->messages[0];
+                         return strcmp(infoMessage->message, message.message) == 0 &&
+                                infoMessage->nextInChain == message.nextInChain &&
+                                infoMessage->type == message.type &&
+                                infoMessage->lineNum == message.lineNum &&
+                                infoMessage->linePos == message.linePos &&
+                                infoMessage->offset == message.offset &&
+                                infoMessage->length == message.length;
+                     }),
+                     _))
+        .Times(1);
+    FlushServer();
+}
+
+// Test that calling GetCompilationInfo then disconnecting the wire calls the callback with a
+// device loss.
+TEST_F(WireShaderModuleTests, GetCompilationInfoBeforeDisconnect) {
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
+    GetWireClient()->Disconnect();
+}
+
+// Test that calling GetCompilationInfo after disconnecting the wire calls the callback with a
+// device loss.
+TEST_F(WireShaderModuleTests, GetCompilationInfoAfterDisconnect) {
+    GetWireClient()->Disconnect();
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _));
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockGetCompilationInfoCallback, nullptr);
+}
+
+// Hack to pass in test context into user callback
+struct TestData {
+    WireShaderModuleTests* pTest;
+    WGPUShaderModule* pTestShaderModule;
+    size_t numRequests;
+};
+
+static void ToMockBufferMapCallbackWithNewRequests(WGPUCompilationInfoRequestStatus status,
+                                                   const WGPUCompilationInfo* info,
+                                                   void* userdata) {
+    TestData* testData = reinterpret_cast<TestData*>(userdata);
+    // Mimic the user callback is sending new requests
+    ASSERT_NE(testData, nullptr);
+    ASSERT_NE(testData->pTest, nullptr);
+    ASSERT_NE(testData->pTestShaderModule, nullptr);
+
+    mockCompilationInfoCallback->Call(status, info, testData->pTest);
+
+    // Send the requests a number of times
+    for (size_t i = 0; i < testData->numRequests; i++) {
+        wgpuShaderModuleGetCompilationInfo(*(testData->pTestShaderModule),
+                                           ToMockGetCompilationInfoCallback, nullptr);
     }
+}
 
-    // Test that requests inside user callbacks before object destruction are called
-    TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) {
-        TestData testData = {this, &shaderModule, 10};
+// Test that requests inside user callbacks before disconnect are called
+TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDisconnect) {
+    TestData testData = {this, &shaderModule, 10};
 
-        wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
-                                           &testData);
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
+                                       &testData);
 
-        WGPUCompilationMessage message = {
-            nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
-        WGPUCompilationInfo compilationInfo;
-        compilationInfo.nextInChain = nullptr;
-        compilationInfo.messageCount = 1;
-        compilationInfo.messages = &message;
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
 
-        EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
-            .WillOnce(InvokeWithoutArgs([&]() {
-                api.CallShaderModuleGetCompilationInfoCallback(
-                    apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
-            }));
-        FlushClient();
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
 
-        EXPECT_CALL(*mockCompilationInfoCallback,
-                    Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _))
-            .Times(1 + testData.numRequests);
-        wgpuShaderModuleRelease(shaderModule);
-    }
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, _))
+        .Times(1 + testData.numRequests);
+    GetWireClient()->Disconnect();
+}
+
+// Test that requests inside user callbacks before object destruction are called
+TEST_F(WireShaderModuleTests, GetCompilationInfoInsideCallbackBeforeDestruction) {
+    TestData testData = {this, &shaderModule, 10};
+
+    wgpuShaderModuleGetCompilationInfo(shaderModule, ToMockBufferMapCallbackWithNewRequests,
+                                       &testData);
+
+    WGPUCompilationMessage message = {
+        nullptr, "Test Message", WGPUCompilationMessageType_Info, 2, 4, 6, 8};
+    WGPUCompilationInfo compilationInfo;
+    compilationInfo.nextInChain = nullptr;
+    compilationInfo.messageCount = 1;
+    compilationInfo.messages = &message;
+
+    EXPECT_CALL(api, OnShaderModuleGetCompilationInfo(apiShaderModule, _, _))
+        .WillOnce(InvokeWithoutArgs([&]() {
+            api.CallShaderModuleGetCompilationInfoCallback(
+                apiShaderModule, WGPUCompilationInfoRequestStatus_Success, &compilationInfo);
+        }));
+    FlushClient();
+
+    EXPECT_CALL(*mockCompilationInfoCallback,
+                Call(WGPUCompilationInfoRequestStatus_Unknown, nullptr, _))
+        .Times(1 + testData.numRequests);
+    wgpuShaderModuleRelease(shaderModule);
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/tests/unittests/wire/WireTest.cpp b/src/dawn/tests/unittests/wire/WireTest.cpp
index 9397139..4a9f0d6 100644
--- a/src/dawn/tests/unittests/wire/WireTest.cpp
+++ b/src/dawn/tests/unittests/wire/WireTest.cpp
@@ -25,11 +25,9 @@
 using testing::Mock;
 using testing::Return;
 
-WireTest::WireTest() {
-}
+WireTest::WireTest() {}
 
-WireTest::~WireTest() {
-}
+WireTest::~WireTest() {}
 
 dawn::wire::client::MemoryTransferService* WireTest::GetClientMemoryTransferService() {
     return nullptr;
diff --git a/src/dawn/tests/unittests/wire/WireTest.h b/src/dawn/tests/unittests/wire/WireTest.h
index cd91c3e..0cbd156 100644
--- a/src/dawn/tests/unittests/wire/WireTest.h
+++ b/src/dawn/tests/unittests/wire/WireTest.h
@@ -40,12 +40,9 @@
 template <typename Lambda, typename Arg>
 class LambdaMatcherImpl : public testing::MatcherInterface<Arg> {
   public:
-    explicit LambdaMatcherImpl(Lambda lambda) : mLambda(lambda) {
-    }
+    explicit LambdaMatcherImpl(Lambda lambda) : mLambda(lambda) {}
 
-    void DescribeTo(std::ostream* os) const override {
-        *os << "with a custom matcher";
-    }
+    void DescribeTo(std::ostream* os) const override { *os << "with a custom matcher"; }
 
     bool MatchAndExplain(Arg value, testing::MatchResultListener* listener) const override {
         if (!mLambda(value)) {
@@ -71,8 +68,7 @@
 
 class StringMessageMatcher : public testing::MatcherInterface<const char*> {
   public:
-    StringMessageMatcher() {
-    }
+    StringMessageMatcher() {}
 
     bool MatchAndExplain(const char* message,
                          testing::MatchResultListener* listener) const override {
@@ -87,13 +83,9 @@
         return true;
     }
 
-    void DescribeTo(std::ostream* os) const override {
-        *os << "valid error message";
-    }
+    void DescribeTo(std::ostream* os) const override { *os << "valid error message"; }
 
-    void DescribeNegationTo(std::ostream* os) const override {
-        *os << "invalid error message";
-    }
+    void DescribeNegationTo(std::ostream* os) const override { *os << "invalid error message"; }
 };
 
 inline testing::Matcher<const char*> ValidStringMessage() {
@@ -101,18 +93,18 @@
 }
 
 namespace dawn::wire {
-    class WireClient;
-    class WireServer;
-    namespace client {
-        class MemoryTransferService;
-    }  // namespace client
-    namespace server {
-        class MemoryTransferService;
-    }  // namespace server
+class WireClient;
+class WireServer;
+namespace client {
+class MemoryTransferService;
+}  // namespace client
+namespace server {
+class MemoryTransferService;
+}  // namespace server
 }  // namespace dawn::wire
 
 namespace utils {
-    class TerribleCommandBuffer;
+class TerribleCommandBuffer;
 }
 
 class WireTest : public testing::Test {
diff --git a/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
index cc020bc..16f8c54 100644
--- a/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
+++ b/src/dawn/tests/white_box/D3D12DescriptorHeapTests.cpp
@@ -28,21 +28,21 @@
 
 namespace dawn::native::d3d12 {
 
-    constexpr uint32_t kRTSize = 4;
+constexpr uint32_t kRTSize = 4;
 
-    // Pooling tests are required to advance the GPU completed serial to reuse heaps.
-    // This requires Tick() to be called at-least |kFrameDepth| times. This constant
-    // should be updated if the internals of Tick() change.
-    constexpr uint32_t kFrameDepth = 2;
+// Pooling tests are required to advance the GPU completed serial to reuse heaps.
+// This requires Tick() to be called at-least |kFrameDepth| times. This constant
+// should be updated if the internals of Tick() change.
+constexpr uint32_t kFrameDepth = 2;
 
-    class D3D12DescriptorHeapTests : public DawnTest {
-      protected:
-        void SetUp() override {
-            DawnTest::SetUp();
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-            mD3DDevice = reinterpret_cast<Device*>(device.Get());
+class D3D12DescriptorHeapTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        mD3DDevice = reinterpret_cast<Device*>(device.Get());
 
-            mSimpleVSModule = utils::CreateShaderModule(device, R"(
+        mSimpleVSModule = utils::CreateShaderModule(device, R"(
 
             @stage(vertex) fn main(
                 @builtin(vertex_index) VertexIndex : u32
@@ -55,7 +55,7 @@
                 return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
             })");
 
-            mSimpleFSModule = utils::CreateShaderModule(device, R"(
+        mSimpleFSModule = utils::CreateShaderModule(device, R"(
             struct U {
                 color : vec4<f32>
             }
@@ -64,397 +64,390 @@
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 return colorBuffer.color;
             })");
-        }
-
-        utils::BasicRenderPass MakeRenderPass(uint32_t width,
-                                              uint32_t height,
-                                              wgpu::TextureFormat format) {
-            DAWN_ASSERT(width > 0 && height > 0);
-
-            wgpu::TextureDescriptor descriptor;
-            descriptor.dimension = wgpu::TextureDimension::e2D;
-            descriptor.size.width = width;
-            descriptor.size.height = height;
-            descriptor.size.depthOrArrayLayers = 1;
-            descriptor.sampleCount = 1;
-            descriptor.format = format;
-            descriptor.mipLevelCount = 1;
-            descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
-            wgpu::Texture color = device.CreateTexture(&descriptor);
-
-            return utils::BasicRenderPass(width, height, color);
-        }
-
-        std::array<float, 4> GetSolidColor(uint32_t n) const {
-            ASSERT(n >> 24 == 0);
-            float b = (n & 0xFF) / 255.0f;
-            float g = ((n >> 8) & 0xFF) / 255.0f;
-            float r = ((n >> 16) & 0xFF) / 255.0f;
-            return {r, g, b, 1};
-        }
-
-        Device* mD3DDevice = nullptr;
-
-        wgpu::ShaderModule mSimpleVSModule;
-        wgpu::ShaderModule mSimpleFSModule;
-    };
-
-    class PlaceholderStagingDescriptorAllocator {
-      public:
-        PlaceholderStagingDescriptorAllocator(Device* device,
-                                              uint32_t descriptorCount,
-                                              uint32_t allocationsPerHeap)
-            : mAllocator(device,
-                         descriptorCount,
-                         allocationsPerHeap * descriptorCount,
-                         D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) {
-        }
-
-        CPUDescriptorHeapAllocation AllocateCPUDescriptors() {
-            dawn::native::ResultOrError<CPUDescriptorHeapAllocation> result =
-                mAllocator.AllocateCPUDescriptors();
-            return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{};
-        }
-
-        void Deallocate(CPUDescriptorHeapAllocation& allocation) {
-            mAllocator.Deallocate(&allocation);
-        }
-
-      private:
-        StagingDescriptorAllocator mAllocator;
-    };
-
-    // Verify the shader visible view heaps switch over within a single submit.
-    TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
-
-        // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a
-        // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
-        renderPipelineDescriptor.vertex.module = mSimpleVSModule;
-        renderPipelineDescriptor.cFragment.module = mSimpleFSModule;
-
-        wgpu::RenderPipeline renderPipeline =
-            device.CreateRenderPipeline(&renderPipelineDescriptor);
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
-
-        Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
-        ShaderVisibleDescriptorAllocator* allocator =
-            d3dDevice->GetViewShaderVisibleDescriptorAllocator();
-        const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting();
-
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-            pass.SetPipeline(renderPipeline);
-
-            std::array<float, 4> redColor = {1, 0, 0, 1};
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-            for (uint32_t i = 0; i < heapSize + 1; ++i) {
-                pass.SetBindGroup(0,
-                                  utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                                       {{0, uniformBuffer, 0, sizeof(redColor)}}));
-                pass.Draw(3);
-            }
-
-            pass.End();
-        }
-
-        wgpu::CommandBuffer commands = encoder.Finish();
-        queue.Submit(1, &commands);
-
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1));
     }
 
-    // Verify the shader visible sampler heaps does not switch over within a single submit.
-    TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
-        utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+    utils::BasicRenderPass MakeRenderPass(uint32_t width,
+                                          uint32_t height,
+                                          wgpu::TextureFormat format) {
+        DAWN_ASSERT(width > 0 && height > 0);
 
-        // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating
-        // a sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
-        // because the sampler heap allocations are de-duplicated.
-        renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        wgpu::TextureDescriptor descriptor;
+        descriptor.dimension = wgpu::TextureDimension::e2D;
+        descriptor.size.width = width;
+        descriptor.size.height = height;
+        descriptor.size.depthOrArrayLayers = 1;
+        descriptor.sampleCount = 1;
+        descriptor.format = format;
+        descriptor.mipLevelCount = 1;
+        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+        wgpu::Texture color = device.CreateTexture(&descriptor);
+
+        return utils::BasicRenderPass(width, height, color);
+    }
+
+    std::array<float, 4> GetSolidColor(uint32_t n) const {
+        ASSERT(n >> 24 == 0);
+        float b = (n & 0xFF) / 255.0f;
+        float g = ((n >> 8) & 0xFF) / 255.0f;
+        float r = ((n >> 16) & 0xFF) / 255.0f;
+        return {r, g, b, 1};
+    }
+
+    Device* mD3DDevice = nullptr;
+
+    wgpu::ShaderModule mSimpleVSModule;
+    wgpu::ShaderModule mSimpleFSModule;
+};
+
+class PlaceholderStagingDescriptorAllocator {
+  public:
+    PlaceholderStagingDescriptorAllocator(Device* device,
+                                          uint32_t descriptorCount,
+                                          uint32_t allocationsPerHeap)
+        : mAllocator(device,
+                     descriptorCount,
+                     allocationsPerHeap * descriptorCount,
+                     D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER) {}
+
+    CPUDescriptorHeapAllocation AllocateCPUDescriptors() {
+        dawn::native::ResultOrError<CPUDescriptorHeapAllocation> result =
+            mAllocator.AllocateCPUDescriptors();
+        return (result.IsSuccess()) ? result.AcquireSuccess() : CPUDescriptorHeapAllocation{};
+    }
+
+    void Deallocate(CPUDescriptorHeapAllocation& allocation) { mAllocator.Deallocate(&allocation); }
+
+  private:
+    StagingDescriptorAllocator mAllocator;
+};
+
+// Verify the shader visible view heaps switch over within a single submit.
+TEST_P(D3D12DescriptorHeapTests, SwitchOverViewHeap) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+
+    // Fill in a view heap with "view only" bindgroups (1x view per group) by creating a
+    // view bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps must switch over.
+    renderPipelineDescriptor.vertex.module = mSimpleVSModule;
+    renderPipelineDescriptor.cFragment.module = mSimpleFSModule;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
+    ShaderVisibleDescriptorAllocator* allocator =
+        d3dDevice->GetViewShaderVisibleDescriptorAllocator();
+    const uint64_t heapSize = allocator->GetShaderVisibleHeapSizeForTesting();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        for (uint32_t i = 0; i < heapSize + 1; ++i) {
+            pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, uniformBuffer, 0, sizeof(redColor)}}));
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), heapSerial + HeapVersionID(1));
+}
+
+// Verify the shader visible sampler heaps does not switch over within a single submit.
+TEST_P(D3D12DescriptorHeapTests, NoSwitchOverSamplerHeap) {
+    utils::ComboRenderPipelineDescriptor renderPipelineDescriptor;
+
+    // Fill in a sampler heap with "sampler only" bindgroups (1x sampler per group) by creating
+    // a sampler bindgroup each draw. After HEAP_SIZE + 1 draws, the heaps WILL NOT switch over
+    // because the sampler heap allocations are de-duplicated.
+    renderPipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
             @stage(vertex) fn main() -> @builtin(position) vec4<f32> {
                 return vec4<f32>(0.0, 0.0, 0.0, 1.0);
             })");
 
-        renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+    renderPipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
             @group(0) @binding(0) var sampler0 : sampler;
             @stage(fragment) fn main() -> @location(0) vec4<f32> {
                 _ = sampler0;
                 return vec4<f32>(0.0, 0.0, 0.0, 0.0);
             })");
 
-        wgpu::RenderPipeline renderPipeline =
-            device.CreateRenderPipeline(&renderPipelineDescriptor);
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&renderPipelineDescriptor);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
-        wgpu::Sampler sampler = device.CreateSampler();
+    wgpu::Sampler sampler = device.CreateSampler();
 
-        Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
-        ShaderVisibleDescriptorAllocator* allocator =
-            d3dDevice->GetSamplerShaderVisibleDescriptorAllocator();
-        const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting();
+    Device* d3dDevice = reinterpret_cast<Device*>(device.Get());
+    ShaderVisibleDescriptorAllocator* allocator =
+        d3dDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    const uint64_t samplerHeapSize = allocator->GetShaderVisibleHeapSizeForTesting();
 
-        const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting();
+    const HeapVersionID HeapVersionID = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
 
-            pass.SetPipeline(renderPipeline);
+        pass.SetPipeline(renderPipeline);
 
-            for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
-                pass.SetBindGroup(0,
-                                  utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                                       {{0, sampler}}));
-                pass.Draw(3);
-            }
-
-            pass.End();
+        for (uint32_t i = 0; i < samplerHeapSize + 1; ++i) {
+            pass.SetBindGroup(0, utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                      {{0, sampler}}));
+            pass.Draw(3);
         }
 
-        wgpu::CommandBuffer commands = encoder.Finish();
-        queue.Submit(1, &commands);
-
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID);
+        pass.End();
     }
 
-    // Verify shader-visible heaps can be recycled for multiple submits.
-    TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
-        // Use small heaps to count only pool-allocated switches.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
 
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(), HeapVersionID);
+}
 
-        std::list<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+// Verify shader-visible heaps can be recycled for multiple submits.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInMultipleSubmits) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-        // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always
-        // unique.
-        for (uint32_t i = 0; i < kFrameDepth; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.push_back(heap);
-            // CheckPassedSerials() will update the last internally completed serial.
-            EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
-            // NextSerial() will increment the last internally submitted serial.
-            EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
-        }
+    std::list<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
 
-        // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order
-        // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in
-        // the check.
-        for (uint32_t i = 0; i < kFrameDepth + 1; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(heaps.front() == heap);
-            heaps.pop_front();
-            EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
-            EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
-        }
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
 
-        EXPECT_TRUE(heaps.empty());
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth);
+    // Allocate + increment internal serials up to |kFrameDepth| and ensure heaps are always
+    // unique.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.push_back(heap);
+        // CheckPassedSerials() will update the last internally completed serial.
+        EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
+        // NextSerial() will increment the last internally submitted serial.
+        EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
     }
 
-    // Verify shader-visible heaps do not recycle in a pending submit.
-    TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) {
-        // Use small heaps to count only pool-allocated switches.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        constexpr uint32_t kNumOfSwitches = 5;
-
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
-
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
-
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-
-        // Switch-over |kNumOfSwitches| and ensure heaps are always unique.
-        for (uint32_t i = 0; i < kNumOfSwitches; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
-
-        // After |kNumOfSwitches|, no heaps are recycled.
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(kNumOfSwitches));
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+    // Repeat up to |kFrameDepth| again but ensure heaps are the same in the expected order
+    // (oldest heaps are recycled first). The "+ 1" is so we also include the very first heap in
+    // the check.
+    for (uint32_t i = 0; i < kFrameDepth + 1; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(heaps.front() == heap);
+        heaps.pop_front();
+        EXPECT_TRUE(mD3DDevice->CheckPassedSerials().IsSuccess());
+        EXPECT_TRUE(mD3DDevice->NextSerial().IsSuccess());
     }
 
-    // Verify switching shader-visible heaps do not recycle in a pending submit but do so
-    // once no longer pending.
-    TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
-        // Use small heaps to count only pool-allocated switches.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    EXPECT_TRUE(heaps.empty());
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kFrameDepth);
+}
 
-        constexpr uint32_t kNumOfSwitches = 5;
+// Verify shader-visible heaps do not recycle in a pending submit.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingSubmit) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+    constexpr uint32_t kNumOfSwitches = 5;
 
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        // Switch-over |kNumOfSwitches| to create a pool of unique heaps.
-        for (uint32_t i = 0; i < kNumOfSwitches; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
 
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(kNumOfSwitches));
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
 
-        // Ensure switched-over heaps can be recycled by advancing the GPU by at-least
-        // |kFrameDepth|.
-        for (uint32_t i = 0; i < kFrameDepth; i++) {
-            mD3DDevice->APITick();
-        }
-
-        // Switch-over |kNumOfSwitches| again reusing the same heaps.
-        for (uint32_t i = 0; i < kNumOfSwitches; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end());
-            heaps.erase(heap);
-        }
-
-        // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist.
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(kNumOfSwitches * 2));
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+    // Switch-over |kNumOfSwitches| and ensure heaps are always unique.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
     }
 
-    // Verify shader-visible heaps do not recycle in multiple submits.
-    TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) {
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    // After |kNumOfSwitches|, no heaps are recycled.
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+}
 
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+// Verify switching shader-visible heaps do not recycle in a pending submit but do so
+// once no longer pending.
+TEST_P(D3D12DescriptorHeapTests, PoolHeapsInPendingAndMultipleSubmits) {
+    // Use small heaps to count only pool-allocated switches.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+    constexpr uint32_t kNumOfSwitches = 5;
 
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        // Growth: Allocate + Tick() and ensure heaps are always unique.
-        while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-            mD3DDevice->APITick();
-        }
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
 
-        // Verify the number of switches equals the size of heaps allocated (minus the initial).
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(heaps.size() - 1));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Switch-over |kNumOfSwitches| to create a pool of unique heaps.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
     }
 
-    // Verify shader-visible heaps do not recycle in a pending submit.
-    TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) {
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
 
-        const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
-
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-
-        // Growth: Allocate new heaps.
-        while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
-
-        // Verify the number of switches equals the size of heaps allocated (minus the initial).
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
-        EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
-                  heapSerial + HeapVersionID(heaps.size() - 1));
+    // Ensure switched-over heaps can be recycled by advancing the GPU by at-least
+    // |kFrameDepth|.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        mD3DDevice->APITick();
     }
 
-    // Verify switching shader-visible heaps do not recycle in a pending submit but do so
-    // once no longer pending.
-    // Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated.
-    TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
-        ShaderVisibleDescriptorAllocator* allocator =
-            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
-
-        std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-
-        uint32_t kNumOfPooledHeaps = 5;
-        while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-            heaps.insert(heap);
-        }
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
-
-        // Ensure switched-over heaps can be recycled by advancing the GPU by at-least
-        // |kFrameDepth|.
-        for (uint32_t i = 0; i < kFrameDepth; i++) {
-            mD3DDevice->APITick();
-        }
-
-        // Switch-over the pool-allocated heaps.
-        for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) {
-            EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
-            ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
-            EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
-        }
-
-        EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+    // Switch-over |kNumOfSwitches| again reusing the same heaps.
+    for (uint32_t i = 0; i < kNumOfSwitches; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) != heaps.end());
+        heaps.erase(heap);
     }
 
-    // Verify encoding multiple heaps worth of bindgroups.
-    // Shader-visible heaps will switch out |kNumOfHeaps| times.
-    TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
-        // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup
-        // that has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize|
-        // draws, the result is the arithmetic sum of the sequence after the framebuffer is blended
-        // by accumulation. By checking for this sum, we ensure each bindgroup was encoded
-        // correctly.
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    // After switching-over |kNumOfSwitches| x 2, ensure no additional heaps exist.
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(kNumOfSwitches * 2));
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfSwitches);
+}
 
-        utils::BasicRenderPass renderPass =
-            MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float);
+// Verify shader-visible heaps do not recycle in multiple submits.
+TEST_P(D3D12DescriptorHeapTests, GrowHeapsInMultipleSubmits) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
 
-        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Growth: Allocate + Tick() and ensure heaps are always unique.
+    while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+        mD3DDevice->APITick();
+    }
+
+    // Verify the number of switches equals the size of heaps allocated (minus the initial).
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(heaps.size() - 1));
+}
+
+// Verify shader-visible heaps do not recycle in a pending submit.
+TEST_P(D3D12DescriptorHeapTests, GrowHeapsInPendingSubmit) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    const HeapVersionID heapSerial = allocator->GetShaderVisibleHeapSerialForTesting();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    // Growth: Allocate new heaps.
+    while (allocator->GetShaderVisiblePoolSizeForTesting() == 0) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    // Verify the number of switches equals the size of heaps allocated (minus the initial).
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 1u);
+    EXPECT_EQ(allocator->GetShaderVisibleHeapSerialForTesting(),
+              heapSerial + HeapVersionID(heaps.size() - 1));
+}
+
+// Verify switching shader-visible heaps do not recycle in a pending submit but do so
+// once no longer pending.
+// Switches over many times until |kNumOfPooledHeaps| heaps are pool-allocated.
+TEST_P(D3D12DescriptorHeapTests, GrowAndPoolHeapsInPendingAndMultipleSubmits) {
+    ShaderVisibleDescriptorAllocator* allocator =
+        mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+
+    std::set<ComPtr<ID3D12DescriptorHeap>> heaps = {allocator->GetShaderVisibleHeap()};
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+
+    uint32_t kNumOfPooledHeaps = 5;
+    while (allocator->GetShaderVisiblePoolSizeForTesting() < kNumOfPooledHeaps) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_TRUE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+        heaps.insert(heap);
+    }
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+
+    // Ensure switched-over heaps can be recycled by advancing the GPU by at-least
+    // |kFrameDepth|.
+    for (uint32_t i = 0; i < kFrameDepth; i++) {
+        mD3DDevice->APITick();
+    }
+
+    // Switch-over the pool-allocated heaps.
+    for (uint32_t i = 0; i < kNumOfPooledHeaps; i++) {
+        EXPECT_TRUE(allocator->AllocateAndSwitchShaderVisibleHeap().IsSuccess());
+        ComPtr<ID3D12DescriptorHeap> heap = allocator->GetShaderVisibleHeap();
+        EXPECT_FALSE(std::find(heaps.begin(), heaps.end(), heap) == heaps.end());
+    }
+
+    EXPECT_EQ(allocator->GetShaderVisiblePoolSizeForTesting(), kNumOfPooledHeaps);
+}
+
+// Verify encoding multiple heaps worth of bindgroups.
+// Shader-visible heaps will switch out |kNumOfHeaps| times.
+TEST_P(D3D12DescriptorHeapTests, EncodeManyUBO) {
+    // This test draws a solid color triangle |heapSize| times. Each draw uses a new bindgroup
+    // that has its own UBO with a "color value" in the range [1... heapSize]. After |heapSize|
+    // draws, the result is the arithmetic sum of the sequence after the framebuffer is blended
+    // by accumulation. By checking for this sum, we ensure each bindgroup was encoded
+    // correctly.
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::BasicRenderPass renderPass =
+        MakeRenderPass(kRTSize, kRTSize, wgpu::TextureFormat::R16Float);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+
+    pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
         struct U {
             heapSize : f32
         }
@@ -464,42 +457,130 @@
             return vec4<f32>(buffer0.heapSize, 0.0, 0.0, 1.0);
         })");
 
-        wgpu::BlendState blend;
-        blend.color.operation = wgpu::BlendOperation::Add;
-        blend.color.srcFactor = wgpu::BlendFactor::One;
-        blend.color.dstFactor = wgpu::BlendFactor::One;
-        blend.alpha.operation = wgpu::BlendOperation::Add;
-        blend.alpha.srcFactor = wgpu::BlendFactor::One;
-        blend.alpha.dstFactor = wgpu::BlendFactor::One;
+    wgpu::BlendState blend;
+    blend.color.operation = wgpu::BlendOperation::Add;
+    blend.color.srcFactor = wgpu::BlendFactor::One;
+    blend.color.dstFactor = wgpu::BlendFactor::One;
+    blend.alpha.operation = wgpu::BlendOperation::Add;
+    blend.alpha.srcFactor = wgpu::BlendFactor::One;
+    blend.alpha.dstFactor = wgpu::BlendFactor::One;
 
-        pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float;
-        pipelineDescriptor.cTargets[0].blend = &blend;
+    pipelineDescriptor.cTargets[0].format = wgpu::TextureFormat::R16Float;
+    pipelineDescriptor.cTargets[0].blend = &blend;
 
-        wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
 
-        const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    constexpr uint32_t kNumOfHeaps = 2;
+
+    const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize;
+
+    std::vector<wgpu::BindGroup> bindGroups;
+    for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) {
+        const float color = i + 1;
+        wgpu::Buffer uniformBuffer =
+            utils::CreateBufferFromData(device, &color, sizeof(color), wgpu::BufferUsage::Uniform);
+        bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer}}));
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(renderPipeline);
+
+        for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2;
+    EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0);
+}
+
+// Verify encoding one bindgroup then a heaps worth in different submits.
+// Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors.
+// The first descriptor's memory will be reused when the second submit encodes |heapSize|
+// descriptors.
+TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP.
+    DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    // Encode the first descriptor and submit.
+    {
+        std::array<float, 4> greenColor = {0, 1, 0, 1};
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform);
+
+        wgpu::BindGroup bindGroup = utils::MakeBindGroup(
+            device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}});
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        {
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+            pass.SetPipeline(renderPipeline);
+            pass.SetBindGroup(0, bindGroup);
+            pass.Draw(3);
+            pass.End();
+        }
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+    }
+
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+
+    // Encode a heap worth of descriptors.
+    {
+        const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator()
                                       ->GetShaderVisibleHeapSizeForTesting();
 
-        constexpr uint32_t kNumOfHeaps = 2;
-
-        const uint32_t numOfEncodedBindGroups = kNumOfHeaps * heapSize;
-
         std::vector<wgpu::BindGroup> bindGroups;
-        for (uint32_t i = 0; i < numOfEncodedBindGroups; i++) {
-            const float color = i + 1;
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(device, &color, sizeof(color),
-                                                                     wgpu::BufferUsage::Uniform);
+        for (uint32_t i = 0; i < heapSize - 1; i++) {
+            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+
             bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
                                                       {{0, uniformBuffer}}));
         }
 
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
+                                                  {{0, lastUniformBuffer, 0, sizeof(redColor)}}));
+
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         {
             wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
 
             pass.SetPipeline(renderPipeline);
 
-            for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            for (uint32_t i = 0; i < heapSize; ++i) {
                 pass.SetBindGroup(0, bindGroups[i]);
                 pass.Draw(3);
             }
@@ -509,135 +590,133 @@
 
         wgpu::CommandBuffer commands = encoder.Finish();
         queue.Submit(1, &commands);
-
-        float colorSum = numOfEncodedBindGroups * (numOfEncodedBindGroups + 1) / 2;
-        EXPECT_PIXEL_FLOAT16_EQ(colorSum, renderPass.color, 0, 0);
     }
 
-    // Verify encoding one bindgroup then a heaps worth in different submits.
-    // Shader-visible heaps should switch out once upon encoding 1 + |heapSize| descriptors.
-    // The first descriptor's memory will be reused when the second submit encodes |heapSize|
-    // descriptors.
-    TEST_P(D3D12DescriptorHeapTests, EncodeUBOOverflowMultipleSubmit) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
 
-        // TODO(crbug.com/dawn/742): Test output is wrong with D3D12 + WARP.
-        DAWN_SUPPRESS_TEST_IF(IsD3D12() && IsWARP());
+// Verify encoding a heap's worth of bindgroups plus one more, then reuse the first
+// bindgroup in the same submit.
+// Shader-visible heaps should switch out once then re-encode the first descriptor at a new
+// offset in the heap.
+TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
 
-        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
-        pipelineDescriptor.cFragment.module = mSimpleFSModule;
-        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
 
-        wgpu::RenderPipeline renderPipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
 
-        // Encode the first descriptor and submit.
+    std::array<float, 4> redColor = {1, 0, 0, 1};
+    wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
+        device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+    std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
+        device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
+
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    for (uint32_t i = 0; i < heapSize; i++) {
+        const std::array<float, 4>& fillColor = GetSolidColor(i + 1);  // Avoid black
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer, 0, sizeof(fillColor)}}));
+    }
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(pipeline);
+
+        // Encode a heap worth of descriptors plus one more.
+        for (uint32_t i = 0; i < heapSize + 1; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        // Re-encode the first bindgroup again.
+        pass.SetBindGroup(0, bindGroups[0]);
+        pass.Draw(3);
+
+        pass.End();
+    }
+
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Make sure the first bindgroup was encoded correctly.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+}
+
+// Verify encoding a heap's worth of bindgroups plus one more in the first submit, then reuse the
+// first bindgroup again in the second submit.
+// Shader-visible heaps should switch out once then re-encode the
+// first descriptor at the same offset in the heap.
+TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+
+    utils::ComboRenderPipelineDescriptor pipelineDescriptor;
+    pipelineDescriptor.vertex.module = mSimpleVSModule;
+    pipelineDescriptor.cFragment.module = mSimpleFSModule;
+    pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+
+    wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+
+    // Encode a heap's worth of descriptors plus one more.
+    std::array<float, 4> redColor = {1, 0, 0, 1};
+
+    wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
+        device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+    std::vector<wgpu::BindGroup> bindGroups = {utils::MakeBindGroup(
+        device, pipeline.GetBindGroupLayout(0), {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
+
+    const uint32_t heapSize =
+        mD3DDevice->GetViewShaderVisibleDescriptorAllocator()->GetShaderVisibleHeapSizeForTesting();
+
+    for (uint32_t i = 0; i < heapSize; i++) {
+        std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+        wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+            device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, uniformBuffer, 0, sizeof(fillColor)}}));
+    }
+
+    {
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         {
-            std::array<float, 4> greenColor = {0, 1, 0, 1};
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &greenColor, sizeof(greenColor), wgpu::BufferUsage::Uniform);
+            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
 
-            wgpu::BindGroup bindGroup = utils::MakeBindGroup(
-                device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}});
+            pass.SetPipeline(pipeline);
 
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(renderPipeline);
-                pass.SetBindGroup(0, bindGroup);
+            for (uint32_t i = 0; i < heapSize + 1; ++i) {
+                pass.SetBindGroup(0, bindGroups[i]);
                 pass.Draw(3);
-                pass.End();
             }
 
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
+            pass.End();
         }
 
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
-
-        // Encode a heap worth of descriptors.
-        {
-            const uint32_t heapSize = mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator()
-                                          ->GetShaderVisibleHeapSizeForTesting();
-
-            std::vector<wgpu::BindGroup> bindGroups;
-            for (uint32_t i = 0; i < heapSize - 1; i++) {
-                std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
-                wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                    device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
-
-                bindGroups.push_back(utils::MakeBindGroup(
-                    device, renderPipeline.GetBindGroupLayout(0), {{0, uniformBuffer}}));
-            }
-
-            std::array<float, 4> redColor = {1, 0, 0, 1};
-            wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
-                device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-            bindGroups.push_back(
-                utils::MakeBindGroup(device, renderPipeline.GetBindGroupLayout(0),
-                                     {{0, lastUniformBuffer, 0, sizeof(redColor)}}));
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(renderPipeline);
-
-                for (uint32_t i = 0; i < heapSize; ++i) {
-                    pass.SetBindGroup(0, bindGroups[i]);
-                    pass.Draw(3);
-                }
-
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
     }
 
-    // Verify encoding a heaps worth of bindgroups plus one more then reuse the first
-    // bindgroup in the same submit.
-    // Shader-visible heaps should switch out once then re-encode the first descriptor at a new
-    // offset in the heap.
-    TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOOverflow) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
-
-        utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
-        pipelineDescriptor.cFragment.module = mSimpleFSModule;
-        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
-
-        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
-
-        std::array<float, 4> redColor = {1, 0, 0, 1};
-        wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
-            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-        std::vector<wgpu::BindGroup> bindGroups = {
-            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                 {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
-
-        const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
-                                      ->GetShaderVisibleHeapSizeForTesting();
-
-        for (uint32_t i = 0; i < heapSize; i++) {
-            const std::array<float, 4>& fillColor = GetSolidColor(i + 1);  // Avoid black
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
-            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                                      {{0, uniformBuffer, 0, sizeof(fillColor)}}));
-        }
+    // Re-encode the first bindgroup again.
+    {
+        std::array<float, 4> greenColor = {0, 1, 0, 1};
+        queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor));
 
         wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
         {
@@ -645,13 +724,6 @@
 
             pass.SetPipeline(pipeline);
 
-            // Encode a heap worth of descriptors plus one more.
-            for (uint32_t i = 0; i < heapSize + 1; ++i) {
-                pass.SetBindGroup(0, bindGroups[i]);
-                pass.Draw(3);
-            }
-
-            // Re-encode the first bindgroup again.
             pass.SetBindGroup(0, bindGroups[0]);
             pass.Draw(3);
 
@@ -660,137 +732,55 @@
 
         wgpu::CommandBuffer commands = encoder.Finish();
         queue.Submit(1, &commands);
-
-        // Make sure the first bindgroup was encoded correctly.
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kRed, renderPass.color, 0, 0);
     }
 
-    // Verify encoding a heaps worth of bindgroups plus one more in the first submit then reuse the
-    // first bindgroup again in the second submit.
-    // Shader-visible heaps should switch out once then re-encode the
-    // first descriptor at the same offset in the heap.
-    TEST_P(D3D12DescriptorHeapTests, EncodeReuseUBOMultipleSubmits) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
+    // Make sure the first bindgroup was re-encoded correctly.
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
+}
 
-        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+// Verify encoding many samplers' and UBOs' worth of bindgroups.
+// Shader-visible heaps should switch out |kNumOfViewHeaps| times.
+TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
+    DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
+        dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
 
+    // Create a solid filled texture.
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = kRTSize;
+    descriptor.size.height = kRTSize;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::RenderAttachment |
+                       wgpu::TextureUsage::CopySrc;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    wgpu::TextureView textureView = texture.CreateView();
+
+    {
+        utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture);
+
+        utils::ComboRenderPassDescriptor renderPassDesc({textureView});
+        renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
+        renderPass.renderPassInfo.cColorAttachments[0].view = textureView;
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        auto pass = encoder.BeginRenderPass(&renderPassDesc);
+        pass.End();
+
+        wgpu::CommandBuffer commandBuffer = encoder.Finish();
+        queue.Submit(1, &commandBuffer);
+
+        RGBA8 filled(0, 255, 0, 255);
+        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
+    }
+
+    {
         utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-        pipelineDescriptor.vertex.module = mSimpleVSModule;
-        pipelineDescriptor.cFragment.module = mSimpleFSModule;
-        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
 
-        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
-
-        // Encode heap worth of descriptors plus one more.
-        std::array<float, 4> redColor = {1, 0, 0, 1};
-
-        wgpu::Buffer firstUniformBuffer = utils::CreateBufferFromData(
-            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-        std::vector<wgpu::BindGroup> bindGroups = {
-            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                 {{0, firstUniformBuffer, 0, sizeof(redColor)}})};
-
-        const uint32_t heapSize = mD3DDevice->GetViewShaderVisibleDescriptorAllocator()
-                                      ->GetShaderVisibleHeapSizeForTesting();
-
-        for (uint32_t i = 0; i < heapSize; i++) {
-            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
-            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
-
-            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                                      {{0, uniformBuffer, 0, sizeof(fillColor)}}));
-        }
-
-        {
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(pipeline);
-
-                for (uint32_t i = 0; i < heapSize + 1; ++i) {
-                    pass.SetBindGroup(0, bindGroups[i]);
-                    pass.Draw(3);
-                }
-
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Re-encode the first bindgroup again.
-        {
-            std::array<float, 4> greenColor = {0, 1, 0, 1};
-            queue.WriteBuffer(firstUniformBuffer, 0, &greenColor, sizeof(greenColor));
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            {
-                wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-                pass.SetPipeline(pipeline);
-
-                pass.SetBindGroup(0, bindGroups[0]);
-                pass.Draw(3);
-
-                pass.End();
-            }
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Make sure the first bindgroup was re-encoded correctly.
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8::kGreen, renderPass.color, 0, 0);
-    }
-
-    // Verify encoding many sampler and ubo worth of bindgroups.
-    // Shader-visible heaps should switch out |kNumOfViewHeaps| times.
-    TEST_P(D3D12DescriptorHeapTests, EncodeManyUBOAndSamplers) {
-        DAWN_TEST_UNSUPPORTED_IF(!mD3DDevice->IsToggleEnabled(
-            dawn::native::Toggle::UseD3D12SmallShaderVisibleHeapForTesting));
-
-        // Create a solid filled texture.
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = kRTSize;
-        descriptor.size.height = kRTSize;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding |
-                           wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        wgpu::TextureView textureView = texture.CreateView();
-
-        {
-            utils::BasicRenderPass renderPass = utils::BasicRenderPass(kRTSize, kRTSize, texture);
-
-            utils::ComboRenderPassDescriptor renderPassDesc({textureView});
-            renderPassDesc.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
-            renderPassDesc.cColorAttachments[0].clearValue = {0.0f, 1.0f, 0.0f, 1.0f};
-            renderPass.renderPassInfo.cColorAttachments[0].view = textureView;
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            auto pass = encoder.BeginRenderPass(&renderPassDesc);
-            pass.End();
-
-            wgpu::CommandBuffer commandBuffer = encoder.Finish();
-            queue.Submit(1, &commandBuffer);
-
-            RGBA8 filled(0, 255, 0, 255);
-            EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
-        }
-
-        {
-            utils::ComboRenderPipelineDescriptor pipelineDescriptor;
-
-            pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
+        pipelineDescriptor.vertex.module = utils::CreateShaderModule(device, R"(
             struct U {
                 transform : mat2x2<f32>
             }
@@ -806,7 +796,7 @@
                 );
                 return vec4<f32>(buffer0.transform * (pos[VertexIndex]), 0.0, 1.0);
             })");
-            pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
+        pipelineDescriptor.cFragment.module = utils::CreateShaderModule(device, R"(
             struct U {
                 color : vec4<f32>
             }
@@ -820,251 +810,247 @@
                 return textureSample(texture0, sampler0, FragCoord.xy) + buffer0.color;
             })");
 
-            utils::BasicRenderPass renderPass =
-                utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
-            pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
+        utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(device, kRTSize, kRTSize);
+        pipelineDescriptor.cTargets[0].format = renderPass.colorFormat;
 
-            wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDescriptor);
 
-            // Encode a heap worth of descriptors |kNumOfHeaps| times.
-            constexpr float transform[] = {1.f, 0.f, 0.f, 1.f};
-            wgpu::Buffer transformBuffer = utils::CreateBufferFromData(
-                device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform);
+        // Encode a heap worth of descriptors |kNumOfHeaps| times.
+        constexpr float transform[] = {1.f, 0.f, 0.f, 1.f};
+        wgpu::Buffer transformBuffer = utils::CreateBufferFromData(
+            device, &transform, sizeof(transform), wgpu::BufferUsage::Uniform);
 
-            wgpu::SamplerDescriptor samplerDescriptor;
-            wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor);
+        wgpu::SamplerDescriptor samplerDescriptor;
+        wgpu::Sampler sampler = device.CreateSampler(&samplerDescriptor);
 
-            ShaderVisibleDescriptorAllocator* viewAllocator =
-                mD3DDevice->GetViewShaderVisibleDescriptorAllocator();
+        ShaderVisibleDescriptorAllocator* viewAllocator =
+            mD3DDevice->GetViewShaderVisibleDescriptorAllocator();
 
-            ShaderVisibleDescriptorAllocator* samplerAllocator =
-                mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
+        ShaderVisibleDescriptorAllocator* samplerAllocator =
+            mD3DDevice->GetSamplerShaderVisibleDescriptorAllocator();
 
-            const HeapVersionID viewHeapSerial =
-                viewAllocator->GetShaderVisibleHeapSerialForTesting();
-            const HeapVersionID samplerHeapSerial =
-                samplerAllocator->GetShaderVisibleHeapSerialForTesting();
+        const HeapVersionID viewHeapSerial = viewAllocator->GetShaderVisibleHeapSerialForTesting();
+        const HeapVersionID samplerHeapSerial =
+            samplerAllocator->GetShaderVisibleHeapSerialForTesting();
 
-            const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting();
+        const uint32_t viewHeapSize = viewAllocator->GetShaderVisibleHeapSizeForTesting();
 
-            // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per
-            // group. This means the count of heaps switches is determined by the total number of
-            // views to encode. Compute the number of bindgroups to encode by counting the required
-            // views for |kNumOfViewHeaps| heaps worth.
-            constexpr uint32_t kViewsPerBindGroup = 3;
-            constexpr uint32_t kNumOfViewHeaps = 5;
+        // "Small" view heap is always 2 x sampler heap size and encodes 3x the descriptors per
+        // group. This means the count of heaps switches is determined by the total number of
+        // views to encode. Compute the number of bindgroups to encode by counting the required
+        // views for |kNumOfViewHeaps| heaps worth.
+        constexpr uint32_t kViewsPerBindGroup = 3;
+        constexpr uint32_t kNumOfViewHeaps = 5;
 
-            const uint32_t numOfEncodedBindGroups =
-                (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup;
+        const uint32_t numOfEncodedBindGroups =
+            (viewHeapSize * kNumOfViewHeaps) / kViewsPerBindGroup;
 
-            std::vector<wgpu::BindGroup> bindGroups;
-            for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) {
-                std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
-                wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
-                    device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
+        std::vector<wgpu::BindGroup> bindGroups;
+        for (uint32_t i = 0; i < numOfEncodedBindGroups - 1; i++) {
+            std::array<float, 4> fillColor = GetSolidColor(i + 1);  // Avoid black
+            wgpu::Buffer uniformBuffer = utils::CreateBufferFromData(
+                device, &fillColor, sizeof(fillColor), wgpu::BufferUsage::Uniform);
 
-                bindGroups.push_back(
-                    utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                         {{0, transformBuffer, 0, sizeof(transform)},
-                                          {1, sampler},
-                                          {2, textureView},
-                                          {3, uniformBuffer, 0, sizeof(fillColor)}}));
-            }
-
-            std::array<float, 4> redColor = {1, 0, 0, 1};
-            wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
-                device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
-
-            bindGroups.push_back(
-                utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
-                                     {{0, transformBuffer, 0, sizeof(transform)},
-                                      {1, sampler},
-                                      {2, textureView},
-                                      {3, lastUniformBuffer, 0, sizeof(redColor)}}));
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-
-            pass.SetPipeline(pipeline);
-
-            for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
-                pass.SetBindGroup(0, bindGroups[i]);
-                pass.Draw(3);
-            }
-
-            pass.End();
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-
-            // Final accumulated color is result of sampled + UBO color.
-            RGBA8 filled(255, 255, 0, 255);
-            RGBA8 notFilled(0, 0, 0, 0);
-            EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
-            EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0);
-
-            EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps);
-            EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(),
-                      viewHeapSerial + HeapVersionID(kNumOfViewHeaps));
-
-            EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u);
-            EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial);
+            bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                      {{0, transformBuffer, 0, sizeof(transform)},
+                                                       {1, sampler},
+                                                       {2, textureView},
+                                                       {3, uniformBuffer, 0, sizeof(fillColor)}}));
         }
+
+        std::array<float, 4> redColor = {1, 0, 0, 1};
+        wgpu::Buffer lastUniformBuffer = utils::CreateBufferFromData(
+            device, &redColor, sizeof(redColor), wgpu::BufferUsage::Uniform);
+
+        bindGroups.push_back(utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0),
+                                                  {{0, transformBuffer, 0, sizeof(transform)},
+                                                   {1, sampler},
+                                                   {2, textureView},
+                                                   {3, lastUniformBuffer, 0, sizeof(redColor)}}));
+
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+
+        pass.SetPipeline(pipeline);
+
+        for (uint32_t i = 0; i < numOfEncodedBindGroups; ++i) {
+            pass.SetBindGroup(0, bindGroups[i]);
+            pass.Draw(3);
+        }
+
+        pass.End();
+
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
+
+        // Final accumulated color is result of sampled + UBO color.
+        RGBA8 filled(255, 255, 0, 255);
+        RGBA8 notFilled(0, 0, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(filled, renderPass.color, 0, 0);
+        EXPECT_PIXEL_RGBA8_EQ(notFilled, renderPass.color, kRTSize - 1, 0);
+
+        EXPECT_EQ(viewAllocator->GetShaderVisiblePoolSizeForTesting(), kNumOfViewHeaps);
+        EXPECT_EQ(viewAllocator->GetShaderVisibleHeapSerialForTesting(),
+                  viewHeapSerial + HeapVersionID(kNumOfViewHeaps));
+
+        EXPECT_EQ(samplerAllocator->GetShaderVisiblePoolSizeForTesting(), 0u);
+        EXPECT_EQ(samplerAllocator->GetShaderVisibleHeapSerialForTesting(), samplerHeapSerial);
+    }
+}
+
+// Verify a single allocate/deallocate.
+// One non-shader visible heap will be created.
+TEST_P(D3D12DescriptorHeapTests, Single) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 3;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+    EXPECT_EQ(allocation.GetHeapIndex(), 0u);
+    EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+
+    allocator.Deallocate(allocation);
+    EXPECT_FALSE(allocation.IsValid());
+}
+
+// Verify allocating many times causes the pool to increase in size.
+// Creates |kNumOfHeaps| non-shader visible heaps.
+TEST_P(D3D12DescriptorHeapTests, Sequential) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 3;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    // Allocate |kNumOfHeaps| worth.
+    constexpr uint32_t kNumOfHeaps = 2;
+
+    std::set<uint32_t> allocatedHeaps;
+
+    std::vector<CPUDescriptorHeapAllocation> allocations;
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap);
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+        allocatedHeaps.insert(allocation.GetHeapIndex());
     }
 
-    // Verify a single allocate/deallocate.
-    // One non-shader visible heap will be created.
-    TEST_P(D3D12DescriptorHeapTests, Single) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 3;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
+    EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps);
 
+    // Deallocate all.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
+
+// Verify that re-allocating a number of allocations < pool size, all heaps are reused.
+// Creates and reuses |kNumofHeaps| non-shader visible heaps.
+TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 25;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    constexpr uint32_t kNumofHeaps = 10;
+
+    std::list<CPUDescriptorHeapAllocation> allocations;
+    std::set<size_t> allocationPtrs;
+
+    // Allocate |kNumofHeaps| heaps worth.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
         CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-        EXPECT_EQ(allocation.GetHeapIndex(), 0u);
-        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+        EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
+    }
 
+    // Deallocate all.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
         allocator.Deallocate(allocation);
         EXPECT_FALSE(allocation.IsValid());
     }
 
-    // Verify allocating many times causes the pool to increase in size.
-    // Creates |kNumOfHeaps| non-shader visible heaps.
-    TEST_P(D3D12DescriptorHeapTests, Sequential) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 3;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
+    allocations.clear();
 
-        // Allocate |kNumOfHeaps| worth.
-        constexpr uint32_t kNumOfHeaps = 2;
+    // Re-allocate all again.
+    std::set<size_t> reallocatedPtrs;
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        allocations.push_back(allocation);
+        EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
+        EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(),
+                              allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end());
+    }
 
-        std::set<uint32_t> allocatedHeaps;
+    // Deallocate all again.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
 
-        std::vector<CPUDescriptorHeapAllocation> allocations;
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumOfHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_EQ(allocation.GetHeapIndex(), i / kAllocationsPerHeap);
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+// Verify allocating then deallocating many times.
+TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
+    constexpr uint32_t kDescriptorCount = 4;
+    constexpr uint32_t kAllocationsPerHeap = 25;
+    PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
+                                                    kAllocationsPerHeap);
+
+    std::list<CPUDescriptorHeapAllocation> list3;
+    std::list<CPUDescriptorHeapAllocation> list5;
+    std::list<CPUDescriptorHeapAllocation> allocations;
+
+    constexpr uint32_t kNumofHeaps = 2;
+
+    // Allocate |kNumofHeaps| heaps worth.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        if (i % 3 == 0) {
+            list3.push_back(allocation);
+        } else {
             allocations.push_back(allocation);
-            allocatedHeaps.insert(allocation.GetHeapIndex());
-        }
-
-        EXPECT_EQ(allocatedHeaps.size(), kNumOfHeaps);
-
-        // Deallocate all.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
         }
     }
 
-    // Verify that re-allocating a number of allocations < pool size, all heaps are reused.
-    // Creates and reuses |kNumofHeaps| non-shader visible heaps.
-    TEST_P(D3D12DescriptorHeapTests, ReuseFreedHeaps) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 25;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
+    // Deallocate every 3rd allocation.
+    for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) {
+        allocator.Deallocate(*it);
+    }
 
-        constexpr uint32_t kNumofHeaps = 10;
-
-        std::list<CPUDescriptorHeapAllocation> allocations;
-        std::set<size_t> allocationPtrs;
-
-        // Allocate |kNumofHeaps| heaps worth.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+    // Allocate again.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        if (i % 5 == 0) {
+            list5.push_back(allocation);
+        } else {
             allocations.push_back(allocation);
-            EXPECT_TRUE(allocationPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
-        }
-
-        // Deallocate all.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
-        }
-
-        allocations.clear();
-
-        // Re-allocate all again.
-        std::set<size_t> reallocatedPtrs;
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            allocations.push_back(allocation);
-            EXPECT_TRUE(reallocatedPtrs.insert(allocation.OffsetFrom(0, 0).ptr).second);
-            EXPECT_TRUE(std::find(allocationPtrs.begin(), allocationPtrs.end(),
-                                  allocation.OffsetFrom(0, 0).ptr) != allocationPtrs.end());
-        }
-
-        // Deallocate all again.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
         }
     }
 
-    // Verify allocating then deallocating many times.
-    TEST_P(D3D12DescriptorHeapTests, AllocateDeallocateMany) {
-        constexpr uint32_t kDescriptorCount = 4;
-        constexpr uint32_t kAllocationsPerHeap = 25;
-        PlaceholderStagingDescriptorAllocator allocator(mD3DDevice, kDescriptorCount,
-                                                        kAllocationsPerHeap);
-
-        std::list<CPUDescriptorHeapAllocation> list3;
-        std::list<CPUDescriptorHeapAllocation> list5;
-        std::list<CPUDescriptorHeapAllocation> allocations;
-
-        constexpr uint32_t kNumofHeaps = 2;
-
-        // Allocate |kNumofHeaps| heaps worth.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
-            if (i % 3 == 0) {
-                list3.push_back(allocation);
-            } else {
-                allocations.push_back(allocation);
-            }
-        }
-
-        // Deallocate every 3rd allocation.
-        for (auto it = list3.begin(); it != list3.end(); it = list3.erase(it)) {
-            allocator.Deallocate(*it);
-        }
-
-        // Allocate again.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
-            if (i % 5 == 0) {
-                list5.push_back(allocation);
-            } else {
-                allocations.push_back(allocation);
-            }
-        }
-
-        // Deallocate every 5th allocation.
-        for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) {
-            allocator.Deallocate(*it);
-        }
-
-        // Allocate again.
-        for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
-            CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
-            EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
-            allocations.push_back(allocation);
-        }
-
-        // Deallocate remaining.
-        for (CPUDescriptorHeapAllocation& allocation : allocations) {
-            allocator.Deallocate(allocation);
-            EXPECT_FALSE(allocation.IsValid());
-        }
+    // Deallocate every 5th allocation.
+    for (auto it = list5.begin(); it != list5.end(); it = list5.erase(it)) {
+        allocator.Deallocate(*it);
     }
 
-    DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
-                          D3D12Backend(),
-                          D3D12Backend({"use_d3d12_small_shader_visible_heap"}));
+    // Allocate again.
+    for (uint32_t i = 0; i < kAllocationsPerHeap * kNumofHeaps; i++) {
+        CPUDescriptorHeapAllocation allocation = allocator.AllocateCPUDescriptors();
+        EXPECT_NE(allocation.OffsetFrom(0, 0).ptr, 0u);
+        allocations.push_back(allocation);
+    }
+
+    // Deallocate remaining.
+    for (CPUDescriptorHeapAllocation& allocation : allocations) {
+        allocator.Deallocate(allocation);
+        EXPECT_FALSE(allocation.IsValid());
+    }
+}
+
+DAWN_INSTANTIATE_TEST(D3D12DescriptorHeapTests,
+                      D3D12Backend(),
+                      D3D12Backend({"use_d3d12_small_shader_visible_heap"}));
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
index b87d564..2bad627 100644
--- a/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
+++ b/src/dawn/tests/white_box/D3D12GPUTimestampCalibrationTests.cpp
@@ -21,100 +21,100 @@
 #include "dawn/utils/WGPUHelpers.h"
 
 namespace dawn::native::d3d12 {
-    namespace {
-        class ExpectBetweenTimestamps : public ::detail::Expectation {
-          public:
-            ~ExpectBetweenTimestamps() override = default;
+namespace {
+class ExpectBetweenTimestamps : public ::detail::Expectation {
+  public:
+    ~ExpectBetweenTimestamps() override = default;
 
-            ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) {
-                mValue0 = value0;
-                mValue1 = value1;
-            }
-
-            // Expect the actual results are between mValue0 and mValue1.
-            testing::AssertionResult Check(const void* data, size_t size) override {
-                const uint64_t* actual = static_cast<const uint64_t*>(data);
-                for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
-                    if (actual[i] < mValue0 || actual[i] > mValue1) {
-                        return testing::AssertionFailure()
-                               << "Expected data[" << i << "] to be between " << mValue0 << " and "
-                               << mValue1 << ", actual " << actual[i] << std::endl;
-                    }
-                }
-
-                return testing::AssertionSuccess();
-            }
-
-          private:
-            uint64_t mValue0;
-            uint64_t mValue1;
-        };
-
-    }  // anonymous namespace
-
-    class D3D12GPUTimestampCalibrationTests : public DawnTest {
-      protected:
-        void SetUp() override {
-            DawnTest::SetUp();
-
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-            // Requires that timestamp query feature is enabled and timestamp query conversion is
-            // disabled.
-            DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) ||
-                                     !HasToggleEnabled("disable_timestamp_query_conversion"));
-        }
-
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            std::vector<wgpu::FeatureName> requiredFeatures = {};
-            if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
-                requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
-            }
-            return requiredFeatures;
-        }
-    };
-
-    // Check that the timestamps got by timestamp query are between the two timestamps from
-    // GetClockCalibration() after the timestamp conversion is disabled.
-    TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
-        constexpr uint32_t kQueryCount = 2;
-
-        wgpu::QuerySetDescriptor querySetDescriptor;
-        querySetDescriptor.count = kQueryCount;
-        querySetDescriptor.type = wgpu::QueryType::Timestamp;
-        wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor);
-
-        wgpu::BufferDescriptor bufferDescriptor;
-        bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
-        bufferDescriptor.usage = wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc |
-                                 wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        encoder.WriteTimestamp(querySet, 0);
-        encoder.WriteTimestamp(querySet, 1);
-        wgpu::CommandBuffer commands = encoder.Finish();
-
-        Device* d3DDevice = reinterpret_cast<Device*>(device.Get());
-        uint64_t gpuTimestamp0, gpuTimestamp1;
-        uint64_t cpuTimestamp0, cpuTimestamp1;
-        d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0);
-        queue.Submit(1, &commands);
-        WaitForAllOperations();
-        d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1);
-
-        // Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp,
-        // so that the timestamp in the querySet will be closer to both gpuTimestamps from
-        // GetClockCalibration.
-        wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
-        resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
-        wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
-        queue.Submit(1, &resolveCommands);
-
-        EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t),
-                      new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1));
+    ExpectBetweenTimestamps(uint64_t value0, uint64_t value1) {
+        mValue0 = value0;
+        mValue1 = value1;
     }
 
-    DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
-                          D3D12Backend({"disable_timestamp_query_conversion"}));
+    // Expect the actual results are between mValue0 and mValue1.
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        const uint64_t* actual = static_cast<const uint64_t*>(data);
+        for (size_t i = 0; i < size / sizeof(uint64_t); ++i) {
+            if (actual[i] < mValue0 || actual[i] > mValue1) {
+                return testing::AssertionFailure()
+                       << "Expected data[" << i << "] to be between " << mValue0 << " and "
+                       << mValue1 << ", actual " << actual[i] << std::endl;
+            }
+        }
+
+        return testing::AssertionSuccess();
+    }
+
+  private:
+    uint64_t mValue0;
+    uint64_t mValue1;
+};
+
+}  // anonymous namespace
+
+class D3D12GPUTimestampCalibrationTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+        // Requires that timestamp query feature is enabled and timestamp query conversion is
+        // disabled.
+        DAWN_TEST_UNSUPPORTED_IF(!SupportsFeatures({wgpu::FeatureName::TimestampQuery}) ||
+                                 !HasToggleEnabled("disable_timestamp_query_conversion"));
+    }
+
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        std::vector<wgpu::FeatureName> requiredFeatures = {};
+        if (SupportsFeatures({wgpu::FeatureName::TimestampQuery})) {
+            requiredFeatures.push_back(wgpu::FeatureName::TimestampQuery);
+        }
+        return requiredFeatures;
+    }
+};
+
+// Check that the timestamps got by timestamp query are between the two timestamps from
+// GetClockCalibration() after the timestamp conversion is disabled.
+TEST_P(D3D12GPUTimestampCalibrationTests, TimestampsInOrder) {
+    constexpr uint32_t kQueryCount = 2;
+
+    wgpu::QuerySetDescriptor querySetDescriptor;
+    querySetDescriptor.count = kQueryCount;
+    querySetDescriptor.type = wgpu::QueryType::Timestamp;
+    wgpu::QuerySet querySet = device.CreateQuerySet(&querySetDescriptor);
+
+    wgpu::BufferDescriptor bufferDescriptor;
+    bufferDescriptor.size = kQueryCount * sizeof(uint64_t);
+    bufferDescriptor.usage =
+        wgpu::BufferUsage::QueryResolve | wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer destination = device.CreateBuffer(&bufferDescriptor);
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.WriteTimestamp(querySet, 0);
+    encoder.WriteTimestamp(querySet, 1);
+    wgpu::CommandBuffer commands = encoder.Finish();
+
+    Device* d3DDevice = reinterpret_cast<Device*>(device.Get());
+    uint64_t gpuTimestamp0, gpuTimestamp1;
+    uint64_t cpuTimestamp0, cpuTimestamp1;
+    d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp0, &cpuTimestamp0);
+    queue.Submit(1, &commands);
+    WaitForAllOperations();
+    d3DDevice->GetCommandQueue()->GetClockCalibration(&gpuTimestamp1, &cpuTimestamp1);
+
+    // Separate resolve queryset to reduce the execution time of the queue with WriteTimestamp,
+    // so that the timestamp in the querySet will be closer to both gpuTimestamps from
+    // GetClockCalibration.
+    wgpu::CommandEncoder resolveEncoder = device.CreateCommandEncoder();
+    resolveEncoder.ResolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+    wgpu::CommandBuffer resolveCommands = resolveEncoder.Finish();
+    queue.Submit(1, &resolveCommands);
+
+    EXPECT_BUFFER(destination, 0, kQueryCount * sizeof(uint64_t),
+                  new ExpectBetweenTimestamps(gpuTimestamp0, gpuTimestamp1));
+}
+
+DAWN_INSTANTIATE_TEST(D3D12GPUTimestampCalibrationTests,
+                      D3D12Backend({"disable_timestamp_query_conversion"}));
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
index 911b2eb..d1a3ff0 100644
--- a/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
+++ b/src/dawn/tests/white_box/D3D12ResourceHeapTests.cpp
@@ -20,91 +20,89 @@
 
 namespace dawn::native::d3d12 {
 
-    class D3D12ResourceHeapTests : public DawnTest {
-      protected:
-        void SetUp() override {
-            DawnTest::SetUp();
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-        }
-
-        std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-            mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
-            if (!mIsBCFormatSupported) {
-                return {};
-            }
-
-            return {wgpu::FeatureName::TextureCompressionBC};
-        }
-
-        bool IsBCFormatSupported() const {
-            return mIsBCFormatSupported;
-        }
-
-      private:
-        bool mIsBCFormatSupported = false;
-    };
-
-    // Verify that creating a small compressed textures will be 4KB aligned.
-    TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) {
-        DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
-
-        // TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment.
-        DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP());
-
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = 8;
-        descriptor.size.height = 8;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = wgpu::TextureUsage::TextureBinding;
-
-        // Create a smaller one that allows use of the smaller alignment.
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-        Texture* d3dTexture = reinterpret_cast<Texture*>(texture.Get());
-
-        EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
-                  static_cast<uint64_t>(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT));
-
-        // Create a larger one (>64KB) that forbids use the smaller alignment.
-        descriptor.size.width = 4096;
-        descriptor.size.height = 4096;
-
-        texture = device.CreateTexture(&descriptor);
-        d3dTexture = reinterpret_cast<Texture*>(texture.Get());
-
-        EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
-                  static_cast<uint64_t>(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT));
+class D3D12ResourceHeapTests : public DawnTest {
+  protected:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
     }
 
-    // Verify creating a UBO will always be 256B aligned.
-    TEST_P(D3D12ResourceHeapTests, AlignUBO) {
-        // Create a small UBO
-        wgpu::BufferDescriptor descriptor;
-        descriptor.size = 4 * 1024;
-        descriptor.usage = wgpu::BufferUsage::Uniform;
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        mIsBCFormatSupported = SupportsFeatures({wgpu::FeatureName::TextureCompressionBC});
+        if (!mIsBCFormatSupported) {
+            return {};
+        }
 
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
-        Buffer* d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
-
-        EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
-                   static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
-                  0u);
-
-        // Create a larger UBO
-        descriptor.size = (4 * 1024 * 1024) + 255;
-        descriptor.usage = wgpu::BufferUsage::Uniform;
-
-        buffer = device.CreateBuffer(&descriptor);
-        d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
-
-        EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
-                   static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
-                  0u);
+        return {wgpu::FeatureName::TextureCompressionBC};
     }
 
-    DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
+    bool IsBCFormatSupported() const { return mIsBCFormatSupported; }
+
+  private:
+    bool mIsBCFormatSupported = false;
+};
+
+// Verify that creating a small compressed textures will be 4KB aligned.
+TEST_P(D3D12ResourceHeapTests, AlignSmallCompressedTexture) {
+    DAWN_TEST_UNSUPPORTED_IF(!IsBCFormatSupported());
+
+    // TODO(http://crbug.com/dawn/282): Investigate GPU/driver rejections of small alignment.
+    DAWN_SUPPRESS_TEST_IF(IsIntel() || IsNvidia() || IsWARP());
+
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = 8;
+    descriptor.size.height = 8;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::BC1RGBAUnorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::TextureBinding;
+
+    // Create a smaller one that allows use of the smaller alignment.
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+    Texture* d3dTexture = reinterpret_cast<Texture*>(texture.Get());
+
+    EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
+              static_cast<uint64_t>(D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT));
+
+    // Create a larger one (>64KB) that forbids use the smaller alignment.
+    descriptor.size.width = 4096;
+    descriptor.size.height = 4096;
+
+    texture = device.CreateTexture(&descriptor);
+    d3dTexture = reinterpret_cast<Texture*>(texture.Get());
+
+    EXPECT_EQ(d3dTexture->GetD3D12Resource()->GetDesc().Alignment,
+              static_cast<uint64_t>(D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT));
+}
+
+// Verify creating a UBO will always be 256B aligned.
+TEST_P(D3D12ResourceHeapTests, AlignUBO) {
+    // Create a small UBO
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = 4 * 1024;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    Buffer* d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
+
+    EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
+               static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
+              0u);
+
+    // Create a larger UBO
+    descriptor.size = (4 * 1024 * 1024) + 255;
+    descriptor.usage = wgpu::BufferUsage::Uniform;
+
+    buffer = device.CreateBuffer(&descriptor);
+    d3dBuffer = reinterpret_cast<Buffer*>(buffer.Get());
+
+    EXPECT_EQ((d3dBuffer->GetD3D12Resource()->GetDesc().Width %
+               static_cast<uint64_t>(D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)),
+              0u);
+}
+
+DAWN_INSTANTIATE_TEST(D3D12ResourceHeapTests, D3D12Backend());
 
 }  // namespace dawn::native::d3d12
diff --git a/src/dawn/tests/white_box/EGLImageWrappingTests.cpp b/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
index ceee355..f20fda0 100644
--- a/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
+++ b/src/dawn/tests/white_box/EGLImageWrappingTests.cpp
@@ -26,92 +26,87 @@
 
 namespace {
 
-    class EGLFunctions {
-      public:
-        EGLFunctions() {
+class EGLFunctions {
+  public:
+    EGLFunctions() {
 #ifdef DAWN_PLATFORM_WINDOWS
-            const char* eglLib = "libEGL.dll";
+        const char* eglLib = "libEGL.dll";
 #else
-            const char* eglLib = "libEGL.so";
+        const char* eglLib = "libEGL.so";
 #endif
-            EXPECT_TRUE(mlibEGL.Open(eglLib));
-            CreateImage = reinterpret_cast<PFNEGLCREATEIMAGEPROC>(LoadProc("eglCreateImage"));
-            DestroyImage = reinterpret_cast<PFNEGLDESTROYIMAGEPROC>(LoadProc("eglDestroyImage"));
-            GetCurrentContext =
-                reinterpret_cast<PFNEGLGETCURRENTCONTEXTPROC>(LoadProc("eglGetCurrentContext"));
-            GetCurrentDisplay =
-                reinterpret_cast<PFNEGLGETCURRENTDISPLAYPROC>(LoadProc("eglGetCurrentDisplay"));
+        EXPECT_TRUE(mlibEGL.Open(eglLib));
+        CreateImage = reinterpret_cast<PFNEGLCREATEIMAGEPROC>(LoadProc("eglCreateImage"));
+        DestroyImage = reinterpret_cast<PFNEGLDESTROYIMAGEPROC>(LoadProc("eglDestroyImage"));
+        GetCurrentContext =
+            reinterpret_cast<PFNEGLGETCURRENTCONTEXTPROC>(LoadProc("eglGetCurrentContext"));
+        GetCurrentDisplay =
+            reinterpret_cast<PFNEGLGETCURRENTDISPLAYPROC>(LoadProc("eglGetCurrentDisplay"));
+    }
+
+  private:
+    void* LoadProc(const char* name) {
+        void* proc = mlibEGL.GetProc(name);
+        EXPECT_NE(proc, nullptr);
+        return proc;
+    }
+
+  public:
+    PFNEGLCREATEIMAGEPROC CreateImage;
+    PFNEGLDESTROYIMAGEPROC DestroyImage;
+    PFNEGLGETCURRENTCONTEXTPROC GetCurrentContext;
+    PFNEGLGETCURRENTDISPLAYPROC GetCurrentDisplay;
+
+  private:
+    DynamicLib mlibEGL;
+};
+
+class ScopedEGLImage {
+  public:
+    ScopedEGLImage(PFNEGLDESTROYIMAGEPROC destroyImage,
+                   PFNGLDELETETEXTURESPROC deleteTextures,
+                   EGLDisplay display,
+                   EGLImage image,
+                   GLuint texture)
+        : mDestroyImage(destroyImage),
+          mDeleteTextures(deleteTextures),
+          mDisplay(display),
+          mImage(image),
+          mTexture(texture) {}
+
+    ScopedEGLImage(ScopedEGLImage&& other) {
+        if (mImage != nullptr) {
+            mDestroyImage(mDisplay, mImage);
         }
-
-      private:
-        void* LoadProc(const char* name) {
-            void* proc = mlibEGL.GetProc(name);
-            EXPECT_NE(proc, nullptr);
-            return proc;
+        if (mTexture != 0) {
+            mDeleteTextures(1, &mTexture);
         }
+        mDestroyImage = std::move(other.mDestroyImage);
+        mDeleteTextures = std::move(other.mDeleteTextures);
+        mDisplay = std::move(other.mDisplay);
+        mImage = std::move(other.mImage);
+        mTexture = std::move(other.mTexture);
+    }
 
-      public:
-        PFNEGLCREATEIMAGEPROC CreateImage;
-        PFNEGLDESTROYIMAGEPROC DestroyImage;
-        PFNEGLGETCURRENTCONTEXTPROC GetCurrentContext;
-        PFNEGLGETCURRENTDISPLAYPROC GetCurrentDisplay;
-
-      private:
-        DynamicLib mlibEGL;
-    };
-
-    class ScopedEGLImage {
-      public:
-        ScopedEGLImage(PFNEGLDESTROYIMAGEPROC destroyImage,
-                       PFNGLDELETETEXTURESPROC deleteTextures,
-                       EGLDisplay display,
-                       EGLImage image,
-                       GLuint texture)
-            : mDestroyImage(destroyImage),
-              mDeleteTextures(deleteTextures),
-              mDisplay(display),
-              mImage(image),
-              mTexture(texture) {
+    ~ScopedEGLImage() {
+        if (mTexture != 0) {
+            mDeleteTextures(1, &mTexture);
         }
-
-        ScopedEGLImage(ScopedEGLImage&& other) {
-            if (mImage != nullptr) {
-                mDestroyImage(mDisplay, mImage);
-            }
-            if (mTexture != 0) {
-                mDeleteTextures(1, &mTexture);
-            }
-            mDestroyImage = std::move(other.mDestroyImage);
-            mDeleteTextures = std::move(other.mDeleteTextures);
-            mDisplay = std::move(other.mDisplay);
-            mImage = std::move(other.mImage);
-            mTexture = std::move(other.mTexture);
+        if (mImage != nullptr) {
+            mDestroyImage(mDisplay, mImage);
         }
+    }
 
-        ~ScopedEGLImage() {
-            if (mTexture != 0) {
-                mDeleteTextures(1, &mTexture);
-            }
-            if (mImage != nullptr) {
-                mDestroyImage(mDisplay, mImage);
-            }
-        }
+    EGLImage getImage() const { return mImage; }
 
-        EGLImage getImage() const {
-            return mImage;
-        }
+    GLuint getTexture() const { return mTexture; }
 
-        GLuint getTexture() const {
-            return mTexture;
-        }
-
-      private:
-        PFNEGLDESTROYIMAGEPROC mDestroyImage = nullptr;
-        PFNGLDELETETEXTURESPROC mDeleteTextures = nullptr;
-        EGLDisplay mDisplay = nullptr;
-        EGLImage mImage = nullptr;
-        GLuint mTexture = 0;
-    };
+  private:
+    PFNEGLDESTROYIMAGEPROC mDestroyImage = nullptr;
+    PFNGLDELETETEXTURESPROC mDeleteTextures = nullptr;
+    EGLDisplay mDisplay = nullptr;
+    EGLImage mImage = nullptr;
+    GLuint mTexture = 0;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/white_box/QueryInternalShaderTests.cpp b/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
index ba429bc..619d6b5 100644
--- a/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
+++ b/src/dawn/tests/white_box/QueryInternalShaderTests.cpp
@@ -22,62 +22,61 @@
 
 namespace {
 
-    void EncodeConvertTimestampsToNanoseconds(wgpu::CommandEncoder encoder,
-                                              wgpu::Buffer timestamps,
-                                              wgpu::Buffer availability,
-                                              wgpu::Buffer params) {
-        ASSERT_TRUE(
-            dawn::native::EncodeConvertTimestampsToNanoseconds(
-                dawn::native::FromAPI(encoder.Get()), dawn::native::FromAPI(timestamps.Get()),
-                dawn::native::FromAPI(availability.Get()), dawn::native::FromAPI(params.Get()))
-                .IsSuccess());
+void EncodeConvertTimestampsToNanoseconds(wgpu::CommandEncoder encoder,
+                                          wgpu::Buffer timestamps,
+                                          wgpu::Buffer availability,
+                                          wgpu::Buffer params) {
+    ASSERT_TRUE(dawn::native::EncodeConvertTimestampsToNanoseconds(
+                    dawn::native::FromAPI(encoder.Get()), dawn::native::FromAPI(timestamps.Get()),
+                    dawn::native::FromAPI(availability.Get()), dawn::native::FromAPI(params.Get()))
+                    .IsSuccess());
+}
+
+class InternalShaderExpectation : public detail::Expectation {
+  public:
+    ~InternalShaderExpectation() override = default;
+
+    InternalShaderExpectation(const uint64_t* values, const unsigned int count) {
+        mExpected.assign(values, values + count);
     }
 
-    class InternalShaderExpectation : public detail::Expectation {
-      public:
-        ~InternalShaderExpectation() override = default;
+    // Expect the actual results are approximately equal to the expected values.
+    testing::AssertionResult Check(const void* data, size_t size) override {
+        DAWN_ASSERT(size == sizeof(uint64_t) * mExpected.size());
+        // The computations in the shader use a multiplier that's a 16bit integer plus a shift
+        // that maximize the multiplier. This means that for the range of periods we care about
+        // (1 to 2^16-1 ns per tick), the high order bit of the multiplier will always be set.
+        // Intuitively this means that we have 15 bits of precision in the computation so we
+        // expect that for the error tolerance.
+        constexpr static float kErrorToleranceRatio = 1.0 / (1 << 15);  // about 3e-5.
 
-        InternalShaderExpectation(const uint64_t* values, const unsigned int count) {
-            mExpected.assign(values, values + count);
-        }
-
-        // Expect the actual results are approximately equal to the expected values.
-        testing::AssertionResult Check(const void* data, size_t size) override {
-            DAWN_ASSERT(size == sizeof(uint64_t) * mExpected.size());
-            // The computations in the shader use a multiplier that's a 16bit integer plus a shift
-            // that maximize the multiplier. This means that for the range of periods we care about
-            // (1 to 2^16-1 ns per tick), the high order bit of the multiplier will always be set.
-            // Intuitively this means that we have 15 bits of precision in the computation so we
-            // expect that for the error tolerance.
-            constexpr static float kErrorToleranceRatio = 1.0 / (1 << 15);  // about 3e-5.
-
-            const uint64_t* actual = static_cast<const uint64_t*>(data);
-            for (size_t i = 0; i < mExpected.size(); ++i) {
-                if (mExpected[i] == 0) {
-                    if (actual[i] != 0) {
-                        return testing::AssertionFailure()
-                               << "Expected data[" << i << "] to be 0, actual " << actual[i]
-                               << std::endl;
-                    }
-                    continue;
-                }
-
-                float errorRate = abs(static_cast<int64_t>(mExpected[i] - actual[i])) /
-                                  static_cast<float>(mExpected[i]);
-                if (errorRate > kErrorToleranceRatio) {
+        const uint64_t* actual = static_cast<const uint64_t*>(data);
+        for (size_t i = 0; i < mExpected.size(); ++i) {
+            if (mExpected[i] == 0) {
+                if (actual[i] != 0) {
                     return testing::AssertionFailure()
-                           << "Expected data[" << i << "] to be " << mExpected[i] << ", actual "
-                           << actual[i] << ". Error rate " << errorRate << " is larger than "
-                           << kErrorToleranceRatio << std::endl;
+                           << "Expected data[" << i << "] to be 0, actual " << actual[i]
+                           << std::endl;
                 }
+                continue;
             }
 
-            return testing::AssertionSuccess();
+            float errorRate = abs(static_cast<int64_t>(mExpected[i] - actual[i])) /
+                              static_cast<float>(mExpected[i]);
+            if (errorRate > kErrorToleranceRatio) {
+                return testing::AssertionFailure()
+                       << "Expected data[" << i << "] to be " << mExpected[i] << ", actual "
+                       << actual[i] << ". Error rate " << errorRate << " is larger than "
+                       << kErrorToleranceRatio << std::endl;
+            }
         }
 
-      private:
-        std::vector<uint64_t> mExpected;
-    };
+        return testing::AssertionSuccess();
+    }
+
+  private:
+    std::vector<uint64_t> mExpected;
+};
 
 }  // anonymous namespace
 
@@ -190,7 +189,7 @@
 //   Expect 0 for unavailable timestamps and nanoseconds for available timestamps in an expected
 //   error tolerance ratio.
 // - The availability buffer passes the data of which slot in timestamps buffer is an initialized
-//   timestamp.
+//   timestamp.
 // - The params buffer passes the timestamp count, the offset in timestamps buffer and the
 //   timestamp period (here use GPU frequency (HZ) on Intel D3D12 to calculate the period in
 //   ns for testing).
diff --git a/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp b/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
index 1249973..8532c2a 100644
--- a/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
+++ b/src/dawn/tests/white_box/VulkanErrorInjectorTests.cpp
@@ -23,18 +23,18 @@
 
 namespace {
 
-    class VulkanErrorInjectorTests : public DawnTest {
-      public:
-        void SetUp() override {
-            DawnTest::SetUp();
-            DAWN_TEST_UNSUPPORTED_IF(UsesWire());
+class VulkanErrorInjectorTests : public DawnTest {
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
 
-            mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
-        }
+        mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
+    }
 
-      protected:
-        dawn::native::vulkan::Device* mDeviceVk;
-    };
+  protected:
+    dawn::native::vulkan::Device* mDeviceVk;
+};
 
 }  // anonymous namespace
 
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
index 0a34e90..ca8acb8 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests.cpp
@@ -24,805 +24,792 @@
 
 namespace dawn::native::vulkan {
 
-    using ExternalTexture = VulkanImageWrappingTestBackend::ExternalTexture;
-    using ExternalSemaphore = VulkanImageWrappingTestBackend::ExternalSemaphore;
+using ExternalTexture = VulkanImageWrappingTestBackend::ExternalTexture;
+using ExternalSemaphore = VulkanImageWrappingTestBackend::ExternalSemaphore;
 
-    namespace {
+namespace {
 
-        class VulkanImageWrappingTestBase : public DawnTest {
-          protected:
-            std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
-                return {wgpu::FeatureName::DawnInternalUsages};
-            }
-
-          public:
-            void SetUp() override {
-                DawnTest::SetUp();
-                DAWN_TEST_UNSUPPORTED_IF(UsesWire());
-
-                mBackend = VulkanImageWrappingTestBackend::Create(device);
-
-                defaultDescriptor.dimension = wgpu::TextureDimension::e2D;
-                defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-                defaultDescriptor.size = {1, 1, 1};
-                defaultDescriptor.sampleCount = 1;
-                defaultDescriptor.mipLevelCount = 1;
-                defaultDescriptor.usage = wgpu::TextureUsage::RenderAttachment |
-                                          wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
-
-                defaultTexture = mBackend->CreateTexture(1, 1, defaultDescriptor.format,
-                                                         defaultDescriptor.usage);
-            }
-
-            void TearDown() override {
-                if (UsesWire()) {
-                    DawnTest::TearDown();
-                    return;
-                }
-
-                defaultTexture = nullptr;
-                mBackend = nullptr;
-                DawnTest::TearDown();
-            }
-
-            wgpu::Texture WrapVulkanImage(
-                wgpu::Device dawnDevice,
-                const wgpu::TextureDescriptor* textureDescriptor,
-                const ExternalTexture* externalTexture,
-                std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
-                bool isInitialized = true,
-                bool expectValid = true) {
-                ExternalImageDescriptorVkForTesting descriptor;
-                return WrapVulkanImage(dawnDevice, textureDescriptor, externalTexture,
-                                       std::move(semaphores), descriptor.releasedOldLayout,
-                                       descriptor.releasedNewLayout, isInitialized, expectValid);
-            }
-
-            wgpu::Texture WrapVulkanImage(
-                wgpu::Device dawnDevice,
-                const wgpu::TextureDescriptor* textureDescriptor,
-                const ExternalTexture* externalTexture,
-                std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
-                VkImageLayout releasedOldLayout,
-                VkImageLayout releasedNewLayout,
-                bool isInitialized = true,
-                bool expectValid = true) {
-                ExternalImageDescriptorVkForTesting descriptor;
-                descriptor.cTextureDescriptor =
-                    reinterpret_cast<const WGPUTextureDescriptor*>(textureDescriptor);
-                descriptor.isInitialized = isInitialized;
-                descriptor.releasedOldLayout = releasedOldLayout;
-                descriptor.releasedNewLayout = releasedNewLayout;
-
-                wgpu::Texture texture = mBackend->WrapImage(dawnDevice, externalTexture, descriptor,
-                                                            std::move(semaphores));
-
-                if (expectValid) {
-                    EXPECT_NE(texture, nullptr) << "Failed to wrap image, are external memory / "
-                                                   "semaphore extensions supported?";
-                } else {
-                    EXPECT_EQ(texture, nullptr);
-                }
-
-                return texture;
-            }
-
-            // Exports the signal from a wrapped texture and ignores it
-            // We have to export the signal before destroying the wrapped texture else it's an
-            // assertion failure
-            void IgnoreSignalSemaphore(wgpu::Texture wrappedTexture) {
-                ExternalImageExportInfoVkForTesting exportInfo;
-                bool result =
-                    mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo);
-                ASSERT(result);
-            }
-
-          protected:
-            std::unique_ptr<VulkanImageWrappingTestBackend> mBackend;
-
-            wgpu::TextureDescriptor defaultDescriptor;
-            std::unique_ptr<ExternalTexture> defaultTexture;
-        };
-
-    }  // namespace
-
-    using VulkanImageWrappingValidationTests = VulkanImageWrappingTestBase;
-
-    // Test no error occurs if the import is valid
-    TEST_P(VulkanImageWrappingValidationTests, SuccessfulImport) {
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
-        EXPECT_NE(texture.Get(), nullptr);
-        IgnoreSignalSemaphore(texture);
+class VulkanImageWrappingTestBase : public DawnTest {
+  protected:
+    std::vector<wgpu::FeatureName> GetRequiredFeatures() override {
+        return {wgpu::FeatureName::DawnInternalUsages};
     }
 
-    // Test no error occurs if the import is valid with DawnTextureInternalUsageDescriptor
-    TEST_P(VulkanImageWrappingValidationTests, SuccessfulImportWithInternalUsageDescriptor) {
-        wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
-        defaultDescriptor.nextInChain = &internalDesc;
-        internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
-        internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+  public:
+    void SetUp() override {
+        DawnTest::SetUp();
+        DAWN_TEST_UNSUPPORTED_IF(UsesWire());
 
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
-        EXPECT_NE(texture.Get(), nullptr);
-        IgnoreSignalSemaphore(texture);
+        mBackend = VulkanImageWrappingTestBackend::Create(device);
+
+        defaultDescriptor.dimension = wgpu::TextureDimension::e2D;
+        defaultDescriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+        defaultDescriptor.size = {1, 1, 1};
+        defaultDescriptor.sampleCount = 1;
+        defaultDescriptor.mipLevelCount = 1;
+        defaultDescriptor.usage = wgpu::TextureUsage::RenderAttachment |
+                                  wgpu::TextureUsage::CopySrc | wgpu::TextureUsage::CopyDst;
+
+        defaultTexture =
+            mBackend->CreateTexture(1, 1, defaultDescriptor.format, defaultDescriptor.usage);
     }
 
-    // Test an error occurs if an invalid sType is the nextInChain
-    TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDescriptor) {
-        wgpu::ChainedStruct chainedDescriptor;
-        chainedDescriptor.sType = wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel;
-        defaultDescriptor.nextInChain = &chainedDescriptor;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor dimension isn't 2D
-    TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDimension) {
-        defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor mip level count isn't 1
-    TEST_P(VulkanImageWrappingValidationTests, InvalidMipLevelCount) {
-        defaultDescriptor.mipLevelCount = 2;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor depth isn't 1
-    TEST_P(VulkanImageWrappingValidationTests, InvalidDepth) {
-        defaultDescriptor.size.depthOrArrayLayers = 2;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if the descriptor sample count isn't 1
-    TEST_P(VulkanImageWrappingValidationTests, InvalidSampleCount) {
-        defaultDescriptor.sampleCount = 4;
-
-        ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
-                                device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
-        EXPECT_EQ(texture.Get(), nullptr);
-    }
-
-    // Test an error occurs if we try to export the signal semaphore twice
-    TEST_P(VulkanImageWrappingValidationTests, DoubleSignalSemaphoreExport) {
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
-        ASSERT_NE(texture.Get(), nullptr);
-        IgnoreSignalSemaphore(texture);
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_DEVICE_ERROR(
-            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
-        ASSERT_FALSE(success);
-        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
-    }
-
-    // Test an error occurs if we try to export the signal semaphore from a normal texture
-    TEST_P(VulkanImageWrappingValidationTests, NormalTextureSignalSemaphoreExport) {
-        wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
-        ASSERT_NE(texture.Get(), nullptr);
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_DEVICE_ERROR(
-            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
-        ASSERT_FALSE(success);
-        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
-    }
-
-    // Test an error occurs if we try to export the signal semaphore from a destroyed texture
-    TEST_P(VulkanImageWrappingValidationTests, DestroyedTextureSignalSemaphoreExport) {
-        wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
-        ASSERT_NE(texture.Get(), nullptr);
-        texture.Destroy();
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_DEVICE_ERROR(
-            bool success = mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
-        ASSERT_FALSE(success);
-        ASSERT_EQ(exportInfo.semaphores.size(), 0u);
-    }
-
-    // Fixture to test using external memory textures through different usages.
-    // These tests are skipped if the harness is using the wire.
-    class VulkanImageWrappingUsageTests : public VulkanImageWrappingTestBase {
-      public:
-        void SetUp() override {
-            VulkanImageWrappingTestBase::SetUp();
-            if (UsesWire()) {
-                return;
-            }
-
-            // Create another device based on the original
-            backendAdapter =
-                dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get())->GetAdapter());
-            deviceDescriptor.nextInChain = &togglesDesc;
-            togglesDesc.forceEnabledToggles = GetParam().forceEnabledWorkarounds.data();
-            togglesDesc.forceEnabledTogglesCount = GetParam().forceEnabledWorkarounds.size();
-            togglesDesc.forceDisabledToggles = GetParam().forceDisabledWorkarounds.data();
-            togglesDesc.forceDisabledTogglesCount = GetParam().forceDisabledWorkarounds.size();
-
-            secondDeviceVk =
-                dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
-            secondDevice = wgpu::Device::Acquire(dawn::native::ToAPI(secondDeviceVk));
+    void TearDown() override {
+        if (UsesWire()) {
+            DawnTest::TearDown();
+            return;
         }
 
-      protected:
-        dawn::native::vulkan::Adapter* backendAdapter;
-        dawn::native::DeviceDescriptor deviceDescriptor;
-        dawn::native::DawnTogglesDeviceDescriptor togglesDesc;
+        defaultTexture = nullptr;
+        mBackend = nullptr;
+        DawnTest::TearDown();
+    }
 
-        wgpu::Device secondDevice;
-        dawn::native::vulkan::Device* secondDeviceVk;
+    wgpu::Texture WrapVulkanImage(wgpu::Device dawnDevice,
+                                  const wgpu::TextureDescriptor* textureDescriptor,
+                                  const ExternalTexture* externalTexture,
+                                  std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
+                                  bool isInitialized = true,
+                                  bool expectValid = true) {
+        ExternalImageDescriptorVkForTesting descriptor;
+        return WrapVulkanImage(dawnDevice, textureDescriptor, externalTexture,
+                               std::move(semaphores), descriptor.releasedOldLayout,
+                               descriptor.releasedNewLayout, isInitialized, expectValid);
+    }
 
-        // Clear a texture on a given device
-        void ClearImage(wgpu::Device dawnDevice,
-                        wgpu::Texture wrappedTexture,
-                        wgpu::Color clearColor) {
-            wgpu::TextureView wrappedView = wrappedTexture.CreateView();
+    wgpu::Texture WrapVulkanImage(wgpu::Device dawnDevice,
+                                  const wgpu::TextureDescriptor* textureDescriptor,
+                                  const ExternalTexture* externalTexture,
+                                  std::vector<std::unique_ptr<ExternalSemaphore>> semaphores,
+                                  VkImageLayout releasedOldLayout,
+                                  VkImageLayout releasedNewLayout,
+                                  bool isInitialized = true,
+                                  bool expectValid = true) {
+        ExternalImageDescriptorVkForTesting descriptor;
+        descriptor.cTextureDescriptor =
+            reinterpret_cast<const WGPUTextureDescriptor*>(textureDescriptor);
+        descriptor.isInitialized = isInitialized;
+        descriptor.releasedOldLayout = releasedOldLayout;
+        descriptor.releasedNewLayout = releasedNewLayout;
 
-            // Submit a clear operation
-            utils::ComboRenderPassDescriptor renderPassDescriptor({wrappedView}, {});
-            renderPassDescriptor.cColorAttachments[0].clearValue = clearColor;
-            renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
+        wgpu::Texture texture =
+            mBackend->WrapImage(dawnDevice, externalTexture, descriptor, std::move(semaphores));
 
-            wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
-            pass.End();
-
-            wgpu::CommandBuffer commands = encoder.Finish();
-
-            wgpu::Queue queue = dawnDevice.GetQueue();
-            queue.Submit(1, &commands);
+        if (expectValid) {
+            EXPECT_NE(texture, nullptr) << "Failed to wrap image, are external memory / "
+                                           "semaphore extensions supported?";
+        } else {
+            EXPECT_EQ(texture, nullptr);
         }
 
-        // Submits a 1x1x1 copy from source to destination
-        void SimpleCopyTextureToTexture(wgpu::Device dawnDevice,
-                                        wgpu::Queue dawnQueue,
-                                        wgpu::Texture source,
-                                        wgpu::Texture destination) {
-            wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(source, 0, {0, 0, 0});
-            wgpu::ImageCopyTexture copyDst =
-                utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
+        return texture;
+    }
 
-            wgpu::Extent3D copySize = {1, 1, 1};
+    // Exports the signal from a wrapped texture and ignores it
+    // We have to export the signal before destroying the wrapped texture else it's an
+    // assertion failure
+    void IgnoreSignalSemaphore(wgpu::Texture wrappedTexture) {
+        ExternalImageExportInfoVkForTesting exportInfo;
+        bool result = mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo);
+        ASSERT(result);
+    }
 
-            wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
-            encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
-            wgpu::CommandBuffer commands = encoder.Finish();
+  protected:
+    std::unique_ptr<VulkanImageWrappingTestBackend> mBackend;
 
-            dawnQueue.Submit(1, &commands);
+    wgpu::TextureDescriptor defaultDescriptor;
+    std::unique_ptr<ExternalTexture> defaultTexture;
+};
+
+}  // namespace
+
+using VulkanImageWrappingValidationTests = VulkanImageWrappingTestBase;
+
+// Test no error occurs if the import is valid
+TEST_P(VulkanImageWrappingValidationTests, SuccessfulImport) {
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+    EXPECT_NE(texture.Get(), nullptr);
+    IgnoreSignalSemaphore(texture);
+}
+
+// Test no error occurs if the import is valid with DawnTextureInternalUsageDescriptor
+TEST_P(VulkanImageWrappingValidationTests, SuccessfulImportWithInternalUsageDescriptor) {
+    wgpu::DawnTextureInternalUsageDescriptor internalDesc = {};
+    defaultDescriptor.nextInChain = &internalDesc;
+    internalDesc.internalUsage = wgpu::TextureUsage::CopySrc;
+    internalDesc.sType = wgpu::SType::DawnTextureInternalUsageDescriptor;
+
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+    EXPECT_NE(texture.Get(), nullptr);
+    IgnoreSignalSemaphore(texture);
+}
+
+// Test an error occurs if an invalid sType is the nextInChain
+TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDescriptor) {
+    wgpu::ChainedStruct chainedDescriptor;
+    chainedDescriptor.sType = wgpu::SType::SurfaceDescriptorFromWindowsSwapChainPanel;
+    defaultDescriptor.nextInChain = &chainedDescriptor;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor dimension isn't 2D
+TEST_P(VulkanImageWrappingValidationTests, InvalidTextureDimension) {
+    defaultDescriptor.dimension = wgpu::TextureDimension::e1D;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor mip level count isn't 1
+TEST_P(VulkanImageWrappingValidationTests, InvalidMipLevelCount) {
+    defaultDescriptor.mipLevelCount = 2;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor depth isn't 1
+TEST_P(VulkanImageWrappingValidationTests, InvalidDepth) {
+    defaultDescriptor.size.depthOrArrayLayers = 2;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if the descriptor sample count isn't 1
+TEST_P(VulkanImageWrappingValidationTests, InvalidSampleCount) {
+    defaultDescriptor.sampleCount = 4;
+
+    ASSERT_DEVICE_ERROR(wgpu::Texture texture = WrapVulkanImage(
+                            device, &defaultDescriptor, defaultTexture.get(), {}, true, false));
+    EXPECT_EQ(texture.Get(), nullptr);
+}
+
+// Test an error occurs if we try to export the signal semaphore twice
+TEST_P(VulkanImageWrappingValidationTests, DoubleSignalSemaphoreExport) {
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {}, true, true);
+    ASSERT_NE(texture.Get(), nullptr);
+    IgnoreSignalSemaphore(texture);
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_DEVICE_ERROR(bool success =
+                            mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+    ASSERT_FALSE(success);
+    ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+}
+
+// Test an error occurs if we try to export the signal semaphore from a normal texture
+TEST_P(VulkanImageWrappingValidationTests, NormalTextureSignalSemaphoreExport) {
+    wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
+    ASSERT_NE(texture.Get(), nullptr);
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_DEVICE_ERROR(bool success =
+                            mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+    ASSERT_FALSE(success);
+    ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+}
+
+// Test an error occurs if we try to export the signal semaphore from a destroyed texture
+TEST_P(VulkanImageWrappingValidationTests, DestroyedTextureSignalSemaphoreExport) {
+    wgpu::Texture texture = device.CreateTexture(&defaultDescriptor);
+    ASSERT_NE(texture.Get(), nullptr);
+    texture.Destroy();
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_DEVICE_ERROR(bool success =
+                            mBackend->ExportImage(texture, VK_IMAGE_LAYOUT_GENERAL, &exportInfo));
+    ASSERT_FALSE(success);
+    ASSERT_EQ(exportInfo.semaphores.size(), 0u);
+}
+
+// Fixture to test using external memory textures through different usages.
+// These tests are skipped if the harness is using the wire.
+class VulkanImageWrappingUsageTests : public VulkanImageWrappingTestBase {
+  public:
+    void SetUp() override {
+        VulkanImageWrappingTestBase::SetUp();
+        if (UsesWire()) {
+            return;
         }
-    };
 
-    // Clear an image in |secondDevice|
-    // Verify clear color is visible in |device|
-    TEST_P(VulkanImageWrappingUsageTests, ClearImageAcrossDevices) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+        // Create another device based on the original
+        backendAdapter =
+            dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get())->GetAdapter());
+        deviceDescriptor.nextInChain = &togglesDesc;
+        togglesDesc.forceEnabledToggles = GetParam().forceEnabledWorkarounds.data();
+        togglesDesc.forceEnabledTogglesCount = GetParam().forceEnabledWorkarounds.size();
+        togglesDesc.forceDisabledToggles = GetParam().forceDisabledWorkarounds.data();
+        togglesDesc.forceDisabledTogglesCount = GetParam().forceDisabledWorkarounds.size();
 
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |device|, making sure we wait on signalFd
-        wgpu::Texture nextWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Verify |device| sees the changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
-
-        IgnoreSignalSemaphore(nextWrappedTexture);
+        secondDeviceVk =
+            dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
+        secondDevice = wgpu::Device::Acquire(dawn::native::ToAPI(secondDeviceVk));
     }
 
-    // Clear an image in |secondDevice|
-    // Verify clear color is not visible in |device| if we import the texture as not cleared
-    TEST_P(VulkanImageWrappingUsageTests, UninitializedTextureIsCleared) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+  protected:
+    dawn::native::vulkan::Adapter* backendAdapter;
+    dawn::native::DeviceDescriptor deviceDescriptor;
+    dawn::native::DawnTogglesDeviceDescriptor togglesDesc;
 
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+    wgpu::Device secondDevice;
+    dawn::native::vulkan::Device* secondDeviceVk;
 
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
+    // Clear a texture on a given device
+    void ClearImage(wgpu::Device dawnDevice, wgpu::Texture wrappedTexture, wgpu::Color clearColor) {
+        wgpu::TextureView wrappedView = wrappedTexture.CreateView();
 
-        // Import the image to |device|, making sure we wait on signalFd
-        wgpu::Texture nextWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout, false);
+        // Submit a clear operation
+        utils::ComboRenderPassDescriptor renderPassDescriptor({wrappedView}, {});
+        renderPassDescriptor.cColorAttachments[0].clearValue = clearColor;
+        renderPassDescriptor.cColorAttachments[0].loadOp = wgpu::LoadOp::Clear;
 
-        // Verify |device| doesn't see the changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), nextWrappedTexture, 0, 0);
+        wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDescriptor);
+        pass.End();
 
-        IgnoreSignalSemaphore(nextWrappedTexture);
-    }
-
-    // Import a texture into |secondDevice|
-    // Clear the texture on |secondDevice|
-    // Issue a copy of the imported texture inside |device| to |copyDstTexture|
-    // Verify the clear color from |secondDevice| is visible in |copyDstTexture|
-    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureSrcSync) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |device|, making sure we wait on |signalFd|
-        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Create a second texture on |device|
-        wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
-
-        // Copy |deviceWrappedTexture| into |copyDstTexture|
-        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
-
-        // Verify |copyDstTexture| sees changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
-
-        IgnoreSignalSemaphore(deviceWrappedTexture);
-    }
-
-    // Import a texture into |device|
-    // Clear texture with color A on |device|
-    // Import same texture into |secondDevice|, waiting on the copy signal
-    // Clear the new texture with color B on |secondDevice|
-    // Copy color B using Texture to Texture copy on |secondDevice|
-    // Import texture back into |device|, waiting on color B signal
-    // Verify texture contains color B
-    // If texture destination isn't synchronized, |secondDevice| could copy color B
-    // into the texture first, then |device| writes color A
-    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureDstSync) {
-        // Import the image on |device|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |device|
-        ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |secondDevice|, making sure we wait on |signalFd|
-        wgpu::Texture secondDeviceWrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(),
-                            std::move(exportInfo.semaphores), exportInfo.releasedOldLayout,
-                            exportInfo.releasedNewLayout);
-
-        // Create a texture with color B on |secondDevice|
-        wgpu::Texture copySrcTexture = secondDevice.CreateTexture(&defaultDescriptor);
-        ClearImage(secondDevice, copySrcTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        // Copy color B on |secondDevice|
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-        SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, copySrcTexture,
-                                   secondDeviceWrappedTexture);
-
-        // Re-import back into |device|, waiting on |secondDevice|'s signal
-        ExternalImageExportInfoVkForTesting secondExportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
-                                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
-
-        wgpu::Texture nextWrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(),
-                            std::move(secondExportInfo.semaphores),
-                            secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
-
-        // Verify |nextWrappedTexture| contains the color from our copy
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
-
-        IgnoreSignalSemaphore(nextWrappedTexture);
-    }
-
-    // Import a texture from |secondDevice|
-    // Clear the texture on |secondDevice|
-    // Issue a copy of the imported texture inside |device| to |copyDstBuffer|
-    // Verify the clear color from |secondDevice| is visible in |copyDstBuffer|
-    TEST_P(VulkanImageWrappingUsageTests, CopyTextureToBufferSrcSync) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |device|, making sure we wait on |signalFd|
-        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Create a destination buffer on |device|
-        wgpu::BufferDescriptor bufferDesc;
-        bufferDesc.size = 4;
-        bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
-        wgpu::Buffer copyDstBuffer = device.CreateBuffer(&bufferDesc);
-
-        // Copy |deviceWrappedTexture| into |copyDstBuffer|
-        wgpu::ImageCopyTexture copySrc =
-            utils::CreateImageCopyTexture(deviceWrappedTexture, 0, {0, 0, 0});
-        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(copyDstBuffer, 0, 256);
-
-        wgpu::Extent3D copySize = {1, 1, 1};
-
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
         wgpu::CommandBuffer commands = encoder.Finish();
+
+        wgpu::Queue queue = dawnDevice.GetQueue();
         queue.Submit(1, &commands);
-
-        // Verify |copyDstBuffer| sees changes from |secondDevice|
-        uint32_t expected = 0x04030201;
-        EXPECT_BUFFER_U32_EQ(expected, copyDstBuffer, 0);
-
-        IgnoreSignalSemaphore(deviceWrappedTexture);
     }
 
-    // Import a texture into |device|
-    // Clear texture with color A on |device|
-    // Import same texture into |secondDevice|, waiting on the copy signal
-    // Copy color B using Buffer to Texture copy on |secondDevice|
-    // Import texture back into |device|, waiting on color B signal
-    // Verify texture contains color B
-    // If texture destination isn't synchronized, |secondDevice| could copy color B
-    // into the texture first, then |device| writes color A
-    TEST_P(VulkanImageWrappingUsageTests, CopyBufferToTextureDstSync) {
-        // Import the image on |device|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        // Clear |wrappedTexture| on |device|
-        ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
-
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image to |secondDevice|, making sure we wait on |signalFd|
-        wgpu::Texture secondDeviceWrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(),
-                            std::move(exportInfo.semaphores), exportInfo.releasedOldLayout,
-                            exportInfo.releasedNewLayout);
-
-        // Copy color B on |secondDevice|
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-
-        // Create a buffer on |secondDevice|
-        wgpu::Buffer copySrcBuffer =
-            utils::CreateBufferFromData(secondDevice, wgpu::BufferUsage::CopySrc, {0x04030201});
-
-        // Copy |copySrcBuffer| into |secondDeviceWrappedTexture|
-        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(copySrcBuffer, 0, 256);
-        wgpu::ImageCopyTexture copyDst =
-            utils::CreateImageCopyTexture(secondDeviceWrappedTexture, 0, {0, 0, 0});
+    // Submits a 1x1x1 copy from source to destination
+    void SimpleCopyTextureToTexture(wgpu::Device dawnDevice,
+                                    wgpu::Queue dawnQueue,
+                                    wgpu::Texture source,
+                                    wgpu::Texture destination) {
+        wgpu::ImageCopyTexture copySrc = utils::CreateImageCopyTexture(source, 0, {0, 0, 0});
+        wgpu::ImageCopyTexture copyDst = utils::CreateImageCopyTexture(destination, 0, {0, 0, 0});
 
         wgpu::Extent3D copySize = {1, 1, 1};
 
+        wgpu::CommandEncoder encoder = dawnDevice.CreateCommandEncoder();
+        encoder.CopyTextureToTexture(&copySrc, &copyDst, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+
+        dawnQueue.Submit(1, &commands);
+    }
+};
+
+// Clear an image in |secondDevice|
+// Verify clear color is visible in |device|
+TEST_P(VulkanImageWrappingUsageTests, ClearImageAcrossDevices) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on signalFd
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Verify |device| sees the changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Clear an image in |secondDevice|
+// Verify clear color is not visible in |device| if we import the texture as not cleared
+TEST_P(VulkanImageWrappingUsageTests, UninitializedTextureIsCleared) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on signalFd
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout, false);
+
+    // Verify |device| doesn't see the changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(0, 0, 0, 0), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Import a texture into |secondDevice|
+// Clear the texture on |secondDevice|
+// Issue a copy of the imported texture inside |device| to |copyDstTexture|
+// Verify the clear color from |secondDevice| is visible in |copyDstTexture|
+TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureSrcSync) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on |signalFd|
+    wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a second texture on |device|
+    wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+    // Copy |deviceWrappedTexture| into |copyDstTexture|
+    SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+
+    // Verify |copyDstTexture| sees changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+
+    IgnoreSignalSemaphore(deviceWrappedTexture);
+}
+
+// Import a texture into |device|
+// Clear texture with color A on |device|
+// Import same texture into |secondDevice|, waiting on the copy signal
+// Clear the new texture with color B on |secondDevice|
+// Copy color B using Texture to Texture copy on |secondDevice|
+// Import texture back into |device|, waiting on color B signal
+// Verify texture contains color B
+// If texture destination isn't synchronized, |secondDevice| could copy color B
+// into the texture first, then |device| writes color A
+TEST_P(VulkanImageWrappingUsageTests, CopyTextureToTextureDstSync) {
+    // Import the image on |device|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |device|
+    ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &exportInfo));
+
+    // Import the image to |secondDevice|, making sure we wait on |signalFd|
+    wgpu::Texture secondDeviceWrappedTexture = WrapVulkanImage(
+        secondDevice, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a texture with color B on |secondDevice|
+    wgpu::Texture copySrcTexture = secondDevice.CreateTexture(&defaultDescriptor);
+    ClearImage(secondDevice, copySrcTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    // Copy color B on |secondDevice|
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+    SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, copySrcTexture,
+                               secondDeviceWrappedTexture);
+
+    // Re-import back into |device|, waiting on |secondDevice|'s signal
+    ExternalImageExportInfoVkForTesting secondExportInfo;
+    ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
+                                      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(secondExportInfo.semaphores),
+        secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+
+    // Verify |nextWrappedTexture| contains the color from our copy
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Import a texture from |secondDevice|
+// Clear the texture on |secondDevice|
+// Issue a copy of the imported texture inside |device| to |copyDstBuffer|
+// Verify the clear color from |secondDevice| is visible in |copyDstBuffer|
+TEST_P(VulkanImageWrappingUsageTests, CopyTextureToBufferSrcSync) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on |signalFd|
+    wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a destination buffer on |device|
+    wgpu::BufferDescriptor bufferDesc;
+    bufferDesc.size = 4;
+    bufferDesc.usage = wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::CopySrc;
+    wgpu::Buffer copyDstBuffer = device.CreateBuffer(&bufferDesc);
+
+    // Copy |deviceWrappedTexture| into |copyDstBuffer|
+    wgpu::ImageCopyTexture copySrc =
+        utils::CreateImageCopyTexture(deviceWrappedTexture, 0, {0, 0, 0});
+    wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(copyDstBuffer, 0, 256);
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    // Verify |copyDstBuffer| sees changes from |secondDevice|
+    uint32_t expected = 0x04030201;
+    EXPECT_BUFFER_U32_EQ(expected, copyDstBuffer, 0);
+
+    IgnoreSignalSemaphore(deviceWrappedTexture);
+}
+
+// Import a texture into |device|
+// Clear texture with color A on |device|
+// Import same texture into |secondDevice|, waiting on the copy signal
+// Copy color B using Buffer to Texture copy on |secondDevice|
+// Import texture back into |device|, waiting on color B signal
+// Verify texture contains color B
+// If texture destination isn't synchronized, |secondDevice| could copy color B
+// into the texture first, then |device| writes color A
+TEST_P(VulkanImageWrappingUsageTests, CopyBufferToTextureDstSync) {
+    // Import the image on |device|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |device|
+    ClearImage(device, wrappedTexture, {5 / 255.0f, 6 / 255.0f, 7 / 255.0f, 8 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |secondDevice|, making sure we wait on |signalFd|
+    wgpu::Texture secondDeviceWrappedTexture = WrapVulkanImage(
+        secondDevice, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Copy color B on |secondDevice|
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+
+    // Create a buffer on |secondDevice|
+    wgpu::Buffer copySrcBuffer =
+        utils::CreateBufferFromData(secondDevice, wgpu::BufferUsage::CopySrc, {0x04030201});
+
+    // Copy |copySrcBuffer| into |secondDeviceWrappedTexture|
+    wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(copySrcBuffer, 0, 256);
+    wgpu::ImageCopyTexture copyDst =
+        utils::CreateImageCopyTexture(secondDeviceWrappedTexture, 0, {0, 0, 0});
+
+    wgpu::Extent3D copySize = {1, 1, 1};
+
+    wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
+    encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
+    wgpu::CommandBuffer commands = encoder.Finish();
+    secondDeviceQueue.Submit(1, &commands);
+
+    // Re-import back into |device|, waiting on |secondDevice|'s signal
+    ExternalImageExportInfoVkForTesting secondExportInfo;
+    ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
+                                      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+
+    wgpu::Texture nextWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(secondExportInfo.semaphores),
+        secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+
+    // Verify |nextWrappedTexture| contains the color from our copy
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
+
+// Import a texture from |secondDevice|
+// Clear the texture on |secondDevice|
+// Issue a copy of the imported texture inside |device| to |copyDstTexture|
+// Issue second copy to |secondCopyDstTexture|
+// Verify the clear color from |secondDevice| is visible in both copies
+TEST_P(VulkanImageWrappingUsageTests, DoubleTextureUsage) {
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    // Clear |wrappedTexture| on |secondDevice|
+    ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
+
+    // Import the image to |device|, making sure we wait on |signalFd|
+    wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
+        device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
+        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+
+    // Create a second texture on |device|
+    wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+    // Create a third texture on |device|
+    wgpu::Texture secondCopyDstTexture = device.CreateTexture(&defaultDescriptor);
+
+    // Copy |deviceWrappedTexture| into |copyDstTexture|
+    SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+
+    // Copy |deviceWrappedTexture| into |secondCopyDstTexture|
+    SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, secondCopyDstTexture);
+
+    // Verify |copyDstTexture| sees changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+
+    // Verify |secondCopyDstTexture| sees changes from |secondDevice|
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), secondCopyDstTexture, 0, 0);
+
+    IgnoreSignalSemaphore(deviceWrappedTexture);
+}
+
+// Tex A on device 3 (external export)
+// Tex B on device 2 (external export)
+// Tex C on device 1 (external export)
+// Clear color for A on device 3
+// Copy A->B on device 3
+// Copy B->C on device 2 (wait on B from previous op)
+// Copy C->D on device 1 (wait on C from previous op)
+// Verify D has same color as A
+TEST_P(VulkanImageWrappingUsageTests, ChainTextureCopy) {
+    // device 1 = |device|
+    // device 2 = |secondDevice|
+    // Create device 3
+    dawn::native::vulkan::Device* thirdDeviceVk =
+        dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
+    wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn::native::ToAPI(thirdDeviceVk));
+
+    // Make queue for device 2 and 3
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+    wgpu::Queue thirdDeviceQueue = thirdDevice.GetQueue();
+
+    // Create textures A, B, C
+    std::unique_ptr<ExternalTexture> textureA =
+        mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+    std::unique_ptr<ExternalTexture> textureB =
+        mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+    std::unique_ptr<ExternalTexture> textureC =
+        mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
+
+    // Import TexA, TexB on device 3
+    wgpu::Texture wrappedTexADevice3 =
+        WrapVulkanImage(thirdDevice, &defaultDescriptor, textureA.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+    wgpu::Texture wrappedTexBDevice3 =
+        WrapVulkanImage(thirdDevice, &defaultDescriptor, textureB.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    // Clear TexA
+    ClearImage(thirdDevice, wrappedTexADevice3, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+
+    // Copy A->B
+    SimpleCopyTextureToTexture(thirdDevice, thirdDeviceQueue, wrappedTexADevice3,
+                               wrappedTexBDevice3);
+
+    ExternalImageExportInfoVkForTesting exportInfoTexBDevice3;
+    ASSERT_TRUE(mBackend->ExportImage(wrappedTexBDevice3, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                      &exportInfoTexBDevice3));
+    IgnoreSignalSemaphore(wrappedTexADevice3);
+
+    // Import TexB, TexC on device 2
+    wgpu::Texture wrappedTexBDevice2 = WrapVulkanImage(
+        secondDevice, &defaultDescriptor, textureB.get(),
+        std::move(exportInfoTexBDevice3.semaphores), exportInfoTexBDevice3.releasedOldLayout,
+        exportInfoTexBDevice3.releasedNewLayout);
+
+    wgpu::Texture wrappedTexCDevice2 =
+        WrapVulkanImage(secondDevice, &defaultDescriptor, textureC.get(), {},
+                        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    // Copy B->C on device 2
+    SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, wrappedTexBDevice2,
+                               wrappedTexCDevice2);
+
+    ExternalImageExportInfoVkForTesting exportInfoTexCDevice2;
+    ASSERT_TRUE(mBackend->ExportImage(wrappedTexCDevice2, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+                                      &exportInfoTexCDevice2));
+    IgnoreSignalSemaphore(wrappedTexBDevice2);
+
+    // Import TexC on device 1
+    wgpu::Texture wrappedTexCDevice1 = WrapVulkanImage(
+        device, &defaultDescriptor, textureC.get(), std::move(exportInfoTexCDevice2.semaphores),
+        exportInfoTexCDevice2.releasedOldLayout, exportInfoTexCDevice2.releasedNewLayout);
+
+    // Create TexD on device 1
+    wgpu::Texture texD = device.CreateTexture(&defaultDescriptor);
+
+    // Copy C->D on device 1
+    SimpleCopyTextureToTexture(device, queue, wrappedTexCDevice1, texD);
+
+    // Verify D matches clear color
+    EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), texD, 0, 0);
+
+    IgnoreSignalSemaphore(wrappedTexCDevice1);
+}
+
+// Tests a larger image is preserved when importing
+TEST_P(VulkanImageWrappingUsageTests, LargerImage) {
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = 640;
+    descriptor.size.height = 480;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+
+    // Fill memory with textures
+    std::vector<wgpu::Texture> textures;
+    for (int i = 0; i < 20; i++) {
+        textures.push_back(device.CreateTexture(&descriptor));
+    }
+
+    wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
+
+    // Make an image on |secondDevice|
+    std::unique_ptr<ExternalTexture> texture = mBackend->CreateTexture(
+        descriptor.size.width, descriptor.size.height, descriptor.format, descriptor.usage);
+
+    // Import the image on |secondDevice|
+    wgpu::Texture wrappedTexture =
+        WrapVulkanImage(secondDevice, &descriptor, texture.get(), {}, VK_IMAGE_LAYOUT_UNDEFINED,
+                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+    // Draw a non-trivial picture
+    uint32_t width = 640, height = 480, pixelSize = 4;
+    uint32_t bytesPerRow = Align(width * pixelSize, kTextureBytesPerRowAlignment);
+    std::vector<unsigned char> data(bytesPerRow * (height - 1) + width * pixelSize);
+
+    for (uint32_t row = 0; row < height; row++) {
+        for (uint32_t col = 0; col < width; col++) {
+            float normRow = static_cast<float>(row) / height;
+            float normCol = static_cast<float>(col) / width;
+            float dist = sqrt(normRow * normRow + normCol * normCol) * 3;
+            dist = dist - static_cast<int>(dist);
+            data[4 * (row * width + col)] = static_cast<unsigned char>(dist * 255);
+            data[4 * (row * width + col) + 1] = static_cast<unsigned char>(dist * 255);
+            data[4 * (row * width + col) + 2] = static_cast<unsigned char>(dist * 255);
+            data[4 * (row * width + col) + 3] = 255;
+        }
+    }
+
+    // Write the picture
+    {
+        wgpu::Buffer copySrcBuffer = utils::CreateBufferFromData(
+            secondDevice, data.data(), data.size(), wgpu::BufferUsage::CopySrc);
+        wgpu::ImageCopyBuffer copySrc = utils::CreateImageCopyBuffer(copySrcBuffer, 0, bytesPerRow);
+        wgpu::ImageCopyTexture copyDst =
+            utils::CreateImageCopyTexture(wrappedTexture, 0, {0, 0, 0});
+        wgpu::Extent3D copySize = {width, height, 1};
+
         wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
         encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
         wgpu::CommandBuffer commands = encoder.Finish();
         secondDeviceQueue.Submit(1, &commands);
+    }
+    ExternalImageExportInfoVkForTesting exportInfo;
+    ASSERT_TRUE(
+        mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &exportInfo));
 
-        // Re-import back into |device|, waiting on |secondDevice|'s signal
-        ExternalImageExportInfoVkForTesting secondExportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(secondDeviceWrappedTexture,
-                                          VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, &secondExportInfo));
+    // Import the image on |device|
+    wgpu::Texture nextWrappedTexture =
+        WrapVulkanImage(device, &descriptor, texture.get(), std::move(exportInfo.semaphores),
+                        exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
 
-        wgpu::Texture nextWrappedTexture =
-            WrapVulkanImage(device, &defaultDescriptor, defaultTexture.get(),
-                            std::move(secondExportInfo.semaphores),
-                            secondExportInfo.releasedOldLayout, secondExportInfo.releasedNewLayout);
+    // Copy the image into a buffer for comparison
+    wgpu::BufferDescriptor copyDesc;
+    copyDesc.size = data.size();
+    copyDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer copyDstBuffer = device.CreateBuffer(&copyDesc);
+    {
+        wgpu::ImageCopyTexture copySrc =
+            utils::CreateImageCopyTexture(nextWrappedTexture, 0, {0, 0, 0});
+        wgpu::ImageCopyBuffer copyDst = utils::CreateImageCopyBuffer(copyDstBuffer, 0, bytesPerRow);
 
-        // Verify |nextWrappedTexture| contains the color from our copy
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), nextWrappedTexture, 0, 0);
+        wgpu::Extent3D copySize = {width, height, 1};
 
-        IgnoreSignalSemaphore(nextWrappedTexture);
+        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+        encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
+        wgpu::CommandBuffer commands = encoder.Finish();
+        queue.Submit(1, &commands);
     }
 
-    // Import a texture from |secondDevice|
-    // Clear the texture on |secondDevice|
-    // Issue a copy of the imported texture inside |device| to |copyDstTexture|
-    // Issue second copy to |secondCopyDstTexture|
-    // Verify the clear color from |secondDevice| is visible in both copies
-    TEST_P(VulkanImageWrappingUsageTests, DoubleTextureUsage) {
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, defaultTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+    // Check the image is not corrupted on |device|
+    EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(data.data()), copyDstBuffer, 0,
+                               data.size() / 4);
 
-        // Clear |wrappedTexture| on |secondDevice|
-        ClearImage(secondDevice, wrappedTexture, {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
+    IgnoreSignalSemaphore(nextWrappedTexture);
+}
 
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
+// Test that texture descriptor view formats are passed to the backend for wrapped external
+// textures, and that contents may be reinterpreted as sRGB.
+TEST_P(VulkanImageWrappingUsageTests, SRGBReinterpretation) {
+    wgpu::TextureViewDescriptor viewDesc = {};
+    viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
 
-        // Import the image to |device|, making sure we wait on |signalFd|
-        wgpu::Texture deviceWrappedTexture = WrapVulkanImage(
-            device, &defaultDescriptor, defaultTexture.get(), std::move(exportInfo.semaphores),
-            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
+    wgpu::TextureDescriptor textureDesc = {};
+    textureDesc.size = {2, 2, 1};
+    textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
+    textureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
+    textureDesc.viewFormatCount = 1;
+    textureDesc.viewFormats = &viewDesc.format;
 
-        // Create a second texture on |device|
-        wgpu::Texture copyDstTexture = device.CreateTexture(&defaultDescriptor);
+    std::unique_ptr<ExternalTexture> backendTexture = mBackend->CreateTexture(
+        textureDesc.size.width, textureDesc.size.height, textureDesc.format, textureDesc.usage);
 
-        // Create a third texture on |device|
-        wgpu::Texture secondCopyDstTexture = device.CreateTexture(&defaultDescriptor);
+    // Import the image on |device|
+    wgpu::Texture texture =
+        WrapVulkanImage(device, &textureDesc, backendTexture.get(), {}, VK_IMAGE_LAYOUT_UNDEFINED,
+                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+    ASSERT_NE(texture.Get(), nullptr);
 
-        // Copy |deviceWrappedTexture| into |copyDstTexture|
-        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, copyDstTexture);
+    wgpu::ImageCopyTexture dst = {};
+    dst.texture = texture;
+    std::array<RGBA8, 4> rgbaTextureData = {
+        RGBA8(180, 0, 0, 255),
+        RGBA8(0, 84, 0, 127),
+        RGBA8(0, 0, 62, 100),
+        RGBA8(62, 180, 84, 90),
+    };
 
-        // Copy |deviceWrappedTexture| into |secondCopyDstTexture|
-        SimpleCopyTextureToTexture(device, queue, deviceWrappedTexture, secondCopyDstTexture);
+    wgpu::TextureDataLayout dataLayout = {};
+    dataLayout.bytesPerRow = textureDesc.size.width * sizeof(RGBA8);
 
-        // Verify |copyDstTexture| sees changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), copyDstTexture, 0, 0);
+    queue.WriteTexture(&dst, rgbaTextureData.data(), rgbaTextureData.size() * sizeof(RGBA8),
+                       &dataLayout, &textureDesc.size);
 
-        // Verify |secondCopyDstTexture| sees changes from |secondDevice|
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), secondCopyDstTexture, 0, 0);
+    wgpu::TextureView textureView = texture.CreateView(&viewDesc);
 
-        IgnoreSignalSemaphore(deviceWrappedTexture);
-    }
-
-    // Tex A on device 3 (external export)
-    // Tex B on device 2 (external export)
-    // Tex C on device 1 (external export)
-    // Clear color for A on device 3
-    // Copy A->B on device 3
-    // Copy B->C on device 2 (wait on B from previous op)
-    // Copy C->D on device 1 (wait on C from previous op)
-    // Verify D has same color as A
-    TEST_P(VulkanImageWrappingUsageTests, ChainTextureCopy) {
-        // device 1 = |device|
-        // device 2 = |secondDevice|
-        // Create device 3
-        dawn::native::vulkan::Device* thirdDeviceVk =
-            dawn::native::vulkan::ToBackend(backendAdapter->APICreateDevice(&deviceDescriptor));
-        wgpu::Device thirdDevice = wgpu::Device::Acquire(dawn::native::ToAPI(thirdDeviceVk));
-
-        // Make queue for device 2 and 3
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-        wgpu::Queue thirdDeviceQueue = thirdDevice.GetQueue();
-
-        // Create textures A, B, C
-        std::unique_ptr<ExternalTexture> textureA =
-            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
-        std::unique_ptr<ExternalTexture> textureB =
-            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
-        std::unique_ptr<ExternalTexture> textureC =
-            mBackend->CreateTexture(1, 1, wgpu::TextureFormat::RGBA8Unorm, defaultDescriptor.usage);
-
-        // Import TexA, TexB on device 3
-        wgpu::Texture wrappedTexADevice3 =
-            WrapVulkanImage(thirdDevice, &defaultDescriptor, textureA.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
-
-        wgpu::Texture wrappedTexBDevice3 =
-            WrapVulkanImage(thirdDevice, &defaultDescriptor, textureB.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-
-        // Clear TexA
-        ClearImage(thirdDevice, wrappedTexADevice3,
-                   {1 / 255.0f, 2 / 255.0f, 3 / 255.0f, 4 / 255.0f});
-
-        // Copy A->B
-        SimpleCopyTextureToTexture(thirdDevice, thirdDeviceQueue, wrappedTexADevice3,
-                                   wrappedTexBDevice3);
-
-        ExternalImageExportInfoVkForTesting exportInfoTexBDevice3;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexBDevice3, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfoTexBDevice3));
-        IgnoreSignalSemaphore(wrappedTexADevice3);
-
-        // Import TexB, TexC on device 2
-        wgpu::Texture wrappedTexBDevice2 = WrapVulkanImage(
-            secondDevice, &defaultDescriptor, textureB.get(),
-            std::move(exportInfoTexBDevice3.semaphores), exportInfoTexBDevice3.releasedOldLayout,
-            exportInfoTexBDevice3.releasedNewLayout);
-
-        wgpu::Texture wrappedTexCDevice2 =
-            WrapVulkanImage(secondDevice, &defaultDescriptor, textureC.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-
-        // Copy B->C on device 2
-        SimpleCopyTextureToTexture(secondDevice, secondDeviceQueue, wrappedTexBDevice2,
-                                   wrappedTexCDevice2);
-
-        ExternalImageExportInfoVkForTesting exportInfoTexCDevice2;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexCDevice2, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfoTexCDevice2));
-        IgnoreSignalSemaphore(wrappedTexBDevice2);
-
-        // Import TexC on device 1
-        wgpu::Texture wrappedTexCDevice1 = WrapVulkanImage(
-            device, &defaultDescriptor, textureC.get(), std::move(exportInfoTexCDevice2.semaphores),
-            exportInfoTexCDevice2.releasedOldLayout, exportInfoTexCDevice2.releasedNewLayout);
-
-        // Create TexD on device 1
-        wgpu::Texture texD = device.CreateTexture(&defaultDescriptor);
-
-        // Copy C->D on device 1
-        SimpleCopyTextureToTexture(device, queue, wrappedTexCDevice1, texD);
-
-        // Verify D matches clear color
-        EXPECT_PIXEL_RGBA8_EQ(RGBA8(1, 2, 3, 4), texD, 0, 0);
-
-        IgnoreSignalSemaphore(wrappedTexCDevice1);
-    }
-
-    // Tests a larger image is preserved when importing
-    TEST_P(VulkanImageWrappingUsageTests, LargerImage) {
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = 640;
-        descriptor.size.height = 480;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = wgpu::TextureFormat::RGBA8Unorm;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
-
-        // Fill memory with textures
-        std::vector<wgpu::Texture> textures;
-        for (int i = 0; i < 20; i++) {
-            textures.push_back(device.CreateTexture(&descriptor));
-        }
-
-        wgpu::Queue secondDeviceQueue = secondDevice.GetQueue();
-
-        // Make an image on |secondDevice|
-        std::unique_ptr<ExternalTexture> texture = mBackend->CreateTexture(
-            descriptor.size.width, descriptor.size.height, descriptor.format, descriptor.usage);
-
-        // Import the image on |secondDevice|
-        wgpu::Texture wrappedTexture =
-            WrapVulkanImage(secondDevice, &descriptor, texture.get(), {}, VK_IMAGE_LAYOUT_UNDEFINED,
-                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-
-        // Draw a non-trivial picture
-        uint32_t width = 640, height = 480, pixelSize = 4;
-        uint32_t bytesPerRow = Align(width * pixelSize, kTextureBytesPerRowAlignment);
-        std::vector<unsigned char> data(bytesPerRow * (height - 1) + width * pixelSize);
-
-        for (uint32_t row = 0; row < height; row++) {
-            for (uint32_t col = 0; col < width; col++) {
-                float normRow = static_cast<float>(row) / height;
-                float normCol = static_cast<float>(col) / width;
-                float dist = sqrt(normRow * normRow + normCol * normCol) * 3;
-                dist = dist - static_cast<int>(dist);
-                data[4 * (row * width + col)] = static_cast<unsigned char>(dist * 255);
-                data[4 * (row * width + col) + 1] = static_cast<unsigned char>(dist * 255);
-                data[4 * (row * width + col) + 2] = static_cast<unsigned char>(dist * 255);
-                data[4 * (row * width + col) + 3] = 255;
-            }
-        }
-
-        // Write the picture
-        {
-            wgpu::Buffer copySrcBuffer = utils::CreateBufferFromData(
-                secondDevice, data.data(), data.size(), wgpu::BufferUsage::CopySrc);
-            wgpu::ImageCopyBuffer copySrc =
-                utils::CreateImageCopyBuffer(copySrcBuffer, 0, bytesPerRow);
-            wgpu::ImageCopyTexture copyDst =
-                utils::CreateImageCopyTexture(wrappedTexture, 0, {0, 0, 0});
-            wgpu::Extent3D copySize = {width, height, 1};
-
-            wgpu::CommandEncoder encoder = secondDevice.CreateCommandEncoder();
-            encoder.CopyBufferToTexture(&copySrc, &copyDst, &copySize);
-            wgpu::CommandBuffer commands = encoder.Finish();
-            secondDeviceQueue.Submit(1, &commands);
-        }
-        ExternalImageExportInfoVkForTesting exportInfo;
-        ASSERT_TRUE(mBackend->ExportImage(wrappedTexture, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                          &exportInfo));
-
-        // Import the image on |device|
-        wgpu::Texture nextWrappedTexture =
-            WrapVulkanImage(device, &descriptor, texture.get(), std::move(exportInfo.semaphores),
-                            exportInfo.releasedOldLayout, exportInfo.releasedNewLayout);
-
-        // Copy the image into a buffer for comparison
-        wgpu::BufferDescriptor copyDesc;
-        copyDesc.size = data.size();
-        copyDesc.usage = wgpu::BufferUsage::CopySrc | wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer copyDstBuffer = device.CreateBuffer(&copyDesc);
-        {
-            wgpu::ImageCopyTexture copySrc =
-                utils::CreateImageCopyTexture(nextWrappedTexture, 0, {0, 0, 0});
-            wgpu::ImageCopyBuffer copyDst =
-                utils::CreateImageCopyBuffer(copyDstBuffer, 0, bytesPerRow);
-
-            wgpu::Extent3D copySize = {width, height, 1};
-
-            wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-            encoder.CopyTextureToBuffer(&copySrc, &copyDst, &copySize);
-            wgpu::CommandBuffer commands = encoder.Finish();
-            queue.Submit(1, &commands);
-        }
-
-        // Check the image is not corrupted on |device|
-        EXPECT_BUFFER_U32_RANGE_EQ(reinterpret_cast<uint32_t*>(data.data()), copyDstBuffer, 0,
-                                   data.size() / 4);
-
-        IgnoreSignalSemaphore(nextWrappedTexture);
-    }
-
-    // Test that texture descriptor view formats are passed to the backend for wrapped external
-    // textures, and that contents may be reinterpreted as sRGB.
-    TEST_P(VulkanImageWrappingUsageTests, SRGBReinterpretation) {
-        wgpu::TextureViewDescriptor viewDesc = {};
-        viewDesc.format = wgpu::TextureFormat::RGBA8UnormSrgb;
-
-        wgpu::TextureDescriptor textureDesc = {};
-        textureDesc.size = {2, 2, 1};
-        textureDesc.format = wgpu::TextureFormat::RGBA8Unorm;
-        textureDesc.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::TextureBinding;
-        textureDesc.viewFormatCount = 1;
-        textureDesc.viewFormats = &viewDesc.format;
-
-        std::unique_ptr<ExternalTexture> backendTexture = mBackend->CreateTexture(
-            textureDesc.size.width, textureDesc.size.height, textureDesc.format, textureDesc.usage);
-
-        // Import the image on |device|
-        wgpu::Texture texture =
-            WrapVulkanImage(device, &textureDesc, backendTexture.get(), {},
-                            VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
-        ASSERT_NE(texture.Get(), nullptr);
-
-        wgpu::ImageCopyTexture dst = {};
-        dst.texture = texture;
-        std::array<RGBA8, 4> rgbaTextureData = {
-            RGBA8(180, 0, 0, 255),
-            RGBA8(0, 84, 0, 127),
-            RGBA8(0, 0, 62, 100),
-            RGBA8(62, 180, 84, 90),
-        };
-
-        wgpu::TextureDataLayout dataLayout = {};
-        dataLayout.bytesPerRow = textureDesc.size.width * sizeof(RGBA8);
-
-        queue.WriteTexture(&dst, rgbaTextureData.data(), rgbaTextureData.size() * sizeof(RGBA8),
-                           &dataLayout, &textureDesc.size);
-
-        wgpu::TextureView textureView = texture.CreateView(&viewDesc);
-
-        utils::ComboRenderPipelineDescriptor pipelineDesc;
-        pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
+    utils::ComboRenderPipelineDescriptor pipelineDesc;
+    pipelineDesc.vertex.module = utils::CreateShaderModule(device, R"(
             @stage(vertex)
             fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
                 var pos = array<vec2<f32>, 6>(
@@ -835,7 +822,7 @@
                 return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
             }
         )");
-        pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
+    pipelineDesc.cFragment.module = utils::CreateShaderModule(device, R"(
             @group(0) @binding(0) var texture : texture_2d<f32>;
 
             @stage(fragment)
@@ -844,45 +831,44 @@
             }
         )");
 
-        utils::BasicRenderPass renderPass =
-            utils::CreateBasicRenderPass(device, textureDesc.size.width, textureDesc.size.height,
-                                         wgpu::TextureFormat::RGBA8Unorm);
-        pipelineDesc.cTargets[0].format = renderPass.colorFormat;
+    utils::BasicRenderPass renderPass = utils::CreateBasicRenderPass(
+        device, textureDesc.size.width, textureDesc.size.height, wgpu::TextureFormat::RGBA8Unorm);
+    pipelineDesc.cTargets[0].format = renderPass.colorFormat;
 
-        wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
-        {
-            wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
+    wgpu::CommandEncoder encoder = device.CreateCommandEncoder();
+    {
+        wgpu::RenderPipeline pipeline = device.CreateRenderPipeline(&pipelineDesc);
 
-            wgpu::BindGroup bindGroup =
-                utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, textureView}});
+        wgpu::BindGroup bindGroup =
+            utils::MakeBindGroup(device, pipeline.GetBindGroupLayout(0), {{0, textureView}});
 
-            wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
-            pass.SetPipeline(pipeline);
-            pass.SetBindGroup(0, bindGroup);
-            pass.Draw(6);
-            pass.End();
-        }
-
-        wgpu::CommandBuffer commands = encoder.Finish();
-        queue.Submit(1, &commands);
-
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(116, 0, 0, 255),   //
-            RGBA8(117, 0, 0, 255), renderPass.color, 0, 0);
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(0, 23, 0, 127),    //
-            RGBA8(0, 24, 0, 127), renderPass.color, 1, 0);
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(0, 0, 12, 100),    //
-            RGBA8(0, 0, 13, 100), renderPass.color, 0, 1);
-        EXPECT_PIXEL_RGBA8_BETWEEN(  //
-            RGBA8(12, 116, 23, 90),  //
-            RGBA8(13, 117, 24, 90), renderPass.color, 1, 1);
-
-        IgnoreSignalSemaphore(texture);
+        wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPass.renderPassInfo);
+        pass.SetPipeline(pipeline);
+        pass.SetBindGroup(0, bindGroup);
+        pass.Draw(6);
+        pass.End();
     }
 
-    DAWN_INSTANTIATE_TEST(VulkanImageWrappingValidationTests, VulkanBackend());
-    DAWN_INSTANTIATE_TEST(VulkanImageWrappingUsageTests, VulkanBackend());
+    wgpu::CommandBuffer commands = encoder.Finish();
+    queue.Submit(1, &commands);
+
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(116, 0, 0, 255),   //
+        RGBA8(117, 0, 0, 255), renderPass.color, 0, 0);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(0, 23, 0, 127),    //
+        RGBA8(0, 24, 0, 127), renderPass.color, 1, 0);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(0, 0, 12, 100),    //
+        RGBA8(0, 0, 13, 100), renderPass.color, 0, 1);
+    EXPECT_PIXEL_RGBA8_BETWEEN(  //
+        RGBA8(12, 116, 23, 90),  //
+        RGBA8(13, 117, 24, 90), renderPass.color, 1, 1);
+
+    IgnoreSignalSemaphore(texture);
+}
+
+DAWN_INSTANTIATE_TEST(VulkanImageWrappingValidationTests, VulkanBackend());
+DAWN_INSTANTIATE_TEST(VulkanImageWrappingUsageTests, VulkanBackend());
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests.h b/src/dawn/tests/white_box/VulkanImageWrappingTests.h
index 4cc34c6..dd7152b 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests.h
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests.h
@@ -28,48 +28,47 @@
 
 namespace dawn::native::vulkan {
 
-    struct ExternalImageDescriptorVkForTesting;
-    struct ExternalImageExportInfoVkForTesting;
+struct ExternalImageDescriptorVkForTesting;
+struct ExternalImageExportInfoVkForTesting;
 
-    class VulkanImageWrappingTestBackend {
+class VulkanImageWrappingTestBackend {
+  public:
+    static std::unique_ptr<VulkanImageWrappingTestBackend> Create(const wgpu::Device& device);
+    virtual ~VulkanImageWrappingTestBackend() = default;
+
+    class ExternalTexture : NonCopyable {
       public:
-        static std::unique_ptr<VulkanImageWrappingTestBackend> Create(const wgpu::Device& device);
-        virtual ~VulkanImageWrappingTestBackend() = default;
-
-        class ExternalTexture : NonCopyable {
-          public:
-            virtual ~ExternalTexture() = default;
-        };
-        class ExternalSemaphore : NonCopyable {
-          public:
-            virtual ~ExternalSemaphore() = default;
-        };
-
-        virtual std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
-                                                               uint32_t height,
-                                                               wgpu::TextureFormat format,
-                                                               wgpu::TextureUsage usage) = 0;
-        virtual wgpu::Texture WrapImage(
-            const wgpu::Device& device,
-            const ExternalTexture* texture,
-            const ExternalImageDescriptorVkForTesting& descriptor,
-            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) = 0;
-
-        virtual bool ExportImage(const wgpu::Texture& texture,
-                                 VkImageLayout layout,
-                                 ExternalImageExportInfoVkForTesting* exportInfo) = 0;
+        virtual ~ExternalTexture() = default;
+    };
+    class ExternalSemaphore : NonCopyable {
+      public:
+        virtual ~ExternalSemaphore() = default;
     };
 
-    struct ExternalImageDescriptorVkForTesting : public ExternalImageDescriptorVk {
-      public:
-        ExternalImageDescriptorVkForTesting();
-    };
+    virtual std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                           uint32_t height,
+                                                           wgpu::TextureFormat format,
+                                                           wgpu::TextureUsage usage) = 0;
+    virtual wgpu::Texture WrapImage(const wgpu::Device& device,
+                                    const ExternalTexture* texture,
+                                    const ExternalImageDescriptorVkForTesting& descriptor,
+                                    std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) = 0;
 
-    struct ExternalImageExportInfoVkForTesting : public ExternalImageExportInfoVk {
-      public:
-        ExternalImageExportInfoVkForTesting();
-        std::vector<std::unique_ptr<VulkanImageWrappingTestBackend::ExternalSemaphore>> semaphores;
-    };
+    virtual bool ExportImage(const wgpu::Texture& texture,
+                             VkImageLayout layout,
+                             ExternalImageExportInfoVkForTesting* exportInfo) = 0;
+};
+
+struct ExternalImageDescriptorVkForTesting : public ExternalImageDescriptorVk {
+  public:
+    ExternalImageDescriptorVkForTesting();
+};
+
+struct ExternalImageExportInfoVkForTesting : public ExternalImageExportInfoVk {
+  public:
+    ExternalImageExportInfoVkForTesting();
+    std::vector<std::unique_ptr<VulkanImageWrappingTestBackend::ExternalSemaphore>> semaphores;
+};
 
 }  // namespace dawn::native::vulkan
 
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
index 9ef7c3a..cccaadc 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests_DmaBuf.cpp
@@ -26,169 +26,161 @@
 
 namespace dawn::native::vulkan {
 
-    ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
-        : ExternalImageDescriptorVk(ExternalImageType::DmaBuf) {
+ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
+    : ExternalImageDescriptorVk(ExternalImageType::DmaBuf) {}
+ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
+    : ExternalImageExportInfoVk(ExternalImageType::DmaBuf) {}
+
+class ExternalSemaphoreDmaBuf : public VulkanImageWrappingTestBackend::ExternalSemaphore {
+  public:
+    explicit ExternalSemaphoreDmaBuf(int handle) : mHandle(handle) {}
+    ~ExternalSemaphoreDmaBuf() override {
+        if (mHandle != -1) {
+            close(mHandle);
+        }
     }
-    ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
-        : ExternalImageExportInfoVk(ExternalImageType::DmaBuf) {
+    int AcquireHandle() {
+        int handle = mHandle;
+        mHandle = -1;
+        return handle;
     }
 
-    class ExternalSemaphoreDmaBuf : public VulkanImageWrappingTestBackend::ExternalSemaphore {
-      public:
-        explicit ExternalSemaphoreDmaBuf(int handle) : mHandle(handle) {
+  private:
+    int mHandle = -1;
+};
+
+class ExternalTextureDmaBuf : public VulkanImageWrappingTestBackend::ExternalTexture {
+  public:
+    ExternalTextureDmaBuf(gbm_bo* bo, int fd, uint32_t stride, uint64_t drmModifier)
+        : mGbmBo(bo), mFd(fd), stride(stride), drmModifier(drmModifier) {}
+
+    ~ExternalTextureDmaBuf() override {
+        if (mFd != -1) {
+            close(mFd);
         }
-        ~ExternalSemaphoreDmaBuf() override {
-            if (mHandle != -1) {
-                close(mHandle);
-            }
+        if (mGbmBo != nullptr) {
+            gbm_bo_destroy(mGbmBo);
         }
-        int AcquireHandle() {
-            int handle = mHandle;
-            mHandle = -1;
-            return handle;
-        }
-
-      private:
-        int mHandle = -1;
-    };
-
-    class ExternalTextureDmaBuf : public VulkanImageWrappingTestBackend::ExternalTexture {
-      public:
-        ExternalTextureDmaBuf(gbm_bo* bo, int fd, uint32_t stride, uint64_t drmModifier)
-            : mGbmBo(bo), mFd(fd), stride(stride), drmModifier(drmModifier) {
-        }
-
-        ~ExternalTextureDmaBuf() override {
-            if (mFd != -1) {
-                close(mFd);
-            }
-            if (mGbmBo != nullptr) {
-                gbm_bo_destroy(mGbmBo);
-            }
-        }
-
-        int Dup() const {
-            return dup(mFd);
-        }
-
-      private:
-        gbm_bo* mGbmBo = nullptr;
-        int mFd = -1;
-
-      public:
-        const uint32_t stride;
-        const uint64_t drmModifier;
-    };
-
-    class VulkanImageWrappingTestBackendDmaBuf : public VulkanImageWrappingTestBackend {
-      public:
-        explicit VulkanImageWrappingTestBackendDmaBuf(const wgpu::Device& device) {
-        }
-
-        ~VulkanImageWrappingTestBackendDmaBuf() {
-            if (mGbmDevice != nullptr) {
-                gbm_device_destroy(mGbmDevice);
-                mGbmDevice = nullptr;
-            }
-        }
-
-        std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
-                                                       uint32_t height,
-                                                       wgpu::TextureFormat format,
-                                                       wgpu::TextureUsage usage) override {
-            EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
-
-            gbm_bo* bo = CreateGbmBo(width, height, true);
-
-            return std::make_unique<ExternalTextureDmaBuf>(
-                bo, gbm_bo_get_fd(bo), gbm_bo_get_stride_for_plane(bo, 0), gbm_bo_get_modifier(bo));
-        }
-
-        wgpu::Texture WrapImage(
-            const wgpu::Device& device,
-            const ExternalTexture* texture,
-            const ExternalImageDescriptorVkForTesting& descriptor,
-            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
-            const ExternalTextureDmaBuf* textureDmaBuf =
-                static_cast<const ExternalTextureDmaBuf*>(texture);
-            std::vector<int> waitFDs;
-            for (auto& semaphore : semaphores) {
-                waitFDs.push_back(
-                    static_cast<ExternalSemaphoreDmaBuf*>(semaphore.get())->AcquireHandle());
-            }
-
-            ExternalImageDescriptorDmaBuf descriptorDmaBuf;
-            *static_cast<ExternalImageDescriptorVk*>(&descriptorDmaBuf) = descriptor;
-
-            descriptorDmaBuf.memoryFD = textureDmaBuf->Dup();
-            descriptorDmaBuf.waitFDs = std::move(waitFDs);
-
-            descriptorDmaBuf.stride = textureDmaBuf->stride;
-            descriptorDmaBuf.drmModifier = textureDmaBuf->drmModifier;
-
-            return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorDmaBuf);
-        }
-
-        bool ExportImage(const wgpu::Texture& texture,
-                         VkImageLayout layout,
-                         ExternalImageExportInfoVkForTesting* exportInfo) override {
-            ExternalImageExportInfoDmaBuf infoDmaBuf;
-            bool success = ExportVulkanImage(texture.Get(), layout, &infoDmaBuf);
-
-            *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoDmaBuf;
-            for (int fd : infoDmaBuf.semaphoreHandles) {
-                EXPECT_NE(fd, -1);
-                exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreDmaBuf>(fd));
-            }
-
-            return success;
-        }
-
-        void CreateGbmDevice() {
-            // Render nodes [1] are the primary interface for communicating with the GPU on
-            // devices that support DRM. The actual filename of the render node is
-            // implementation-specific, so we must scan through all possible filenames to find
-            // one that we can use [2].
-            //
-            // [1] https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#render-nodes
-            // [2]
-            // https://cs.chromium.org/chromium/src/ui/ozone/platform/wayland/gpu/drm_render_node_path_finder.cc
-            const uint32_t kRenderNodeStart = 128;
-            const uint32_t kRenderNodeEnd = kRenderNodeStart + 16;
-            const std::string kRenderNodeTemplate = "/dev/dri/renderD";
-
-            int renderNodeFd = -1;
-            for (uint32_t i = kRenderNodeStart; i < kRenderNodeEnd; i++) {
-                std::string renderNode = kRenderNodeTemplate + std::to_string(i);
-                renderNodeFd = open(renderNode.c_str(), O_RDWR);
-                if (renderNodeFd >= 0)
-                    break;
-            }
-            EXPECT_GE(renderNodeFd, 0) << "Failed to get file descriptor for render node";
-
-            gbm_device* gbmDevice = gbm_create_device(renderNodeFd);
-            EXPECT_NE(gbmDevice, nullptr) << "Failed to create GBM device";
-            mGbmDevice = gbmDevice;
-        }
-
-      private:
-        gbm_bo* CreateGbmBo(uint32_t width, uint32_t height, bool linear) {
-            uint32_t flags = GBM_BO_USE_RENDERING;
-            if (linear)
-                flags |= GBM_BO_USE_LINEAR;
-            gbm_bo* gbmBo = gbm_bo_create(mGbmDevice, width, height, GBM_FORMAT_XBGR8888, flags);
-            EXPECT_NE(gbmBo, nullptr) << "Failed to create GBM buffer object";
-            return gbmBo;
-        }
-
-        gbm_device* mGbmDevice = nullptr;
-    };
-
-    // static
-    std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
-        const wgpu::Device& device) {
-        auto backend = std::make_unique<VulkanImageWrappingTestBackendDmaBuf>(device);
-        backend->CreateGbmDevice();
-        return backend;
     }
+
+    int Dup() const { return dup(mFd); }
+
+  private:
+    gbm_bo* mGbmBo = nullptr;
+    int mFd = -1;
+
+  public:
+    const uint32_t stride;
+    const uint64_t drmModifier;
+};
+
+class VulkanImageWrappingTestBackendDmaBuf : public VulkanImageWrappingTestBackend {
+  public:
+    explicit VulkanImageWrappingTestBackendDmaBuf(const wgpu::Device& device) {}
+
+    ~VulkanImageWrappingTestBackendDmaBuf() {
+        if (mGbmDevice != nullptr) {
+            gbm_device_destroy(mGbmDevice);
+            mGbmDevice = nullptr;
+        }
+    }
+
+    std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                   uint32_t height,
+                                                   wgpu::TextureFormat format,
+                                                   wgpu::TextureUsage usage) override {
+        EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
+
+        gbm_bo* bo = CreateGbmBo(width, height, true);
+
+        return std::make_unique<ExternalTextureDmaBuf>(
+            bo, gbm_bo_get_fd(bo), gbm_bo_get_stride_for_plane(bo, 0), gbm_bo_get_modifier(bo));
+    }
+
+    wgpu::Texture WrapImage(const wgpu::Device& device,
+                            const ExternalTexture* texture,
+                            const ExternalImageDescriptorVkForTesting& descriptor,
+                            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
+        const ExternalTextureDmaBuf* textureDmaBuf =
+            static_cast<const ExternalTextureDmaBuf*>(texture);
+        std::vector<int> waitFDs;
+        for (auto& semaphore : semaphores) {
+            waitFDs.push_back(
+                static_cast<ExternalSemaphoreDmaBuf*>(semaphore.get())->AcquireHandle());
+        }
+
+        ExternalImageDescriptorDmaBuf descriptorDmaBuf;
+        *static_cast<ExternalImageDescriptorVk*>(&descriptorDmaBuf) = descriptor;
+
+        descriptorDmaBuf.memoryFD = textureDmaBuf->Dup();
+        descriptorDmaBuf.waitFDs = std::move(waitFDs);
+
+        descriptorDmaBuf.stride = textureDmaBuf->stride;
+        descriptorDmaBuf.drmModifier = textureDmaBuf->drmModifier;
+
+        return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorDmaBuf);
+    }
+
+    bool ExportImage(const wgpu::Texture& texture,
+                     VkImageLayout layout,
+                     ExternalImageExportInfoVkForTesting* exportInfo) override {
+        ExternalImageExportInfoDmaBuf infoDmaBuf;
+        bool success = ExportVulkanImage(texture.Get(), layout, &infoDmaBuf);
+
+        *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoDmaBuf;
+        for (int fd : infoDmaBuf.semaphoreHandles) {
+            EXPECT_NE(fd, -1);
+            exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreDmaBuf>(fd));
+        }
+
+        return success;
+    }
+
+    void CreateGbmDevice() {
+        // Render nodes [1] are the primary interface for communicating with the GPU on
+        // devices that support DRM. The actual filename of the render node is
+        // implementation-specific, so we must scan through all possible filenames to find
+        // one that we can use [2].
+        //
+        // [1] https://dri.freedesktop.org/docs/drm/gpu/drm-uapi.html#render-nodes
+        // [2]
+        // https://cs.chromium.org/chromium/src/ui/ozone/platform/wayland/gpu/drm_render_node_path_finder.cc
+        const uint32_t kRenderNodeStart = 128;
+        const uint32_t kRenderNodeEnd = kRenderNodeStart + 16;
+        const std::string kRenderNodeTemplate = "/dev/dri/renderD";
+
+        int renderNodeFd = -1;
+        for (uint32_t i = kRenderNodeStart; i < kRenderNodeEnd; i++) {
+            std::string renderNode = kRenderNodeTemplate + std::to_string(i);
+            renderNodeFd = open(renderNode.c_str(), O_RDWR);
+            if (renderNodeFd >= 0)
+                break;
+        }
+        EXPECT_GE(renderNodeFd, 0) << "Failed to get file descriptor for render node";
+
+        gbm_device* gbmDevice = gbm_create_device(renderNodeFd);
+        EXPECT_NE(gbmDevice, nullptr) << "Failed to create GBM device";
+        mGbmDevice = gbmDevice;
+    }
+
+  private:
+    gbm_bo* CreateGbmBo(uint32_t width, uint32_t height, bool linear) {
+        uint32_t flags = GBM_BO_USE_RENDERING;
+        if (linear)
+            flags |= GBM_BO_USE_LINEAR;
+        gbm_bo* gbmBo = gbm_bo_create(mGbmDevice, width, height, GBM_FORMAT_XBGR8888, flags);
+        EXPECT_NE(gbmBo, nullptr) << "Failed to create GBM buffer object";
+        return gbmBo;
+    }
+
+    gbm_device* mGbmDevice = nullptr;
+};
+
+// static
+std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
+    const wgpu::Device& device) {
+    auto backend = std::make_unique<VulkanImageWrappingTestBackendDmaBuf>(device);
+    backend->CreateGbmDevice();
+    return backend;
+}
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp b/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
index 9393c06..c92181c 100644
--- a/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
+++ b/src/dawn/tests/white_box/VulkanImageWrappingTests_OpaqueFD.cpp
@@ -26,256 +26,246 @@
 
 namespace dawn::native::vulkan {
 
-    ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
-        : ExternalImageDescriptorVk(ExternalImageType::OpaqueFD) {
+ExternalImageDescriptorVkForTesting::ExternalImageDescriptorVkForTesting()
+    : ExternalImageDescriptorVk(ExternalImageType::OpaqueFD) {}
+ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
+    : ExternalImageExportInfoVk(ExternalImageType::OpaqueFD) {}
+
+class ExternalSemaphoreOpaqueFD : public VulkanImageWrappingTestBackend::ExternalSemaphore {
+  public:
+    explicit ExternalSemaphoreOpaqueFD(int handle) : mHandle(handle) {}
+    ~ExternalSemaphoreOpaqueFD() override {
+        if (mHandle != -1) {
+            close(mHandle);
+        }
     }
-    ExternalImageExportInfoVkForTesting::ExternalImageExportInfoVkForTesting()
-        : ExternalImageExportInfoVk(ExternalImageType::OpaqueFD) {
+    int AcquireHandle() {
+        int handle = mHandle;
+        mHandle = -1;
+        return handle;
     }
 
-    class ExternalSemaphoreOpaqueFD : public VulkanImageWrappingTestBackend::ExternalSemaphore {
-      public:
-        explicit ExternalSemaphoreOpaqueFD(int handle) : mHandle(handle) {
+  private:
+    int mHandle = -1;
+};
+
+class ExternalTextureOpaqueFD : public VulkanImageWrappingTestBackend::ExternalTexture {
+  public:
+    ExternalTextureOpaqueFD(dawn::native::vulkan::Device* device,
+                            int fd,
+                            VkDeviceMemory allocation,
+                            VkImage handle,
+                            VkDeviceSize allocationSize,
+                            uint32_t memoryTypeIndex)
+        : mDevice(device),
+          mFd(fd),
+          mAllocation(allocation),
+          mHandle(handle),
+          allocationSize(allocationSize),
+          memoryTypeIndex(memoryTypeIndex) {}
+
+    ~ExternalTextureOpaqueFD() override {
+        if (mFd != -1) {
+            close(mFd);
         }
-        ~ExternalSemaphoreOpaqueFD() override {
-            if (mHandle != -1) {
-                close(mHandle);
-            }
+        if (mAllocation != VK_NULL_HANDLE) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(mAllocation);
         }
-        int AcquireHandle() {
-            int handle = mHandle;
-            mHandle = -1;
-            return handle;
+        if (mHandle != VK_NULL_HANDLE) {
+            mDevice->GetFencedDeleter()->DeleteWhenUnused(mHandle);
+        }
+    }
+
+    int Dup() const { return dup(mFd); }
+
+  private:
+    dawn::native::vulkan::Device* mDevice;
+    int mFd = -1;
+    VkDeviceMemory mAllocation = VK_NULL_HANDLE;
+    VkImage mHandle = VK_NULL_HANDLE;
+
+  public:
+    const VkDeviceSize allocationSize;
+    const uint32_t memoryTypeIndex;
+};
+
+class VulkanImageWrappingTestBackendOpaqueFD : public VulkanImageWrappingTestBackend {
+  public:
+    explicit VulkanImageWrappingTestBackendOpaqueFD(const wgpu::Device& device) : mDevice(device) {
+        mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
+    }
+
+    std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
+                                                   uint32_t height,
+                                                   wgpu::TextureFormat format,
+                                                   wgpu::TextureUsage usage) override {
+        EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
+        VkFormat vulkanFormat = VK_FORMAT_R8G8B8A8_UNORM;
+
+        VkImage handle;
+        ::VkResult result = CreateImage(mDeviceVk, width, height, vulkanFormat, &handle);
+        EXPECT_EQ(result, VK_SUCCESS) << "Failed to create external image";
+
+        VkDeviceMemory allocation;
+        VkDeviceSize allocationSize;
+        uint32_t memoryTypeIndex;
+        ::VkResult resultBool =
+            AllocateMemory(mDeviceVk, handle, &allocation, &allocationSize, &memoryTypeIndex);
+        EXPECT_EQ(resultBool, VK_SUCCESS) << "Failed to allocate external memory";
+
+        result = BindMemory(mDeviceVk, handle, allocation);
+        EXPECT_EQ(result, VK_SUCCESS) << "Failed to bind image memory";
+
+        int fd = GetMemoryFd(mDeviceVk, allocation);
+
+        return std::make_unique<ExternalTextureOpaqueFD>(mDeviceVk, fd, allocation, handle,
+                                                         allocationSize, memoryTypeIndex);
+    }
+
+    wgpu::Texture WrapImage(const wgpu::Device& device,
+                            const ExternalTexture* texture,
+                            const ExternalImageDescriptorVkForTesting& descriptor,
+                            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
+        const ExternalTextureOpaqueFD* textureOpaqueFD =
+            static_cast<const ExternalTextureOpaqueFD*>(texture);
+        std::vector<int> waitFDs;
+        for (auto& semaphore : semaphores) {
+            waitFDs.push_back(
+                static_cast<ExternalSemaphoreOpaqueFD*>(semaphore.get())->AcquireHandle());
         }
 
-      private:
-        int mHandle = -1;
-    };
+        ExternalImageDescriptorOpaqueFD descriptorOpaqueFD;
+        *static_cast<ExternalImageDescriptorVk*>(&descriptorOpaqueFD) = descriptor;
+        descriptorOpaqueFD.memoryFD = textureOpaqueFD->Dup();
+        descriptorOpaqueFD.allocationSize = textureOpaqueFD->allocationSize;
+        descriptorOpaqueFD.memoryTypeIndex = textureOpaqueFD->memoryTypeIndex;
+        descriptorOpaqueFD.waitFDs = std::move(waitFDs);
 
-    class ExternalTextureOpaqueFD : public VulkanImageWrappingTestBackend::ExternalTexture {
-      public:
-        ExternalTextureOpaqueFD(dawn::native::vulkan::Device* device,
-                                int fd,
-                                VkDeviceMemory allocation,
-                                VkImage handle,
-                                VkDeviceSize allocationSize,
-                                uint32_t memoryTypeIndex)
-            : mDevice(device),
-              mFd(fd),
-              mAllocation(allocation),
-              mHandle(handle),
-              allocationSize(allocationSize),
-              memoryTypeIndex(memoryTypeIndex) {
+        return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorOpaqueFD);
+    }
+
+    bool ExportImage(const wgpu::Texture& texture,
+                     VkImageLayout layout,
+                     ExternalImageExportInfoVkForTesting* exportInfo) override {
+        ExternalImageExportInfoOpaqueFD infoOpaqueFD;
+        bool success = ExportVulkanImage(texture.Get(), layout, &infoOpaqueFD);
+
+        *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoOpaqueFD;
+        for (int fd : infoOpaqueFD.semaphoreHandles) {
+            EXPECT_NE(fd, -1);
+            exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreOpaqueFD>(fd));
         }
 
-        ~ExternalTextureOpaqueFD() override {
-            if (mFd != -1) {
-                close(mFd);
-            }
-            if (mAllocation != VK_NULL_HANDLE) {
-                mDevice->GetFencedDeleter()->DeleteWhenUnused(mAllocation);
-            }
-            if (mHandle != VK_NULL_HANDLE) {
-                mDevice->GetFencedDeleter()->DeleteWhenUnused(mHandle);
-            }
-        }
+        return success;
+    }
 
-        int Dup() const {
-            return dup(mFd);
-        }
+  private:
+    // Creates a VkImage with external memory
+    ::VkResult CreateImage(dawn::native::vulkan::Device* deviceVk,
+                           uint32_t width,
+                           uint32_t height,
+                           VkFormat format,
+                           VkImage* image) {
+        VkExternalMemoryImageCreateInfoKHR externalInfo;
+        externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
+        externalInfo.pNext = nullptr;
+        externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
 
-      private:
-        dawn::native::vulkan::Device* mDevice;
-        int mFd = -1;
-        VkDeviceMemory mAllocation = VK_NULL_HANDLE;
-        VkImage mHandle = VK_NULL_HANDLE;
+        auto usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+                     VK_IMAGE_USAGE_TRANSFER_DST_BIT;
 
-      public:
-        const VkDeviceSize allocationSize;
-        const uint32_t memoryTypeIndex;
-    };
+        VkImageCreateInfo createInfo;
+        createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+        createInfo.pNext = &externalInfo;
+        createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
+        createInfo.imageType = VK_IMAGE_TYPE_2D;
+        createInfo.format = format;
+        createInfo.extent = {width, height, 1};
+        createInfo.mipLevels = 1;
+        createInfo.arrayLayers = 1;
+        createInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+        createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+        createInfo.usage = usage;
+        createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        createInfo.queueFamilyIndexCount = 0;
+        createInfo.pQueueFamilyIndices = nullptr;
+        createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
 
-    class VulkanImageWrappingTestBackendOpaqueFD : public VulkanImageWrappingTestBackend {
-      public:
-        explicit VulkanImageWrappingTestBackendOpaqueFD(const wgpu::Device& device)
-            : mDevice(device) {
-            mDeviceVk = dawn::native::vulkan::ToBackend(dawn::native::FromAPI(device.Get()));
-        }
+        return deviceVk->fn.CreateImage(deviceVk->GetVkDevice(), &createInfo, nullptr, &**image);
+    }
 
-        std::unique_ptr<ExternalTexture> CreateTexture(uint32_t width,
-                                                       uint32_t height,
-                                                       wgpu::TextureFormat format,
-                                                       wgpu::TextureUsage usage) override {
-            EXPECT_EQ(format, wgpu::TextureFormat::RGBA8Unorm);
-            VkFormat vulkanFormat = VK_FORMAT_R8G8B8A8_UNORM;
+    // Allocates memory for an image
+    ::VkResult AllocateMemory(dawn::native::vulkan::Device* deviceVk,
+                              VkImage handle,
+                              VkDeviceMemory* allocation,
+                              VkDeviceSize* allocationSize,
+                              uint32_t* memoryTypeIndex) {
+        // Create the image memory and associate it with the container
+        VkMemoryRequirements requirements;
+        deviceVk->fn.GetImageMemoryRequirements(deviceVk->GetVkDevice(), handle, &requirements);
 
-            VkImage handle;
-            ::VkResult result = CreateImage(mDeviceVk, width, height, vulkanFormat, &handle);
-            EXPECT_EQ(result, VK_SUCCESS) << "Failed to create external image";
+        // Import memory from file descriptor
+        VkExportMemoryAllocateInfoKHR externalInfo;
+        externalInfo.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
+        externalInfo.pNext = nullptr;
+        externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
 
-            VkDeviceMemory allocation;
-            VkDeviceSize allocationSize;
-            uint32_t memoryTypeIndex;
-            ::VkResult resultBool =
-                AllocateMemory(mDeviceVk, handle, &allocation, &allocationSize, &memoryTypeIndex);
-            EXPECT_EQ(resultBool, VK_SUCCESS) << "Failed to allocate external memory";
+        int bestType = deviceVk->GetResourceMemoryAllocator()->FindBestTypeIndex(
+            requirements, MemoryKind::Opaque);
+        VkMemoryAllocateInfo allocateInfo;
+        allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+        allocateInfo.pNext = &externalInfo;
+        allocateInfo.allocationSize = requirements.size;
+        allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
 
-            result = BindMemory(mDeviceVk, handle, allocation);
-            EXPECT_EQ(result, VK_SUCCESS) << "Failed to bind image memory";
+        *allocationSize = allocateInfo.allocationSize;
+        *memoryTypeIndex = allocateInfo.memoryTypeIndex;
 
-            int fd = GetMemoryFd(mDeviceVk, allocation);
+        return deviceVk->fn.AllocateMemory(deviceVk->GetVkDevice(), &allocateInfo, nullptr,
+                                           &**allocation);
+    }
 
-            return std::make_unique<ExternalTextureOpaqueFD>(mDeviceVk, fd, allocation, handle,
-                                                             allocationSize, memoryTypeIndex);
-        }
+    // Binds memory to an image
+    ::VkResult BindMemory(dawn::native::vulkan::Device* deviceVk,
+                          VkImage handle,
+                          VkDeviceMemory memory) {
+        return deviceVk->fn.BindImageMemory(deviceVk->GetVkDevice(), handle, memory, 0);
+    }
 
-        wgpu::Texture WrapImage(
-            const wgpu::Device& device,
-            const ExternalTexture* texture,
-            const ExternalImageDescriptorVkForTesting& descriptor,
-            std::vector<std::unique_ptr<ExternalSemaphore>> semaphores) override {
-            const ExternalTextureOpaqueFD* textureOpaqueFD =
-                static_cast<const ExternalTextureOpaqueFD*>(texture);
-            std::vector<int> waitFDs;
-            for (auto& semaphore : semaphores) {
-                waitFDs.push_back(
-                    static_cast<ExternalSemaphoreOpaqueFD*>(semaphore.get())->AcquireHandle());
-            }
+    // Extracts a file descriptor representing memory on a device
+    int GetMemoryFd(dawn::native::vulkan::Device* deviceVk, VkDeviceMemory memory) {
+        VkMemoryGetFdInfoKHR getFdInfo;
+        getFdInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
+        getFdInfo.pNext = nullptr;
+        getFdInfo.memory = memory;
+        getFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
 
-            ExternalImageDescriptorOpaqueFD descriptorOpaqueFD;
-            *static_cast<ExternalImageDescriptorVk*>(&descriptorOpaqueFD) = descriptor;
-            descriptorOpaqueFD.memoryFD = textureOpaqueFD->Dup();
-            descriptorOpaqueFD.allocationSize = textureOpaqueFD->allocationSize;
-            descriptorOpaqueFD.memoryTypeIndex = textureOpaqueFD->memoryTypeIndex;
-            descriptorOpaqueFD.waitFDs = std::move(waitFDs);
+        int memoryFd = -1;
+        deviceVk->fn.GetMemoryFdKHR(deviceVk->GetVkDevice(), &getFdInfo, &memoryFd);
 
-            return dawn::native::vulkan::WrapVulkanImage(device.Get(), &descriptorOpaqueFD);
-        }
+        EXPECT_GE(memoryFd, 0) << "Failed to get file descriptor for external memory";
+        return memoryFd;
+    }
 
-        bool ExportImage(const wgpu::Texture& texture,
-                         VkImageLayout layout,
-                         ExternalImageExportInfoVkForTesting* exportInfo) override {
-            ExternalImageExportInfoOpaqueFD infoOpaqueFD;
-            bool success = ExportVulkanImage(texture.Get(), layout, &infoOpaqueFD);
-
-            *static_cast<ExternalImageExportInfoVk*>(exportInfo) = infoOpaqueFD;
-            for (int fd : infoOpaqueFD.semaphoreHandles) {
-                EXPECT_NE(fd, -1);
-                exportInfo->semaphores.push_back(std::make_unique<ExternalSemaphoreOpaqueFD>(fd));
-            }
-
-            return success;
-        }
-
-      private:
-        // Creates a VkImage with external memory
-        ::VkResult CreateImage(dawn::native::vulkan::Device* deviceVk,
+    // Prepares and exports memory for an image on a given device
+    void CreateBindExportImage(dawn::native::vulkan::Device* deviceVk,
                                uint32_t width,
                                uint32_t height,
                                VkFormat format,
-                               VkImage* image) {
-            VkExternalMemoryImageCreateInfoKHR externalInfo;
-            externalInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
-            externalInfo.pNext = nullptr;
-            externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
+                               VkImage* handle,
+                               VkDeviceMemory* allocation,
+                               VkDeviceSize* allocationSize,
+                               uint32_t* memoryTypeIndex,
+                               int* memoryFd) {}
 
-            auto usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
-                         VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+    wgpu::Device mDevice;
+    dawn::native::vulkan::Device* mDeviceVk;
+};
 
-            VkImageCreateInfo createInfo;
-            createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
-            createInfo.pNext = &externalInfo;
-            createInfo.flags = VK_IMAGE_CREATE_ALIAS_BIT_KHR;
-            createInfo.imageType = VK_IMAGE_TYPE_2D;
-            createInfo.format = format;
-            createInfo.extent = {width, height, 1};
-            createInfo.mipLevels = 1;
-            createInfo.arrayLayers = 1;
-            createInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-            createInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-            createInfo.usage = usage;
-            createInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-            createInfo.queueFamilyIndexCount = 0;
-            createInfo.pQueueFamilyIndices = nullptr;
-            createInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
-            return deviceVk->fn.CreateImage(deviceVk->GetVkDevice(), &createInfo, nullptr,
-                                            &**image);
-        }
-
-        // Allocates memory for an image
-        ::VkResult AllocateMemory(dawn::native::vulkan::Device* deviceVk,
-                                  VkImage handle,
-                                  VkDeviceMemory* allocation,
-                                  VkDeviceSize* allocationSize,
-                                  uint32_t* memoryTypeIndex) {
-            // Create the image memory and associate it with the container
-            VkMemoryRequirements requirements;
-            deviceVk->fn.GetImageMemoryRequirements(deviceVk->GetVkDevice(), handle, &requirements);
-
-            // Import memory from file descriptor
-            VkExportMemoryAllocateInfoKHR externalInfo;
-            externalInfo.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
-            externalInfo.pNext = nullptr;
-            externalInfo.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
-            int bestType = deviceVk->GetResourceMemoryAllocator()->FindBestTypeIndex(
-                requirements, MemoryKind::Opaque);
-            VkMemoryAllocateInfo allocateInfo;
-            allocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-            allocateInfo.pNext = &externalInfo;
-            allocateInfo.allocationSize = requirements.size;
-            allocateInfo.memoryTypeIndex = static_cast<uint32_t>(bestType);
-
-            *allocationSize = allocateInfo.allocationSize;
-            *memoryTypeIndex = allocateInfo.memoryTypeIndex;
-
-            return deviceVk->fn.AllocateMemory(deviceVk->GetVkDevice(), &allocateInfo, nullptr,
-                                               &**allocation);
-        }
-
-        // Binds memory to an image
-        ::VkResult BindMemory(dawn::native::vulkan::Device* deviceVk,
-                              VkImage handle,
-                              VkDeviceMemory memory) {
-            return deviceVk->fn.BindImageMemory(deviceVk->GetVkDevice(), handle, memory, 0);
-        }
-
-        // Extracts a file descriptor representing memory on a device
-        int GetMemoryFd(dawn::native::vulkan::Device* deviceVk, VkDeviceMemory memory) {
-            VkMemoryGetFdInfoKHR getFdInfo;
-            getFdInfo.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
-            getFdInfo.pNext = nullptr;
-            getFdInfo.memory = memory;
-            getFdInfo.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
-
-            int memoryFd = -1;
-            deviceVk->fn.GetMemoryFdKHR(deviceVk->GetVkDevice(), &getFdInfo, &memoryFd);
-
-            EXPECT_GE(memoryFd, 0) << "Failed to get file descriptor for external memory";
-            return memoryFd;
-        }
-
-        // Prepares and exports memory for an image on a given device
-        void CreateBindExportImage(dawn::native::vulkan::Device* deviceVk,
-                                   uint32_t width,
-                                   uint32_t height,
-                                   VkFormat format,
-                                   VkImage* handle,
-                                   VkDeviceMemory* allocation,
-                                   VkDeviceSize* allocationSize,
-                                   uint32_t* memoryTypeIndex,
-                                   int* memoryFd) {
-        }
-
-        wgpu::Device mDevice;
-        dawn::native::vulkan::Device* mDeviceVk;
-    };
-
-    // static
-    std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
-        const wgpu::Device& device) {
-        return std::make_unique<VulkanImageWrappingTestBackendOpaqueFD>(device);
-    }
+// static
+std::unique_ptr<VulkanImageWrappingTestBackend> VulkanImageWrappingTestBackend::Create(
+    const wgpu::Device& device) {
+    return std::make_unique<VulkanImageWrappingTestBackendOpaqueFD>(device);
+}
 
 }  // namespace dawn::native::vulkan
diff --git a/src/dawn/utils/BackendBinding.cpp b/src/dawn/utils/BackendBinding.cpp
index f97e6b9..17bea5f 100644
--- a/src/dawn/utils/BackendBinding.cpp
+++ b/src/dawn/utils/BackendBinding.cpp
@@ -19,91 +19,88 @@
 #include "GLFW/glfw3.h"
 
 #if defined(DAWN_ENABLE_BACKEND_OPENGL)
-#    include "dawn/native/OpenGLBackend.h"
+#include "dawn/native/OpenGLBackend.h"
 #endif  // defined(DAWN_ENABLE_BACKEND_OPENGL)
 
 namespace utils {
 
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-    BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device);
 #endif
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-    BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device);
 #endif
 #if defined(DAWN_ENABLE_BACKEND_NULL)
-    BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device);
 #endif
 #if defined(DAWN_ENABLE_BACKEND_OPENGL)
-    BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device);
 #endif
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-    BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
+BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device);
 #endif
 
-    BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
-        : mWindow(window), mDevice(device) {
-    }
+BackendBinding::BackendBinding(GLFWwindow* window, WGPUDevice device)
+    : mWindow(window), mDevice(device) {}
 
-    void DiscoverAdapter(dawn::native::Instance* instance,
-                         GLFWwindow* window,
-                         wgpu::BackendType type) {
-        DAWN_UNUSED(type);
-        DAWN_UNUSED(window);
+void DiscoverAdapter(dawn::native::Instance* instance, GLFWwindow* window, wgpu::BackendType type) {
+    DAWN_UNUSED(type);
+    DAWN_UNUSED(window);
 
-        if (type == wgpu::BackendType::OpenGL || type == wgpu::BackendType::OpenGLES) {
+    if (type == wgpu::BackendType::OpenGL || type == wgpu::BackendType::OpenGLES) {
 #if defined(DAWN_ENABLE_BACKEND_OPENGL)
-            glfwMakeContextCurrent(window);
-            auto getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
-            if (type == wgpu::BackendType::OpenGL) {
-                dawn::native::opengl::AdapterDiscoveryOptions adapterOptions;
-                adapterOptions.getProc = getProc;
-                instance->DiscoverAdapters(&adapterOptions);
-            } else {
-                dawn::native::opengl::AdapterDiscoveryOptionsES adapterOptions;
-                adapterOptions.getProc = getProc;
-                instance->DiscoverAdapters(&adapterOptions);
-            }
-#endif  // defined(DAWN_ENABLE_BACKEND_OPENGL)
+        glfwMakeContextCurrent(window);
+        auto getProc = reinterpret_cast<void* (*)(const char*)>(glfwGetProcAddress);
+        if (type == wgpu::BackendType::OpenGL) {
+            dawn::native::opengl::AdapterDiscoveryOptions adapterOptions;
+            adapterOptions.getProc = getProc;
+            instance->DiscoverAdapters(&adapterOptions);
         } else {
-            instance->DiscoverDefaultAdapters();
+            dawn::native::opengl::AdapterDiscoveryOptionsES adapterOptions;
+            adapterOptions.getProc = getProc;
+            instance->DiscoverAdapters(&adapterOptions);
         }
+#endif  // defined(DAWN_ENABLE_BACKEND_OPENGL)
+    } else {
+        instance->DiscoverDefaultAdapters();
     }
+}
 
-    BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
-        switch (type) {
+BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device) {
+    switch (type) {
 #if defined(DAWN_ENABLE_BACKEND_D3D12)
-            case wgpu::BackendType::D3D12:
-                return CreateD3D12Binding(window, device);
+        case wgpu::BackendType::D3D12:
+            return CreateD3D12Binding(window, device);
 #endif
 
 #if defined(DAWN_ENABLE_BACKEND_METAL)
-            case wgpu::BackendType::Metal:
-                return CreateMetalBinding(window, device);
+        case wgpu::BackendType::Metal:
+            return CreateMetalBinding(window, device);
 #endif
 
 #if defined(DAWN_ENABLE_BACKEND_NULL)
-            case wgpu::BackendType::Null:
-                return CreateNullBinding(window, device);
+        case wgpu::BackendType::Null:
+            return CreateNullBinding(window, device);
 #endif
 
 #if defined(DAWN_ENABLE_BACKEND_DESKTOP_GL)
-            case wgpu::BackendType::OpenGL:
-                return CreateOpenGLBinding(window, device);
+        case wgpu::BackendType::OpenGL:
+            return CreateOpenGLBinding(window, device);
 #endif
 
 #if defined(DAWN_ENABLE_BACKEND_OPENGLES)
-            case wgpu::BackendType::OpenGLES:
-                return CreateOpenGLBinding(window, device);
+        case wgpu::BackendType::OpenGLES:
+            return CreateOpenGLBinding(window, device);
 #endif
 
 #if defined(DAWN_ENABLE_BACKEND_VULKAN)
-            case wgpu::BackendType::Vulkan:
-                return CreateVulkanBinding(window, device);
+        case wgpu::BackendType::Vulkan:
+            return CreateVulkanBinding(window, device);
 #endif
 
-            default:
-                return nullptr;
-        }
+        default:
+            return nullptr;
     }
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/BackendBinding.h b/src/dawn/utils/BackendBinding.h
index 7c988e7..7871934 100644
--- a/src/dawn/utils/BackendBinding.h
+++ b/src/dawn/utils/BackendBinding.h
@@ -22,24 +22,22 @@
 
 namespace utils {
 
-    class BackendBinding {
-      public:
-        virtual ~BackendBinding() = default;
+class BackendBinding {
+  public:
+    virtual ~BackendBinding() = default;
 
-        virtual uint64_t GetSwapChainImplementation() = 0;
-        virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
+    virtual uint64_t GetSwapChainImplementation() = 0;
+    virtual WGPUTextureFormat GetPreferredSwapChainTextureFormat() = 0;
 
-      protected:
-        BackendBinding(GLFWwindow* window, WGPUDevice device);
+  protected:
+    BackendBinding(GLFWwindow* window, WGPUDevice device);
 
-        GLFWwindow* mWindow = nullptr;
-        WGPUDevice mDevice = nullptr;
-    };
+    GLFWwindow* mWindow = nullptr;
+    WGPUDevice mDevice = nullptr;
+};
 
-    void DiscoverAdapter(dawn::native::Instance* instance,
-                         GLFWwindow* window,
-                         wgpu::BackendType type);
-    BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
+void DiscoverAdapter(dawn::native::Instance* instance, GLFWwindow* window, wgpu::BackendType type);
+BackendBinding* CreateBinding(wgpu::BackendType type, GLFWwindow* window, WGPUDevice device);
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
index 9c413d2..4b21e32 100644
--- a/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
+++ b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.cpp
@@ -18,11 +18,11 @@
 
 namespace utils {
 
-    ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
-        wgpu::RenderBundleEncoderDescriptor* descriptor = this;
+ComboRenderBundleEncoderDescriptor::ComboRenderBundleEncoderDescriptor() {
+    wgpu::RenderBundleEncoderDescriptor* descriptor = this;
 
-        descriptor->colorFormatsCount = 0;
-        descriptor->colorFormats = &cColorFormats[0];
-    }
+    descriptor->colorFormatsCount = 0;
+    descriptor->colorFormats = &cColorFormats[0];
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
index d91758b..3f4db0a 100644
--- a/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
+++ b/src/dawn/utils/ComboRenderBundleEncoderDescriptor.h
@@ -22,12 +22,12 @@
 
 namespace utils {
 
-    class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
-      public:
-        ComboRenderBundleEncoderDescriptor();
+class ComboRenderBundleEncoderDescriptor : public wgpu::RenderBundleEncoderDescriptor {
+  public:
+    ComboRenderBundleEncoderDescriptor();
 
-        std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
-    };
+    std::array<wgpu::TextureFormat, kMaxColorAttachments> cColorFormats;
+};
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/ComboRenderPipelineDescriptor.cpp b/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
index 1114af4..78afd80 100644
--- a/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
+++ b/src/dawn/utils/ComboRenderPipelineDescriptor.cpp
@@ -18,128 +18,128 @@
 
 namespace utils {
 
-    ComboVertexState::ComboVertexState() {
-        vertexBufferCount = 0;
+ComboVertexState::ComboVertexState() {
+    vertexBufferCount = 0;
+
+    // Fill the default values for vertexBuffers and vertexAttributes in buffers.
+    wgpu::VertexAttribute vertexAttribute;
+    vertexAttribute.shaderLocation = 0;
+    vertexAttribute.offset = 0;
+    vertexAttribute.format = wgpu::VertexFormat::Float32;
+    for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
+        cAttributes[i] = vertexAttribute;
+    }
+    for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
+        cVertexBuffers[i].arrayStride = 0;
+        cVertexBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+        cVertexBuffers[i].attributeCount = 0;
+        cVertexBuffers[i].attributes = nullptr;
+    }
+    // cVertexBuffers[i].attributes points to somewhere in cAttributes.
+    // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+    // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
+    // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
+    // cVertexBuffers[2].attributes should point to &cAttributes[5].
+    cVertexBuffers[0].attributes = &cAttributes[0];
+}
+
+ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor() {
+    wgpu::RenderPipelineDescriptor* descriptor = this;
+
+    // Set defaults for the vertex state.
+    {
+        wgpu::VertexState* vertex = &descriptor->vertex;
+        vertex->module = nullptr;
+        vertex->entryPoint = "main";
+        vertex->bufferCount = 0;
 
         // Fill the default values for vertexBuffers and vertexAttributes in buffers.
-        wgpu::VertexAttribute vertexAttribute;
-        vertexAttribute.shaderLocation = 0;
-        vertexAttribute.offset = 0;
-        vertexAttribute.format = wgpu::VertexFormat::Float32;
         for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
-            cAttributes[i] = vertexAttribute;
+            cAttributes[i].shaderLocation = 0;
+            cAttributes[i].offset = 0;
+            cAttributes[i].format = wgpu::VertexFormat::Float32;
         }
         for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
-            cVertexBuffers[i].arrayStride = 0;
-            cVertexBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
-            cVertexBuffers[i].attributeCount = 0;
-            cVertexBuffers[i].attributes = nullptr;
+            cBuffers[i].arrayStride = 0;
+            cBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
+            cBuffers[i].attributeCount = 0;
+            cBuffers[i].attributes = nullptr;
         }
-        // cVertexBuffers[i].attributes points to somewhere in cAttributes.
-        // cVertexBuffers[0].attributes points to &cAttributes[0] by default. Assuming
-        // cVertexBuffers[0] has two attributes, then cVertexBuffers[1].attributes should point to
-        // &cAttributes[2]. Likewise, if cVertexBuffers[1] has 3 attributes, then
-        // cVertexBuffers[2].attributes should point to &cAttributes[5].
-        cVertexBuffers[0].attributes = &cAttributes[0];
+        // cBuffers[i].attributes points to somewhere in cAttributes.
+        // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
+        // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
+        // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
+        // cBuffers[2].attributes should point to &cAttributes[5].
+        cBuffers[0].attributes = &cAttributes[0];
+        vertex->buffers = &cBuffers[0];
     }
 
-    ComboRenderPipelineDescriptor::ComboRenderPipelineDescriptor() {
-        wgpu::RenderPipelineDescriptor* descriptor = this;
-
-        // Set defaults for the vertex state.
-        {
-            wgpu::VertexState* vertex = &descriptor->vertex;
-            vertex->module = nullptr;
-            vertex->entryPoint = "main";
-            vertex->bufferCount = 0;
-
-            // Fill the default values for vertexBuffers and vertexAttributes in buffers.
-            for (uint32_t i = 0; i < kMaxVertexAttributes; ++i) {
-                cAttributes[i].shaderLocation = 0;
-                cAttributes[i].offset = 0;
-                cAttributes[i].format = wgpu::VertexFormat::Float32;
-            }
-            for (uint32_t i = 0; i < kMaxVertexBuffers; ++i) {
-                cBuffers[i].arrayStride = 0;
-                cBuffers[i].stepMode = wgpu::VertexStepMode::Vertex;
-                cBuffers[i].attributeCount = 0;
-                cBuffers[i].attributes = nullptr;
-            }
-            // cBuffers[i].attributes points to somewhere in cAttributes.
-            // cBuffers[0].attributes points to &cAttributes[0] by default. Assuming
-            // cBuffers[0] has two attributes, then cBuffers[1].attributes should point to
-            // &cAttributes[2]. Likewise, if cBuffers[1] has 3 attributes, then
-            // cBuffers[2].attributes should point to &cAttributes[5].
-            cBuffers[0].attributes = &cAttributes[0];
-            vertex->buffers = &cBuffers[0];
-        }
-
-        // Set the defaults for the primitive state
-        {
-            wgpu::PrimitiveState* primitive = &descriptor->primitive;
-            primitive->topology = wgpu::PrimitiveTopology::TriangleList;
-            primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
-            primitive->frontFace = wgpu::FrontFace::CCW;
-            primitive->cullMode = wgpu::CullMode::None;
-        }
-
-        // Set the defaults for the depth-stencil state
-        {
-            wgpu::StencilFaceState stencilFace;
-            stencilFace.compare = wgpu::CompareFunction::Always;
-            stencilFace.failOp = wgpu::StencilOperation::Keep;
-            stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
-            stencilFace.passOp = wgpu::StencilOperation::Keep;
-
-            cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
-            cDepthStencil.depthWriteEnabled = false;
-            cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
-            cDepthStencil.stencilBack = stencilFace;
-            cDepthStencil.stencilFront = stencilFace;
-            cDepthStencil.stencilReadMask = 0xff;
-            cDepthStencil.stencilWriteMask = 0xff;
-            cDepthStencil.depthBias = 0;
-            cDepthStencil.depthBiasSlopeScale = 0.0;
-            cDepthStencil.depthBiasClamp = 0.0;
-        }
-
-        // Set the defaults for the multisample state
-        {
-            wgpu::MultisampleState* multisample = &descriptor->multisample;
-            multisample->count = 1;
-            multisample->mask = 0xFFFFFFFF;
-            multisample->alphaToCoverageEnabled = false;
-        }
-
-        // Set the defaults for the fragment state
-        {
-            cFragment.module = nullptr;
-            cFragment.entryPoint = "main";
-            cFragment.targetCount = 1;
-            cFragment.targets = &cTargets[0];
-            descriptor->fragment = &cFragment;
-
-            wgpu::BlendComponent blendComponent;
-            blendComponent.srcFactor = wgpu::BlendFactor::One;
-            blendComponent.dstFactor = wgpu::BlendFactor::Zero;
-            blendComponent.operation = wgpu::BlendOperation::Add;
-
-            for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-                cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
-                cTargets[i].blend = nullptr;
-                cTargets[i].writeMask = wgpu::ColorWriteMask::All;
-
-                cBlends[i].color = blendComponent;
-                cBlends[i].alpha = blendComponent;
-            }
-        }
+    // Set the defaults for the primitive state
+    {
+        wgpu::PrimitiveState* primitive = &descriptor->primitive;
+        primitive->topology = wgpu::PrimitiveTopology::TriangleList;
+        primitive->stripIndexFormat = wgpu::IndexFormat::Undefined;
+        primitive->frontFace = wgpu::FrontFace::CCW;
+        primitive->cullMode = wgpu::CullMode::None;
     }
 
-    wgpu::DepthStencilState* ComboRenderPipelineDescriptor::EnableDepthStencil(
-        wgpu::TextureFormat format) {
-        this->depthStencil = &cDepthStencil;
-        cDepthStencil.format = format;
-        return &cDepthStencil;
+    // Set the defaults for the depth-stencil state
+    {
+        wgpu::StencilFaceState stencilFace;
+        stencilFace.compare = wgpu::CompareFunction::Always;
+        stencilFace.failOp = wgpu::StencilOperation::Keep;
+        stencilFace.depthFailOp = wgpu::StencilOperation::Keep;
+        stencilFace.passOp = wgpu::StencilOperation::Keep;
+
+        cDepthStencil.format = wgpu::TextureFormat::Depth24PlusStencil8;
+        cDepthStencil.depthWriteEnabled = false;
+        cDepthStencil.depthCompare = wgpu::CompareFunction::Always;
+        cDepthStencil.stencilBack = stencilFace;
+        cDepthStencil.stencilFront = stencilFace;
+        cDepthStencil.stencilReadMask = 0xff;
+        cDepthStencil.stencilWriteMask = 0xff;
+        cDepthStencil.depthBias = 0;
+        cDepthStencil.depthBiasSlopeScale = 0.0;
+        cDepthStencil.depthBiasClamp = 0.0;
     }
 
+    // Set the defaults for the multisample state
+    {
+        wgpu::MultisampleState* multisample = &descriptor->multisample;
+        multisample->count = 1;
+        multisample->mask = 0xFFFFFFFF;
+        multisample->alphaToCoverageEnabled = false;
+    }
+
+    // Set the defaults for the fragment state
+    {
+        cFragment.module = nullptr;
+        cFragment.entryPoint = "main";
+        cFragment.targetCount = 1;
+        cFragment.targets = &cTargets[0];
+        descriptor->fragment = &cFragment;
+
+        wgpu::BlendComponent blendComponent;
+        blendComponent.srcFactor = wgpu::BlendFactor::One;
+        blendComponent.dstFactor = wgpu::BlendFactor::Zero;
+        blendComponent.operation = wgpu::BlendOperation::Add;
+
+        for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+            cTargets[i].format = wgpu::TextureFormat::RGBA8Unorm;
+            cTargets[i].blend = nullptr;
+            cTargets[i].writeMask = wgpu::ColorWriteMask::All;
+
+            cBlends[i].color = blendComponent;
+            cBlends[i].alpha = blendComponent;
+        }
+    }
+}
+
+wgpu::DepthStencilState* ComboRenderPipelineDescriptor::EnableDepthStencil(
+    wgpu::TextureFormat format) {
+    this->depthStencil = &cDepthStencil;
+    cDepthStencil.format = format;
+    return &cDepthStencil;
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/ComboRenderPipelineDescriptor.h b/src/dawn/utils/ComboRenderPipelineDescriptor.h
index 71ae690..b1e5c2a 100644
--- a/src/dawn/utils/ComboRenderPipelineDescriptor.h
+++ b/src/dawn/utils/ComboRenderPipelineDescriptor.h
@@ -22,41 +22,41 @@
 
 namespace utils {
 
-    // Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
-    class ComboVertexState {
-      public:
-        ComboVertexState();
+// Primarily used by tests to easily set up the vertex buffer state portion of a RenderPipeline.
+class ComboVertexState {
+  public:
+    ComboVertexState();
 
-        ComboVertexState(const ComboVertexState&) = delete;
-        ComboVertexState& operator=(const ComboVertexState&) = delete;
-        ComboVertexState(ComboVertexState&&) = delete;
-        ComboVertexState& operator=(ComboVertexState&&) = delete;
+    ComboVertexState(const ComboVertexState&) = delete;
+    ComboVertexState& operator=(const ComboVertexState&) = delete;
+    ComboVertexState(ComboVertexState&&) = delete;
+    ComboVertexState& operator=(ComboVertexState&&) = delete;
 
-        uint32_t vertexBufferCount;
-        std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
-        std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
-    };
+    uint32_t vertexBufferCount;
+    std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cVertexBuffers;
+    std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+};
 
-    class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
-      public:
-        ComboRenderPipelineDescriptor();
+class ComboRenderPipelineDescriptor : public wgpu::RenderPipelineDescriptor {
+  public:
+    ComboRenderPipelineDescriptor();
 
-        ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
-        ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
-        ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
-        ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
+    ComboRenderPipelineDescriptor(const ComboRenderPipelineDescriptor&) = delete;
+    ComboRenderPipelineDescriptor& operator=(const ComboRenderPipelineDescriptor&) = delete;
+    ComboRenderPipelineDescriptor(ComboRenderPipelineDescriptor&&) = delete;
+    ComboRenderPipelineDescriptor& operator=(ComboRenderPipelineDescriptor&&) = delete;
 
-        wgpu::DepthStencilState* EnableDepthStencil(
-            wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
+    wgpu::DepthStencilState* EnableDepthStencil(
+        wgpu::TextureFormat format = wgpu::TextureFormat::Depth24PlusStencil8);
 
-        std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
-        std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
-        std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
-        std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
+    std::array<wgpu::VertexBufferLayout, kMaxVertexBuffers> cBuffers;
+    std::array<wgpu::VertexAttribute, kMaxVertexAttributes> cAttributes;
+    std::array<wgpu::ColorTargetState, kMaxColorAttachments> cTargets;
+    std::array<wgpu::BlendState, kMaxColorAttachments> cBlends;
 
-        wgpu::FragmentState cFragment;
-        wgpu::DepthStencilState cDepthStencil;
-    };
+    wgpu::FragmentState cFragment;
+    wgpu::DepthStencilState cDepthStencil;
+};
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/D3D12Binding.cpp b/src/dawn/utils/D3D12Binding.cpp
index cd7f278..bdf44fe 100644
--- a/src/dawn/utils/D3D12Binding.cpp
+++ b/src/dawn/utils/D3D12Binding.cpp
@@ -25,31 +25,29 @@
 
 namespace utils {
 
-    class D3D12Binding : public BackendBinding {
-      public:
-        D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+class D3D12Binding : public BackendBinding {
+  public:
+    D3D12Binding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
+
+    uint64_t GetSwapChainImplementation() override {
+        if (mSwapchainImpl.userData == nullptr) {
+            HWND win32Window = glfwGetWin32Window(mWindow);
+            mSwapchainImpl = dawn::native::d3d12::CreateNativeSwapChainImpl(mDevice, win32Window);
         }
-
-        uint64_t GetSwapChainImplementation() override {
-            if (mSwapchainImpl.userData == nullptr) {
-                HWND win32Window = glfwGetWin32Window(mWindow);
-                mSwapchainImpl =
-                    dawn::native::d3d12::CreateNativeSwapChainImpl(mDevice, win32Window);
-            }
-            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
-        }
-
-        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
-            ASSERT(mSwapchainImpl.userData != nullptr);
-            return dawn::native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
-        }
-
-      private:
-        DawnSwapChainImplementation mSwapchainImpl = {};
-    };
-
-    BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
-        return new D3D12Binding(window, device);
+        return reinterpret_cast<uint64_t>(&mSwapchainImpl);
     }
 
+    WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+        ASSERT(mSwapchainImpl.userData != nullptr);
+        return dawn::native::d3d12::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+    }
+
+  private:
+    DawnSwapChainImplementation mSwapchainImpl = {};
+};
+
+BackendBinding* CreateD3D12Binding(GLFWwindow* window, WGPUDevice device) {
+    return new D3D12Binding(window, device);
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/EmptyDebugLogger.cpp b/src/dawn/utils/EmptyDebugLogger.cpp
index b52b38f..a8b2d24 100644
--- a/src/dawn/utils/EmptyDebugLogger.cpp
+++ b/src/dawn/utils/EmptyDebugLogger.cpp
@@ -16,14 +16,14 @@
 
 namespace utils {
 
-    class EmptyDebugLogger : public PlatformDebugLogger {
-      public:
-        EmptyDebugLogger() = default;
-        ~EmptyDebugLogger() override = default;
-    };
+class EmptyDebugLogger : public PlatformDebugLogger {
+  public:
+    EmptyDebugLogger() = default;
+    ~EmptyDebugLogger() override = default;
+};
 
-    PlatformDebugLogger* CreatePlatformDebugLogger() {
-        return new EmptyDebugLogger();
-    }
+PlatformDebugLogger* CreatePlatformDebugLogger() {
+    return new EmptyDebugLogger();
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/GLFWUtils.cpp b/src/dawn/utils/GLFWUtils.cpp
index 04cd8e1..5a1b4ba 100644
--- a/src/dawn/utils/GLFWUtils.cpp
+++ b/src/dawn/utils/GLFWUtils.cpp
@@ -15,72 +15,72 @@
 #include <cstdlib>
 #include <utility>
 
-#include "dawn/utils/GLFWUtils.h"
 #include "GLFW/glfw3.h"
 #include "dawn/common/Platform.h"
+#include "dawn/utils/GLFWUtils.h"
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    define GLFW_EXPOSE_NATIVE_WIN32
+#define GLFW_EXPOSE_NATIVE_WIN32
 #elif defined(DAWN_USE_X11)
-#    define GLFW_EXPOSE_NATIVE_X11
+#define GLFW_EXPOSE_NATIVE_X11
 #endif
 #include "GLFW/glfw3native.h"
 
 namespace utils {
 
-    void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
-        if (type == wgpu::BackendType::OpenGL) {
-            // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
-            // texture views.
-            glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
-            glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
-            glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
-            glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
-        } else if (type == wgpu::BackendType::OpenGLES) {
-            glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
-            glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
-            glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
-            glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
-        } else {
-            // Without this GLFW will initialize a GL context on the window, which prevents using
-            // the window with other APIs (by crashing in weird ways).
-            glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
-        }
+void SetupGLFWWindowHintsForBackend(wgpu::BackendType type) {
+    if (type == wgpu::BackendType::OpenGL) {
+        // Ask for OpenGL 4.4 which is what the GL backend requires for compute shaders and
+        // texture views.
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 4);
+        glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE);
+        glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+    } else if (type == wgpu::BackendType::OpenGLES) {
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
+        glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+        glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
+        glfwWindowHint(GLFW_CONTEXT_CREATION_API, GLFW_EGL_CONTEXT_API);
+    } else {
+        // Without this GLFW will initialize a GL context on the window, which prevents using
+        // the window with other APIs (by crashing in weird ways).
+        glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
     }
+}
 
-    wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window) {
-        std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
-            SetupWindowAndGetSurfaceDescriptor(window);
+wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window) {
+    std::unique_ptr<wgpu::ChainedStruct> chainedDescriptor =
+        SetupWindowAndGetSurfaceDescriptor(window);
 
-        wgpu::SurfaceDescriptor descriptor;
-        descriptor.nextInChain = chainedDescriptor.get();
-        wgpu::Surface surface = instance.CreateSurface(&descriptor);
+    wgpu::SurfaceDescriptor descriptor;
+    descriptor.nextInChain = chainedDescriptor.get();
+    wgpu::Surface surface = instance.CreateSurface(&descriptor);
 
-        return surface;
-    }
+    return surface;
+}
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
-        std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
-            std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
-        desc->hwnd = glfwGetWin32Window(window);
-        desc->hinstance = GetModuleHandle(nullptr);
-        return std::move(desc);
-    }
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
+    std::unique_ptr<wgpu::SurfaceDescriptorFromWindowsHWND> desc =
+        std::make_unique<wgpu::SurfaceDescriptorFromWindowsHWND>();
+    desc->hwnd = glfwGetWin32Window(window);
+    desc->hinstance = GetModuleHandle(nullptr);
+    return std::move(desc);
+}
 #elif defined(DAWN_USE_X11)
-    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
-        std::unique_ptr<wgpu::SurfaceDescriptorFromXlibWindow> desc =
-            std::make_unique<wgpu::SurfaceDescriptorFromXlibWindow>();
-        desc->display = glfwGetX11Display();
-        desc->window = glfwGetX11Window(window);
-        return std::move(desc);
-    }
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
+    std::unique_ptr<wgpu::SurfaceDescriptorFromXlibWindow> desc =
+        std::make_unique<wgpu::SurfaceDescriptorFromXlibWindow>();
+    desc->display = glfwGetX11Display();
+    desc->window = glfwGetX11Window(window);
+    return std::move(desc);
+}
 #elif defined(DAWN_ENABLE_BACKEND_METAL)
-    // SetupWindowAndGetSurfaceDescriptor defined in GLFWUtils_metal.mm
+// SetupWindowAndGetSurfaceDescriptor defined in GLFWUtils_metal.mm
 #else
-    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow*) {
-        return nullptr;
-    }
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow*) {
+    return nullptr;
+}
 #endif
 
 }  // namespace utils
diff --git a/src/dawn/utils/GLFWUtils.h b/src/dawn/utils/GLFWUtils.h
index 7f796c9..2fe5b10 100644
--- a/src/dawn/utils/GLFWUtils.h
+++ b/src/dawn/utils/GLFWUtils.h
@@ -23,19 +23,19 @@
 
 namespace utils {
 
-    // Adds all the necessary glfwWindowHint calls for the next GLFWwindow created to be used with
-    // the specified backend.
-    void SetupGLFWWindowHintsForBackend(wgpu::BackendType type);
+// Adds all the necessary glfwWindowHint calls for the next GLFWwindow created to be used with
+// the specified backend.
+void SetupGLFWWindowHintsForBackend(wgpu::BackendType type);
 
-    // Does the necessary setup on the GLFWwindow to allow creating a wgpu::Surface with it and
-    // calls `instance.CreateSurface` with the correct descriptor for this window.
-    // Returns a null wgpu::Surface on failure.
-    wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window);
+// Does the necessary setup on the GLFWwindow to allow creating a wgpu::Surface with it and
+// calls `instance.CreateSurface` with the correct descriptor for this window.
+// Returns a null wgpu::Surface on failure.
+wgpu::Surface CreateSurfaceForWindow(const wgpu::Instance& instance, GLFWwindow* window);
 
-    // Use for testing only. Does everything that CreateSurfaceForWindow does except the call to
-    // CreateSurface. Useful to be able to modify the descriptor for testing, or when trying to
-    // avoid using the global proc table.
-    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window);
+// Use for testing only. Does everything that CreateSurfaceForWindow does except the call to
+// CreateSurface. Useful to be able to modify the descriptor for testing, or when trying to
+// avoid using the global proc table.
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window);
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/GLFWUtils_metal.mm b/src/dawn/utils/GLFWUtils_metal.mm
index 3249887..0762a3d 100644
--- a/src/dawn/utils/GLFWUtils_metal.mm
+++ b/src/dawn/utils/GLFWUtils_metal.mm
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 #if !defined(DAWN_ENABLE_BACKEND_METAL)
-#    error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
+#error "GLFWUtils_metal.mm requires the Metal backend to be enabled."
 #endif  // !defined(DAWN_ENABLE_BACKEND_METAL)
 
 #include "dawn/utils/GLFWUtils.h"
@@ -28,26 +28,26 @@
 
 namespace utils {
 
-    std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
-        if (@available(macOS 10.11, *)) {
-            NSWindow* nsWindow = glfwGetCocoaWindow(window);
-            NSView* view = [nsWindow contentView];
+std::unique_ptr<wgpu::ChainedStruct> SetupWindowAndGetSurfaceDescriptor(GLFWwindow* window) {
+    if (@available(macOS 10.11, *)) {
+        NSWindow* nsWindow = glfwGetCocoaWindow(window);
+        NSView* view = [nsWindow contentView];
 
-            // Create a CAMetalLayer that covers the whole window that will be passed to
-            // CreateSurface.
-            [view setWantsLayer:YES];
-            [view setLayer:[CAMetalLayer layer]];
+        // Create a CAMetalLayer that covers the whole window that will be passed to
+        // CreateSurface.
+        [view setWantsLayer:YES];
+        [view setLayer:[CAMetalLayer layer]];
 
-            // Use retina if the window was created with retina support.
-            [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
+        // Use retina if the window was created with retina support.
+        [[view layer] setContentsScale:[nsWindow backingScaleFactor]];
 
-            std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
-                std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
-            desc->layer = [view layer];
-            return std::move(desc);
-        }
-
-        return nullptr;
+        std::unique_ptr<wgpu::SurfaceDescriptorFromMetalLayer> desc =
+            std::make_unique<wgpu::SurfaceDescriptorFromMetalLayer>();
+        desc->layer = [view layer];
+        return std::move(desc);
     }
 
+    return nullptr;
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/Glfw3Fuchsia.cpp b/src/dawn/utils/Glfw3Fuchsia.cpp
index 6fb13cc..198a5fd 100644
--- a/src/dawn/utils/Glfw3Fuchsia.cpp
+++ b/src/dawn/utils/Glfw3Fuchsia.cpp
@@ -31,8 +31,7 @@
     return GLFW_TRUE;
 }
 
-void glfwDefaultWindowHints(void) {
-}
+void glfwDefaultWindowHints(void) {}
 
 void glfwWindowHint(int hint, int value) {
     DAWN_UNUSED(hint);
diff --git a/src/dawn/utils/MetalBinding.mm b/src/dawn/utils/MetalBinding.mm
index b35245c..ef34361 100644
--- a/src/dawn/utils/MetalBinding.mm
+++ b/src/dawn/utils/MetalBinding.mm
@@ -25,111 +25,109 @@
 #import <QuartzCore/CAMetalLayer.h>
 
 namespace utils {
-    class SwapChainImplMTL {
-      public:
-        using WSIContext = DawnWSIContextMetal;
+class SwapChainImplMTL {
+  public:
+    using WSIContext = DawnWSIContextMetal;
 
-        SwapChainImplMTL(id nsWindow) : mNsWindow(nsWindow) {
-        }
+    SwapChainImplMTL(id nsWindow) : mNsWindow(nsWindow) {}
 
-        ~SwapChainImplMTL() {
-            [mCurrentTexture release];
-            [mCurrentDrawable release];
-        }
-
-        void Init(DawnWSIContextMetal* ctx) {
-            mMtlDevice = ctx->device;
-            mCommandQueue = ctx->queue;
-        }
-
-        DawnSwapChainError Configure(WGPUTextureFormat format,
-                                     WGPUTextureUsage usage,
-                                     uint32_t width,
-                                     uint32_t height) {
-            if (format != WGPUTextureFormat_BGRA8Unorm) {
-                return "unsupported format";
-            }
-            ASSERT(width > 0);
-            ASSERT(height > 0);
-
-            NSView* contentView = [mNsWindow contentView];
-            [contentView setWantsLayer:YES];
-
-            CGSize size = {};
-            size.width = width;
-            size.height = height;
-
-            mLayer = [CAMetalLayer layer];
-            [mLayer setDevice:mMtlDevice];
-            [mLayer setPixelFormat:MTLPixelFormatBGRA8Unorm];
-            [mLayer setDrawableSize:size];
-
-            constexpr uint32_t kFramebufferOnlyTextureUsages =
-                WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_Present;
-            bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
-            if (hasOnlyFramebufferUsages) {
-                [mLayer setFramebufferOnly:YES];
-            }
-
-            [contentView setLayer:mLayer];
-
-            return DAWN_SWAP_CHAIN_NO_ERROR;
-        }
-
-        DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
-            [mCurrentDrawable release];
-            mCurrentDrawable = [mLayer nextDrawable];
-            [mCurrentDrawable retain];
-
-            [mCurrentTexture release];
-            mCurrentTexture = mCurrentDrawable.texture;
-            [mCurrentTexture retain];
-
-            nextTexture->texture.ptr = reinterpret_cast<void*>(mCurrentTexture);
-
-            return DAWN_SWAP_CHAIN_NO_ERROR;
-        }
-
-        DawnSwapChainError Present() {
-            id<MTLCommandBuffer> commandBuffer = [mCommandQueue commandBuffer];
-            [commandBuffer presentDrawable:mCurrentDrawable];
-            [commandBuffer commit];
-
-            return DAWN_SWAP_CHAIN_NO_ERROR;
-        }
-
-      private:
-        id mNsWindow = nil;
-        id<MTLDevice> mMtlDevice = nil;
-        id<MTLCommandQueue> mCommandQueue = nil;
-
-        CAMetalLayer* mLayer = nullptr;
-        id<CAMetalDrawable> mCurrentDrawable = nil;
-        id<MTLTexture> mCurrentTexture = nil;
-    };
-
-    class MetalBinding : public BackendBinding {
-      public:
-        MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
-        }
-
-        uint64_t GetSwapChainImplementation() override {
-            if (mSwapchainImpl.userData == nullptr) {
-                mSwapchainImpl = CreateSwapChainImplementation(
-                    new SwapChainImplMTL(glfwGetCocoaWindow(mWindow)));
-            }
-            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
-        }
-
-        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
-            return WGPUTextureFormat_BGRA8Unorm;
-        }
-
-      private:
-        DawnSwapChainImplementation mSwapchainImpl = {};
-    };
-
-    BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
-        return new MetalBinding(window, device);
+    ~SwapChainImplMTL() {
+        [mCurrentTexture release];
+        [mCurrentDrawable release];
     }
+
+    void Init(DawnWSIContextMetal* ctx) {
+        mMtlDevice = ctx->device;
+        mCommandQueue = ctx->queue;
+    }
+
+    DawnSwapChainError Configure(WGPUTextureFormat format,
+                                 WGPUTextureUsage usage,
+                                 uint32_t width,
+                                 uint32_t height) {
+        if (format != WGPUTextureFormat_BGRA8Unorm) {
+            return "unsupported format";
+        }
+        ASSERT(width > 0);
+        ASSERT(height > 0);
+
+        NSView* contentView = [mNsWindow contentView];
+        [contentView setWantsLayer:YES];
+
+        CGSize size = {};
+        size.width = width;
+        size.height = height;
+
+        mLayer = [CAMetalLayer layer];
+        [mLayer setDevice:mMtlDevice];
+        [mLayer setPixelFormat:MTLPixelFormatBGRA8Unorm];
+        [mLayer setDrawableSize:size];
+
+        constexpr uint32_t kFramebufferOnlyTextureUsages =
+            WGPUTextureUsage_RenderAttachment | WGPUTextureUsage_Present;
+        bool hasOnlyFramebufferUsages = !(usage & (~kFramebufferOnlyTextureUsages));
+        if (hasOnlyFramebufferUsages) {
+            [mLayer setFramebufferOnly:YES];
+        }
+
+        [contentView setLayer:mLayer];
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError GetNextTexture(DawnSwapChainNextTexture* nextTexture) {
+        [mCurrentDrawable release];
+        mCurrentDrawable = [mLayer nextDrawable];
+        [mCurrentDrawable retain];
+
+        [mCurrentTexture release];
+        mCurrentTexture = mCurrentDrawable.texture;
+        [mCurrentTexture retain];
+
+        nextTexture->texture.ptr = reinterpret_cast<void*>(mCurrentTexture);
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+    DawnSwapChainError Present() {
+        id<MTLCommandBuffer> commandBuffer = [mCommandQueue commandBuffer];
+        [commandBuffer presentDrawable:mCurrentDrawable];
+        [commandBuffer commit];
+
+        return DAWN_SWAP_CHAIN_NO_ERROR;
+    }
+
+  private:
+    id mNsWindow = nil;
+    id<MTLDevice> mMtlDevice = nil;
+    id<MTLCommandQueue> mCommandQueue = nil;
+
+    CAMetalLayer* mLayer = nullptr;
+    id<CAMetalDrawable> mCurrentDrawable = nil;
+    id<MTLTexture> mCurrentTexture = nil;
+};
+
+class MetalBinding : public BackendBinding {
+  public:
+    MetalBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
+
+    uint64_t GetSwapChainImplementation() override {
+        if (mSwapchainImpl.userData == nullptr) {
+            mSwapchainImpl =
+                CreateSwapChainImplementation(new SwapChainImplMTL(glfwGetCocoaWindow(mWindow)));
+        }
+        return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+    }
+
+    WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+        return WGPUTextureFormat_BGRA8Unorm;
+    }
+
+  private:
+    DawnSwapChainImplementation mSwapchainImpl = {};
+};
+
+BackendBinding* CreateMetalBinding(GLFWwindow* window, WGPUDevice device) {
+    return new MetalBinding(window, device);
 }
+}  // namespace utils
diff --git a/src/dawn/utils/NullBinding.cpp b/src/dawn/utils/NullBinding.cpp
index f3f1728..cddc0e6 100644
--- a/src/dawn/utils/NullBinding.cpp
+++ b/src/dawn/utils/NullBinding.cpp
@@ -21,27 +21,26 @@
 
 namespace utils {
 
-    class NullBinding : public BackendBinding {
-      public:
-        NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
-        }
+class NullBinding : public BackendBinding {
+  public:
+    NullBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
 
-        uint64_t GetSwapChainImplementation() override {
-            if (mSwapchainImpl.userData == nullptr) {
-                mSwapchainImpl = dawn::native::null::CreateNativeSwapChainImpl();
-            }
-            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
+    uint64_t GetSwapChainImplementation() override {
+        if (mSwapchainImpl.userData == nullptr) {
+            mSwapchainImpl = dawn::native::null::CreateNativeSwapChainImpl();
         }
-        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
-            return WGPUTextureFormat_RGBA8Unorm;
-        }
-
-      private:
-        DawnSwapChainImplementation mSwapchainImpl = {};
-    };
-
-    BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
-        return new NullBinding(window, device);
+        return reinterpret_cast<uint64_t>(&mSwapchainImpl);
     }
+    WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+        return WGPUTextureFormat_RGBA8Unorm;
+    }
+
+  private:
+    DawnSwapChainImplementation mSwapchainImpl = {};
+};
+
+BackendBinding* CreateNullBinding(GLFWwindow* window, WGPUDevice device) {
+    return new NullBinding(window, device);
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/OSXTimer.cpp b/src/dawn/utils/OSXTimer.cpp
index 28ff376..ac749b1 100644
--- a/src/dawn/utils/OSXTimer.cpp
+++ b/src/dawn/utils/OSXTimer.cpp
@@ -20,58 +20,55 @@
 
 namespace utils {
 
-    class OSXTimer : public Timer {
-      public:
-        OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {
-        }
+class OSXTimer : public Timer {
+  public:
+    OSXTimer() : Timer(), mRunning(false), mSecondCoeff(0) {}
 
-        ~OSXTimer() override = default;
+    ~OSXTimer() override = default;
 
-        void Start() override {
-            mStartTime = mach_absolute_time();
-            // Cache secondCoeff
-            GetSecondCoeff();
-            mRunning = true;
-        }
-
-        void Stop() override {
-            mStopTime = mach_absolute_time();
-            mRunning = false;
-        }
-
-        double GetElapsedTime() const override {
-            if (mRunning) {
-                return mSecondCoeff * (mach_absolute_time() - mStartTime);
-            } else {
-                return mSecondCoeff * (mStopTime - mStartTime);
-            }
-        }
-
-        double GetAbsoluteTime() override {
-            return GetSecondCoeff() * mach_absolute_time();
-        }
-
-      private:
-        double GetSecondCoeff() {
-            // If this is the first time we've run, get the timebase.
-            if (mSecondCoeff == 0.0) {
-                mach_timebase_info_data_t timebaseInfo;
-                mach_timebase_info(&timebaseInfo);
-
-                mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
-            }
-
-            return mSecondCoeff;
-        }
-
-        bool mRunning;
-        uint64_t mStartTime;
-        uint64_t mStopTime;
-        double mSecondCoeff;
-    };
-
-    Timer* CreateTimer() {
-        return new OSXTimer();
+    void Start() override {
+        mStartTime = mach_absolute_time();
+        // Cache secondCoeff
+        GetSecondCoeff();
+        mRunning = true;
     }
 
+    void Stop() override {
+        mStopTime = mach_absolute_time();
+        mRunning = false;
+    }
+
+    double GetElapsedTime() const override {
+        if (mRunning) {
+            return mSecondCoeff * (mach_absolute_time() - mStartTime);
+        } else {
+            return mSecondCoeff * (mStopTime - mStartTime);
+        }
+    }
+
+    double GetAbsoluteTime() override { return GetSecondCoeff() * mach_absolute_time(); }
+
+  private:
+    double GetSecondCoeff() {
+        // If this is the first time we've run, get the timebase.
+        if (mSecondCoeff == 0.0) {
+            mach_timebase_info_data_t timebaseInfo;
+            mach_timebase_info(&timebaseInfo);
+
+            mSecondCoeff = timebaseInfo.numer * (1.0 / 1000000000) / timebaseInfo.denom;
+        }
+
+        return mSecondCoeff;
+    }
+
+    bool mRunning;
+    uint64_t mStartTime;
+    uint64_t mStopTime;
+    double mSecondCoeff;
+};
+
+Timer* CreateTimer() {
+    return new OSXTimer();
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/ObjCUtils.h b/src/dawn/utils/ObjCUtils.h
index 28aa1bd..8631819 100644
--- a/src/dawn/utils/ObjCUtils.h
+++ b/src/dawn/utils/ObjCUtils.h
@@ -21,8 +21,8 @@
 
 namespace utils {
 
-    // The returned CALayer is autoreleased.
-    void* CreatePlaceholderCALayer();
+// The returned CALayer is autoreleased.
+void* CreatePlaceholderCALayer();
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/ObjCUtils.mm b/src/dawn/utils/ObjCUtils.mm
index 477b030..171f5fc 100644
--- a/src/dawn/utils/ObjCUtils.mm
+++ b/src/dawn/utils/ObjCUtils.mm
@@ -18,8 +18,8 @@
 
 namespace utils {
 
-    void* CreatePlaceholderCALayer() {
-        return [CALayer layer];
-    }
+void* CreatePlaceholderCALayer() {
+    return [CALayer layer];
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/OpenGLBinding.cpp b/src/dawn/utils/OpenGLBinding.cpp
index 71db286..9d75255 100644
--- a/src/dawn/utils/OpenGLBinding.cpp
+++ b/src/dawn/utils/OpenGLBinding.cpp
@@ -26,31 +26,30 @@
 
 namespace utils {
 
-    class OpenGLBinding : public BackendBinding {
-      public:
-        OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
+class OpenGLBinding : public BackendBinding {
+  public:
+    OpenGLBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
+
+    uint64_t GetSwapChainImplementation() override {
+        if (mSwapchainImpl.userData == nullptr) {
+            mSwapchainImpl = dawn::native::opengl::CreateNativeSwapChainImpl(
+                mDevice,
+                [](void* userdata) { glfwSwapBuffers(static_cast<GLFWwindow*>(userdata)); },
+                mWindow);
         }
-
-        uint64_t GetSwapChainImplementation() override {
-            if (mSwapchainImpl.userData == nullptr) {
-                mSwapchainImpl = dawn::native::opengl::CreateNativeSwapChainImpl(
-                    mDevice,
-                    [](void* userdata) { glfwSwapBuffers(static_cast<GLFWwindow*>(userdata)); },
-                    mWindow);
-            }
-            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
-        }
-
-        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
-            return dawn::native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
-        }
-
-      private:
-        DawnSwapChainImplementation mSwapchainImpl = {};
-    };
-
-    BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
-        return new OpenGLBinding(window, device);
+        return reinterpret_cast<uint64_t>(&mSwapchainImpl);
     }
 
+    WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+        return dawn::native::opengl::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+    }
+
+  private:
+    DawnSwapChainImplementation mSwapchainImpl = {};
+};
+
+BackendBinding* CreateOpenGLBinding(GLFWwindow* window, WGPUDevice device) {
+    return new OpenGLBinding(window, device);
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/PlatformDebugLogger.h b/src/dawn/utils/PlatformDebugLogger.h
index 23ff4c8..a67af17 100644
--- a/src/dawn/utils/PlatformDebugLogger.h
+++ b/src/dawn/utils/PlatformDebugLogger.h
@@ -17,12 +17,12 @@
 
 namespace utils {
 
-    class PlatformDebugLogger {
-      public:
-        virtual ~PlatformDebugLogger() = default;
-    };
+class PlatformDebugLogger {
+  public:
+    virtual ~PlatformDebugLogger() = default;
+};
 
-    PlatformDebugLogger* CreatePlatformDebugLogger();
+PlatformDebugLogger* CreatePlatformDebugLogger();
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/PosixTimer.cpp b/src/dawn/utils/PosixTimer.cpp
index 1bbc844..9b43e18 100644
--- a/src/dawn/utils/PosixTimer.cpp
+++ b/src/dawn/utils/PosixTimer.cpp
@@ -19,56 +19,53 @@
 
 namespace utils {
 
-    namespace {
+namespace {
 
-        uint64_t GetCurrentTimeNs() {
-            struct timespec currentTime;
-            clock_gettime(CLOCK_MONOTONIC, &currentTime);
-            return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
-        }
+uint64_t GetCurrentTimeNs() {
+    struct timespec currentTime;
+    clock_gettime(CLOCK_MONOTONIC, &currentTime);
+    return currentTime.tv_sec * 1'000'000'000llu + currentTime.tv_nsec;
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    class PosixTimer : public Timer {
-      public:
-        PosixTimer() : Timer(), mRunning(false) {
-        }
+class PosixTimer : public Timer {
+  public:
+    PosixTimer() : Timer(), mRunning(false) {}
 
-        ~PosixTimer() override = default;
+    ~PosixTimer() override = default;
 
-        void Start() override {
-            mStartTimeNs = GetCurrentTimeNs();
-            mRunning = true;
-        }
-
-        void Stop() override {
-            mStopTimeNs = GetCurrentTimeNs();
-            mRunning = false;
-        }
-
-        double GetElapsedTime() const override {
-            uint64_t endTimeNs;
-            if (mRunning) {
-                endTimeNs = GetCurrentTimeNs();
-            } else {
-                endTimeNs = mStopTimeNs;
-            }
-
-            return (endTimeNs - mStartTimeNs) * 1e-9;
-        }
-
-        double GetAbsoluteTime() override {
-            return GetCurrentTimeNs() * 1e-9;
-        }
-
-      private:
-        bool mRunning;
-        uint64_t mStartTimeNs;
-        uint64_t mStopTimeNs;
-    };
-
-    Timer* CreateTimer() {
-        return new PosixTimer();
+    void Start() override {
+        mStartTimeNs = GetCurrentTimeNs();
+        mRunning = true;
     }
 
+    void Stop() override {
+        mStopTimeNs = GetCurrentTimeNs();
+        mRunning = false;
+    }
+
+    double GetElapsedTime() const override {
+        uint64_t endTimeNs;
+        if (mRunning) {
+            endTimeNs = GetCurrentTimeNs();
+        } else {
+            endTimeNs = mStopTimeNs;
+        }
+
+        return (endTimeNs - mStartTimeNs) * 1e-9;
+    }
+
+    double GetAbsoluteTime() override { return GetCurrentTimeNs() * 1e-9; }
+
+  private:
+    bool mRunning;
+    uint64_t mStartTimeNs;
+    uint64_t mStopTimeNs;
+};
+
+Timer* CreateTimer() {
+    return new PosixTimer();
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/ScopedAutoreleasePool.cpp b/src/dawn/utils/ScopedAutoreleasePool.cpp
index 2f5f050..da5b098 100644
--- a/src/dawn/utils/ScopedAutoreleasePool.cpp
+++ b/src/dawn/utils/ScopedAutoreleasePool.cpp
@@ -18,17 +18,16 @@
 
 namespace utils {
 
-    ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool(nullptr) {
-        DAWN_UNUSED(mPool);
-    }
+ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool(nullptr) {
+    DAWN_UNUSED(mPool);
+}
 
-    ScopedAutoreleasePool::~ScopedAutoreleasePool() = default;
+ScopedAutoreleasePool::~ScopedAutoreleasePool() = default;
 
-    ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
-    }
+ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {}
 
-    ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
-        return *this;
-    }
+ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+    return *this;
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/ScopedAutoreleasePool.h b/src/dawn/utils/ScopedAutoreleasePool.h
index 412ad3e..4f724ca 100644
--- a/src/dawn/utils/ScopedAutoreleasePool.h
+++ b/src/dawn/utils/ScopedAutoreleasePool.h
@@ -21,40 +21,40 @@
 
 namespace utils {
 
-    /**
-     * ScopedAutoreleasePool is a scoped class which initializes an NSAutoreleasePool on
-     * creation, and drains it on destruction. On non-Apple platforms, ScopedAutoreleasePool
-     * is a no-op.
-     *
-     * An autoreleasepool is needed when using protocol objects in Objective-C because Cocoa
-     * expects a pool to always be available in each thread. If a pool is not available, then
-     * autoreleased objects will never be released and will leak.
-     *
-     * In long-running blocks of code or loops, it is important to periodically create and drain
-     * autorelease pools so that memory is recycled. In Dawn's tests, we have an autoreleasepool
-     * per-test. In graphics applications it's advised to create an autoreleasepool around the
-     * frame loop. Ex.)
-     *   void frame() {
-     *     // Any protocol objects will be reclaimed when this object falls out of scope.
-     *     utils::ScopedAutoreleasePool pool;
-     *
-     *     // do rendering ...
-     *   }
-     */
-    class [[nodiscard]] ScopedAutoreleasePool {
-      public:
-        ScopedAutoreleasePool();
-        ~ScopedAutoreleasePool();
+/**
+ * ScopedAutoreleasePool is a scoped class which initializes an NSAutoreleasePool on
+ * creation, and drains it on destruction. On non-Apple platforms, ScopedAutoreleasePool
+ * is a no-op.
+ *
+ * An autoreleasepool is needed when using protocol objects in Objective-C because Cocoa
+ * expects a pool to always be available in each thread. If a pool is not available, then
+ * autoreleased objects will never be released and will leak.
+ *
+ * In long-running blocks of code or loops, it is important to periodically create and drain
+ * autorelease pools so that memory is recycled. In Dawn's tests, we have an autoreleasepool
+ * per-test. In graphics applications it's advised to create an autoreleasepool around the
+ * frame loop. Ex.)
+ *   void frame() {
+ *     // Any protocol objects will be reclaimed when this object falls out of scope.
+ *     utils::ScopedAutoreleasePool pool;
+ *
+ *     // do rendering ...
+ *   }
+ */
+class [[nodiscard]] ScopedAutoreleasePool {
+  public:
+    ScopedAutoreleasePool();
+    ~ScopedAutoreleasePool();
 
-        ScopedAutoreleasePool(const ScopedAutoreleasePool&) = delete;
-        ScopedAutoreleasePool& operator=(const ScopedAutoreleasePool&) = delete;
+    ScopedAutoreleasePool(const ScopedAutoreleasePool&) = delete;
+    ScopedAutoreleasePool& operator=(const ScopedAutoreleasePool&) = delete;
 
-        ScopedAutoreleasePool(ScopedAutoreleasePool &&);
-        ScopedAutoreleasePool& operator=(ScopedAutoreleasePool&&);
+    ScopedAutoreleasePool(ScopedAutoreleasePool&&);
+    ScopedAutoreleasePool& operator=(ScopedAutoreleasePool&&);
 
-      private:
-        void* mPool = nullptr;
-    };
+  private:
+    void* mPool = nullptr;
+};
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/ScopedAutoreleasePool.mm b/src/dawn/utils/ScopedAutoreleasePool.mm
index c4cb9a2..7aa5472 100644
--- a/src/dawn/utils/ScopedAutoreleasePool.mm
+++ b/src/dawn/utils/ScopedAutoreleasePool.mm
@@ -18,27 +18,26 @@
 
 namespace utils {
 
-    ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool([[NSAutoreleasePool alloc] init]) {
-    }
+ScopedAutoreleasePool::ScopedAutoreleasePool() : mPool([[NSAutoreleasePool alloc] init]) {}
 
-    ScopedAutoreleasePool::~ScopedAutoreleasePool() {
-        if (mPool != nullptr) {
-            [static_cast<NSAutoreleasePool*>(mPool) release];
-            mPool = nullptr;
-        }
+ScopedAutoreleasePool::~ScopedAutoreleasePool() {
+    if (mPool != nullptr) {
+        [static_cast<NSAutoreleasePool*>(mPool) release];
+        mPool = nullptr;
     }
+}
 
-    ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+ScopedAutoreleasePool::ScopedAutoreleasePool(ScopedAutoreleasePool&& rhs) {
+    mPool = rhs.mPool;
+    rhs.mPool = nullptr;
+}
+
+ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
+    if (&rhs != this) {
         mPool = rhs.mPool;
         rhs.mPool = nullptr;
     }
-
-    ScopedAutoreleasePool& ScopedAutoreleasePool::operator=(ScopedAutoreleasePool&& rhs) {
-        if (&rhs != this) {
-            mPool = rhs.mPool;
-            rhs.mPool = nullptr;
-        }
-        return *this;
-    }
+    return *this;
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/SystemUtils.cpp b/src/dawn/utils/SystemUtils.cpp
index 221fb71..a286b22 100644
--- a/src/dawn/utils/SystemUtils.cpp
+++ b/src/dawn/utils/SystemUtils.cpp
@@ -17,25 +17,25 @@
 #include "dawn/common/Platform.h"
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-#    include <Windows.h>
+#include <Windows.h>
 #elif defined(DAWN_PLATFORM_POSIX)
-#    include <unistd.h>
+#include <unistd.h>
 #else
-#    error "Unsupported platform."
+#error "Unsupported platform."
 #endif
 
 namespace utils {
 
 #if defined(DAWN_PLATFORM_WINDOWS)
-    void USleep(unsigned int usecs) {
-        Sleep(static_cast<DWORD>(usecs / 1000));
-    }
+void USleep(unsigned int usecs) {
+    Sleep(static_cast<DWORD>(usecs / 1000));
+}
 #elif defined(DAWN_PLATFORM_POSIX)
-    void USleep(unsigned int usecs) {
-        usleep(usecs);
-    }
+void USleep(unsigned int usecs) {
+    usleep(usecs);
+}
 #else
-#    error "Implement USleep for your platform."
+#error "Implement USleep for your platform."
 #endif
 
 }  // namespace utils
diff --git a/src/dawn/utils/SystemUtils.h b/src/dawn/utils/SystemUtils.h
index 1be5439..bf14cdd 100644
--- a/src/dawn/utils/SystemUtils.h
+++ b/src/dawn/utils/SystemUtils.h
@@ -17,7 +17,7 @@
 
 namespace utils {
 
-    void USleep(unsigned int usecs);
+void USleep(unsigned int usecs);
 }
 
 #endif  // SRC_DAWN_UTILS_SYSTEMUTILS_H_
diff --git a/src/dawn/utils/TerribleCommandBuffer.cpp b/src/dawn/utils/TerribleCommandBuffer.cpp
index b99243b..3e100d5 100644
--- a/src/dawn/utils/TerribleCommandBuffer.cpp
+++ b/src/dawn/utils/TerribleCommandBuffer.cpp
@@ -18,42 +18,40 @@
 
 namespace utils {
 
-    TerribleCommandBuffer::TerribleCommandBuffer() {
-    }
+TerribleCommandBuffer::TerribleCommandBuffer() {}
 
-    TerribleCommandBuffer::TerribleCommandBuffer(dawn::wire::CommandHandler* handler)
-        : mHandler(handler) {
-    }
+TerribleCommandBuffer::TerribleCommandBuffer(dawn::wire::CommandHandler* handler)
+    : mHandler(handler) {}
 
-    void TerribleCommandBuffer::SetHandler(dawn::wire::CommandHandler* handler) {
-        mHandler = handler;
-    }
+void TerribleCommandBuffer::SetHandler(dawn::wire::CommandHandler* handler) {
+    mHandler = handler;
+}
 
-    size_t TerribleCommandBuffer::GetMaximumAllocationSize() const {
-        return sizeof(mBuffer);
-    }
+size_t TerribleCommandBuffer::GetMaximumAllocationSize() const {
+    return sizeof(mBuffer);
+}
 
-    void* TerribleCommandBuffer::GetCmdSpace(size_t size) {
-        // Note: This returns non-null even if size is zero.
-        if (size > sizeof(mBuffer)) {
+void* TerribleCommandBuffer::GetCmdSpace(size_t size) {
+    // Note: This returns non-null even if size is zero.
+    if (size > sizeof(mBuffer)) {
+        return nullptr;
+    }
+    char* result = &mBuffer[mOffset];
+    if (sizeof(mBuffer) - size < mOffset) {
+        if (!Flush()) {
             return nullptr;
         }
-        char* result = &mBuffer[mOffset];
-        if (sizeof(mBuffer) - size < mOffset) {
-            if (!Flush()) {
-                return nullptr;
-            }
-            return GetCmdSpace(size);
-        }
-
-        mOffset += size;
-        return result;
+        return GetCmdSpace(size);
     }
 
-    bool TerribleCommandBuffer::Flush() {
-        bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
-        mOffset = 0;
-        return success;
-    }
+    mOffset += size;
+    return result;
+}
+
+bool TerribleCommandBuffer::Flush() {
+    bool success = mHandler->HandleCommands(mBuffer, mOffset) != nullptr;
+    mOffset = 0;
+    return success;
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/TerribleCommandBuffer.h b/src/dawn/utils/TerribleCommandBuffer.h
index cf6520b..3fac52f 100644
--- a/src/dawn/utils/TerribleCommandBuffer.h
+++ b/src/dawn/utils/TerribleCommandBuffer.h
@@ -19,23 +19,23 @@
 
 namespace utils {
 
-    class TerribleCommandBuffer : public dawn::wire::CommandSerializer {
-      public:
-        TerribleCommandBuffer();
-        explicit TerribleCommandBuffer(dawn::wire::CommandHandler* handler);
+class TerribleCommandBuffer : public dawn::wire::CommandSerializer {
+  public:
+    TerribleCommandBuffer();
+    explicit TerribleCommandBuffer(dawn::wire::CommandHandler* handler);
 
-        void SetHandler(dawn::wire::CommandHandler* handler);
+    void SetHandler(dawn::wire::CommandHandler* handler);
 
-        size_t GetMaximumAllocationSize() const override;
+    size_t GetMaximumAllocationSize() const override;
 
-        void* GetCmdSpace(size_t size) override;
-        bool Flush() override;
+    void* GetCmdSpace(size_t size) override;
+    bool Flush() override;
 
-      private:
-        dawn::wire::CommandHandler* mHandler = nullptr;
-        size_t mOffset = 0;
-        char mBuffer[1000000];
-    };
+  private:
+    dawn::wire::CommandHandler* mHandler = nullptr;
+    size_t mOffset = 0;
+    char mBuffer[1000000];
+};
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/TestUtils.cpp b/src/dawn/utils/TestUtils.cpp
index b33c206..e5b41cf 100644
--- a/src/dawn/utils/TestUtils.cpp
+++ b/src/dawn/utils/TestUtils.cpp
@@ -24,158 +24,156 @@
 
 namespace utils {
 
-    uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
-        const uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
-        const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
-        ASSERT(width % blockWidth == 0);
-        return Align(bytesPerBlock * (width / blockWidth), kTextureBytesPerRowAlignment);
+uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width) {
+    const uint32_t bytesPerBlock = utils::GetTexelBlockSizeInBytes(format);
+    const uint32_t blockWidth = utils::GetTextureFormatBlockWidth(format);
+    ASSERT(width % blockWidth == 0);
+    return Align(bytesPerBlock * (width / blockWidth), kTextureBytesPerRowAlignment);
+}
+
+TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(wgpu::TextureFormat format,
+                                                                wgpu::Extent3D textureSizeAtLevel0,
+                                                                uint32_t mipmapLevel,
+                                                                wgpu::TextureDimension dimension,
+                                                                uint32_t rowsPerImage) {
+    // Compressed texture formats not supported in this function yet.
+    ASSERT(utils::GetTextureFormatBlockWidth(format) == 1);
+
+    TextureDataCopyLayout layout;
+
+    layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
+                      std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
+                      textureSizeAtLevel0.depthOrArrayLayers};
+
+    if (dimension == wgpu::TextureDimension::e3D) {
+        layout.mipSize.depthOrArrayLayers =
+            std::max(textureSizeAtLevel0.depthOrArrayLayers >> mipmapLevel, 1u);
     }
 
-    TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
-        wgpu::TextureFormat format,
-        wgpu::Extent3D textureSizeAtLevel0,
-        uint32_t mipmapLevel,
-        wgpu::TextureDimension dimension,
-        uint32_t rowsPerImage) {
-        // Compressed texture formats not supported in this function yet.
-        ASSERT(utils::GetTextureFormatBlockWidth(format) == 1);
+    layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
 
-        TextureDataCopyLayout layout;
+    if (rowsPerImage == wgpu::kCopyStrideUndefined) {
+        rowsPerImage = layout.mipSize.height;
+    }
+    layout.rowsPerImage = rowsPerImage;
 
-        layout.mipSize = {std::max(textureSizeAtLevel0.width >> mipmapLevel, 1u),
-                          std::max(textureSizeAtLevel0.height >> mipmapLevel, 1u),
-                          textureSizeAtLevel0.depthOrArrayLayers};
+    uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
+    layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
 
-        if (dimension == wgpu::TextureDimension::e3D) {
-            layout.mipSize.depthOrArrayLayers =
-                std::max(textureSizeAtLevel0.depthOrArrayLayers >> mipmapLevel, 1u);
-        }
+    layout.byteLength =
+        RequiredBytesInCopy(layout.bytesPerRow, appliedRowsPerImage, layout.mipSize, format);
 
-        layout.bytesPerRow = GetMinimumBytesPerRow(format, layout.mipSize.width);
+    const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
+    layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
+    layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
+    layout.texelBlockCount = layout.byteLength / bytesPerTexel;
 
-        if (rowsPerImage == wgpu::kCopyStrideUndefined) {
-            rowsPerImage = layout.mipSize.height;
-        }
-        layout.rowsPerImage = rowsPerImage;
+    return layout;
+}
 
-        uint32_t appliedRowsPerImage = rowsPerImage > 0 ? rowsPerImage : layout.mipSize.height;
-        layout.bytesPerImage = layout.bytesPerRow * appliedRowsPerImage;
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                             uint64_t rowsPerImage,
+                             wgpu::Extent3D copyExtent,
+                             wgpu::TextureFormat textureFormat) {
+    uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
+    uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
+    uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
+    ASSERT(copyExtent.width % blockWidth == 0);
+    uint32_t widthInBlocks = copyExtent.width / blockWidth;
+    ASSERT(copyExtent.height % blockHeight == 0);
+    uint32_t heightInBlocks = copyExtent.height / blockHeight;
+    return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
+                               copyExtent.depthOrArrayLayers, blockSize);
+}
 
-        layout.byteLength =
-            RequiredBytesInCopy(layout.bytesPerRow, appliedRowsPerImage, layout.mipSize, format);
-
-        const uint32_t bytesPerTexel = utils::GetTexelBlockSizeInBytes(format);
-        layout.texelBlocksPerRow = layout.bytesPerRow / bytesPerTexel;
-        layout.texelBlocksPerImage = layout.bytesPerImage / bytesPerTexel;
-        layout.texelBlockCount = layout.byteLength / bytesPerTexel;
-
-        return layout;
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                             uint64_t rowsPerImage,
+                             uint64_t widthInBlocks,
+                             uint64_t heightInBlocks,
+                             uint64_t depth,
+                             uint64_t bytesPerBlock) {
+    if (depth == 0) {
+        return 0;
     }
 
-    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
-                                 uint64_t rowsPerImage,
-                                 wgpu::Extent3D copyExtent,
-                                 wgpu::TextureFormat textureFormat) {
-        uint32_t blockSize = utils::GetTexelBlockSizeInBytes(textureFormat);
-        uint32_t blockWidth = utils::GetTextureFormatBlockWidth(textureFormat);
-        uint32_t blockHeight = utils::GetTextureFormatBlockHeight(textureFormat);
-        ASSERT(copyExtent.width % blockWidth == 0);
-        uint32_t widthInBlocks = copyExtent.width / blockWidth;
-        ASSERT(copyExtent.height % blockHeight == 0);
-        uint32_t heightInBlocks = copyExtent.height / blockHeight;
-        return RequiredBytesInCopy(bytesPerRow, rowsPerImage, widthInBlocks, heightInBlocks,
-                                   copyExtent.depthOrArrayLayers, blockSize);
+    uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
+    uint64_t requiredBytesInCopy = bytesPerImage * (depth - 1);
+    if (heightInBlocks != 0) {
+        uint64_t lastRowBytes = widthInBlocks * bytesPerBlock;
+        uint64_t lastImageBytes = bytesPerRow * (heightInBlocks - 1) + lastRowBytes;
+        requiredBytesInCopy += lastImageBytes;
     }
+    return requiredBytesInCopy;
+}
 
-    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
-                                 uint64_t rowsPerImage,
-                                 uint64_t widthInBlocks,
-                                 uint64_t heightInBlocks,
-                                 uint64_t depth,
-                                 uint64_t bytesPerBlock) {
-        if (depth == 0) {
-            return 0;
-        }
+uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
+                                   uint64_t rowsPerImage,
+                                   wgpu::Extent3D copyExtent,
+                                   wgpu::TextureFormat textureFormat) {
+    return RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, textureFormat) /
+           utils::GetTexelBlockSizeInBytes(textureFormat);
+}
 
-        uint64_t bytesPerImage = bytesPerRow * rowsPerImage;
-        uint64_t requiredBytesInCopy = bytesPerImage * (depth - 1);
-        if (heightInBlocks != 0) {
-            uint64_t lastRowBytes = widthInBlocks * bytesPerBlock;
-            uint64_t lastImageBytes = bytesPerRow * (heightInBlocks - 1) + lastRowBytes;
-            requiredBytesInCopy += lastImageBytes;
-        }
-        return requiredBytesInCopy;
+void UnalignDynamicUploader(wgpu::Device device) {
+    std::vector<uint8_t> data = {1};
+
+    wgpu::TextureDescriptor descriptor = {};
+    descriptor.size = {1, 1, 1};
+    descriptor.format = wgpu::TextureFormat::R8Unorm;
+    descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture texture = device.CreateTexture(&descriptor);
+
+    wgpu::ImageCopyTexture imageCopyTexture = utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
+    wgpu::TextureDataLayout textureDataLayout =
+        utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
+    wgpu::Extent3D copyExtent = {1, 1, 1};
+
+    // WriteTexture with exactly 1 byte of data.
+    device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
+                                   &copyExtent);
+}
+
+uint32_t VertexFormatSize(wgpu::VertexFormat format) {
+    switch (format) {
+        case wgpu::VertexFormat::Uint8x2:
+        case wgpu::VertexFormat::Sint8x2:
+        case wgpu::VertexFormat::Unorm8x2:
+        case wgpu::VertexFormat::Snorm8x2:
+            return 2;
+        case wgpu::VertexFormat::Uint8x4:
+        case wgpu::VertexFormat::Sint8x4:
+        case wgpu::VertexFormat::Unorm8x4:
+        case wgpu::VertexFormat::Snorm8x4:
+        case wgpu::VertexFormat::Uint16x2:
+        case wgpu::VertexFormat::Sint16x2:
+        case wgpu::VertexFormat::Unorm16x2:
+        case wgpu::VertexFormat::Snorm16x2:
+        case wgpu::VertexFormat::Float16x2:
+        case wgpu::VertexFormat::Float32:
+        case wgpu::VertexFormat::Uint32:
+        case wgpu::VertexFormat::Sint32:
+            return 4;
+        case wgpu::VertexFormat::Uint16x4:
+        case wgpu::VertexFormat::Sint16x4:
+        case wgpu::VertexFormat::Unorm16x4:
+        case wgpu::VertexFormat::Snorm16x4:
+        case wgpu::VertexFormat::Float16x4:
+        case wgpu::VertexFormat::Float32x2:
+        case wgpu::VertexFormat::Uint32x2:
+        case wgpu::VertexFormat::Sint32x2:
+            return 8;
+        case wgpu::VertexFormat::Float32x3:
+        case wgpu::VertexFormat::Uint32x3:
+        case wgpu::VertexFormat::Sint32x3:
+            return 12;
+        case wgpu::VertexFormat::Float32x4:
+        case wgpu::VertexFormat::Uint32x4:
+        case wgpu::VertexFormat::Sint32x4:
+            return 16;
+        case wgpu::VertexFormat::Undefined:
+            break;
     }
-
-    uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
-                                       uint64_t rowsPerImage,
-                                       wgpu::Extent3D copyExtent,
-                                       wgpu::TextureFormat textureFormat) {
-        return RequiredBytesInCopy(bytesPerRow, rowsPerImage, copyExtent, textureFormat) /
-               utils::GetTexelBlockSizeInBytes(textureFormat);
-    }
-
-    void UnalignDynamicUploader(wgpu::Device device) {
-        std::vector<uint8_t> data = {1};
-
-        wgpu::TextureDescriptor descriptor = {};
-        descriptor.size = {1, 1, 1};
-        descriptor.format = wgpu::TextureFormat::R8Unorm;
-        descriptor.usage = wgpu::TextureUsage::CopyDst | wgpu::TextureUsage::CopySrc;
-        wgpu::Texture texture = device.CreateTexture(&descriptor);
-
-        wgpu::ImageCopyTexture imageCopyTexture =
-            utils::CreateImageCopyTexture(texture, 0, {0, 0, 0});
-        wgpu::TextureDataLayout textureDataLayout =
-            utils::CreateTextureDataLayout(0, wgpu::kCopyStrideUndefined);
-        wgpu::Extent3D copyExtent = {1, 1, 1};
-
-        // WriteTexture with exactly 1 byte of data.
-        device.GetQueue().WriteTexture(&imageCopyTexture, data.data(), 1, &textureDataLayout,
-                                       &copyExtent);
-    }
-
-    uint32_t VertexFormatSize(wgpu::VertexFormat format) {
-        switch (format) {
-            case wgpu::VertexFormat::Uint8x2:
-            case wgpu::VertexFormat::Sint8x2:
-            case wgpu::VertexFormat::Unorm8x2:
-            case wgpu::VertexFormat::Snorm8x2:
-                return 2;
-            case wgpu::VertexFormat::Uint8x4:
-            case wgpu::VertexFormat::Sint8x4:
-            case wgpu::VertexFormat::Unorm8x4:
-            case wgpu::VertexFormat::Snorm8x4:
-            case wgpu::VertexFormat::Uint16x2:
-            case wgpu::VertexFormat::Sint16x2:
-            case wgpu::VertexFormat::Unorm16x2:
-            case wgpu::VertexFormat::Snorm16x2:
-            case wgpu::VertexFormat::Float16x2:
-            case wgpu::VertexFormat::Float32:
-            case wgpu::VertexFormat::Uint32:
-            case wgpu::VertexFormat::Sint32:
-                return 4;
-            case wgpu::VertexFormat::Uint16x4:
-            case wgpu::VertexFormat::Sint16x4:
-            case wgpu::VertexFormat::Unorm16x4:
-            case wgpu::VertexFormat::Snorm16x4:
-            case wgpu::VertexFormat::Float16x4:
-            case wgpu::VertexFormat::Float32x2:
-            case wgpu::VertexFormat::Uint32x2:
-            case wgpu::VertexFormat::Sint32x2:
-                return 8;
-            case wgpu::VertexFormat::Float32x3:
-            case wgpu::VertexFormat::Uint32x3:
-            case wgpu::VertexFormat::Sint32x3:
-                return 12;
-            case wgpu::VertexFormat::Float32x4:
-            case wgpu::VertexFormat::Uint32x4:
-            case wgpu::VertexFormat::Sint32x4:
-                return 16;
-            case wgpu::VertexFormat::Undefined:
-                break;
-        }
-        UNREACHABLE();
-    }
+    UNREACHABLE();
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/TestUtils.h b/src/dawn/utils/TestUtils.h
index d6b39e5..5e119cf 100644
--- a/src/dawn/utils/TestUtils.h
+++ b/src/dawn/utils/TestUtils.h
@@ -19,48 +19,48 @@
 
 namespace utils {
 
-    struct TextureDataCopyLayout {
-        uint64_t byteLength;
-        uint64_t texelBlockCount;
-        uint32_t bytesPerRow;
-        uint32_t rowsPerImage;
-        uint32_t texelBlocksPerRow;
-        uint32_t bytesPerImage;
-        uint32_t texelBlocksPerImage;
-        wgpu::Extent3D mipSize;
-    };
+struct TextureDataCopyLayout {
+    uint64_t byteLength;
+    uint64_t texelBlockCount;
+    uint32_t bytesPerRow;
+    uint32_t rowsPerImage;
+    uint32_t texelBlocksPerRow;
+    uint32_t bytesPerImage;
+    uint32_t texelBlocksPerImage;
+    wgpu::Extent3D mipSize;
+};
 
-    uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
-    TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
-        wgpu::TextureFormat format,
-        wgpu::Extent3D textureSizeAtLevel0,
-        uint32_t mipmapLevel,
-        wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D,
-        uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+uint32_t GetMinimumBytesPerRow(wgpu::TextureFormat format, uint32_t width);
+TextureDataCopyLayout GetTextureDataCopyLayoutForTextureAtLevel(
+    wgpu::TextureFormat format,
+    wgpu::Extent3D textureSizeAtLevel0,
+    uint32_t mipmapLevel,
+    wgpu::TextureDimension dimension = wgpu::TextureDimension::e2D,
+    uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
 
-    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
-                                 uint64_t rowsPerImage,
-                                 wgpu::Extent3D copyExtent,
-                                 wgpu::TextureFormat textureFormat);
-    uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
-                                 uint64_t rowsPerImage,
-                                 uint64_t widthInBlocks,
-                                 uint64_t heightInBlocks,
-                                 uint64_t depth,
-                                 uint64_t bytesPerBlock);
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                             uint64_t rowsPerImage,
+                             wgpu::Extent3D copyExtent,
+                             wgpu::TextureFormat textureFormat);
+uint64_t RequiredBytesInCopy(uint64_t bytesPerRow,
+                             uint64_t rowsPerImage,
+                             uint64_t widthInBlocks,
+                             uint64_t heightInBlocks,
+                             uint64_t depth,
+                             uint64_t bytesPerBlock);
 
-    uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
-                                       uint64_t rowsPerImage,
-                                       wgpu::Extent3D copyExtent,
-                                       wgpu::TextureFormat textureFormat);
+uint64_t GetTexelCountInCopyRegion(uint64_t bytesPerRow,
+                                   uint64_t rowsPerImage,
+                                   wgpu::Extent3D copyExtent,
+                                   wgpu::TextureFormat textureFormat);
 
-    // A helper function used for testing DynamicUploader offset alignment.
-    // A call of this function will do a Queue::WriteTexture with 1 byte of data,
-    // so that assuming that WriteTexture uses DynamicUploader, the first RingBuffer
-    // in it will contain 1 byte of data.
-    void UnalignDynamicUploader(wgpu::Device device);
+// A helper function used for testing DynamicUploader offset alignment.
+// A call of this function will do a Queue::WriteTexture with 1 byte of data,
+// so that assuming that WriteTexture uses DynamicUploader, the first RingBuffer
+// in it will contain 1 byte of data.
+void UnalignDynamicUploader(wgpu::Device device);
 
-    uint32_t VertexFormatSize(wgpu::VertexFormat format);
+uint32_t VertexFormatSize(wgpu::VertexFormat format);
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/TextureUtils.cpp b/src/dawn/utils/TextureUtils.cpp
index ce5de07..f0c4505 100644
--- a/src/dawn/utils/TextureUtils.cpp
+++ b/src/dawn/utils/TextureUtils.cpp
@@ -15,756 +15,755 @@
 #include "dawn/utils/TextureUtils.h"
 
 namespace utils {
-    bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
-        switch (format) {
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::R32Sint:
-            case wgpu::TextureFormat::R32Float:
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8Snorm:
-            case wgpu::TextureFormat::RGBA8Uint:
-            case wgpu::TextureFormat::RGBA8Sint:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RG32Sint:
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RGBA16Uint:
-            case wgpu::TextureFormat::RGBA16Sint:
-            case wgpu::TextureFormat::RGBA16Float:
-            case wgpu::TextureFormat::RGBA32Uint:
-            case wgpu::TextureFormat::RGBA32Sint:
-            case wgpu::TextureFormat::RGBA32Float:
-                return true;
+bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8Snorm:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RGBA8Sint:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+        case wgpu::TextureFormat::RGBA32Float:
+            return true;
 
-            default:
-                return false;
-        }
-    }
-
-    bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC4RUnorm:
-            case wgpu::TextureFormat::BC4RSnorm:
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC5RGUnorm:
-            case wgpu::TextureFormat::BC5RGSnorm:
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-            case wgpu::TextureFormat::BC6HRGBFloat:
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-                return true;
-
-            default:
-                return false;
-        }
-    }
-
-    bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::ETC2RGB8Unorm:
-            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-            case wgpu::TextureFormat::EACR11Unorm:
-            case wgpu::TextureFormat::EACR11Snorm:
-            case wgpu::TextureFormat::ETC2RGBA8Unorm:
-            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-            case wgpu::TextureFormat::EACRG11Unorm:
-            case wgpu::TextureFormat::EACRG11Snorm:
-                return true;
-
-            default:
-                return false;
-        }
-    }
-
-    bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::ASTC4x4Unorm:
-            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x4Unorm:
-            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x5Unorm:
-            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x5Unorm:
-            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x6Unorm:
-            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x5Unorm:
-            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x6Unorm:
-            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x8Unorm:
-            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x5Unorm:
-            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x6Unorm:
-            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x8Unorm:
-            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x10Unorm:
-            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x10Unorm:
-            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x12Unorm:
-            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-                return true;
-
-            default:
-                return false;
-        }
-    }
-
-    bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::Depth16Unorm:
-            case wgpu::TextureFormat::Depth24Plus:
-            case wgpu::TextureFormat::Depth32Float:
-                return true;
-            default:
-                return false;
-        }
-    }
-
-    bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat) {
-        if (IsBCTextureFormat(textureFormat) || IsETC2TextureFormat(textureFormat) ||
-            IsASTCTextureFormat(textureFormat)) {
+        default:
             return false;
-        }
+    }
+}
 
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::R32Sint:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RG32Sint:
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RGBA32Uint:
-            case wgpu::TextureFormat::RGBA32Sint:
-            case wgpu::TextureFormat::RGBA32Float:
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-            case wgpu::TextureFormat::R8Snorm:
-            case wgpu::TextureFormat::RG8Snorm:
-            case wgpu::TextureFormat::RGBA8Snorm:
-            case wgpu::TextureFormat::RG11B10Ufloat:
-                return false;
+bool IsBCTextureFormat(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC4RUnorm:
+        case wgpu::TextureFormat::BC4RSnorm:
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC5RGUnorm:
+        case wgpu::TextureFormat::BC5RGSnorm:
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+        case wgpu::TextureFormat::BC6HRGBFloat:
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            return true;
 
-            default:
-                return true;
-        }
+        default:
+            return false;
+    }
+}
+
+bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+        case wgpu::TextureFormat::EACR11Unorm:
+        case wgpu::TextureFormat::EACR11Snorm:
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+        case wgpu::TextureFormat::EACRG11Unorm:
+        case wgpu::TextureFormat::EACRG11Snorm:
+            return true;
+
+        default:
+            return false;
+    }
+}
+
+bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+            return true;
+
+        default:
+            return false;
+    }
+}
+
+bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::Depth16Unorm:
+        case wgpu::TextureFormat::Depth24Plus:
+        case wgpu::TextureFormat::Depth32Float:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat) {
+    if (IsBCTextureFormat(textureFormat) || IsETC2TextureFormat(textureFormat) ||
+        IsASTCTextureFormat(textureFormat)) {
+        return false;
     }
 
-    bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R8Unorm:
-            case wgpu::TextureFormat::R8Uint:
-            case wgpu::TextureFormat::R8Sint:
-            case wgpu::TextureFormat::RG8Unorm:
-            case wgpu::TextureFormat::RG8Uint:
-            case wgpu::TextureFormat::RG8Sint:
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8Uint:
-            case wgpu::TextureFormat::RGBA8Sint:
-            case wgpu::TextureFormat::BGRA8Unorm:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-            case wgpu::TextureFormat::R16Uint:
-            case wgpu::TextureFormat::R16Sint:
-            case wgpu::TextureFormat::R16Float:
-            case wgpu::TextureFormat::RG16Uint:
-            case wgpu::TextureFormat::RG16Sint:
-            case wgpu::TextureFormat::RG16Float:
-            case wgpu::TextureFormat::RGBA16Uint:
-            case wgpu::TextureFormat::RGBA16Sint:
-            case wgpu::TextureFormat::RGBA16Float:
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::R32Sint:
-            case wgpu::TextureFormat::R32Float:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RG32Sint:
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RGBA32Uint:
-            case wgpu::TextureFormat::RGBA32Sint:
-            case wgpu::TextureFormat::RGBA32Float:
-            case wgpu::TextureFormat::RGB10A2Unorm:
-                return true;
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+        case wgpu::TextureFormat::RGBA32Float:
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+        case wgpu::TextureFormat::R8Snorm:
+        case wgpu::TextureFormat::RG8Snorm:
+        case wgpu::TextureFormat::RGBA8Snorm:
+        case wgpu::TextureFormat::RG11B10Ufloat:
+            return false;
 
-            default:
-                return false;
-        }
+        default:
+            return true;
     }
+}
 
-    bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R8Unorm:
-            case wgpu::TextureFormat::RG8Unorm:
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-            case wgpu::TextureFormat::BGRA8Unorm:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-            case wgpu::TextureFormat::R16Float:
-            case wgpu::TextureFormat::RG16Float:
-            case wgpu::TextureFormat::RGBA16Float:
-            case wgpu::TextureFormat::RGB10A2Unorm:
-                return true;
+bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R8Uint:
+        case wgpu::TextureFormat::R8Sint:
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG8Uint:
+        case wgpu::TextureFormat::RG8Sint:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RGBA8Sint:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::R16Uint:
+        case wgpu::TextureFormat::R16Sint:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::RG16Uint:
+        case wgpu::TextureFormat::RG16Sint:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+        case wgpu::TextureFormat::RGBA32Float:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+            return true;
 
-            default:
-                return false;
-        }
+        default:
+            return false;
     }
+}
 
-    bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat) {
-        return textureFormat == wgpu::TextureFormat::Stencil8;
+bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+            return true;
+
+        default:
+            return false;
     }
+}
 
-    uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R8Unorm:
-            case wgpu::TextureFormat::R8Snorm:
-            case wgpu::TextureFormat::R8Uint:
-            case wgpu::TextureFormat::R8Sint:
-            case wgpu::TextureFormat::Stencil8:
-                return 1u;
+bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat) {
+    return textureFormat == wgpu::TextureFormat::Stencil8;
+}
 
-            case wgpu::TextureFormat::R16Uint:
-            case wgpu::TextureFormat::R16Sint:
-            case wgpu::TextureFormat::R16Float:
-            case wgpu::TextureFormat::RG8Unorm:
-            case wgpu::TextureFormat::RG8Snorm:
-            case wgpu::TextureFormat::RG8Uint:
-            case wgpu::TextureFormat::RG8Sint:
-                return 2u;
+uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R8Snorm:
+        case wgpu::TextureFormat::R8Uint:
+        case wgpu::TextureFormat::R8Sint:
+        case wgpu::TextureFormat::Stencil8:
+            return 1u;
 
-            case wgpu::TextureFormat::R32Float:
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::R32Sint:
-            case wgpu::TextureFormat::RG16Uint:
-            case wgpu::TextureFormat::RG16Sint:
-            case wgpu::TextureFormat::RG16Float:
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-            case wgpu::TextureFormat::RGBA8Snorm:
-            case wgpu::TextureFormat::RGBA8Uint:
-            case wgpu::TextureFormat::RGBA8Sint:
-            case wgpu::TextureFormat::BGRA8Unorm:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-            case wgpu::TextureFormat::RGB10A2Unorm:
-            case wgpu::TextureFormat::RG11B10Ufloat:
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-                return 4u;
+        case wgpu::TextureFormat::R16Uint:
+        case wgpu::TextureFormat::R16Sint:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG8Snorm:
+        case wgpu::TextureFormat::RG8Uint:
+        case wgpu::TextureFormat::RG8Sint:
+            return 2u;
 
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RG32Sint:
-            case wgpu::TextureFormat::RGBA16Uint:
-            case wgpu::TextureFormat::RGBA16Sint:
-            case wgpu::TextureFormat::RGBA16Float:
-                return 8u;
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::RG16Uint:
+        case wgpu::TextureFormat::RG16Sint:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::RGBA8Snorm:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RGBA8Sint:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+        case wgpu::TextureFormat::RG11B10Ufloat:
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+            return 4u;
 
-            case wgpu::TextureFormat::RGBA32Float:
-            case wgpu::TextureFormat::RGBA32Uint:
-            case wgpu::TextureFormat::RGBA32Sint:
-                return 16u;
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA16Float:
+            return 8u;
 
-            case wgpu::TextureFormat::Depth16Unorm:
-                return 2u;
+        case wgpu::TextureFormat::RGBA32Float:
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+            return 16u;
 
-            case wgpu::TextureFormat::Depth24Plus:
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-            case wgpu::TextureFormat::Depth32Float:
-                return 4u;
+        case wgpu::TextureFormat::Depth16Unorm:
+            return 2u;
 
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC4RUnorm:
-            case wgpu::TextureFormat::BC4RSnorm:
-                return 8u;
+        case wgpu::TextureFormat::Depth24Plus:
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+        case wgpu::TextureFormat::Depth32Float:
+            return 4u;
 
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC5RGUnorm:
-            case wgpu::TextureFormat::BC5RGSnorm:
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-            case wgpu::TextureFormat::BC6HRGBFloat:
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-                return 16u;
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC4RUnorm:
+        case wgpu::TextureFormat::BC4RSnorm:
+            return 8u;
 
-            case wgpu::TextureFormat::ETC2RGB8Unorm:
-            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-            case wgpu::TextureFormat::EACR11Unorm:
-            case wgpu::TextureFormat::EACR11Snorm:
-                return 8u;
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC5RGUnorm:
+        case wgpu::TextureFormat::BC5RGSnorm:
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+        case wgpu::TextureFormat::BC6HRGBFloat:
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+            return 16u;
 
-            case wgpu::TextureFormat::ETC2RGBA8Unorm:
-            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-            case wgpu::TextureFormat::EACRG11Unorm:
-            case wgpu::TextureFormat::EACRG11Snorm:
-                return 16u;
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+        case wgpu::TextureFormat::EACR11Unorm:
+        case wgpu::TextureFormat::EACR11Snorm:
+            return 8u;
 
-            case wgpu::TextureFormat::ASTC4x4Unorm:
-            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x4Unorm:
-            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x5Unorm:
-            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x5Unorm:
-            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x6Unorm:
-            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x5Unorm:
-            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x6Unorm:
-            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x8Unorm:
-            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x5Unorm:
-            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x6Unorm:
-            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x8Unorm:
-            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x10Unorm:
-            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x10Unorm:
-            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x12Unorm:
-            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-                return 16u;
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+        case wgpu::TextureFormat::EACRG11Unorm:
+        case wgpu::TextureFormat::EACRG11Snorm:
+            return 16u;
 
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-            case wgpu::TextureFormat::Depth32FloatStencil8:
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+            return 16u;
 
-            // Block size of a multi-planar format depends on aspect.
-            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
 
-            case wgpu::TextureFormat::Undefined:
-                break;
-        }
-        UNREACHABLE();
+        // Block size of a multi-planar format depends on aspect.
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+
+        case wgpu::TextureFormat::Undefined:
+            break;
     }
+    UNREACHABLE();
+}
 
-    uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R8Unorm:
-            case wgpu::TextureFormat::R8Snorm:
-            case wgpu::TextureFormat::R8Uint:
-            case wgpu::TextureFormat::R8Sint:
-            case wgpu::TextureFormat::R16Uint:
-            case wgpu::TextureFormat::R16Sint:
-            case wgpu::TextureFormat::R16Float:
-            case wgpu::TextureFormat::RG8Unorm:
-            case wgpu::TextureFormat::RG8Snorm:
-            case wgpu::TextureFormat::RG8Uint:
-            case wgpu::TextureFormat::RG8Sint:
-            case wgpu::TextureFormat::R32Float:
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::R32Sint:
-            case wgpu::TextureFormat::RG16Uint:
-            case wgpu::TextureFormat::RG16Sint:
-            case wgpu::TextureFormat::RG16Float:
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-            case wgpu::TextureFormat::RGBA8Snorm:
-            case wgpu::TextureFormat::RGBA8Uint:
-            case wgpu::TextureFormat::RGBA8Sint:
-            case wgpu::TextureFormat::BGRA8Unorm:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-            case wgpu::TextureFormat::RGB10A2Unorm:
-            case wgpu::TextureFormat::RG11B10Ufloat:
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RG32Sint:
-            case wgpu::TextureFormat::RGBA16Uint:
-            case wgpu::TextureFormat::RGBA16Sint:
-            case wgpu::TextureFormat::RGBA16Float:
-            case wgpu::TextureFormat::RGBA32Float:
-            case wgpu::TextureFormat::RGBA32Uint:
-            case wgpu::TextureFormat::RGBA32Sint:
-            case wgpu::TextureFormat::Depth32Float:
-            case wgpu::TextureFormat::Depth24Plus:
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-            case wgpu::TextureFormat::Depth16Unorm:
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-            case wgpu::TextureFormat::Depth32FloatStencil8:
-            case wgpu::TextureFormat::Stencil8:
-                return 1u;
+uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R8Snorm:
+        case wgpu::TextureFormat::R8Uint:
+        case wgpu::TextureFormat::R8Sint:
+        case wgpu::TextureFormat::R16Uint:
+        case wgpu::TextureFormat::R16Sint:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG8Snorm:
+        case wgpu::TextureFormat::RG8Uint:
+        case wgpu::TextureFormat::RG8Sint:
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::RG16Uint:
+        case wgpu::TextureFormat::RG16Sint:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::RGBA8Snorm:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RGBA8Sint:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+        case wgpu::TextureFormat::RG11B10Ufloat:
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::RGBA32Float:
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+        case wgpu::TextureFormat::Depth32Float:
+        case wgpu::TextureFormat::Depth24Plus:
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth16Unorm:
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+        case wgpu::TextureFormat::Stencil8:
+            return 1u;
 
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC4RUnorm:
-            case wgpu::TextureFormat::BC4RSnorm:
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC5RGUnorm:
-            case wgpu::TextureFormat::BC5RGSnorm:
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-            case wgpu::TextureFormat::BC6HRGBFloat:
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-            case wgpu::TextureFormat::ETC2RGB8Unorm:
-            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGBA8Unorm:
-            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-            case wgpu::TextureFormat::EACR11Unorm:
-            case wgpu::TextureFormat::EACR11Snorm:
-            case wgpu::TextureFormat::EACRG11Unorm:
-            case wgpu::TextureFormat::EACRG11Snorm:
-                return 4u;
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC4RUnorm:
+        case wgpu::TextureFormat::BC4RSnorm:
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC5RGUnorm:
+        case wgpu::TextureFormat::BC5RGSnorm:
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+        case wgpu::TextureFormat::BC6HRGBFloat:
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+        case wgpu::TextureFormat::EACR11Unorm:
+        case wgpu::TextureFormat::EACR11Snorm:
+        case wgpu::TextureFormat::EACRG11Unorm:
+        case wgpu::TextureFormat::EACRG11Snorm:
+            return 4u;
 
-            case wgpu::TextureFormat::ASTC4x4Unorm:
-            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-                return 4u;
-            case wgpu::TextureFormat::ASTC5x4Unorm:
-            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x5Unorm:
-            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-                return 5u;
-            case wgpu::TextureFormat::ASTC6x5Unorm:
-            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x6Unorm:
-            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-                return 6u;
-            case wgpu::TextureFormat::ASTC8x5Unorm:
-            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x6Unorm:
-            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x8Unorm:
-            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-                return 8u;
-            case wgpu::TextureFormat::ASTC10x5Unorm:
-            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x6Unorm:
-            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x8Unorm:
-            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x10Unorm:
-            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-                return 10u;
-            case wgpu::TextureFormat::ASTC12x10Unorm:
-            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x12Unorm:
-            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-                return 12u;
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+            return 4u;
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+            return 5u;
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+            return 6u;
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+            return 8u;
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+            return 10u;
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+            return 12u;
 
-            // Block size of a multi-planar format depends on aspect.
-            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+        // Block size of a multi-planar format depends on aspect.
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
 
-            case wgpu::TextureFormat::Undefined:
-                break;
-        }
-        UNREACHABLE();
+        case wgpu::TextureFormat::Undefined:
+            break;
     }
+    UNREACHABLE();
+}
 
-    uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R8Unorm:
-            case wgpu::TextureFormat::R8Snorm:
-            case wgpu::TextureFormat::R8Uint:
-            case wgpu::TextureFormat::R8Sint:
-            case wgpu::TextureFormat::R16Uint:
-            case wgpu::TextureFormat::R16Sint:
-            case wgpu::TextureFormat::R16Float:
-            case wgpu::TextureFormat::RG8Unorm:
-            case wgpu::TextureFormat::RG8Snorm:
-            case wgpu::TextureFormat::RG8Uint:
-            case wgpu::TextureFormat::RG8Sint:
-            case wgpu::TextureFormat::R32Float:
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::R32Sint:
-            case wgpu::TextureFormat::RG16Uint:
-            case wgpu::TextureFormat::RG16Sint:
-            case wgpu::TextureFormat::RG16Float:
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-            case wgpu::TextureFormat::RGBA8Snorm:
-            case wgpu::TextureFormat::RGBA8Uint:
-            case wgpu::TextureFormat::RGBA8Sint:
-            case wgpu::TextureFormat::BGRA8Unorm:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-            case wgpu::TextureFormat::RGB10A2Unorm:
-            case wgpu::TextureFormat::RG11B10Ufloat:
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RG32Sint:
-            case wgpu::TextureFormat::RGBA16Uint:
-            case wgpu::TextureFormat::RGBA16Sint:
-            case wgpu::TextureFormat::RGBA16Float:
-            case wgpu::TextureFormat::RGBA32Float:
-            case wgpu::TextureFormat::RGBA32Uint:
-            case wgpu::TextureFormat::RGBA32Sint:
-            case wgpu::TextureFormat::Depth32Float:
-            case wgpu::TextureFormat::Depth24Plus:
-            case wgpu::TextureFormat::Depth24PlusStencil8:
-            case wgpu::TextureFormat::Depth16Unorm:
-            case wgpu::TextureFormat::Depth24UnormStencil8:
-            case wgpu::TextureFormat::Depth32FloatStencil8:
-            case wgpu::TextureFormat::Stencil8:
-                return 1u;
+uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R8Snorm:
+        case wgpu::TextureFormat::R8Uint:
+        case wgpu::TextureFormat::R8Sint:
+        case wgpu::TextureFormat::R16Uint:
+        case wgpu::TextureFormat::R16Sint:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG8Snorm:
+        case wgpu::TextureFormat::RG8Uint:
+        case wgpu::TextureFormat::RG8Sint:
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::RG16Uint:
+        case wgpu::TextureFormat::RG16Sint:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::RGBA8Snorm:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RGBA8Sint:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+        case wgpu::TextureFormat::RG11B10Ufloat:
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::RGBA32Float:
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+        case wgpu::TextureFormat::Depth32Float:
+        case wgpu::TextureFormat::Depth24Plus:
+        case wgpu::TextureFormat::Depth24PlusStencil8:
+        case wgpu::TextureFormat::Depth16Unorm:
+        case wgpu::TextureFormat::Depth24UnormStencil8:
+        case wgpu::TextureFormat::Depth32FloatStencil8:
+        case wgpu::TextureFormat::Stencil8:
+            return 1u;
 
-            case wgpu::TextureFormat::BC1RGBAUnorm:
-            case wgpu::TextureFormat::BC1RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC4RUnorm:
-            case wgpu::TextureFormat::BC4RSnorm:
-            case wgpu::TextureFormat::BC2RGBAUnorm:
-            case wgpu::TextureFormat::BC2RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC3RGBAUnorm:
-            case wgpu::TextureFormat::BC3RGBAUnormSrgb:
-            case wgpu::TextureFormat::BC5RGUnorm:
-            case wgpu::TextureFormat::BC5RGSnorm:
-            case wgpu::TextureFormat::BC6HRGBUfloat:
-            case wgpu::TextureFormat::BC6HRGBFloat:
-            case wgpu::TextureFormat::BC7RGBAUnorm:
-            case wgpu::TextureFormat::BC7RGBAUnormSrgb:
-            case wgpu::TextureFormat::ETC2RGB8Unorm:
-            case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGB8A1Unorm:
-            case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
-            case wgpu::TextureFormat::ETC2RGBA8Unorm:
-            case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
-            case wgpu::TextureFormat::EACR11Unorm:
-            case wgpu::TextureFormat::EACR11Snorm:
-            case wgpu::TextureFormat::EACRG11Unorm:
-            case wgpu::TextureFormat::EACRG11Snorm:
-                return 4u;
+        case wgpu::TextureFormat::BC1RGBAUnorm:
+        case wgpu::TextureFormat::BC1RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC4RUnorm:
+        case wgpu::TextureFormat::BC4RSnorm:
+        case wgpu::TextureFormat::BC2RGBAUnorm:
+        case wgpu::TextureFormat::BC2RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC3RGBAUnorm:
+        case wgpu::TextureFormat::BC3RGBAUnormSrgb:
+        case wgpu::TextureFormat::BC5RGUnorm:
+        case wgpu::TextureFormat::BC5RGSnorm:
+        case wgpu::TextureFormat::BC6HRGBUfloat:
+        case wgpu::TextureFormat::BC6HRGBFloat:
+        case wgpu::TextureFormat::BC7RGBAUnorm:
+        case wgpu::TextureFormat::BC7RGBAUnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8Unorm:
+        case wgpu::TextureFormat::ETC2RGB8UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGB8A1Unorm:
+        case wgpu::TextureFormat::ETC2RGB8A1UnormSrgb:
+        case wgpu::TextureFormat::ETC2RGBA8Unorm:
+        case wgpu::TextureFormat::ETC2RGBA8UnormSrgb:
+        case wgpu::TextureFormat::EACR11Unorm:
+        case wgpu::TextureFormat::EACR11Snorm:
+        case wgpu::TextureFormat::EACRG11Unorm:
+        case wgpu::TextureFormat::EACRG11Snorm:
+            return 4u;
 
-            case wgpu::TextureFormat::ASTC4x4Unorm:
-            case wgpu::TextureFormat::ASTC4x4UnormSrgb:
-            case wgpu::TextureFormat::ASTC5x4Unorm:
-            case wgpu::TextureFormat::ASTC5x4UnormSrgb:
-                return 4u;
-            case wgpu::TextureFormat::ASTC5x5Unorm:
-            case wgpu::TextureFormat::ASTC5x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC6x5Unorm:
-            case wgpu::TextureFormat::ASTC6x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x5Unorm:
-            case wgpu::TextureFormat::ASTC8x5UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x5Unorm:
-            case wgpu::TextureFormat::ASTC10x5UnormSrgb:
-                return 5u;
-            case wgpu::TextureFormat::ASTC6x6Unorm:
-            case wgpu::TextureFormat::ASTC6x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC8x6Unorm:
-            case wgpu::TextureFormat::ASTC8x6UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x6Unorm:
-            case wgpu::TextureFormat::ASTC10x6UnormSrgb:
-                return 6u;
-            case wgpu::TextureFormat::ASTC8x8Unorm:
-            case wgpu::TextureFormat::ASTC8x8UnormSrgb:
-            case wgpu::TextureFormat::ASTC10x8Unorm:
-            case wgpu::TextureFormat::ASTC10x8UnormSrgb:
-                return 8u;
-            case wgpu::TextureFormat::ASTC10x10Unorm:
-            case wgpu::TextureFormat::ASTC10x10UnormSrgb:
-            case wgpu::TextureFormat::ASTC12x10Unorm:
-            case wgpu::TextureFormat::ASTC12x10UnormSrgb:
-                return 10u;
-            case wgpu::TextureFormat::ASTC12x12Unorm:
-            case wgpu::TextureFormat::ASTC12x12UnormSrgb:
-                return 12u;
+        case wgpu::TextureFormat::ASTC4x4Unorm:
+        case wgpu::TextureFormat::ASTC4x4UnormSrgb:
+        case wgpu::TextureFormat::ASTC5x4Unorm:
+        case wgpu::TextureFormat::ASTC5x4UnormSrgb:
+            return 4u;
+        case wgpu::TextureFormat::ASTC5x5Unorm:
+        case wgpu::TextureFormat::ASTC5x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC6x5Unorm:
+        case wgpu::TextureFormat::ASTC6x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x5Unorm:
+        case wgpu::TextureFormat::ASTC8x5UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x5Unorm:
+        case wgpu::TextureFormat::ASTC10x5UnormSrgb:
+            return 5u;
+        case wgpu::TextureFormat::ASTC6x6Unorm:
+        case wgpu::TextureFormat::ASTC6x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC8x6Unorm:
+        case wgpu::TextureFormat::ASTC8x6UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x6Unorm:
+        case wgpu::TextureFormat::ASTC10x6UnormSrgb:
+            return 6u;
+        case wgpu::TextureFormat::ASTC8x8Unorm:
+        case wgpu::TextureFormat::ASTC8x8UnormSrgb:
+        case wgpu::TextureFormat::ASTC10x8Unorm:
+        case wgpu::TextureFormat::ASTC10x8UnormSrgb:
+            return 8u;
+        case wgpu::TextureFormat::ASTC10x10Unorm:
+        case wgpu::TextureFormat::ASTC10x10UnormSrgb:
+        case wgpu::TextureFormat::ASTC12x10Unorm:
+        case wgpu::TextureFormat::ASTC12x10UnormSrgb:
+            return 10u;
+        case wgpu::TextureFormat::ASTC12x12Unorm:
+        case wgpu::TextureFormat::ASTC12x12UnormSrgb:
+            return 12u;
 
-            // Block size of a multi-planar format depends on aspect.
-            case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
+        // Block size of a multi-planar format depends on aspect.
+        case wgpu::TextureFormat::R8BG8Biplanar420Unorm:
 
-            case wgpu::TextureFormat::Undefined:
-                break;
-        }
-        UNREACHABLE();
+        case wgpu::TextureFormat::Undefined:
+            break;
     }
+    UNREACHABLE();
+}
 
-    const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R8Unorm:
-            case wgpu::TextureFormat::R8Snorm:
-            case wgpu::TextureFormat::R16Float:
-            case wgpu::TextureFormat::RG8Unorm:
-            case wgpu::TextureFormat::RG8Snorm:
-            case wgpu::TextureFormat::R32Float:
-            case wgpu::TextureFormat::RG16Float:
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8Snorm:
-            case wgpu::TextureFormat::RGB10A2Unorm:
-            case wgpu::TextureFormat::RG11B10Ufloat:
-            case wgpu::TextureFormat::RGB9E5Ufloat:
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RGBA16Float:
-            case wgpu::TextureFormat::RGBA32Float:
-            case wgpu::TextureFormat::BGRA8Unorm:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-                return "f32";
+const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R8Snorm:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG8Snorm:
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8Snorm:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+        case wgpu::TextureFormat::RG11B10Ufloat:
+        case wgpu::TextureFormat::RGB9E5Ufloat:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::RGBA32Float:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+            return "f32";
 
-            case wgpu::TextureFormat::R8Uint:
-            case wgpu::TextureFormat::R16Uint:
-            case wgpu::TextureFormat::RG8Uint:
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::RG16Uint:
-            case wgpu::TextureFormat::RGBA8Uint:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RGBA16Uint:
-            case wgpu::TextureFormat::RGBA32Uint:
-                return "u32";
+        case wgpu::TextureFormat::R8Uint:
+        case wgpu::TextureFormat::R16Uint:
+        case wgpu::TextureFormat::RG8Uint:
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::RG16Uint:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA32Uint:
+            return "u32";
 
-            case wgpu::TextureFormat::R8Sint:
-            case wgpu::TextureFormat::R16Sint:
-            case wgpu::TextureFormat::RG8Sint:
-            case wgpu::TextureFormat::R32Sint:
-            case wgpu::TextureFormat::RG16Sint:
-            case wgpu::TextureFormat::RGBA8Sint:
-            case wgpu::TextureFormat::RG32Sint:
-            case wgpu::TextureFormat::RGBA16Sint:
-            case wgpu::TextureFormat::RGBA32Sint:
-                return "i32";
+        case wgpu::TextureFormat::R8Sint:
+        case wgpu::TextureFormat::R16Sint:
+        case wgpu::TextureFormat::RG8Sint:
+        case wgpu::TextureFormat::R32Sint:
+        case wgpu::TextureFormat::RG16Sint:
+        case wgpu::TextureFormat::RGBA8Sint:
+        case wgpu::TextureFormat::RG32Sint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA32Sint:
+            return "i32";
 
-            default:
-                UNREACHABLE();
-        }
+        default:
+            UNREACHABLE();
     }
+}
 
-    uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::R8Unorm:
-            case wgpu::TextureFormat::R8Uint:
-            case wgpu::TextureFormat::R8Sint:
-            case wgpu::TextureFormat::R16Uint:
-            case wgpu::TextureFormat::R16Sint:
-            case wgpu::TextureFormat::R16Float:
-            case wgpu::TextureFormat::R32Float:
-            case wgpu::TextureFormat::R32Uint:
-            case wgpu::TextureFormat::R32Sint:
-                return 1u;
-            case wgpu::TextureFormat::RG8Unorm:
-            case wgpu::TextureFormat::RG8Uint:
-            case wgpu::TextureFormat::RG8Sint:
-            case wgpu::TextureFormat::RG16Uint:
-            case wgpu::TextureFormat::RG16Sint:
-            case wgpu::TextureFormat::RG16Float:
-            case wgpu::TextureFormat::RG32Float:
-            case wgpu::TextureFormat::RG32Uint:
-            case wgpu::TextureFormat::RG32Sint:
-                return 2u;
-            case wgpu::TextureFormat::RGBA8Unorm:
-            case wgpu::TextureFormat::RGBA8UnormSrgb:
-            case wgpu::TextureFormat::RGBA8Uint:
-            case wgpu::TextureFormat::RGBA8Sint:
-            case wgpu::TextureFormat::BGRA8Unorm:
-            case wgpu::TextureFormat::BGRA8UnormSrgb:
-            case wgpu::TextureFormat::RGB10A2Unorm:
-            case wgpu::TextureFormat::RGBA16Uint:
-            case wgpu::TextureFormat::RGBA16Sint:
-            case wgpu::TextureFormat::RGBA16Float:
-            case wgpu::TextureFormat::RGBA32Float:
-            case wgpu::TextureFormat::RGBA32Uint:
-            case wgpu::TextureFormat::RGBA32Sint:
-                return 4u;
-            default:
-                UNREACHABLE();
-        }
+uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::R8Unorm:
+        case wgpu::TextureFormat::R8Uint:
+        case wgpu::TextureFormat::R8Sint:
+        case wgpu::TextureFormat::R16Uint:
+        case wgpu::TextureFormat::R16Sint:
+        case wgpu::TextureFormat::R16Float:
+        case wgpu::TextureFormat::R32Float:
+        case wgpu::TextureFormat::R32Uint:
+        case wgpu::TextureFormat::R32Sint:
+            return 1u;
+        case wgpu::TextureFormat::RG8Unorm:
+        case wgpu::TextureFormat::RG8Uint:
+        case wgpu::TextureFormat::RG8Sint:
+        case wgpu::TextureFormat::RG16Uint:
+        case wgpu::TextureFormat::RG16Sint:
+        case wgpu::TextureFormat::RG16Float:
+        case wgpu::TextureFormat::RG32Float:
+        case wgpu::TextureFormat::RG32Uint:
+        case wgpu::TextureFormat::RG32Sint:
+            return 2u;
+        case wgpu::TextureFormat::RGBA8Unorm:
+        case wgpu::TextureFormat::RGBA8UnormSrgb:
+        case wgpu::TextureFormat::RGBA8Uint:
+        case wgpu::TextureFormat::RGBA8Sint:
+        case wgpu::TextureFormat::BGRA8Unorm:
+        case wgpu::TextureFormat::BGRA8UnormSrgb:
+        case wgpu::TextureFormat::RGB10A2Unorm:
+        case wgpu::TextureFormat::RGBA16Uint:
+        case wgpu::TextureFormat::RGBA16Sint:
+        case wgpu::TextureFormat::RGBA16Float:
+        case wgpu::TextureFormat::RGBA32Float:
+        case wgpu::TextureFormat::RGBA32Uint:
+        case wgpu::TextureFormat::RGBA32Sint:
+            return 4u;
+        default:
+            UNREACHABLE();
     }
+}
 
-    const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
-        switch (textureFormat) {
-            case wgpu::TextureFormat::RGBA8Unorm:
-                return "rgba8unorm";
-            case wgpu::TextureFormat::RGBA8Snorm:
-                return "rgba8snorm";
-            case wgpu::TextureFormat::RGBA8Uint:
-                return "rgba8uint";
-            case wgpu::TextureFormat::RGBA8Sint:
-                return "rgba8sint";
-            case wgpu::TextureFormat::RGBA16Uint:
-                return "rgba16uint";
-            case wgpu::TextureFormat::RGBA16Sint:
-                return "rgba16sint";
-            case wgpu::TextureFormat::RGBA16Float:
-                return "rgba16float";
-            case wgpu::TextureFormat::R32Uint:
-                return "r32uint";
-            case wgpu::TextureFormat::R32Sint:
-                return "r32sint";
-            case wgpu::TextureFormat::R32Float:
-                return "r32float";
-            case wgpu::TextureFormat::RG32Uint:
-                return "rg32uint";
-            case wgpu::TextureFormat::RG32Sint:
-                return "rg32sint";
-            case wgpu::TextureFormat::RG32Float:
-                return "rg32float";
-            case wgpu::TextureFormat::RGBA32Uint:
-                return "rgba32uint";
-            case wgpu::TextureFormat::RGBA32Sint:
-                return "rgba32sint";
-            case wgpu::TextureFormat::RGBA32Float:
-                return "rgba32float";
+const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat) {
+    switch (textureFormat) {
+        case wgpu::TextureFormat::RGBA8Unorm:
+            return "rgba8unorm";
+        case wgpu::TextureFormat::RGBA8Snorm:
+            return "rgba8snorm";
+        case wgpu::TextureFormat::RGBA8Uint:
+            return "rgba8uint";
+        case wgpu::TextureFormat::RGBA8Sint:
+            return "rgba8sint";
+        case wgpu::TextureFormat::RGBA16Uint:
+            return "rgba16uint";
+        case wgpu::TextureFormat::RGBA16Sint:
+            return "rgba16sint";
+        case wgpu::TextureFormat::RGBA16Float:
+            return "rgba16float";
+        case wgpu::TextureFormat::R32Uint:
+            return "r32uint";
+        case wgpu::TextureFormat::R32Sint:
+            return "r32sint";
+        case wgpu::TextureFormat::R32Float:
+            return "r32float";
+        case wgpu::TextureFormat::RG32Uint:
+            return "rg32uint";
+        case wgpu::TextureFormat::RG32Sint:
+            return "rg32sint";
+        case wgpu::TextureFormat::RG32Float:
+            return "rg32float";
+        case wgpu::TextureFormat::RGBA32Uint:
+            return "rgba32uint";
+        case wgpu::TextureFormat::RGBA32Sint:
+            return "rgba32sint";
+        case wgpu::TextureFormat::RGBA32Float:
+            return "rgba32float";
 
-            // The below do not currently exist in the WGSL spec, but are used
-            // for tests that expect compilation failure.
-            case wgpu::TextureFormat::R8Unorm:
-                return "r8unorm";
-            case wgpu::TextureFormat::R8Snorm:
-                return "r8snorm";
-            case wgpu::TextureFormat::R8Uint:
-                return "r8uint";
-            case wgpu::TextureFormat::R8Sint:
-                return "r8sint";
-            case wgpu::TextureFormat::R16Uint:
-                return "r16uint";
-            case wgpu::TextureFormat::R16Sint:
-                return "r16sint";
-            case wgpu::TextureFormat::R16Float:
-                return "r16float";
-            case wgpu::TextureFormat::RG8Unorm:
-                return "rg8unorm";
-            case wgpu::TextureFormat::RG8Snorm:
-                return "rg8snorm";
-            case wgpu::TextureFormat::RG8Uint:
-                return "rg8uint";
-            case wgpu::TextureFormat::RG8Sint:
-                return "rg8sint";
-            case wgpu::TextureFormat::RG16Uint:
-                return "rg16uint";
-            case wgpu::TextureFormat::RG16Sint:
-                return "rg16sint";
-            case wgpu::TextureFormat::RG16Float:
-                return "rg16float";
-            case wgpu::TextureFormat::RGB10A2Unorm:
-                return "rgb10a2unorm";
-            case wgpu::TextureFormat::RG11B10Ufloat:
-                return "rg11b10ufloat";
+        // The below do not currently exist in the WGSL spec, but are used
+        // for tests that expect compilation failure.
+        case wgpu::TextureFormat::R8Unorm:
+            return "r8unorm";
+        case wgpu::TextureFormat::R8Snorm:
+            return "r8snorm";
+        case wgpu::TextureFormat::R8Uint:
+            return "r8uint";
+        case wgpu::TextureFormat::R8Sint:
+            return "r8sint";
+        case wgpu::TextureFormat::R16Uint:
+            return "r16uint";
+        case wgpu::TextureFormat::R16Sint:
+            return "r16sint";
+        case wgpu::TextureFormat::R16Float:
+            return "r16float";
+        case wgpu::TextureFormat::RG8Unorm:
+            return "rg8unorm";
+        case wgpu::TextureFormat::RG8Snorm:
+            return "rg8snorm";
+        case wgpu::TextureFormat::RG8Uint:
+            return "rg8uint";
+        case wgpu::TextureFormat::RG8Sint:
+            return "rg8sint";
+        case wgpu::TextureFormat::RG16Uint:
+            return "rg16uint";
+        case wgpu::TextureFormat::RG16Sint:
+            return "rg16sint";
+        case wgpu::TextureFormat::RG16Float:
+            return "rg16float";
+        case wgpu::TextureFormat::RGB10A2Unorm:
+            return "rgb10a2unorm";
+        case wgpu::TextureFormat::RG11B10Ufloat:
+            return "rg11b10ufloat";
 
-            default:
-                UNREACHABLE();
-        }
+        default:
+            UNREACHABLE();
     }
+}
 
-    wgpu::TextureDimension ViewDimensionToTextureDimension(
-        const wgpu::TextureViewDimension dimension) {
-        switch (dimension) {
-            case wgpu::TextureViewDimension::e2D:
-            case wgpu::TextureViewDimension::e2DArray:
-            case wgpu::TextureViewDimension::Cube:
-            case wgpu::TextureViewDimension::CubeArray:
-                return wgpu::TextureDimension::e2D;
-            case wgpu::TextureViewDimension::e3D:
-                return wgpu::TextureDimension::e3D;
-            // TODO(crbug.com/dawn/814): Implement for 1D texture.
-            case wgpu::TextureViewDimension::e1D:
-            default:
-                UNREACHABLE();
-                break;
-        }
+wgpu::TextureDimension ViewDimensionToTextureDimension(const wgpu::TextureViewDimension dimension) {
+    switch (dimension) {
+        case wgpu::TextureViewDimension::e2D:
+        case wgpu::TextureViewDimension::e2DArray:
+        case wgpu::TextureViewDimension::Cube:
+        case wgpu::TextureViewDimension::CubeArray:
+            return wgpu::TextureDimension::e2D;
+        case wgpu::TextureViewDimension::e3D:
+            return wgpu::TextureDimension::e3D;
+        // TODO(crbug.com/dawn/814): Implement for 1D texture.
+        case wgpu::TextureViewDimension::e1D:
+        default:
+            UNREACHABLE();
+            break;
     }
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/TextureUtils.h b/src/dawn/utils/TextureUtils.h
index a92bc9d..f92b19b 100644
--- a/src/dawn/utils/TextureUtils.h
+++ b/src/dawn/utils/TextureUtils.h
@@ -22,230 +22,229 @@
 #include "dawn/common/Assert.h"
 
 namespace utils {
-    static constexpr std::array<wgpu::TextureFormat, 95> kAllTextureFormats = {
-        wgpu::TextureFormat::R8Unorm,
-        wgpu::TextureFormat::R8Snorm,
-        wgpu::TextureFormat::R8Uint,
-        wgpu::TextureFormat::R8Sint,
-        wgpu::TextureFormat::R16Uint,
-        wgpu::TextureFormat::R16Sint,
-        wgpu::TextureFormat::R16Float,
-        wgpu::TextureFormat::RG8Unorm,
-        wgpu::TextureFormat::RG8Snorm,
-        wgpu::TextureFormat::RG8Uint,
-        wgpu::TextureFormat::RG8Sint,
-        wgpu::TextureFormat::R32Float,
-        wgpu::TextureFormat::R32Uint,
-        wgpu::TextureFormat::R32Sint,
-        wgpu::TextureFormat::RG16Uint,
-        wgpu::TextureFormat::RG16Sint,
-        wgpu::TextureFormat::RG16Float,
-        wgpu::TextureFormat::RGBA8Unorm,
-        wgpu::TextureFormat::RGBA8UnormSrgb,
-        wgpu::TextureFormat::RGBA8Snorm,
-        wgpu::TextureFormat::RGBA8Uint,
-        wgpu::TextureFormat::RGBA8Sint,
-        wgpu::TextureFormat::BGRA8Unorm,
-        wgpu::TextureFormat::BGRA8UnormSrgb,
-        wgpu::TextureFormat::RGB10A2Unorm,
-        wgpu::TextureFormat::RG11B10Ufloat,
-        wgpu::TextureFormat::RGB9E5Ufloat,
-        wgpu::TextureFormat::RG32Float,
-        wgpu::TextureFormat::RG32Uint,
-        wgpu::TextureFormat::RG32Sint,
-        wgpu::TextureFormat::RGBA16Uint,
-        wgpu::TextureFormat::RGBA16Sint,
-        wgpu::TextureFormat::RGBA16Float,
-        wgpu::TextureFormat::RGBA32Float,
-        wgpu::TextureFormat::RGBA32Uint,
-        wgpu::TextureFormat::RGBA32Sint,
-        wgpu::TextureFormat::Depth16Unorm,
-        wgpu::TextureFormat::Depth32Float,
-        wgpu::TextureFormat::Depth24Plus,
-        wgpu::TextureFormat::Depth24PlusStencil8,
-        wgpu::TextureFormat::Depth24UnormStencil8,
-        wgpu::TextureFormat::Depth32FloatStencil8,
-        wgpu::TextureFormat::Stencil8,
-        wgpu::TextureFormat::BC1RGBAUnorm,
-        wgpu::TextureFormat::BC1RGBAUnormSrgb,
-        wgpu::TextureFormat::BC2RGBAUnorm,
-        wgpu::TextureFormat::BC2RGBAUnormSrgb,
-        wgpu::TextureFormat::BC3RGBAUnorm,
-        wgpu::TextureFormat::BC3RGBAUnormSrgb,
-        wgpu::TextureFormat::BC4RUnorm,
-        wgpu::TextureFormat::BC4RSnorm,
-        wgpu::TextureFormat::BC5RGUnorm,
-        wgpu::TextureFormat::BC5RGSnorm,
-        wgpu::TextureFormat::BC6HRGBUfloat,
-        wgpu::TextureFormat::BC6HRGBFloat,
-        wgpu::TextureFormat::BC7RGBAUnorm,
-        wgpu::TextureFormat::BC7RGBAUnormSrgb,
-        wgpu::TextureFormat::ETC2RGB8Unorm,
-        wgpu::TextureFormat::ETC2RGB8UnormSrgb,
-        wgpu::TextureFormat::ETC2RGB8A1Unorm,
-        wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
-        wgpu::TextureFormat::ETC2RGBA8Unorm,
-        wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
-        wgpu::TextureFormat::EACR11Unorm,
-        wgpu::TextureFormat::EACR11Snorm,
-        wgpu::TextureFormat::EACRG11Unorm,
-        wgpu::TextureFormat::EACRG11Snorm,
-        wgpu::TextureFormat::ASTC4x4Unorm,
-        wgpu::TextureFormat::ASTC4x4UnormSrgb,
-        wgpu::TextureFormat::ASTC5x4Unorm,
-        wgpu::TextureFormat::ASTC5x4UnormSrgb,
-        wgpu::TextureFormat::ASTC5x5Unorm,
-        wgpu::TextureFormat::ASTC5x5UnormSrgb,
-        wgpu::TextureFormat::ASTC6x5Unorm,
-        wgpu::TextureFormat::ASTC6x5UnormSrgb,
-        wgpu::TextureFormat::ASTC6x6Unorm,
-        wgpu::TextureFormat::ASTC6x6UnormSrgb,
-        wgpu::TextureFormat::ASTC8x5Unorm,
-        wgpu::TextureFormat::ASTC8x5UnormSrgb,
-        wgpu::TextureFormat::ASTC8x6Unorm,
-        wgpu::TextureFormat::ASTC8x6UnormSrgb,
-        wgpu::TextureFormat::ASTC8x8Unorm,
-        wgpu::TextureFormat::ASTC8x8UnormSrgb,
-        wgpu::TextureFormat::ASTC10x5Unorm,
-        wgpu::TextureFormat::ASTC10x5UnormSrgb,
-        wgpu::TextureFormat::ASTC10x6Unorm,
-        wgpu::TextureFormat::ASTC10x6UnormSrgb,
-        wgpu::TextureFormat::ASTC10x8Unorm,
-        wgpu::TextureFormat::ASTC10x8UnormSrgb,
-        wgpu::TextureFormat::ASTC10x10Unorm,
-        wgpu::TextureFormat::ASTC10x10UnormSrgb,
-        wgpu::TextureFormat::ASTC12x10Unorm,
-        wgpu::TextureFormat::ASTC12x10UnormSrgb,
-        wgpu::TextureFormat::ASTC12x12Unorm,
-        wgpu::TextureFormat::ASTC12x12UnormSrgb};
+static constexpr std::array<wgpu::TextureFormat, 95> kAllTextureFormats = {
+    wgpu::TextureFormat::R8Unorm,
+    wgpu::TextureFormat::R8Snorm,
+    wgpu::TextureFormat::R8Uint,
+    wgpu::TextureFormat::R8Sint,
+    wgpu::TextureFormat::R16Uint,
+    wgpu::TextureFormat::R16Sint,
+    wgpu::TextureFormat::R16Float,
+    wgpu::TextureFormat::RG8Unorm,
+    wgpu::TextureFormat::RG8Snorm,
+    wgpu::TextureFormat::RG8Uint,
+    wgpu::TextureFormat::RG8Sint,
+    wgpu::TextureFormat::R32Float,
+    wgpu::TextureFormat::R32Uint,
+    wgpu::TextureFormat::R32Sint,
+    wgpu::TextureFormat::RG16Uint,
+    wgpu::TextureFormat::RG16Sint,
+    wgpu::TextureFormat::RG16Float,
+    wgpu::TextureFormat::RGBA8Unorm,
+    wgpu::TextureFormat::RGBA8UnormSrgb,
+    wgpu::TextureFormat::RGBA8Snorm,
+    wgpu::TextureFormat::RGBA8Uint,
+    wgpu::TextureFormat::RGBA8Sint,
+    wgpu::TextureFormat::BGRA8Unorm,
+    wgpu::TextureFormat::BGRA8UnormSrgb,
+    wgpu::TextureFormat::RGB10A2Unorm,
+    wgpu::TextureFormat::RG11B10Ufloat,
+    wgpu::TextureFormat::RGB9E5Ufloat,
+    wgpu::TextureFormat::RG32Float,
+    wgpu::TextureFormat::RG32Uint,
+    wgpu::TextureFormat::RG32Sint,
+    wgpu::TextureFormat::RGBA16Uint,
+    wgpu::TextureFormat::RGBA16Sint,
+    wgpu::TextureFormat::RGBA16Float,
+    wgpu::TextureFormat::RGBA32Float,
+    wgpu::TextureFormat::RGBA32Uint,
+    wgpu::TextureFormat::RGBA32Sint,
+    wgpu::TextureFormat::Depth16Unorm,
+    wgpu::TextureFormat::Depth32Float,
+    wgpu::TextureFormat::Depth24Plus,
+    wgpu::TextureFormat::Depth24PlusStencil8,
+    wgpu::TextureFormat::Depth24UnormStencil8,
+    wgpu::TextureFormat::Depth32FloatStencil8,
+    wgpu::TextureFormat::Stencil8,
+    wgpu::TextureFormat::BC1RGBAUnorm,
+    wgpu::TextureFormat::BC1RGBAUnormSrgb,
+    wgpu::TextureFormat::BC2RGBAUnorm,
+    wgpu::TextureFormat::BC2RGBAUnormSrgb,
+    wgpu::TextureFormat::BC3RGBAUnorm,
+    wgpu::TextureFormat::BC3RGBAUnormSrgb,
+    wgpu::TextureFormat::BC4RUnorm,
+    wgpu::TextureFormat::BC4RSnorm,
+    wgpu::TextureFormat::BC5RGUnorm,
+    wgpu::TextureFormat::BC5RGSnorm,
+    wgpu::TextureFormat::BC6HRGBUfloat,
+    wgpu::TextureFormat::BC6HRGBFloat,
+    wgpu::TextureFormat::BC7RGBAUnorm,
+    wgpu::TextureFormat::BC7RGBAUnormSrgb,
+    wgpu::TextureFormat::ETC2RGB8Unorm,
+    wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+    wgpu::TextureFormat::ETC2RGB8A1Unorm,
+    wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+    wgpu::TextureFormat::ETC2RGBA8Unorm,
+    wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+    wgpu::TextureFormat::EACR11Unorm,
+    wgpu::TextureFormat::EACR11Snorm,
+    wgpu::TextureFormat::EACRG11Unorm,
+    wgpu::TextureFormat::EACRG11Snorm,
+    wgpu::TextureFormat::ASTC4x4Unorm,
+    wgpu::TextureFormat::ASTC4x4UnormSrgb,
+    wgpu::TextureFormat::ASTC5x4Unorm,
+    wgpu::TextureFormat::ASTC5x4UnormSrgb,
+    wgpu::TextureFormat::ASTC5x5Unorm,
+    wgpu::TextureFormat::ASTC5x5UnormSrgb,
+    wgpu::TextureFormat::ASTC6x5Unorm,
+    wgpu::TextureFormat::ASTC6x5UnormSrgb,
+    wgpu::TextureFormat::ASTC6x6Unorm,
+    wgpu::TextureFormat::ASTC6x6UnormSrgb,
+    wgpu::TextureFormat::ASTC8x5Unorm,
+    wgpu::TextureFormat::ASTC8x5UnormSrgb,
+    wgpu::TextureFormat::ASTC8x6Unorm,
+    wgpu::TextureFormat::ASTC8x6UnormSrgb,
+    wgpu::TextureFormat::ASTC8x8Unorm,
+    wgpu::TextureFormat::ASTC8x8UnormSrgb,
+    wgpu::TextureFormat::ASTC10x5Unorm,
+    wgpu::TextureFormat::ASTC10x5UnormSrgb,
+    wgpu::TextureFormat::ASTC10x6Unorm,
+    wgpu::TextureFormat::ASTC10x6UnormSrgb,
+    wgpu::TextureFormat::ASTC10x8Unorm,
+    wgpu::TextureFormat::ASTC10x8UnormSrgb,
+    wgpu::TextureFormat::ASTC10x10Unorm,
+    wgpu::TextureFormat::ASTC10x10UnormSrgb,
+    wgpu::TextureFormat::ASTC12x10Unorm,
+    wgpu::TextureFormat::ASTC12x10UnormSrgb,
+    wgpu::TextureFormat::ASTC12x12Unorm,
+    wgpu::TextureFormat::ASTC12x12UnormSrgb};
 
-    static constexpr std::array<wgpu::TextureFormat, 40> kFormatsInCoreSpec = {
-        wgpu::TextureFormat::R8Unorm,        wgpu::TextureFormat::R8Snorm,
-        wgpu::TextureFormat::R8Uint,         wgpu::TextureFormat::R8Sint,
-        wgpu::TextureFormat::R16Uint,        wgpu::TextureFormat::R16Sint,
-        wgpu::TextureFormat::R16Float,       wgpu::TextureFormat::RG8Unorm,
-        wgpu::TextureFormat::RG8Snorm,       wgpu::TextureFormat::RG8Uint,
-        wgpu::TextureFormat::RG8Sint,        wgpu::TextureFormat::R32Float,
-        wgpu::TextureFormat::R32Uint,        wgpu::TextureFormat::R32Sint,
-        wgpu::TextureFormat::RG16Uint,       wgpu::TextureFormat::RG16Sint,
-        wgpu::TextureFormat::RG16Float,      wgpu::TextureFormat::RGBA8Unorm,
-        wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
-        wgpu::TextureFormat::RGBA8Uint,      wgpu::TextureFormat::RGBA8Sint,
-        wgpu::TextureFormat::BGRA8Unorm,     wgpu::TextureFormat::BGRA8UnormSrgb,
-        wgpu::TextureFormat::RGB10A2Unorm,   wgpu::TextureFormat::RG11B10Ufloat,
-        wgpu::TextureFormat::RGB9E5Ufloat,   wgpu::TextureFormat::RG32Float,
-        wgpu::TextureFormat::RG32Uint,       wgpu::TextureFormat::RG32Sint,
-        wgpu::TextureFormat::RGBA16Uint,     wgpu::TextureFormat::RGBA16Sint,
-        wgpu::TextureFormat::RGBA16Float,    wgpu::TextureFormat::RGBA32Float,
-        wgpu::TextureFormat::RGBA32Uint,     wgpu::TextureFormat::RGBA32Sint,
-        wgpu::TextureFormat::Depth16Unorm,   wgpu::TextureFormat::Depth32Float,
-        wgpu::TextureFormat::Depth24Plus,    wgpu::TextureFormat::Depth24PlusStencil8,
-    };
+static constexpr std::array<wgpu::TextureFormat, 40> kFormatsInCoreSpec = {
+    wgpu::TextureFormat::R8Unorm,        wgpu::TextureFormat::R8Snorm,
+    wgpu::TextureFormat::R8Uint,         wgpu::TextureFormat::R8Sint,
+    wgpu::TextureFormat::R16Uint,        wgpu::TextureFormat::R16Sint,
+    wgpu::TextureFormat::R16Float,       wgpu::TextureFormat::RG8Unorm,
+    wgpu::TextureFormat::RG8Snorm,       wgpu::TextureFormat::RG8Uint,
+    wgpu::TextureFormat::RG8Sint,        wgpu::TextureFormat::R32Float,
+    wgpu::TextureFormat::R32Uint,        wgpu::TextureFormat::R32Sint,
+    wgpu::TextureFormat::RG16Uint,       wgpu::TextureFormat::RG16Sint,
+    wgpu::TextureFormat::RG16Float,      wgpu::TextureFormat::RGBA8Unorm,
+    wgpu::TextureFormat::RGBA8UnormSrgb, wgpu::TextureFormat::RGBA8Snorm,
+    wgpu::TextureFormat::RGBA8Uint,      wgpu::TextureFormat::RGBA8Sint,
+    wgpu::TextureFormat::BGRA8Unorm,     wgpu::TextureFormat::BGRA8UnormSrgb,
+    wgpu::TextureFormat::RGB10A2Unorm,   wgpu::TextureFormat::RG11B10Ufloat,
+    wgpu::TextureFormat::RGB9E5Ufloat,   wgpu::TextureFormat::RG32Float,
+    wgpu::TextureFormat::RG32Uint,       wgpu::TextureFormat::RG32Sint,
+    wgpu::TextureFormat::RGBA16Uint,     wgpu::TextureFormat::RGBA16Sint,
+    wgpu::TextureFormat::RGBA16Float,    wgpu::TextureFormat::RGBA32Float,
+    wgpu::TextureFormat::RGBA32Uint,     wgpu::TextureFormat::RGBA32Sint,
+    wgpu::TextureFormat::Depth16Unorm,   wgpu::TextureFormat::Depth32Float,
+    wgpu::TextureFormat::Depth24Plus,    wgpu::TextureFormat::Depth24PlusStencil8,
+};
 
-    static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
-        wgpu::TextureFormat::BC1RGBAUnorm,  wgpu::TextureFormat::BC1RGBAUnormSrgb,
-        wgpu::TextureFormat::BC2RGBAUnorm,  wgpu::TextureFormat::BC2RGBAUnormSrgb,
-        wgpu::TextureFormat::BC3RGBAUnorm,  wgpu::TextureFormat::BC3RGBAUnormSrgb,
-        wgpu::TextureFormat::BC4RUnorm,     wgpu::TextureFormat::BC4RSnorm,
-        wgpu::TextureFormat::BC5RGUnorm,    wgpu::TextureFormat::BC5RGSnorm,
-        wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
-        wgpu::TextureFormat::BC7RGBAUnorm,  wgpu::TextureFormat::BC7RGBAUnormSrgb};
+static constexpr std::array<wgpu::TextureFormat, 14> kBCFormats = {
+    wgpu::TextureFormat::BC1RGBAUnorm,  wgpu::TextureFormat::BC1RGBAUnormSrgb,
+    wgpu::TextureFormat::BC2RGBAUnorm,  wgpu::TextureFormat::BC2RGBAUnormSrgb,
+    wgpu::TextureFormat::BC3RGBAUnorm,  wgpu::TextureFormat::BC3RGBAUnormSrgb,
+    wgpu::TextureFormat::BC4RUnorm,     wgpu::TextureFormat::BC4RSnorm,
+    wgpu::TextureFormat::BC5RGUnorm,    wgpu::TextureFormat::BC5RGSnorm,
+    wgpu::TextureFormat::BC6HRGBUfloat, wgpu::TextureFormat::BC6HRGBFloat,
+    wgpu::TextureFormat::BC7RGBAUnorm,  wgpu::TextureFormat::BC7RGBAUnormSrgb};
 
-    static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
-        wgpu::TextureFormat::ETC2RGB8Unorm,   wgpu::TextureFormat::ETC2RGB8UnormSrgb,
-        wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
-        wgpu::TextureFormat::ETC2RGBA8Unorm,  wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
-        wgpu::TextureFormat::EACR11Unorm,     wgpu::TextureFormat::EACR11Snorm,
-        wgpu::TextureFormat::EACRG11Unorm,    wgpu::TextureFormat::EACRG11Snorm};
+static constexpr std::array<wgpu::TextureFormat, 10> kETC2Formats = {
+    wgpu::TextureFormat::ETC2RGB8Unorm,   wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+    wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+    wgpu::TextureFormat::ETC2RGBA8Unorm,  wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+    wgpu::TextureFormat::EACR11Unorm,     wgpu::TextureFormat::EACR11Snorm,
+    wgpu::TextureFormat::EACRG11Unorm,    wgpu::TextureFormat::EACRG11Snorm};
 
-    static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
-        wgpu::TextureFormat::ASTC4x4Unorm,   wgpu::TextureFormat::ASTC4x4UnormSrgb,
-        wgpu::TextureFormat::ASTC5x4Unorm,   wgpu::TextureFormat::ASTC5x4UnormSrgb,
-        wgpu::TextureFormat::ASTC5x5Unorm,   wgpu::TextureFormat::ASTC5x5UnormSrgb,
-        wgpu::TextureFormat::ASTC6x5Unorm,   wgpu::TextureFormat::ASTC6x5UnormSrgb,
-        wgpu::TextureFormat::ASTC6x6Unorm,   wgpu::TextureFormat::ASTC6x6UnormSrgb,
-        wgpu::TextureFormat::ASTC8x5Unorm,   wgpu::TextureFormat::ASTC8x5UnormSrgb,
-        wgpu::TextureFormat::ASTC8x6Unorm,   wgpu::TextureFormat::ASTC8x6UnormSrgb,
-        wgpu::TextureFormat::ASTC8x8Unorm,   wgpu::TextureFormat::ASTC8x8UnormSrgb,
-        wgpu::TextureFormat::ASTC10x5Unorm,  wgpu::TextureFormat::ASTC10x5UnormSrgb,
-        wgpu::TextureFormat::ASTC10x6Unorm,  wgpu::TextureFormat::ASTC10x6UnormSrgb,
-        wgpu::TextureFormat::ASTC10x8Unorm,  wgpu::TextureFormat::ASTC10x8UnormSrgb,
-        wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
-        wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
-        wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
-    };
+static constexpr std::array<wgpu::TextureFormat, 28> kASTCFormats = {
+    wgpu::TextureFormat::ASTC4x4Unorm,   wgpu::TextureFormat::ASTC4x4UnormSrgb,
+    wgpu::TextureFormat::ASTC5x4Unorm,   wgpu::TextureFormat::ASTC5x4UnormSrgb,
+    wgpu::TextureFormat::ASTC5x5Unorm,   wgpu::TextureFormat::ASTC5x5UnormSrgb,
+    wgpu::TextureFormat::ASTC6x5Unorm,   wgpu::TextureFormat::ASTC6x5UnormSrgb,
+    wgpu::TextureFormat::ASTC6x6Unorm,   wgpu::TextureFormat::ASTC6x6UnormSrgb,
+    wgpu::TextureFormat::ASTC8x5Unorm,   wgpu::TextureFormat::ASTC8x5UnormSrgb,
+    wgpu::TextureFormat::ASTC8x6Unorm,   wgpu::TextureFormat::ASTC8x6UnormSrgb,
+    wgpu::TextureFormat::ASTC8x8Unorm,   wgpu::TextureFormat::ASTC8x8UnormSrgb,
+    wgpu::TextureFormat::ASTC10x5Unorm,  wgpu::TextureFormat::ASTC10x5UnormSrgb,
+    wgpu::TextureFormat::ASTC10x6Unorm,  wgpu::TextureFormat::ASTC10x6UnormSrgb,
+    wgpu::TextureFormat::ASTC10x8Unorm,  wgpu::TextureFormat::ASTC10x8UnormSrgb,
+    wgpu::TextureFormat::ASTC10x10Unorm, wgpu::TextureFormat::ASTC10x10UnormSrgb,
+    wgpu::TextureFormat::ASTC12x10Unorm, wgpu::TextureFormat::ASTC12x10UnormSrgb,
+    wgpu::TextureFormat::ASTC12x12Unorm, wgpu::TextureFormat::ASTC12x12UnormSrgb,
+};
 
-    static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
-        wgpu::TextureFormat::BC1RGBAUnorm,    wgpu::TextureFormat::BC1RGBAUnormSrgb,
-        wgpu::TextureFormat::BC2RGBAUnorm,    wgpu::TextureFormat::BC2RGBAUnormSrgb,
-        wgpu::TextureFormat::BC3RGBAUnorm,    wgpu::TextureFormat::BC3RGBAUnormSrgb,
-        wgpu::TextureFormat::BC4RUnorm,       wgpu::TextureFormat::BC4RSnorm,
-        wgpu::TextureFormat::BC5RGUnorm,      wgpu::TextureFormat::BC5RGSnorm,
-        wgpu::TextureFormat::BC6HRGBUfloat,   wgpu::TextureFormat::BC6HRGBFloat,
-        wgpu::TextureFormat::BC7RGBAUnorm,    wgpu::TextureFormat::BC7RGBAUnormSrgb,
-        wgpu::TextureFormat::ETC2RGB8Unorm,   wgpu::TextureFormat::ETC2RGB8UnormSrgb,
-        wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
-        wgpu::TextureFormat::ETC2RGBA8Unorm,  wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
-        wgpu::TextureFormat::EACR11Unorm,     wgpu::TextureFormat::EACR11Snorm,
-        wgpu::TextureFormat::EACRG11Unorm,    wgpu::TextureFormat::EACRG11Snorm,
-        wgpu::TextureFormat::ASTC4x4Unorm,    wgpu::TextureFormat::ASTC4x4UnormSrgb,
-        wgpu::TextureFormat::ASTC5x4Unorm,    wgpu::TextureFormat::ASTC5x4UnormSrgb,
-        wgpu::TextureFormat::ASTC5x5Unorm,    wgpu::TextureFormat::ASTC5x5UnormSrgb,
-        wgpu::TextureFormat::ASTC6x5Unorm,    wgpu::TextureFormat::ASTC6x5UnormSrgb,
-        wgpu::TextureFormat::ASTC6x6Unorm,    wgpu::TextureFormat::ASTC6x6UnormSrgb,
-        wgpu::TextureFormat::ASTC8x5Unorm,    wgpu::TextureFormat::ASTC8x5UnormSrgb,
-        wgpu::TextureFormat::ASTC8x6Unorm,    wgpu::TextureFormat::ASTC8x6UnormSrgb,
-        wgpu::TextureFormat::ASTC8x8Unorm,    wgpu::TextureFormat::ASTC8x8UnormSrgb,
-        wgpu::TextureFormat::ASTC10x5Unorm,   wgpu::TextureFormat::ASTC10x5UnormSrgb,
-        wgpu::TextureFormat::ASTC10x6Unorm,   wgpu::TextureFormat::ASTC10x6UnormSrgb,
-        wgpu::TextureFormat::ASTC10x8Unorm,   wgpu::TextureFormat::ASTC10x8UnormSrgb,
-        wgpu::TextureFormat::ASTC10x10Unorm,  wgpu::TextureFormat::ASTC10x10UnormSrgb,
-        wgpu::TextureFormat::ASTC12x10Unorm,  wgpu::TextureFormat::ASTC12x10UnormSrgb,
-        wgpu::TextureFormat::ASTC12x12Unorm,  wgpu::TextureFormat::ASTC12x12UnormSrgb};
-    static_assert(kCompressedFormats.size() ==
-                      kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
-                  "Number of compressed format must equal number of BC, ETC2, and ASTC formats.");
+static constexpr std::array<wgpu::TextureFormat, 52> kCompressedFormats = {
+    wgpu::TextureFormat::BC1RGBAUnorm,    wgpu::TextureFormat::BC1RGBAUnormSrgb,
+    wgpu::TextureFormat::BC2RGBAUnorm,    wgpu::TextureFormat::BC2RGBAUnormSrgb,
+    wgpu::TextureFormat::BC3RGBAUnorm,    wgpu::TextureFormat::BC3RGBAUnormSrgb,
+    wgpu::TextureFormat::BC4RUnorm,       wgpu::TextureFormat::BC4RSnorm,
+    wgpu::TextureFormat::BC5RGUnorm,      wgpu::TextureFormat::BC5RGSnorm,
+    wgpu::TextureFormat::BC6HRGBUfloat,   wgpu::TextureFormat::BC6HRGBFloat,
+    wgpu::TextureFormat::BC7RGBAUnorm,    wgpu::TextureFormat::BC7RGBAUnormSrgb,
+    wgpu::TextureFormat::ETC2RGB8Unorm,   wgpu::TextureFormat::ETC2RGB8UnormSrgb,
+    wgpu::TextureFormat::ETC2RGB8A1Unorm, wgpu::TextureFormat::ETC2RGB8A1UnormSrgb,
+    wgpu::TextureFormat::ETC2RGBA8Unorm,  wgpu::TextureFormat::ETC2RGBA8UnormSrgb,
+    wgpu::TextureFormat::EACR11Unorm,     wgpu::TextureFormat::EACR11Snorm,
+    wgpu::TextureFormat::EACRG11Unorm,    wgpu::TextureFormat::EACRG11Snorm,
+    wgpu::TextureFormat::ASTC4x4Unorm,    wgpu::TextureFormat::ASTC4x4UnormSrgb,
+    wgpu::TextureFormat::ASTC5x4Unorm,    wgpu::TextureFormat::ASTC5x4UnormSrgb,
+    wgpu::TextureFormat::ASTC5x5Unorm,    wgpu::TextureFormat::ASTC5x5UnormSrgb,
+    wgpu::TextureFormat::ASTC6x5Unorm,    wgpu::TextureFormat::ASTC6x5UnormSrgb,
+    wgpu::TextureFormat::ASTC6x6Unorm,    wgpu::TextureFormat::ASTC6x6UnormSrgb,
+    wgpu::TextureFormat::ASTC8x5Unorm,    wgpu::TextureFormat::ASTC8x5UnormSrgb,
+    wgpu::TextureFormat::ASTC8x6Unorm,    wgpu::TextureFormat::ASTC8x6UnormSrgb,
+    wgpu::TextureFormat::ASTC8x8Unorm,    wgpu::TextureFormat::ASTC8x8UnormSrgb,
+    wgpu::TextureFormat::ASTC10x5Unorm,   wgpu::TextureFormat::ASTC10x5UnormSrgb,
+    wgpu::TextureFormat::ASTC10x6Unorm,   wgpu::TextureFormat::ASTC10x6UnormSrgb,
+    wgpu::TextureFormat::ASTC10x8Unorm,   wgpu::TextureFormat::ASTC10x8UnormSrgb,
+    wgpu::TextureFormat::ASTC10x10Unorm,  wgpu::TextureFormat::ASTC10x10UnormSrgb,
+    wgpu::TextureFormat::ASTC12x10Unorm,  wgpu::TextureFormat::ASTC12x10UnormSrgb,
+    wgpu::TextureFormat::ASTC12x12Unorm,  wgpu::TextureFormat::ASTC12x12UnormSrgb};
+static_assert(kCompressedFormats.size() ==
+                  kBCFormats.size() + kETC2Formats.size() + kASTCFormats.size(),
+              "Number of compressed format must equal number of BC, ETC2, and ASTC formats.");
 
-    static constexpr std::array<wgpu::TextureFormat, 6> kDepthFormats = {
-        wgpu::TextureFormat::Depth16Unorm,         wgpu::TextureFormat::Depth32Float,
-        wgpu::TextureFormat::Depth24Plus,          wgpu::TextureFormat::Depth24PlusStencil8,
-        wgpu::TextureFormat::Depth24UnormStencil8, wgpu::TextureFormat::Depth32FloatStencil8,
-    };
-    static constexpr std::array<wgpu::TextureFormat, 4> kStencilFormats = {
-        wgpu::TextureFormat::Depth24PlusStencil8,
-        wgpu::TextureFormat::Depth24UnormStencil8,
-        wgpu::TextureFormat::Depth32FloatStencil8,
-        wgpu::TextureFormat::Stencil8,
-    };
-    static constexpr std::array<wgpu::TextureFormat, 3> kDepthAndStencilFormats = {
-        wgpu::TextureFormat::Depth24PlusStencil8,
-        wgpu::TextureFormat::Depth24UnormStencil8,
-        wgpu::TextureFormat::Depth32FloatStencil8,
-    };
+static constexpr std::array<wgpu::TextureFormat, 6> kDepthFormats = {
+    wgpu::TextureFormat::Depth16Unorm,         wgpu::TextureFormat::Depth32Float,
+    wgpu::TextureFormat::Depth24Plus,          wgpu::TextureFormat::Depth24PlusStencil8,
+    wgpu::TextureFormat::Depth24UnormStencil8, wgpu::TextureFormat::Depth32FloatStencil8,
+};
+static constexpr std::array<wgpu::TextureFormat, 4> kStencilFormats = {
+    wgpu::TextureFormat::Depth24PlusStencil8,
+    wgpu::TextureFormat::Depth24UnormStencil8,
+    wgpu::TextureFormat::Depth32FloatStencil8,
+    wgpu::TextureFormat::Stencil8,
+};
+static constexpr std::array<wgpu::TextureFormat, 3> kDepthAndStencilFormats = {
+    wgpu::TextureFormat::Depth24PlusStencil8,
+    wgpu::TextureFormat::Depth24UnormStencil8,
+    wgpu::TextureFormat::Depth32FloatStencil8,
+};
 
-    bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
+bool TextureFormatSupportsStorageTexture(wgpu::TextureFormat format);
 
-    bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
-    bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
-    bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
+bool IsBCTextureFormat(wgpu::TextureFormat textureFormat);
+bool IsETC2TextureFormat(wgpu::TextureFormat textureFormat);
+bool IsASTCTextureFormat(wgpu::TextureFormat textureFormat);
 
-    bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat);
-    bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat);
+bool IsDepthOnlyFormat(wgpu::TextureFormat textureFormat);
+bool IsStencilOnlyFormat(wgpu::TextureFormat textureFormat);
 
-    bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat);
-    bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat);
-    bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat);
+bool TextureFormatSupportsMultisampling(wgpu::TextureFormat textureFormat);
+bool TextureFormatSupportsResolveTarget(wgpu::TextureFormat textureFormat);
+bool TextureFormatSupportsRendering(wgpu::TextureFormat textureFormat);
 
-    uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
-    uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
-    uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
+uint32_t GetTexelBlockSizeInBytes(wgpu::TextureFormat textureFormat);
+uint32_t GetTextureFormatBlockWidth(wgpu::TextureFormat textureFormat);
+uint32_t GetTextureFormatBlockHeight(wgpu::TextureFormat textureFormat);
 
-    const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat);
-    const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
-    uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat);
+const char* GetWGSLColorTextureComponentType(wgpu::TextureFormat textureFormat);
+const char* GetWGSLImageFormatQualifier(wgpu::TextureFormat textureFormat);
+uint32_t GetWGSLRenderableColorTextureComponentCount(wgpu::TextureFormat textureFormat);
 
-    wgpu::TextureDimension ViewDimensionToTextureDimension(
-        const wgpu::TextureViewDimension dimension);
+wgpu::TextureDimension ViewDimensionToTextureDimension(const wgpu::TextureViewDimension dimension);
 }  // namespace utils
 
 #endif  // SRC_DAWN_UTILS_TEXTUREUTILS_H_
diff --git a/src/dawn/utils/Timer.h b/src/dawn/utils/Timer.h
index cc61343..a7438e7 100644
--- a/src/dawn/utils/Timer.h
+++ b/src/dawn/utils/Timer.h
@@ -17,24 +17,23 @@
 
 namespace utils {
 
-    class Timer {
-      public:
-        virtual ~Timer() {
-        }
+class Timer {
+  public:
+    virtual ~Timer() {}
 
-        // Timer functionality: Use start() and stop() to record the duration and use
-        // getElapsedTime() to query that duration.  If getElapsedTime() is called in between, it
-        // will report the elapsed time since start().
-        virtual void Start() = 0;
-        virtual void Stop() = 0;
-        virtual double GetElapsedTime() const = 0;
+    // Timer functionality: Use start() and stop() to record the duration and use
+    // getElapsedTime() to query that duration.  If getElapsedTime() is called in between, it
+    // will report the elapsed time since start().
+    virtual void Start() = 0;
+    virtual void Stop() = 0;
+    virtual double GetElapsedTime() const = 0;
 
-        // Timestamp functionality: Use getAbsoluteTime() to get an absolute time with an unknown
-        // origin. This time moves forward regardless of start()/stop().
-        virtual double GetAbsoluteTime() = 0;
-    };
+    // Timestamp functionality: Use getAbsoluteTime() to get an absolute time with an unknown
+    // origin. This time moves forward regardless of start()/stop().
+    virtual double GetAbsoluteTime() = 0;
+};
 
-    Timer* CreateTimer();
+Timer* CreateTimer();
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/VulkanBinding.cpp b/src/dawn/utils/VulkanBinding.cpp
index ec2323c..1736de8 100644
--- a/src/dawn/utils/VulkanBinding.cpp
+++ b/src/dawn/utils/VulkanBinding.cpp
@@ -24,34 +24,33 @@
 
 namespace utils {
 
-    class VulkanBinding : public BackendBinding {
-      public:
-        VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {
-        }
+class VulkanBinding : public BackendBinding {
+  public:
+    VulkanBinding(GLFWwindow* window, WGPUDevice device) : BackendBinding(window, device) {}
 
-        uint64_t GetSwapChainImplementation() override {
-            if (mSwapchainImpl.userData == nullptr) {
-                VkSurfaceKHR surface = VK_NULL_HANDLE;
-                if (glfwCreateWindowSurface(dawn::native::vulkan::GetInstance(mDevice), mWindow,
-                                            nullptr, &surface) != VK_SUCCESS) {
-                    ASSERT(false);
-                }
-
-                mSwapchainImpl = dawn::native::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
+    uint64_t GetSwapChainImplementation() override {
+        if (mSwapchainImpl.userData == nullptr) {
+            VkSurfaceKHR surface = VK_NULL_HANDLE;
+            if (glfwCreateWindowSurface(dawn::native::vulkan::GetInstance(mDevice), mWindow,
+                                        nullptr, &surface) != VK_SUCCESS) {
+                ASSERT(false);
             }
-            return reinterpret_cast<uint64_t>(&mSwapchainImpl);
-        }
-        WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
-            ASSERT(mSwapchainImpl.userData != nullptr);
-            return dawn::native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
-        }
 
-      private:
-        DawnSwapChainImplementation mSwapchainImpl = {};
-    };
-
-    BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
-        return new VulkanBinding(window, device);
+            mSwapchainImpl = dawn::native::vulkan::CreateNativeSwapChainImpl(mDevice, surface);
+        }
+        return reinterpret_cast<uint64_t>(&mSwapchainImpl);
     }
+    WGPUTextureFormat GetPreferredSwapChainTextureFormat() override {
+        ASSERT(mSwapchainImpl.userData != nullptr);
+        return dawn::native::vulkan::GetNativeSwapChainPreferredFormat(&mSwapchainImpl);
+    }
+
+  private:
+    DawnSwapChainImplementation mSwapchainImpl = {};
+};
+
+BackendBinding* CreateVulkanBinding(GLFWwindow* window, WGPUDevice device) {
+    return new VulkanBinding(window, device);
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/WGPUHelpers.cpp b/src/dawn/utils/WGPUHelpers.cpp
index 52e070a..e2126f0 100644
--- a/src/dawn/utils/WGPUHelpers.cpp
+++ b/src/dawn/utils/WGPUHelpers.cpp
@@ -27,366 +27,359 @@
 #include "spirv-tools/optimizer.hpp"
 
 namespace utils {
-    wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
-        // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
-        // aren't RAII, we don't return directly on success and instead always go through the code
-        // path that destroys the SPIRV-Tools objects.
-        wgpu::ShaderModule result = nullptr;
+wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source) {
+    // Use SPIRV-Tools's C API to assemble the SPIR-V assembly text to binary. Because the types
+    // aren't RAII, we don't return directly on success and instead always go through the code
+    // path that destroys the SPIRV-Tools objects.
+    wgpu::ShaderModule result = nullptr;
 
-        spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
-        ASSERT(context != nullptr);
+    spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
+    ASSERT(context != nullptr);
 
-        spv_binary spirv = nullptr;
-        spv_diagnostic diagnostic = nullptr;
-        if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
-            ASSERT(spirv != nullptr);
-            ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
+    spv_binary spirv = nullptr;
+    spv_diagnostic diagnostic = nullptr;
+    if (spvTextToBinary(context, source, strlen(source), &spirv, &diagnostic) == SPV_SUCCESS) {
+        ASSERT(spirv != nullptr);
+        ASSERT(spirv->wordCount <= std::numeric_limits<uint32_t>::max());
 
-            wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
-            spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
-            spirvDesc.code = spirv->code;
+        wgpu::ShaderModuleSPIRVDescriptor spirvDesc;
+        spirvDesc.codeSize = static_cast<uint32_t>(spirv->wordCount);
+        spirvDesc.code = spirv->code;
 
-            wgpu::ShaderModuleDescriptor descriptor;
-            descriptor.nextInChain = &spirvDesc;
-            result = device.CreateShaderModule(&descriptor);
-        } else {
-            ASSERT(diagnostic != nullptr);
-            dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
-                               << diagnostic->position.line + 1 << ":"
-                               << diagnostic->position.column + 1 << ": " << diagnostic->error;
-        }
-
-        spvDiagnosticDestroy(diagnostic);
-        spvBinaryDestroy(spirv);
-        spvContextDestroy(context);
-
-        return result;
-    }
-
-    wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
-        wgpu::ShaderModuleWGSLDescriptor wgslDesc;
-        wgslDesc.source = source;
         wgpu::ShaderModuleDescriptor descriptor;
-        descriptor.nextInChain = &wgslDesc;
-        return device.CreateShaderModule(&descriptor);
+        descriptor.nextInChain = &spirvDesc;
+        result = device.CreateShaderModule(&descriptor);
+    } else {
+        ASSERT(diagnostic != nullptr);
+        dawn::WarningLog() << "CreateShaderModuleFromASM SPIRV assembly error:"
+                           << diagnostic->position.line + 1 << ":"
+                           << diagnostic->position.column + 1 << ": " << diagnostic->error;
     }
 
-    wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
-                                      const void* data,
-                                      uint64_t size,
-                                      wgpu::BufferUsage usage) {
-        wgpu::BufferDescriptor descriptor;
-        descriptor.size = size;
-        descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
-        wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+    spvDiagnosticDestroy(diagnostic);
+    spvBinaryDestroy(spirv);
+    spvContextDestroy(context);
 
-        device.GetQueue().WriteBuffer(buffer, 0, data, size);
-        return buffer;
+    return result;
+}
+
+wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source) {
+    wgpu::ShaderModuleWGSLDescriptor wgslDesc;
+    wgslDesc.source = source;
+    wgpu::ShaderModuleDescriptor descriptor;
+    descriptor.nextInChain = &wgslDesc;
+    return device.CreateShaderModule(&descriptor);
+}
+
+wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+                                  const void* data,
+                                  uint64_t size,
+                                  wgpu::BufferUsage usage) {
+    wgpu::BufferDescriptor descriptor;
+    descriptor.size = size;
+    descriptor.usage = usage | wgpu::BufferUsage::CopyDst;
+    wgpu::Buffer buffer = device.CreateBuffer(&descriptor);
+
+    device.GetQueue().WriteBuffer(buffer, 0, data, size);
+    return buffer;
+}
+
+ComboRenderPassDescriptor::ComboRenderPassDescriptor(
+    std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+    wgpu::TextureView depthStencil) {
+    for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
+        cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
+        cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
+        cColorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
     }
 
-    ComboRenderPassDescriptor::ComboRenderPassDescriptor(
-        std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
-        wgpu::TextureView depthStencil) {
-        for (uint32_t i = 0; i < kMaxColorAttachments; ++i) {
-            cColorAttachments[i].loadOp = wgpu::LoadOp::Clear;
-            cColorAttachments[i].storeOp = wgpu::StoreOp::Store;
-            cColorAttachments[i].clearValue = {0.0f, 0.0f, 0.0f, 0.0f};
+    cDepthStencilAttachmentInfo.depthClearValue = 1.0f;
+    cDepthStencilAttachmentInfo.stencilClearValue = 0;
+    cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
+    cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
+    cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
+    cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+
+    colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
+    uint32_t colorAttachmentIndex = 0;
+    for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
+        if (colorAttachment.Get() != nullptr) {
+            cColorAttachments[colorAttachmentIndex].view = colorAttachment;
         }
+        ++colorAttachmentIndex;
+    }
+    colorAttachments = cColorAttachments.data();
 
-        cDepthStencilAttachmentInfo.depthClearValue = 1.0f;
-        cDepthStencilAttachmentInfo.stencilClearValue = 0;
-        cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Clear;
-        cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Store;
-        cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Clear;
-        cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Store;
+    if (depthStencil.Get() != nullptr) {
+        cDepthStencilAttachmentInfo.view = depthStencil;
+        depthStencilAttachment = &cDepthStencilAttachmentInfo;
+    } else {
+        depthStencilAttachment = nullptr;
+    }
+}
 
-        colorAttachmentCount = static_cast<uint32_t>(colorAttachmentInfo.size());
-        uint32_t colorAttachmentIndex = 0;
-        for (const wgpu::TextureView& colorAttachment : colorAttachmentInfo) {
-            if (colorAttachment.Get() != nullptr) {
-                cColorAttachments[colorAttachmentIndex].view = colorAttachment;
-            }
-            ++colorAttachmentIndex;
-        }
-        colorAttachments = cColorAttachments.data();
+ComboRenderPassDescriptor::ComboRenderPassDescriptor(const ComboRenderPassDescriptor& other) {
+    *this = other;
+}
 
-        if (depthStencil.Get() != nullptr) {
-            cDepthStencilAttachmentInfo.view = depthStencil;
-            depthStencilAttachment = &cDepthStencilAttachmentInfo;
-        } else {
-            depthStencilAttachment = nullptr;
-        }
+const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
+    const ComboRenderPassDescriptor& otherRenderPass) {
+    cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
+    cColorAttachments = otherRenderPass.cColorAttachments;
+    colorAttachmentCount = otherRenderPass.colorAttachmentCount;
+
+    colorAttachments = cColorAttachments.data();
+
+    if (otherRenderPass.depthStencilAttachment != nullptr) {
+        // Assign desc.depthStencilAttachment to this->depthStencilAttachmentInfo;
+        depthStencilAttachment = &cDepthStencilAttachmentInfo;
+    } else {
+        depthStencilAttachment = nullptr;
     }
 
-    ComboRenderPassDescriptor::ComboRenderPassDescriptor(const ComboRenderPassDescriptor& other) {
-        *this = other;
+    return *this;
+}
+void ComboRenderPassDescriptor::UnsetDepthStencilLoadStoreOpsForFormat(wgpu::TextureFormat format) {
+    switch (format) {
+        case wgpu::TextureFormat::Depth24Plus:
+        case wgpu::TextureFormat::Depth32Float:
+        case wgpu::TextureFormat::Depth16Unorm:
+            cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
+            cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
+            break;
+        case wgpu::TextureFormat::Stencil8:
+            cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
+            cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
+            break;
+        default:
+            break;
     }
+}
 
-    const ComboRenderPassDescriptor& ComboRenderPassDescriptor::operator=(
-        const ComboRenderPassDescriptor& otherRenderPass) {
-        cDepthStencilAttachmentInfo = otherRenderPass.cDepthStencilAttachmentInfo;
-        cColorAttachments = otherRenderPass.cColorAttachments;
-        colorAttachmentCount = otherRenderPass.colorAttachmentCount;
+BasicRenderPass::BasicRenderPass()
+    : width(0),
+      height(0),
+      color(nullptr),
+      colorFormat(wgpu::TextureFormat::RGBA8Unorm),
+      renderPassInfo({}) {}
 
-        colorAttachments = cColorAttachments.data();
+BasicRenderPass::BasicRenderPass(uint32_t texWidth,
+                                 uint32_t texHeight,
+                                 wgpu::Texture colorAttachment,
+                                 wgpu::TextureFormat textureFormat)
+    : width(texWidth),
+      height(texHeight),
+      color(colorAttachment),
+      colorFormat(textureFormat),
+      renderPassInfo({colorAttachment.CreateView()}) {}
 
-        if (otherRenderPass.depthStencilAttachment != nullptr) {
-            // Assign desc.depthStencilAttachment to this->depthStencilAttachmentInfo;
-            depthStencilAttachment = &cDepthStencilAttachmentInfo;
-        } else {
-            depthStencilAttachment = nullptr;
-        }
+BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
+                                      uint32_t width,
+                                      uint32_t height,
+                                      wgpu::TextureFormat format) {
+    DAWN_ASSERT(width > 0 && height > 0);
 
-        return *this;
-    }
-    void ComboRenderPassDescriptor::UnsetDepthStencilLoadStoreOpsForFormat(
-        wgpu::TextureFormat format) {
-        switch (format) {
-            case wgpu::TextureFormat::Depth24Plus:
-            case wgpu::TextureFormat::Depth32Float:
-            case wgpu::TextureFormat::Depth16Unorm:
-                cDepthStencilAttachmentInfo.stencilLoadOp = wgpu::LoadOp::Undefined;
-                cDepthStencilAttachmentInfo.stencilStoreOp = wgpu::StoreOp::Undefined;
-                break;
-            case wgpu::TextureFormat::Stencil8:
-                cDepthStencilAttachmentInfo.depthLoadOp = wgpu::LoadOp::Undefined;
-                cDepthStencilAttachmentInfo.depthStoreOp = wgpu::StoreOp::Undefined;
-                break;
-            default:
-                break;
-        }
-    }
+    wgpu::TextureDescriptor descriptor;
+    descriptor.dimension = wgpu::TextureDimension::e2D;
+    descriptor.size.width = width;
+    descriptor.size.height = height;
+    descriptor.size.depthOrArrayLayers = 1;
+    descriptor.sampleCount = 1;
+    descriptor.format = format;
+    descriptor.mipLevelCount = 1;
+    descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
+    wgpu::Texture color = device.CreateTexture(&descriptor);
 
-    BasicRenderPass::BasicRenderPass()
-        : width(0),
-          height(0),
-          color(nullptr),
-          colorFormat(wgpu::TextureFormat::RGBA8Unorm),
-          renderPassInfo({}) {
-    }
+    return BasicRenderPass(width, height, color);
+}
 
-    BasicRenderPass::BasicRenderPass(uint32_t texWidth,
-                                     uint32_t texHeight,
-                                     wgpu::Texture colorAttachment,
-                                     wgpu::TextureFormat textureFormat)
-        : width(texWidth),
-          height(texHeight),
-          color(colorAttachment),
-          colorFormat(textureFormat),
-          renderPassInfo({colorAttachment.CreateView()}) {
-    }
+wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+                                            uint64_t offset,
+                                            uint32_t bytesPerRow,
+                                            uint32_t rowsPerImage) {
+    wgpu::ImageCopyBuffer imageCopyBuffer = {};
+    imageCopyBuffer.buffer = buffer;
+    imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
 
-    BasicRenderPass CreateBasicRenderPass(const wgpu::Device& device,
-                                          uint32_t width,
-                                          uint32_t height,
-                                          wgpu::TextureFormat format) {
-        DAWN_ASSERT(width > 0 && height > 0);
+    return imageCopyBuffer;
+}
 
-        wgpu::TextureDescriptor descriptor;
-        descriptor.dimension = wgpu::TextureDimension::e2D;
-        descriptor.size.width = width;
-        descriptor.size.height = height;
-        descriptor.size.depthOrArrayLayers = 1;
-        descriptor.sampleCount = 1;
-        descriptor.format = format;
-        descriptor.mipLevelCount = 1;
-        descriptor.usage = wgpu::TextureUsage::RenderAttachment | wgpu::TextureUsage::CopySrc;
-        wgpu::Texture color = device.CreateTexture(&descriptor);
+wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
+                                              uint32_t mipLevel,
+                                              wgpu::Origin3D origin,
+                                              wgpu::TextureAspect aspect) {
+    wgpu::ImageCopyTexture imageCopyTexture;
+    imageCopyTexture.texture = texture;
+    imageCopyTexture.mipLevel = mipLevel;
+    imageCopyTexture.origin = origin;
+    imageCopyTexture.aspect = aspect;
 
-        return BasicRenderPass(width, height, color);
-    }
+    return imageCopyTexture;
+}
 
-    wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
-                                                uint64_t offset,
+wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
                                                 uint32_t bytesPerRow,
                                                 uint32_t rowsPerImage) {
-        wgpu::ImageCopyBuffer imageCopyBuffer = {};
-        imageCopyBuffer.buffer = buffer;
-        imageCopyBuffer.layout = CreateTextureDataLayout(offset, bytesPerRow, rowsPerImage);
+    wgpu::TextureDataLayout textureDataLayout;
+    textureDataLayout.offset = offset;
+    textureDataLayout.bytesPerRow = bytesPerRow;
+    textureDataLayout.rowsPerImage = rowsPerImage;
 
-        return imageCopyBuffer;
+    return textureDataLayout;
+}
+
+wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+                                             const wgpu::BindGroupLayout* bindGroupLayout) {
+    wgpu::PipelineLayoutDescriptor descriptor;
+    if (bindGroupLayout != nullptr) {
+        descriptor.bindGroupLayoutCount = 1;
+        descriptor.bindGroupLayouts = bindGroupLayout;
+    } else {
+        descriptor.bindGroupLayoutCount = 0;
+        descriptor.bindGroupLayouts = nullptr;
+    }
+    return device.CreatePipelineLayout(&descriptor);
+}
+
+wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+                                        std::vector<wgpu::BindGroupLayout> bgls) {
+    wgpu::PipelineLayoutDescriptor descriptor;
+    descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
+    descriptor.bindGroupLayouts = bgls.data();
+    return device.CreatePipelineLayout(&descriptor);
+}
+
+wgpu::BindGroupLayout MakeBindGroupLayout(
+    const wgpu::Device& device,
+    std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
+    std::vector<wgpu::BindGroupLayoutEntry> entries;
+    for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
+        entries.push_back(entry);
     }
 
-    wgpu::ImageCopyTexture CreateImageCopyTexture(wgpu::Texture texture,
-                                                  uint32_t mipLevel,
-                                                  wgpu::Origin3D origin,
-                                                  wgpu::TextureAspect aspect) {
-        wgpu::ImageCopyTexture imageCopyTexture;
-        imageCopyTexture.texture = texture;
-        imageCopyTexture.mipLevel = mipLevel;
-        imageCopyTexture.origin = origin;
-        imageCopyTexture.aspect = aspect;
+    wgpu::BindGroupLayoutDescriptor descriptor;
+    descriptor.entryCount = static_cast<uint32_t>(entries.size());
+    descriptor.entries = entries.data();
+    return device.CreateBindGroupLayout(&descriptor);
+}
 
-        return imageCopyTexture;
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::BufferBindingType bufferType,
+    bool bufferHasDynamicOffset,
+    uint64_t bufferMinBindingSize) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    buffer.type = bufferType;
+    buffer.hasDynamicOffset = bufferHasDynamicOffset;
+    buffer.minBindingSize = bufferMinBindingSize;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::SamplerBindingType samplerType) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    sampler.type = samplerType;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::TextureSampleType textureSampleType,
+    wgpu::TextureViewDimension textureViewDimension,
+    bool textureMultisampled) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    texture.sampleType = textureSampleType;
+    texture.viewDimension = textureViewDimension;
+    texture.multisampled = textureMultisampled;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::StorageTextureAccess storageTextureAccess,
+    wgpu::TextureFormat format,
+    wgpu::TextureViewDimension textureViewDimension) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    storageTexture.access = storageTextureAccess;
+    storageTexture.format = format;
+    storageTexture.viewDimension = textureViewDimension;
+}
+
+// ExternalTextureBindingLayout never contains data, so just make one that can be reused instead
+// of declaring a new one every time it's needed.
+wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout = {};
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    uint32_t entryBinding,
+    wgpu::ShaderStage entryVisibility,
+    wgpu::ExternalTextureBindingLayout* bindingLayout) {
+    binding = entryBinding;
+    visibility = entryVisibility;
+    nextInChain = bindingLayout;
+}
+
+BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
+    const wgpu::BindGroupLayoutEntry& entry)
+    : wgpu::BindGroupLayoutEntry(entry) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                         const wgpu::Sampler& sampler)
+    : binding(binding), sampler(sampler) {}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                         const wgpu::TextureView& textureView)
+    : binding(binding), textureView(textureView) {}
+
+BindingInitializationHelper::BindingInitializationHelper(
+    uint32_t binding,
+    const wgpu::ExternalTexture& externalTexture)
+    : binding(binding) {
+    externalTextureBindingEntry.externalTexture = externalTexture;
+}
+
+BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
+                                                         const wgpu::Buffer& buffer,
+                                                         uint64_t offset,
+                                                         uint64_t size)
+    : binding(binding), buffer(buffer), offset(offset), size(size) {}
+
+wgpu::BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
+    wgpu::BindGroupEntry result;
+
+    result.binding = binding;
+    result.sampler = sampler;
+    result.textureView = textureView;
+    result.buffer = buffer;
+    result.offset = offset;
+    result.size = size;
+    if (externalTextureBindingEntry.externalTexture != nullptr) {
+        result.nextInChain = &externalTextureBindingEntry;
     }
 
-    wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
-                                                    uint32_t bytesPerRow,
-                                                    uint32_t rowsPerImage) {
-        wgpu::TextureDataLayout textureDataLayout;
-        textureDataLayout.offset = offset;
-        textureDataLayout.bytesPerRow = bytesPerRow;
-        textureDataLayout.rowsPerImage = rowsPerImage;
+    return result;
+}
 
-        return textureDataLayout;
+wgpu::BindGroup MakeBindGroup(
+    const wgpu::Device& device,
+    const wgpu::BindGroupLayout& layout,
+    std::initializer_list<BindingInitializationHelper> entriesInitializer) {
+    std::vector<wgpu::BindGroupEntry> entries;
+    for (const BindingInitializationHelper& helper : entriesInitializer) {
+        entries.push_back(helper.GetAsBinding());
     }
 
-    wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
-                                                 const wgpu::BindGroupLayout* bindGroupLayout) {
-        wgpu::PipelineLayoutDescriptor descriptor;
-        if (bindGroupLayout != nullptr) {
-            descriptor.bindGroupLayoutCount = 1;
-            descriptor.bindGroupLayouts = bindGroupLayout;
-        } else {
-            descriptor.bindGroupLayoutCount = 0;
-            descriptor.bindGroupLayouts = nullptr;
-        }
-        return device.CreatePipelineLayout(&descriptor);
-    }
+    wgpu::BindGroupDescriptor descriptor;
+    descriptor.layout = layout;
+    descriptor.entryCount = checked_cast<uint32_t>(entries.size());
+    descriptor.entries = entries.data();
 
-    wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
-                                            std::vector<wgpu::BindGroupLayout> bgls) {
-        wgpu::PipelineLayoutDescriptor descriptor;
-        descriptor.bindGroupLayoutCount = uint32_t(bgls.size());
-        descriptor.bindGroupLayouts = bgls.data();
-        return device.CreatePipelineLayout(&descriptor);
-    }
-
-    wgpu::BindGroupLayout MakeBindGroupLayout(
-        const wgpu::Device& device,
-        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer) {
-        std::vector<wgpu::BindGroupLayoutEntry> entries;
-        for (const BindingLayoutEntryInitializationHelper& entry : entriesInitializer) {
-            entries.push_back(entry);
-        }
-
-        wgpu::BindGroupLayoutDescriptor descriptor;
-        descriptor.entryCount = static_cast<uint32_t>(entries.size());
-        descriptor.entries = entries.data();
-        return device.CreateBindGroupLayout(&descriptor);
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::BufferBindingType bufferType,
-        bool bufferHasDynamicOffset,
-        uint64_t bufferMinBindingSize) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        buffer.type = bufferType;
-        buffer.hasDynamicOffset = bufferHasDynamicOffset;
-        buffer.minBindingSize = bufferMinBindingSize;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::SamplerBindingType samplerType) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        sampler.type = samplerType;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::TextureSampleType textureSampleType,
-        wgpu::TextureViewDimension textureViewDimension,
-        bool textureMultisampled) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        texture.sampleType = textureSampleType;
-        texture.viewDimension = textureViewDimension;
-        texture.multisampled = textureMultisampled;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::StorageTextureAccess storageTextureAccess,
-        wgpu::TextureFormat format,
-        wgpu::TextureViewDimension textureViewDimension) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        storageTexture.access = storageTextureAccess;
-        storageTexture.format = format;
-        storageTexture.viewDimension = textureViewDimension;
-    }
-
-    // ExternalTextureBindingLayout never contains data, so just make one that can be reused instead
-    // of declaring a new one every time it's needed.
-    wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout = {};
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        uint32_t entryBinding,
-        wgpu::ShaderStage entryVisibility,
-        wgpu::ExternalTextureBindingLayout* bindingLayout) {
-        binding = entryBinding;
-        visibility = entryVisibility;
-        nextInChain = bindingLayout;
-    }
-
-    BindingLayoutEntryInitializationHelper::BindingLayoutEntryInitializationHelper(
-        const wgpu::BindGroupLayoutEntry& entry)
-        : wgpu::BindGroupLayoutEntry(entry) {
-    }
-
-    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
-                                                             const wgpu::Sampler& sampler)
-        : binding(binding), sampler(sampler) {
-    }
-
-    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
-                                                             const wgpu::TextureView& textureView)
-        : binding(binding), textureView(textureView) {
-    }
-
-    BindingInitializationHelper::BindingInitializationHelper(
-        uint32_t binding,
-        const wgpu::ExternalTexture& externalTexture)
-        : binding(binding) {
-        externalTextureBindingEntry.externalTexture = externalTexture;
-    }
-
-    BindingInitializationHelper::BindingInitializationHelper(uint32_t binding,
-                                                             const wgpu::Buffer& buffer,
-                                                             uint64_t offset,
-                                                             uint64_t size)
-        : binding(binding), buffer(buffer), offset(offset), size(size) {
-    }
-
-    wgpu::BindGroupEntry BindingInitializationHelper::GetAsBinding() const {
-        wgpu::BindGroupEntry result;
-
-        result.binding = binding;
-        result.sampler = sampler;
-        result.textureView = textureView;
-        result.buffer = buffer;
-        result.offset = offset;
-        result.size = size;
-        if (externalTextureBindingEntry.externalTexture != nullptr) {
-            result.nextInChain = &externalTextureBindingEntry;
-        }
-
-        return result;
-    }
-
-    wgpu::BindGroup MakeBindGroup(
-        const wgpu::Device& device,
-        const wgpu::BindGroupLayout& layout,
-        std::initializer_list<BindingInitializationHelper> entriesInitializer) {
-        std::vector<wgpu::BindGroupEntry> entries;
-        for (const BindingInitializationHelper& helper : entriesInitializer) {
-            entries.push_back(helper.GetAsBinding());
-        }
-
-        wgpu::BindGroupDescriptor descriptor;
-        descriptor.layout = layout;
-        descriptor.entryCount = checked_cast<uint32_t>(entries.size());
-        descriptor.entries = entries.data();
-
-        return device.CreateBindGroup(&descriptor);
-    }
+    return device.CreateBindGroup(&descriptor);
+}
 
 }  // namespace utils
diff --git a/src/dawn/utils/WGPUHelpers.h b/src/dawn/utils/WGPUHelpers.h
index 67199fc..fb6ddd5 100644
--- a/src/dawn/utils/WGPUHelpers.h
+++ b/src/dawn/utils/WGPUHelpers.h
@@ -25,157 +25,155 @@
 
 namespace utils {
 
-    enum Expectation { Success, Failure };
+enum Expectation { Success, Failure };
 
-    wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
-    wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
+wgpu::ShaderModule CreateShaderModuleFromASM(const wgpu::Device& device, const char* source);
+wgpu::ShaderModule CreateShaderModule(const wgpu::Device& device, const char* source);
 
-    wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
-                                      const void* data,
-                                      uint64_t size,
-                                      wgpu::BufferUsage usage);
+wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+                                  const void* data,
+                                  uint64_t size,
+                                  wgpu::BufferUsage usage);
 
-    template <typename T>
-    wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
-                                      wgpu::BufferUsage usage,
-                                      std::initializer_list<T> data) {
-        return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
-    }
+template <typename T>
+wgpu::Buffer CreateBufferFromData(const wgpu::Device& device,
+                                  wgpu::BufferUsage usage,
+                                  std::initializer_list<T> data) {
+    return CreateBufferFromData(device, data.begin(), uint32_t(sizeof(T) * data.size()), usage);
+}
 
-    wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
-                                                uint64_t offset = 0,
-                                                uint32_t bytesPerRow = wgpu::kCopyStrideUndefined,
+wgpu::ImageCopyBuffer CreateImageCopyBuffer(wgpu::Buffer buffer,
+                                            uint64_t offset = 0,
+                                            uint32_t bytesPerRow = wgpu::kCopyStrideUndefined,
+                                            uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
+wgpu::ImageCopyTexture CreateImageCopyTexture(
+    wgpu::Texture texture,
+    uint32_t level = 0,
+    wgpu::Origin3D origin = {0, 0, 0},
+    wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
+wgpu::TextureDataLayout CreateTextureDataLayout(uint64_t offset,
+                                                uint32_t bytesPerRow,
                                                 uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
-    wgpu::ImageCopyTexture CreateImageCopyTexture(
-        wgpu::Texture texture,
-        uint32_t level = 0,
-        wgpu::Origin3D origin = {0, 0, 0},
-        wgpu::TextureAspect aspect = wgpu::TextureAspect::All);
-    wgpu::TextureDataLayout CreateTextureDataLayout(
-        uint64_t offset,
-        uint32_t bytesPerRow,
-        uint32_t rowsPerImage = wgpu::kCopyStrideUndefined);
 
-    struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
-      public:
-        ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
-                                  wgpu::TextureView depthStencil = wgpu::TextureView());
+struct ComboRenderPassDescriptor : public wgpu::RenderPassDescriptor {
+  public:
+    ComboRenderPassDescriptor(std::initializer_list<wgpu::TextureView> colorAttachmentInfo,
+                              wgpu::TextureView depthStencil = wgpu::TextureView());
 
-        ComboRenderPassDescriptor(const ComboRenderPassDescriptor& otherRenderPass);
-        const ComboRenderPassDescriptor& operator=(
-            const ComboRenderPassDescriptor& otherRenderPass);
+    ComboRenderPassDescriptor(const ComboRenderPassDescriptor& otherRenderPass);
+    const ComboRenderPassDescriptor& operator=(const ComboRenderPassDescriptor& otherRenderPass);
 
-        void UnsetDepthStencilLoadStoreOpsForFormat(wgpu::TextureFormat format);
+    void UnsetDepthStencilLoadStoreOpsForFormat(wgpu::TextureFormat format);
 
-        std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
-        wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
-    };
+    std::array<wgpu::RenderPassColorAttachment, kMaxColorAttachments> cColorAttachments;
+    wgpu::RenderPassDepthStencilAttachment cDepthStencilAttachmentInfo = {};
+};
 
-    struct BasicRenderPass {
-      public:
-        BasicRenderPass();
-        BasicRenderPass(uint32_t width,
-                        uint32_t height,
-                        wgpu::Texture color,
-                        wgpu::TextureFormat texture = kDefaultColorFormat);
+struct BasicRenderPass {
+  public:
+    BasicRenderPass();
+    BasicRenderPass(uint32_t width,
+                    uint32_t height,
+                    wgpu::Texture color,
+                    wgpu::TextureFormat texture = kDefaultColorFormat);
 
-        static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
+    static constexpr wgpu::TextureFormat kDefaultColorFormat = wgpu::TextureFormat::RGBA8Unorm;
 
-        uint32_t width;
-        uint32_t height;
-        wgpu::Texture color;
-        wgpu::TextureFormat colorFormat;
-        utils::ComboRenderPassDescriptor renderPassInfo;
-    };
-    BasicRenderPass CreateBasicRenderPass(
-        const wgpu::Device& device,
-        uint32_t width,
-        uint32_t height,
-        wgpu::TextureFormat format = BasicRenderPass::kDefaultColorFormat);
+    uint32_t width;
+    uint32_t height;
+    wgpu::Texture color;
+    wgpu::TextureFormat colorFormat;
+    utils::ComboRenderPassDescriptor renderPassInfo;
+};
+BasicRenderPass CreateBasicRenderPass(
+    const wgpu::Device& device,
+    uint32_t width,
+    uint32_t height,
+    wgpu::TextureFormat format = BasicRenderPass::kDefaultColorFormat);
 
-    wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
-                                                 const wgpu::BindGroupLayout* bindGroupLayout);
+wgpu::PipelineLayout MakeBasicPipelineLayout(const wgpu::Device& device,
+                                             const wgpu::BindGroupLayout* bindGroupLayout);
 
-    wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
-                                            std::vector<wgpu::BindGroupLayout> bgls);
+wgpu::PipelineLayout MakePipelineLayout(const wgpu::Device& device,
+                                        std::vector<wgpu::BindGroupLayout> bgls);
 
-    extern wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout;
+extern wgpu::ExternalTextureBindingLayout kExternalTextureBindingLayout;
 
-    // Helpers to make creating bind group layouts look nicer:
-    //
-    //   utils::MakeBindGroupLayout(device, {
-    //       {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
-    //       {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
-    //       {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
-    //   });
+// Helpers to make creating bind group layouts look nicer:
+//
+//   utils::MakeBindGroupLayout(device, {
+//       {0, wgpu::ShaderStage::Vertex, wgpu::BufferBindingType::Uniform},
+//       {1, wgpu::ShaderStage::Fragment, wgpu::SamplerBindingType::Filtering},
+//       {3, wgpu::ShaderStage::Fragment, wgpu::TextureSampleType::Float}
+//   });
 
-    struct BindingLayoutEntryInitializationHelper : wgpu::BindGroupLayoutEntry {
-        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
-                                               wgpu::ShaderStage entryVisibility,
-                                               wgpu::BufferBindingType bufferType,
-                                               bool bufferHasDynamicOffset = false,
-                                               uint64_t bufferMinBindingSize = 0);
-        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
-                                               wgpu::ShaderStage entryVisibility,
-                                               wgpu::SamplerBindingType samplerType);
-        BindingLayoutEntryInitializationHelper(
-            uint32_t entryBinding,
-            wgpu::ShaderStage entryVisibility,
-            wgpu::TextureSampleType textureSampleType,
-            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
-            bool textureMultisampled = false);
-        BindingLayoutEntryInitializationHelper(
-            uint32_t entryBinding,
-            wgpu::ShaderStage entryVisibility,
-            wgpu::StorageTextureAccess storageTextureAccess,
-            wgpu::TextureFormat format,
-            wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
-        BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
-                                               wgpu::ShaderStage entryVisibility,
-                                               wgpu::ExternalTextureBindingLayout* bindingLayout);
+struct BindingLayoutEntryInitializationHelper : wgpu::BindGroupLayoutEntry {
+    BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                           wgpu::ShaderStage entryVisibility,
+                                           wgpu::BufferBindingType bufferType,
+                                           bool bufferHasDynamicOffset = false,
+                                           uint64_t bufferMinBindingSize = 0);
+    BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                           wgpu::ShaderStage entryVisibility,
+                                           wgpu::SamplerBindingType samplerType);
+    BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::TextureSampleType textureSampleType,
+        wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D,
+        bool textureMultisampled = false);
+    BindingLayoutEntryInitializationHelper(
+        uint32_t entryBinding,
+        wgpu::ShaderStage entryVisibility,
+        wgpu::StorageTextureAccess storageTextureAccess,
+        wgpu::TextureFormat format,
+        wgpu::TextureViewDimension viewDimension = wgpu::TextureViewDimension::e2D);
+    BindingLayoutEntryInitializationHelper(uint32_t entryBinding,
+                                           wgpu::ShaderStage entryVisibility,
+                                           wgpu::ExternalTextureBindingLayout* bindingLayout);
 
-        // NOLINTNEXTLINE(runtime/explicit)
-        BindingLayoutEntryInitializationHelper(const wgpu::BindGroupLayoutEntry& entry);
-    };
+    // NOLINTNEXTLINE(runtime/explicit)
+    BindingLayoutEntryInitializationHelper(const wgpu::BindGroupLayoutEntry& entry);
+};
 
-    wgpu::BindGroupLayout MakeBindGroupLayout(
-        const wgpu::Device& device,
-        std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer);
+wgpu::BindGroupLayout MakeBindGroupLayout(
+    const wgpu::Device& device,
+    std::initializer_list<BindingLayoutEntryInitializationHelper> entriesInitializer);
 
-    // Helpers to make creating bind groups look nicer:
-    //
-    //   utils::MakeBindGroup(device, layout, {
-    //       {0, mySampler},
-    //       {1, myBuffer, offset, size},
-    //       {3, myTextureView}
-    //   });
+// Helpers to make creating bind groups look nicer:
+//
+//   utils::MakeBindGroup(device, layout, {
+//       {0, mySampler},
+//       {1, myBuffer, offset, size},
+//       {3, myTextureView}
+//   });
 
-    // Structure with one constructor per-type of bindings, so that the initializer_list accepts
-    // bindings with the right type and no extra information.
-    struct BindingInitializationHelper {
-        BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
-        BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
-        BindingInitializationHelper(uint32_t binding, const wgpu::ExternalTexture& externalTexture);
-        BindingInitializationHelper(uint32_t binding,
-                                    const wgpu::Buffer& buffer,
-                                    uint64_t offset = 0,
-                                    uint64_t size = wgpu::kWholeSize);
+// Structure with one constructor per type of binding, so that the initializer_list accepts
+// bindings with the right type and no extra information.
+struct BindingInitializationHelper {
+    BindingInitializationHelper(uint32_t binding, const wgpu::Sampler& sampler);
+    BindingInitializationHelper(uint32_t binding, const wgpu::TextureView& textureView);
+    BindingInitializationHelper(uint32_t binding, const wgpu::ExternalTexture& externalTexture);
+    BindingInitializationHelper(uint32_t binding,
+                                const wgpu::Buffer& buffer,
+                                uint64_t offset = 0,
+                                uint64_t size = wgpu::kWholeSize);
 
-        wgpu::BindGroupEntry GetAsBinding() const;
+    wgpu::BindGroupEntry GetAsBinding() const;
 
-        uint32_t binding;
-        wgpu::Sampler sampler;
-        wgpu::TextureView textureView;
-        wgpu::Buffer buffer;
-        wgpu::ExternalTextureBindingEntry externalTextureBindingEntry;
-        uint64_t offset = 0;
-        uint64_t size = 0;
-    };
+    uint32_t binding;
+    wgpu::Sampler sampler;
+    wgpu::TextureView textureView;
+    wgpu::Buffer buffer;
+    wgpu::ExternalTextureBindingEntry externalTextureBindingEntry;
+    uint64_t offset = 0;
+    uint64_t size = 0;
+};
 
-    wgpu::BindGroup MakeBindGroup(
-        const wgpu::Device& device,
-        const wgpu::BindGroupLayout& layout,
-        std::initializer_list<BindingInitializationHelper> entriesInitializer);
+wgpu::BindGroup MakeBindGroup(
+    const wgpu::Device& device,
+    const wgpu::BindGroupLayout& layout,
+    std::initializer_list<BindingInitializationHelper> entriesInitializer);
 
 }  // namespace utils
 
diff --git a/src/dawn/utils/WindowsDebugLogger.cpp b/src/dawn/utils/WindowsDebugLogger.cpp
index b4125bc..3f7fbed 100644
--- a/src/dawn/utils/WindowsDebugLogger.cpp
+++ b/src/dawn/utils/WindowsDebugLogger.cpp
@@ -22,90 +22,89 @@
 
 namespace utils {
 
-    class WindowsDebugLogger : public PlatformDebugLogger {
-      public:
-        WindowsDebugLogger() : PlatformDebugLogger() {
-            if (IsDebuggerPresent()) {
-                // This condition is true when running inside Visual Studio or some other debugger.
-                // Messages are already printed there so we don't need to do anything.
-                return;
-            }
+class WindowsDebugLogger : public PlatformDebugLogger {
+  public:
+    WindowsDebugLogger() : PlatformDebugLogger() {
+        if (IsDebuggerPresent()) {
+            // This condition is true when running inside Visual Studio or some other debugger.
+            // Messages are already printed there so we don't need to do anything.
+            return;
+        }
 
-            mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
-            ASSERT(mShouldExitHandle != nullptr);
+        mShouldExitHandle = CreateEventA(nullptr, TRUE, FALSE, nullptr);
+        ASSERT(mShouldExitHandle != nullptr);
 
-            mThread = std::thread(
-                [](HANDLE shouldExit) {
-                    // https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
-                    // for the layout of this struct.
-                    struct {
-                        DWORD process_id;
-                        char data[4096 - sizeof(DWORD)];
-                    }* dbWinBuffer = nullptr;
+        mThread = std::thread(
+            [](HANDLE shouldExit) {
+                // https://blogs.msdn.microsoft.com/reiley/2011/07/29/a-debugging-approach-to-outputdebugstring/
+                // for the layout of this struct.
+                struct {
+                    DWORD process_id;
+                    char data[4096 - sizeof(DWORD)];
+                }* dbWinBuffer = nullptr;
 
-                    HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
-                                                     0, sizeof(*dbWinBuffer), "DBWIN_BUFFER");
-                    ASSERT(file != nullptr);
-                    ASSERT(file != INVALID_HANDLE_VALUE);
+                HANDLE file = CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, 0,
+                                                 sizeof(*dbWinBuffer), "DBWIN_BUFFER");
+                ASSERT(file != nullptr);
+                ASSERT(file != INVALID_HANDLE_VALUE);
 
-                    dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
-                        MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
-                    ASSERT(dbWinBuffer != nullptr);
+                dbWinBuffer = static_cast<decltype(dbWinBuffer)>(
+                    MapViewOfFile(file, SECTION_MAP_READ, 0, 0, 0));
+                ASSERT(dbWinBuffer != nullptr);
 
-                    HANDLE dbWinBufferReady =
-                        CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
-                    ASSERT(dbWinBufferReady != nullptr);
+                HANDLE dbWinBufferReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_BUFFER_READY");
+                ASSERT(dbWinBufferReady != nullptr);
 
-                    HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
-                    ASSERT(dbWinDataReady != nullptr);
+                HANDLE dbWinDataReady = CreateEventA(nullptr, FALSE, FALSE, "DBWIN_DATA_READY");
+                ASSERT(dbWinDataReady != nullptr);
 
-                    std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
-                    while (true) {
-                        SetEvent(dbWinBufferReady);
-                        DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
-                                                            FALSE, INFINITE);
-                        if (wait == WAIT_OBJECT_0) {
-                            break;
-                        }
-                        ASSERT(wait == WAIT_OBJECT_0 + 1);
-                        fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
-                                dbWinBuffer->data);
-                        fflush(stderr);
+                std::array<HANDLE, 2> waitHandles = {shouldExit, dbWinDataReady};
+                while (true) {
+                    SetEvent(dbWinBufferReady);
+                    DWORD wait = WaitForMultipleObjects(waitHandles.size(), waitHandles.data(),
+                                                        FALSE, INFINITE);
+                    if (wait == WAIT_OBJECT_0) {
+                        break;
                     }
+                    ASSERT(wait == WAIT_OBJECT_0 + 1);
+                    fprintf(stderr, "%.*s\n", static_cast<int>(sizeof(dbWinBuffer->data)),
+                            dbWinBuffer->data);
+                    fflush(stderr);
+                }
 
-                    CloseHandle(dbWinDataReady);
-                    CloseHandle(dbWinBufferReady);
-                    UnmapViewOfFile(dbWinBuffer);
-                    CloseHandle(file);
-                },
-                mShouldExitHandle);
-        }
-
-        ~WindowsDebugLogger() override {
-            if (IsDebuggerPresent()) {
-                // This condition is true when running inside Visual Studio or some other debugger.
-                // Messages are already printed there so we don't need to do anything.
-                return;
-            }
-
-            if (mShouldExitHandle != nullptr) {
-                BOOL result = SetEvent(mShouldExitHandle);
-                ASSERT(result != 0);
-                CloseHandle(mShouldExitHandle);
-            }
-
-            if (mThread.joinable()) {
-                mThread.join();
-            }
-        }
-
-      private:
-        std::thread mThread;
-        HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
-    };
-
-    PlatformDebugLogger* CreatePlatformDebugLogger() {
-        return new WindowsDebugLogger();
+                CloseHandle(dbWinDataReady);
+                CloseHandle(dbWinBufferReady);
+                UnmapViewOfFile(dbWinBuffer);
+                CloseHandle(file);
+            },
+            mShouldExitHandle);
     }
 
+    ~WindowsDebugLogger() override {
+        if (IsDebuggerPresent()) {
+            // This condition is true when running inside Visual Studio or some other debugger.
+            // Messages are already printed there so we don't need to do anything.
+            return;
+        }
+
+        if (mShouldExitHandle != nullptr) {
+            BOOL result = SetEvent(mShouldExitHandle);
+            ASSERT(result != 0);
+            CloseHandle(mShouldExitHandle);
+        }
+
+        if (mThread.joinable()) {
+            mThread.join();
+        }
+    }
+
+  private:
+    std::thread mThread;
+    HANDLE mShouldExitHandle = INVALID_HANDLE_VALUE;
+};
+
+PlatformDebugLogger* CreatePlatformDebugLogger() {
+    return new WindowsDebugLogger();
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/WindowsTimer.cpp b/src/dawn/utils/WindowsTimer.cpp
index 4cb360b..99e1d73 100644
--- a/src/dawn/utils/WindowsTimer.cpp
+++ b/src/dawn/utils/WindowsTimer.cpp
@@ -18,72 +18,71 @@
 
 namespace utils {
 
-    class WindowsTimer : public Timer {
-      public:
-        WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {
-        }
+class WindowsTimer : public Timer {
+  public:
+    WindowsTimer() : Timer(), mRunning(false), mFrequency(0) {}
 
-        ~WindowsTimer() override = default;
+    ~WindowsTimer() override = default;
 
-        void Start() override {
-            LARGE_INTEGER curTime;
-            QueryPerformanceCounter(&curTime);
-            mStartTime = curTime.QuadPart;
+    void Start() override {
+        LARGE_INTEGER curTime;
+        QueryPerformanceCounter(&curTime);
+        mStartTime = curTime.QuadPart;
 
-            // Cache the frequency
-            GetFrequency();
+        // Cache the frequency
+        GetFrequency();
 
-            mRunning = true;
-        }
-
-        void Stop() override {
-            LARGE_INTEGER curTime;
-            QueryPerformanceCounter(&curTime);
-            mStopTime = curTime.QuadPart;
-
-            mRunning = false;
-        }
-
-        double GetElapsedTime() const override {
-            LONGLONG endTime;
-            if (mRunning) {
-                LARGE_INTEGER curTime;
-                QueryPerformanceCounter(&curTime);
-                endTime = curTime.QuadPart;
-            } else {
-                endTime = mStopTime;
-            }
-
-            return static_cast<double>(endTime - mStartTime) / mFrequency;
-        }
-
-        double GetAbsoluteTime() override {
-            LARGE_INTEGER curTime;
-            QueryPerformanceCounter(&curTime);
-
-            return static_cast<double>(curTime.QuadPart) / GetFrequency();
-        }
-
-      private:
-        LONGLONG GetFrequency() {
-            if (mFrequency == 0) {
-                LARGE_INTEGER frequency = {};
-                QueryPerformanceFrequency(&frequency);
-
-                mFrequency = frequency.QuadPart;
-            }
-
-            return mFrequency;
-        }
-
-        bool mRunning;
-        LONGLONG mStartTime;
-        LONGLONG mStopTime;
-        LONGLONG mFrequency;
-    };
-
-    Timer* CreateTimer() {
-        return new WindowsTimer();
+        mRunning = true;
     }
 
+    void Stop() override {
+        LARGE_INTEGER curTime;
+        QueryPerformanceCounter(&curTime);
+        mStopTime = curTime.QuadPart;
+
+        mRunning = false;
+    }
+
+    double GetElapsedTime() const override {
+        LONGLONG endTime;
+        if (mRunning) {
+            LARGE_INTEGER curTime;
+            QueryPerformanceCounter(&curTime);
+            endTime = curTime.QuadPart;
+        } else {
+            endTime = mStopTime;
+        }
+
+        return static_cast<double>(endTime - mStartTime) / mFrequency;
+    }
+
+    double GetAbsoluteTime() override {
+        LARGE_INTEGER curTime;
+        QueryPerformanceCounter(&curTime);
+
+        return static_cast<double>(curTime.QuadPart) / GetFrequency();
+    }
+
+  private:
+    LONGLONG GetFrequency() {
+        if (mFrequency == 0) {
+            LARGE_INTEGER frequency = {};
+            QueryPerformanceFrequency(&frequency);
+
+            mFrequency = frequency.QuadPart;
+        }
+
+        return mFrequency;
+    }
+
+    bool mRunning;
+    LONGLONG mStartTime;
+    LONGLONG mStopTime;
+    LONGLONG mFrequency;
+};
+
+Timer* CreateTimer() {
+    return new WindowsTimer();
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/WireHelper.cpp b/src/dawn/utils/WireHelper.cpp
index 294fb5f..c00006f 100644
--- a/src/dawn/utils/WireHelper.cpp
+++ b/src/dawn/utils/WireHelper.cpp
@@ -32,147 +32,132 @@
 
 namespace utils {
 
-    namespace {
+namespace {
 
-        class WireServerTraceLayer : public dawn::wire::CommandHandler {
-          public:
-            WireServerTraceLayer(const char* dir, dawn::wire::CommandHandler* handler)
-                : dawn::wire::CommandHandler(), mDir(dir), mHandler(handler) {
-                const char* sep = GetPathSeparator();
-                if (mDir.size() > 0 && mDir.back() != *sep) {
-                    mDir += sep;
-                }
-            }
-
-            void BeginWireTrace(const char* name) {
-                std::string filename = name;
-                // Replace slashes in gtest names with underscores so everything is in one
-                // directory.
-                std::replace(filename.begin(), filename.end(), '/', '_');
-                std::replace(filename.begin(), filename.end(), '\\', '_');
-
-                // Prepend the filename with the directory.
-                filename = mDir + filename;
-
-                ASSERT(!mFile.is_open());
-                mFile.open(filename,
-                           std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
-
-                // Write the initial 8 bytes. This means the fuzzer should never inject an
-                // error.
-                const uint64_t injectedErrorIndex = 0xFFFF'FFFF'FFFF'FFFF;
-                mFile.write(reinterpret_cast<const char*>(&injectedErrorIndex),
-                            sizeof(injectedErrorIndex));
-            }
-
-            const volatile char* HandleCommands(const volatile char* commands,
-                                                size_t size) override {
-                if (mFile.is_open()) {
-                    mFile.write(const_cast<const char*>(commands), size);
-                }
-                return mHandler->HandleCommands(commands, size);
-            }
-
-          private:
-            std::string mDir;
-            dawn::wire::CommandHandler* mHandler;
-            std::ofstream mFile;
-        };
-
-        class WireHelperDirect : public WireHelper {
-          public:
-            WireHelperDirect() {
-                dawnProcSetProcs(&dawn::native::GetProcs());
-            }
-
-            std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
-                ASSERT(backendDevice != nullptr);
-                return std::make_pair(wgpu::Device::Acquire(backendDevice), backendDevice);
-            }
-
-            void BeginWireTrace(const char* name) override {
-            }
-
-            bool FlushClient() override {
-                return true;
-            }
-
-            bool FlushServer() override {
-                return true;
-            }
-        };
-
-        class WireHelperProxy : public WireHelper {
-          public:
-            explicit WireHelperProxy(const char* wireTraceDir) {
-                mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
-                mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
-
-                dawn::wire::WireServerDescriptor serverDesc = {};
-                serverDesc.procs = &dawn::native::GetProcs();
-                serverDesc.serializer = mS2cBuf.get();
-
-                mWireServer.reset(new dawn::wire::WireServer(serverDesc));
-                mC2sBuf->SetHandler(mWireServer.get());
-
-                if (wireTraceDir != nullptr && strlen(wireTraceDir) > 0) {
-                    mWireServerTraceLayer.reset(
-                        new WireServerTraceLayer(wireTraceDir, mWireServer.get()));
-                    mC2sBuf->SetHandler(mWireServerTraceLayer.get());
-                }
-
-                dawn::wire::WireClientDescriptor clientDesc = {};
-                clientDesc.serializer = mC2sBuf.get();
-
-                mWireClient.reset(new dawn::wire::WireClient(clientDesc));
-                mS2cBuf->SetHandler(mWireClient.get());
-                dawnProcSetProcs(&dawn::wire::client::GetProcs());
-            }
-
-            std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
-                ASSERT(backendDevice != nullptr);
-
-                auto reservation = mWireClient->ReserveDevice();
-                mWireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);
-                dawn::native::GetProcs().deviceRelease(backendDevice);
-
-                return std::make_pair(wgpu::Device::Acquire(reservation.device), backendDevice);
-            }
-
-            void BeginWireTrace(const char* name) override {
-                if (mWireServerTraceLayer) {
-                    return mWireServerTraceLayer->BeginWireTrace(name);
-                }
-            }
-
-            bool FlushClient() override {
-                return mC2sBuf->Flush();
-            }
-
-            bool FlushServer() override {
-                return mS2cBuf->Flush();
-            }
-
-          private:
-            std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
-            std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
-            std::unique_ptr<WireServerTraceLayer> mWireServerTraceLayer;
-            std::unique_ptr<dawn::wire::WireServer> mWireServer;
-            std::unique_ptr<dawn::wire::WireClient> mWireClient;
-        };
-
-    }  // anonymous namespace
-
-    std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir) {
-        if (useWire) {
-            return std::unique_ptr<WireHelper>(new WireHelperProxy(wireTraceDir));
-        } else {
-            return std::unique_ptr<WireHelper>(new WireHelperDirect());
+class WireServerTraceLayer : public dawn::wire::CommandHandler {
+  public:
+    WireServerTraceLayer(const char* dir, dawn::wire::CommandHandler* handler)
+        : dawn::wire::CommandHandler(), mDir(dir), mHandler(handler) {
+        const char* sep = GetPathSeparator();
+        if (mDir.size() > 0 && mDir.back() != *sep) {
+            mDir += sep;
         }
     }
 
-    WireHelper::~WireHelper() {
-        dawnProcSetProcs(nullptr);
+    void BeginWireTrace(const char* name) {
+        std::string filename = name;
+        // Replace slashes in gtest names with underscores so everything is in one
+        // directory.
+        std::replace(filename.begin(), filename.end(), '/', '_');
+        std::replace(filename.begin(), filename.end(), '\\', '_');
+
+        // Prepend the filename with the directory.
+        filename = mDir + filename;
+
+        ASSERT(!mFile.is_open());
+        mFile.open(filename, std::ios_base::out | std::ios_base::binary | std::ios_base::trunc);
+
+        // Write the initial 8 bytes. This means the fuzzer should never inject an
+        // error.
+        const uint64_t injectedErrorIndex = 0xFFFF'FFFF'FFFF'FFFF;
+        mFile.write(reinterpret_cast<const char*>(&injectedErrorIndex), sizeof(injectedErrorIndex));
     }
 
+    const volatile char* HandleCommands(const volatile char* commands, size_t size) override {
+        if (mFile.is_open()) {
+            mFile.write(const_cast<const char*>(commands), size);
+        }
+        return mHandler->HandleCommands(commands, size);
+    }
+
+  private:
+    std::string mDir;
+    dawn::wire::CommandHandler* mHandler;
+    std::ofstream mFile;
+};
+
+class WireHelperDirect : public WireHelper {
+  public:
+    WireHelperDirect() { dawnProcSetProcs(&dawn::native::GetProcs()); }
+
+    std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
+        ASSERT(backendDevice != nullptr);
+        return std::make_pair(wgpu::Device::Acquire(backendDevice), backendDevice);
+    }
+
+    void BeginWireTrace(const char* name) override {}
+
+    bool FlushClient() override { return true; }
+
+    bool FlushServer() override { return true; }
+};
+
+class WireHelperProxy : public WireHelper {
+  public:
+    explicit WireHelperProxy(const char* wireTraceDir) {
+        mC2sBuf = std::make_unique<utils::TerribleCommandBuffer>();
+        mS2cBuf = std::make_unique<utils::TerribleCommandBuffer>();
+
+        dawn::wire::WireServerDescriptor serverDesc = {};
+        serverDesc.procs = &dawn::native::GetProcs();
+        serverDesc.serializer = mS2cBuf.get();
+
+        mWireServer.reset(new dawn::wire::WireServer(serverDesc));
+        mC2sBuf->SetHandler(mWireServer.get());
+
+        if (wireTraceDir != nullptr && strlen(wireTraceDir) > 0) {
+            mWireServerTraceLayer.reset(new WireServerTraceLayer(wireTraceDir, mWireServer.get()));
+            mC2sBuf->SetHandler(mWireServerTraceLayer.get());
+        }
+
+        dawn::wire::WireClientDescriptor clientDesc = {};
+        clientDesc.serializer = mC2sBuf.get();
+
+        mWireClient.reset(new dawn::wire::WireClient(clientDesc));
+        mS2cBuf->SetHandler(mWireClient.get());
+        dawnProcSetProcs(&dawn::wire::client::GetProcs());
+    }
+
+    std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) override {
+        ASSERT(backendDevice != nullptr);
+
+        auto reservation = mWireClient->ReserveDevice();
+        mWireServer->InjectDevice(backendDevice, reservation.id, reservation.generation);
+        dawn::native::GetProcs().deviceRelease(backendDevice);
+
+        return std::make_pair(wgpu::Device::Acquire(reservation.device), backendDevice);
+    }
+
+    void BeginWireTrace(const char* name) override {
+        if (mWireServerTraceLayer) {
+            return mWireServerTraceLayer->BeginWireTrace(name);
+        }
+    }
+
+    bool FlushClient() override { return mC2sBuf->Flush(); }
+
+    bool FlushServer() override { return mS2cBuf->Flush(); }
+
+  private:
+    std::unique_ptr<utils::TerribleCommandBuffer> mC2sBuf;
+    std::unique_ptr<utils::TerribleCommandBuffer> mS2cBuf;
+    std::unique_ptr<WireServerTraceLayer> mWireServerTraceLayer;
+    std::unique_ptr<dawn::wire::WireServer> mWireServer;
+    std::unique_ptr<dawn::wire::WireClient> mWireClient;
+};
+
+}  // anonymous namespace
+
+std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir) {
+    if (useWire) {
+        return std::unique_ptr<WireHelper>(new WireHelperProxy(wireTraceDir));
+    } else {
+        return std::unique_ptr<WireHelper>(new WireHelperDirect());
+    }
+}
+
+WireHelper::~WireHelper() {
+    dawnProcSetProcs(nullptr);
+}
+
 }  // namespace utils
diff --git a/src/dawn/utils/WireHelper.h b/src/dawn/utils/WireHelper.h
index 3bc709d..73c45ad 100644
--- a/src/dawn/utils/WireHelper.h
+++ b/src/dawn/utils/WireHelper.h
@@ -23,22 +23,22 @@
 
 namespace utils {
 
-    class WireHelper {
-      public:
-        virtual ~WireHelper();
+class WireHelper {
+  public:
+    virtual ~WireHelper();
 
-        // Registers the device on the wire, if present.
-        // Returns a pair of the client device and backend device.
-        // The function should take ownership of |backendDevice|.
-        virtual std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) = 0;
+    // Registers the device on the wire, if present.
+    // Returns a pair of the client device and backend device.
+    // The function should take ownership of |backendDevice|.
+    virtual std::pair<wgpu::Device, WGPUDevice> RegisterDevice(WGPUDevice backendDevice) = 0;
 
-        virtual void BeginWireTrace(const char* name) = 0;
+    virtual void BeginWireTrace(const char* name) = 0;
 
-        virtual bool FlushClient() = 0;
-        virtual bool FlushServer() = 0;
-    };
+    virtual bool FlushClient() = 0;
+    virtual bool FlushServer() = 0;
+};
 
-    std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir = nullptr);
+std::unique_ptr<WireHelper> CreateWireHelper(bool useWire, const char* wireTraceDir = nullptr);
 
 }  // namespace utils
 
diff --git a/src/dawn/wire/BufferConsumer.h b/src/dawn/wire/BufferConsumer.h
index 34fe0d9..1ae8451 100644
--- a/src/dawn/wire/BufferConsumer.h
+++ b/src/dawn/wire/BufferConsumer.h
@@ -21,64 +21,59 @@
 
 namespace dawn::wire {
 
-    // BufferConsumer is a utility class that allows reading bytes from a buffer
-    // while simultaneously decrementing the amount of remaining space by exactly
-    // the amount read. It helps prevent bugs where incrementing a pointer and
-    // decrementing a size value are not kept in sync.
-    // BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
-    template <typename BufferT>
-    class BufferConsumer {
-        static_assert(sizeof(BufferT) == 1,
-                      "BufferT must be 1-byte, but may have const/volatile qualifiers.");
+// BufferConsumer is a utility class that allows reading bytes from a buffer
+// while simultaneously decrementing the amount of remaining space by exactly
+// the amount read. It helps prevent bugs where incrementing a pointer and
+// decrementing a size value are not kept in sync.
+// BufferConsumer also contains bounds checks to prevent reading out-of-bounds.
+template <typename BufferT>
+class BufferConsumer {
+    static_assert(sizeof(BufferT) == 1,
+                  "BufferT must be 1-byte, but may have const/volatile qualifiers.");
 
-      public:
-        BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {
-        }
+  public:
+    BufferConsumer(BufferT* buffer, size_t size) : mBuffer(buffer), mSize(size) {}
 
-        BufferT* Buffer() const {
-            return mBuffer;
-        }
-        size_t AvailableSize() const {
-            return mSize;
-        }
+    BufferT* Buffer() const { return mBuffer; }
+    size_t AvailableSize() const { return mSize; }
 
-      protected:
-        template <typename T, typename N>
-        WireResult NextN(N count, T** data);
+  protected:
+    template <typename T, typename N>
+    WireResult NextN(N count, T** data);
 
-        template <typename T>
-        WireResult Next(T** data);
+    template <typename T>
+    WireResult Next(T** data);
 
-        template <typename T>
-        WireResult Peek(T** data);
+    template <typename T>
+    WireResult Peek(T** data);
 
-      private:
-        BufferT* mBuffer;
-        size_t mSize;
-    };
+  private:
+    BufferT* mBuffer;
+    size_t mSize;
+};
 
-    class SerializeBuffer : public BufferConsumer<char> {
-      public:
-        using BufferConsumer::BufferConsumer;
-        using BufferConsumer::Next;
-        using BufferConsumer::NextN;
-    };
+class SerializeBuffer : public BufferConsumer<char> {
+  public:
+    using BufferConsumer::BufferConsumer;
+    using BufferConsumer::Next;
+    using BufferConsumer::NextN;
+};
 
-    class DeserializeBuffer : public BufferConsumer<const volatile char> {
-      public:
-        using BufferConsumer::BufferConsumer;
-        using BufferConsumer::Peek;
+class DeserializeBuffer : public BufferConsumer<const volatile char> {
+  public:
+    using BufferConsumer::BufferConsumer;
+    using BufferConsumer::Peek;
 
-        template <typename T, typename N>
-        WireResult ReadN(N count, const volatile T** data) {
-            return NextN(count, data);
-        }
+    template <typename T, typename N>
+    WireResult ReadN(N count, const volatile T** data) {
+        return NextN(count, data);
+    }
 
-        template <typename T>
-        WireResult Read(const volatile T** data) {
-            return Next(data);
-        }
-    };
+    template <typename T>
+    WireResult Read(const volatile T** data) {
+        return Next(data);
+    }
+};
 
 }  // namespace dawn::wire
 
diff --git a/src/dawn/wire/BufferConsumer_impl.h b/src/dawn/wire/BufferConsumer_impl.h
index 11c771b..6b5d0a1 100644
--- a/src/dawn/wire/BufferConsumer_impl.h
+++ b/src/dawn/wire/BufferConsumer_impl.h
@@ -22,52 +22,52 @@
 
 namespace dawn::wire {
 
-    template <typename BufferT>
-    template <typename T>
-    WireResult BufferConsumer<BufferT>::Peek(T** data) {
-        if (sizeof(T) > mSize) {
-            return WireResult::FatalError;
-        }
-
-        *data = reinterpret_cast<T*>(mBuffer);
-        return WireResult::Success;
+template <typename BufferT>
+template <typename T>
+WireResult BufferConsumer<BufferT>::Peek(T** data) {
+    if (sizeof(T) > mSize) {
+        return WireResult::FatalError;
     }
 
-    template <typename BufferT>
-    template <typename T>
-    WireResult BufferConsumer<BufferT>::Next(T** data) {
-        if (sizeof(T) > mSize) {
-            return WireResult::FatalError;
-        }
+    *data = reinterpret_cast<T*>(mBuffer);
+    return WireResult::Success;
+}
 
-        *data = reinterpret_cast<T*>(mBuffer);
-        mBuffer += sizeof(T);
-        mSize -= sizeof(T);
-        return WireResult::Success;
+template <typename BufferT>
+template <typename T>
+WireResult BufferConsumer<BufferT>::Next(T** data) {
+    if (sizeof(T) > mSize) {
+        return WireResult::FatalError;
     }
 
-    template <typename BufferT>
-    template <typename T, typename N>
-    WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
-        static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
+    *data = reinterpret_cast<T*>(mBuffer);
+    mBuffer += sizeof(T);
+    mSize -= sizeof(T);
+    return WireResult::Success;
+}
 
-        constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
-        if (count > kMaxCountWithoutOverflows) {
-            return WireResult::FatalError;
-        }
+template <typename BufferT>
+template <typename T, typename N>
+WireResult BufferConsumer<BufferT>::NextN(N count, T** data) {
+    static_assert(std::is_unsigned<N>::value, "|count| argument of NextN must be unsigned.");
 
-        // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
-        size_t totalSize = sizeof(T) * count;
-        if (totalSize > mSize) {
-            return WireResult::FatalError;
-        }
-
-        *data = reinterpret_cast<T*>(mBuffer);
-        mBuffer += totalSize;
-        mSize -= totalSize;
-        return WireResult::Success;
+    constexpr size_t kMaxCountWithoutOverflows = std::numeric_limits<size_t>::max() / sizeof(T);
+    if (count > kMaxCountWithoutOverflows) {
+        return WireResult::FatalError;
     }
 
+    // Cannot overflow because |count| is not greater than |kMaxCountWithoutOverflows|.
+    size_t totalSize = sizeof(T) * count;
+    if (totalSize > mSize) {
+        return WireResult::FatalError;
+    }
+
+    *data = reinterpret_cast<T*>(mBuffer);
+    mBuffer += totalSize;
+    mSize -= totalSize;
+    return WireResult::Success;
+}
+
 }  // namespace dawn::wire
 
 #endif  // SRC_DAWN_WIRE_BUFFERCONSUMER_IMPL_H_
diff --git a/src/dawn/wire/ChunkedCommandHandler.cpp b/src/dawn/wire/ChunkedCommandHandler.cpp
index a176f26..9c3dc0e 100644
--- a/src/dawn/wire/ChunkedCommandHandler.cpp
+++ b/src/dawn/wire/ChunkedCommandHandler.cpp
@@ -22,59 +22,58 @@
 
 namespace dawn::wire {
 
-    ChunkedCommandHandler::~ChunkedCommandHandler() = default;
+ChunkedCommandHandler::~ChunkedCommandHandler() = default;
 
-    const volatile char* ChunkedCommandHandler::HandleCommands(const volatile char* commands,
-                                                               size_t size) {
-        if (mChunkedCommandRemainingSize > 0) {
-            // If there is a chunked command in flight, append the command data.
-            // We append at most |mChunkedCommandRemainingSize| which is enough to finish the
-            // in-flight chunked command, and then pass the rest along to a second call to
-            // |HandleCommandsImpl|.
-            size_t chunkSize = std::min(size, mChunkedCommandRemainingSize);
+const volatile char* ChunkedCommandHandler::HandleCommands(const volatile char* commands,
+                                                           size_t size) {
+    if (mChunkedCommandRemainingSize > 0) {
+        // If there is a chunked command in flight, append the command data.
+        // We append at most |mChunkedCommandRemainingSize| which is enough to finish the
+        // in-flight chunked command, and then pass the rest along to a second call to
+        // |HandleCommandsImpl|.
+        size_t chunkSize = std::min(size, mChunkedCommandRemainingSize);
 
-            memcpy(mChunkedCommandData.get() + mChunkedCommandPutOffset,
-                   const_cast<const char*>(commands), chunkSize);
-            mChunkedCommandPutOffset += chunkSize;
-            mChunkedCommandRemainingSize -= chunkSize;
+        memcpy(mChunkedCommandData.get() + mChunkedCommandPutOffset,
+               const_cast<const char*>(commands), chunkSize);
+        mChunkedCommandPutOffset += chunkSize;
+        mChunkedCommandRemainingSize -= chunkSize;
 
-            commands += chunkSize;
-            size -= chunkSize;
+        commands += chunkSize;
+        size -= chunkSize;
 
-            if (mChunkedCommandRemainingSize == 0) {
-                // Once the chunked command is complete, pass the data to the command handler
-                // implemenation.
-                auto chunkedCommandData = std::move(mChunkedCommandData);
-                if (HandleCommandsImpl(chunkedCommandData.get(), mChunkedCommandPutOffset) ==
-                    nullptr) {
-                    // |HandleCommandsImpl| returns nullptr on error. Forward any errors
-                    // out.
-                    return nullptr;
-                }
+        if (mChunkedCommandRemainingSize == 0) {
+            // Once the chunked command is complete, pass the data to the command handler
+            // implementation.
+            auto chunkedCommandData = std::move(mChunkedCommandData);
+            if (HandleCommandsImpl(chunkedCommandData.get(), mChunkedCommandPutOffset) == nullptr) {
+                // |HandleCommandsImpl| returns nullptr on error. Forward any errors
+                // out.
+                return nullptr;
             }
         }
-
-        return HandleCommandsImpl(commands, size);
     }
 
-    ChunkedCommandHandler::ChunkedCommandsResult ChunkedCommandHandler::BeginChunkedCommandData(
-        const volatile char* commands,
-        size_t commandSize,
-        size_t initialSize) {
-        ASSERT(!mChunkedCommandData);
+    return HandleCommandsImpl(commands, size);
+}
 
-        // Reserve space for all the command data we're expecting, and copy the initial data
-        // to the start of the memory.
-        mChunkedCommandData.reset(AllocNoThrow<char>(commandSize));
-        if (!mChunkedCommandData) {
-            return ChunkedCommandsResult::Error;
-        }
+ChunkedCommandHandler::ChunkedCommandsResult ChunkedCommandHandler::BeginChunkedCommandData(
+    const volatile char* commands,
+    size_t commandSize,
+    size_t initialSize) {
+    ASSERT(!mChunkedCommandData);
 
-        memcpy(mChunkedCommandData.get(), const_cast<const char*>(commands), initialSize);
-        mChunkedCommandPutOffset = initialSize;
-        mChunkedCommandRemainingSize = commandSize - initialSize;
-
-        return ChunkedCommandsResult::Consumed;
+    // Reserve space for all the command data we're expecting, and copy the initial data
+    // to the start of the memory.
+    mChunkedCommandData.reset(AllocNoThrow<char>(commandSize));
+    if (!mChunkedCommandData) {
+        return ChunkedCommandsResult::Error;
     }
 
+    memcpy(mChunkedCommandData.get(), const_cast<const char*>(commands), initialSize);
+    mChunkedCommandPutOffset = initialSize;
+    mChunkedCommandRemainingSize = commandSize - initialSize;
+
+    return ChunkedCommandsResult::Consumed;
+}
+
 }  // namespace dawn::wire
diff --git a/src/dawn/wire/ChunkedCommandHandler.h b/src/dawn/wire/ChunkedCommandHandler.h
index 03577cd..c84c4ef 100644
--- a/src/dawn/wire/ChunkedCommandHandler.h
+++ b/src/dawn/wire/ChunkedCommandHandler.h
@@ -25,48 +25,46 @@
 
 namespace dawn::wire {
 
-    class ChunkedCommandHandler : public CommandHandler {
-      public:
-        const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
-        ~ChunkedCommandHandler() override;
+class ChunkedCommandHandler : public CommandHandler {
+  public:
+    const volatile char* HandleCommands(const volatile char* commands, size_t size) override;
+    ~ChunkedCommandHandler() override;
 
-      protected:
-        enum class ChunkedCommandsResult {
-            Passthrough,
-            Consumed,
-            Error,
-        };
-
-        // Returns |true| if the commands were entirely consumed into the chunked command vector
-        // and should be handled later once we receive all the command data.
-        // Returns |false| if commands should be handled now immediately.
-        ChunkedCommandsResult HandleChunkedCommands(const volatile char* commands, size_t size) {
-            uint64_t commandSize64 =
-                reinterpret_cast<const volatile CmdHeader*>(commands)->commandSize;
-
-            if (commandSize64 > std::numeric_limits<size_t>::max()) {
-                return ChunkedCommandsResult::Error;
-            }
-            size_t commandSize = static_cast<size_t>(commandSize64);
-            if (size < commandSize) {
-                return BeginChunkedCommandData(commands, commandSize, size);
-            }
-            return ChunkedCommandsResult::Passthrough;
-        }
-
-      private:
-        virtual const volatile char* HandleCommandsImpl(const volatile char* commands,
-                                                        size_t size) = 0;
-
-        ChunkedCommandsResult BeginChunkedCommandData(const volatile char* commands,
-                                                      size_t commandSize,
-                                                      size_t initialSize);
-
-        size_t mChunkedCommandRemainingSize = 0;
-        size_t mChunkedCommandPutOffset = 0;
-        std::unique_ptr<char[]> mChunkedCommandData;
+  protected:
+    enum class ChunkedCommandsResult {
+        Passthrough,
+        Consumed,
+        Error,
     };
 
+    // Returns |true| if the commands were entirely consumed into the chunked command vector
+    // and should be handled later once we receive all the command data.
+    // Returns |false| if commands should be handled now immediately.
+    ChunkedCommandsResult HandleChunkedCommands(const volatile char* commands, size_t size) {
+        uint64_t commandSize64 = reinterpret_cast<const volatile CmdHeader*>(commands)->commandSize;
+
+        if (commandSize64 > std::numeric_limits<size_t>::max()) {
+            return ChunkedCommandsResult::Error;
+        }
+        size_t commandSize = static_cast<size_t>(commandSize64);
+        if (size < commandSize) {
+            return BeginChunkedCommandData(commands, commandSize, size);
+        }
+        return ChunkedCommandsResult::Passthrough;
+    }
+
+  private:
+    virtual const volatile char* HandleCommandsImpl(const volatile char* commands, size_t size) = 0;
+
+    ChunkedCommandsResult BeginChunkedCommandData(const volatile char* commands,
+                                                  size_t commandSize,
+                                                  size_t initialSize);
+
+    size_t mChunkedCommandRemainingSize = 0;
+    size_t mChunkedCommandPutOffset = 0;
+    std::unique_ptr<char[]> mChunkedCommandData;
+};
+
 }  // namespace dawn::wire
 
 #endif  // SRC_DAWN_WIRE_CHUNKEDCOMMANDHANDLER_H_
diff --git a/src/dawn/wire/ChunkedCommandSerializer.cpp b/src/dawn/wire/ChunkedCommandSerializer.cpp
index b2e4a56..380fae1 100644
--- a/src/dawn/wire/ChunkedCommandSerializer.cpp
+++ b/src/dawn/wire/ChunkedCommandSerializer.cpp
@@ -16,23 +16,22 @@
 
 namespace dawn::wire {
 
-    ChunkedCommandSerializer::ChunkedCommandSerializer(CommandSerializer* serializer)
-        : mSerializer(serializer), mMaxAllocationSize(serializer->GetMaximumAllocationSize()) {
-    }
+ChunkedCommandSerializer::ChunkedCommandSerializer(CommandSerializer* serializer)
+    : mSerializer(serializer), mMaxAllocationSize(serializer->GetMaximumAllocationSize()) {}
 
-    void ChunkedCommandSerializer::SerializeChunkedCommand(const char* allocatedBuffer,
-                                                           size_t remainingSize) {
-        while (remainingSize > 0) {
-            size_t chunkSize = std::min(remainingSize, mMaxAllocationSize);
-            void* dst = mSerializer->GetCmdSpace(chunkSize);
-            if (dst == nullptr) {
-                return;
-            }
-            memcpy(dst, allocatedBuffer, chunkSize);
-
-            allocatedBuffer += chunkSize;
-            remainingSize -= chunkSize;
+void ChunkedCommandSerializer::SerializeChunkedCommand(const char* allocatedBuffer,
+                                                       size_t remainingSize) {
+    while (remainingSize > 0) {
+        size_t chunkSize = std::min(remainingSize, mMaxAllocationSize);
+        void* dst = mSerializer->GetCmdSpace(chunkSize);
+        if (dst == nullptr) {
+            return;
         }
+        memcpy(dst, allocatedBuffer, chunkSize);
+
+        allocatedBuffer += chunkSize;
+        remainingSize -= chunkSize;
     }
+}
 
 }  // namespace dawn::wire
diff --git a/src/dawn/wire/ChunkedCommandSerializer.h b/src/dawn/wire/ChunkedCommandSerializer.h
index 54d5f11..7ac72e5 100644
--- a/src/dawn/wire/ChunkedCommandSerializer.h
+++ b/src/dawn/wire/ChunkedCommandSerializer.h
@@ -27,88 +27,88 @@
 
 namespace dawn::wire {
 
-    class ChunkedCommandSerializer {
-      public:
-        explicit ChunkedCommandSerializer(CommandSerializer* serializer);
+class ChunkedCommandSerializer {
+  public:
+    explicit ChunkedCommandSerializer(CommandSerializer* serializer);
 
-        template <typename Cmd>
-        void SerializeCommand(const Cmd& cmd) {
-            SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
-        }
+    template <typename Cmd>
+    void SerializeCommand(const Cmd& cmd) {
+        SerializeCommand(cmd, 0, [](SerializeBuffer*) { return WireResult::Success; });
+    }
 
-        template <typename Cmd, typename ExtraSizeSerializeFn>
-        void SerializeCommand(const Cmd& cmd,
+    template <typename Cmd, typename ExtraSizeSerializeFn>
+    void SerializeCommand(const Cmd& cmd,
+                          size_t extraSize,
+                          ExtraSizeSerializeFn&& SerializeExtraSize) {
+        SerializeCommandImpl(
+            cmd,
+            [](const Cmd& cmd, size_t requiredSize, SerializeBuffer* serializeBuffer) {
+                return cmd.Serialize(requiredSize, serializeBuffer);
+            },
+            extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+    }
+
+    template <typename Cmd>
+    void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
+        SerializeCommand(cmd, objectIdProvider, 0,
+                         [](SerializeBuffer*) { return WireResult::Success; });
+    }
+
+    template <typename Cmd, typename ExtraSizeSerializeFn>
+    void SerializeCommand(const Cmd& cmd,
+                          const ObjectIdProvider& objectIdProvider,
+                          size_t extraSize,
+                          ExtraSizeSerializeFn&& SerializeExtraSize) {
+        SerializeCommandImpl(
+            cmd,
+            [&objectIdProvider](const Cmd& cmd, size_t requiredSize,
+                                SerializeBuffer* serializeBuffer) {
+                return cmd.Serialize(requiredSize, serializeBuffer, objectIdProvider);
+            },
+            extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
+    }
+
+  private:
+    template <typename Cmd, typename SerializeCmdFn, typename ExtraSizeSerializeFn>
+    void SerializeCommandImpl(const Cmd& cmd,
+                              SerializeCmdFn&& SerializeCmd,
                               size_t extraSize,
                               ExtraSizeSerializeFn&& SerializeExtraSize) {
-            SerializeCommandImpl(
-                cmd,
-                [](const Cmd& cmd, size_t requiredSize, SerializeBuffer* serializeBuffer) {
-                    return cmd.Serialize(requiredSize, serializeBuffer);
-                },
-                extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
-        }
+        size_t commandSize = cmd.GetRequiredSize();
+        size_t requiredSize = commandSize + extraSize;
 
-        template <typename Cmd>
-        void SerializeCommand(const Cmd& cmd, const ObjectIdProvider& objectIdProvider) {
-            SerializeCommand(cmd, objectIdProvider, 0,
-                             [](SerializeBuffer*) { return WireResult::Success; });
-        }
-
-        template <typename Cmd, typename ExtraSizeSerializeFn>
-        void SerializeCommand(const Cmd& cmd,
-                              const ObjectIdProvider& objectIdProvider,
-                              size_t extraSize,
-                              ExtraSizeSerializeFn&& SerializeExtraSize) {
-            SerializeCommandImpl(
-                cmd,
-                [&objectIdProvider](const Cmd& cmd, size_t requiredSize,
-                                    SerializeBuffer* serializeBuffer) {
-                    return cmd.Serialize(requiredSize, serializeBuffer, objectIdProvider);
-                },
-                extraSize, std::forward<ExtraSizeSerializeFn>(SerializeExtraSize));
-        }
-
-      private:
-        template <typename Cmd, typename SerializeCmdFn, typename ExtraSizeSerializeFn>
-        void SerializeCommandImpl(const Cmd& cmd,
-                                  SerializeCmdFn&& SerializeCmd,
-                                  size_t extraSize,
-                                  ExtraSizeSerializeFn&& SerializeExtraSize) {
-            size_t commandSize = cmd.GetRequiredSize();
-            size_t requiredSize = commandSize + extraSize;
-
-            if (requiredSize <= mMaxAllocationSize) {
-                char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
-                if (allocatedBuffer != nullptr) {
-                    SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
-                    WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
-                    WireResult r2 = SerializeExtraSize(&serializeBuffer);
-                    if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
-                        mSerializer->OnSerializeError();
-                    }
+        if (requiredSize <= mMaxAllocationSize) {
+            char* allocatedBuffer = static_cast<char*>(mSerializer->GetCmdSpace(requiredSize));
+            if (allocatedBuffer != nullptr) {
+                SerializeBuffer serializeBuffer(allocatedBuffer, requiredSize);
+                WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+                WireResult r2 = SerializeExtraSize(&serializeBuffer);
+                if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+                    mSerializer->OnSerializeError();
                 }
-                return;
             }
-
-            auto cmdSpace = std::unique_ptr<char[]>(AllocNoThrow<char>(requiredSize));
-            if (!cmdSpace) {
-                return;
-            }
-            SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
-            WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
-            WireResult r2 = SerializeExtraSize(&serializeBuffer);
-            if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
-                mSerializer->OnSerializeError();
-                return;
-            }
-            SerializeChunkedCommand(cmdSpace.get(), requiredSize);
+            return;
         }
 
-        void SerializeChunkedCommand(const char* allocatedBuffer, size_t remainingSize);
+        auto cmdSpace = std::unique_ptr<char[]>(AllocNoThrow<char>(requiredSize));
+        if (!cmdSpace) {
+            return;
+        }
+        SerializeBuffer serializeBuffer(cmdSpace.get(), requiredSize);
+        WireResult r1 = SerializeCmd(cmd, requiredSize, &serializeBuffer);
+        WireResult r2 = SerializeExtraSize(&serializeBuffer);
+        if (DAWN_UNLIKELY(r1 != WireResult::Success || r2 != WireResult::Success)) {
+            mSerializer->OnSerializeError();
+            return;
+        }
+        SerializeChunkedCommand(cmdSpace.get(), requiredSize);
+    }
 
-        CommandSerializer* mSerializer;
-        size_t mMaxAllocationSize;
-    };
+    void SerializeChunkedCommand(const char* allocatedBuffer, size_t remainingSize);
+
+    CommandSerializer* mSerializer;
+    size_t mMaxAllocationSize;
+};
 
 }  // namespace dawn::wire
 
diff --git a/src/dawn/wire/SupportedFeatures.cpp b/src/dawn/wire/SupportedFeatures.cpp
index 2d5a9f8..8dd6d58 100644
--- a/src/dawn/wire/SupportedFeatures.cpp
+++ b/src/dawn/wire/SupportedFeatures.cpp
@@ -16,34 +16,34 @@
 
 namespace dawn::wire {
 
-    // Note: Upon updating this list, please also update serialization/deserialization
-    // of limit structs on Adapter/Device initialization.
-    bool IsFeatureSupported(WGPUFeatureName feature) {
-        switch (feature) {
-            case WGPUFeatureName_Undefined:
-            case WGPUFeatureName_Force32:
-            case WGPUFeatureName_DawnNative:
-                return false;
-            case WGPUFeatureName_Depth24UnormStencil8:
-            case WGPUFeatureName_Depth32FloatStencil8:
-            case WGPUFeatureName_TimestampQuery:
-            case WGPUFeatureName_PipelineStatisticsQuery:
-            case WGPUFeatureName_TextureCompressionBC:
-            case WGPUFeatureName_TextureCompressionETC2:
-            case WGPUFeatureName_TextureCompressionASTC:
-            case WGPUFeatureName_IndirectFirstInstance:
-            case WGPUFeatureName_DepthClamping:
-            case WGPUFeatureName_DawnShaderFloat16:
-            case WGPUFeatureName_DawnInternalUsages:
-            case WGPUFeatureName_DawnMultiPlanarFormats:
-                return true;
-        }
-
-        // Catch-all, for unsupported features.
-        // "default:" is not used so we get compiler errors for
-        // newly added, unhandled features, but still catch completely
-        // unknown enums.
-        return false;
+// Note: Upon updating this list, please also update serialization/deserialization
+// of limit structs on Adapter/Device initialization.
+bool IsFeatureSupported(WGPUFeatureName feature) {
+    switch (feature) {
+        case WGPUFeatureName_Undefined:
+        case WGPUFeatureName_Force32:
+        case WGPUFeatureName_DawnNative:
+            return false;
+        case WGPUFeatureName_Depth24UnormStencil8:
+        case WGPUFeatureName_Depth32FloatStencil8:
+        case WGPUFeatureName_TimestampQuery:
+        case WGPUFeatureName_PipelineStatisticsQuery:
+        case WGPUFeatureName_TextureCompressionBC:
+        case WGPUFeatureName_TextureCompressionETC2:
+        case WGPUFeatureName_TextureCompressionASTC:
+        case WGPUFeatureName_IndirectFirstInstance:
+        case WGPUFeatureName_DepthClamping:
+        case WGPUFeatureName_DawnShaderFloat16:
+        case WGPUFeatureName_DawnInternalUsages:
+        case WGPUFeatureName_DawnMultiPlanarFormats:
+            return true;
     }
 
+    // Catch-all, for unsupported features.
+    // "default:" is not used so we get compiler errors for
+    // newly added, unhandled features, but still catch completely
+    // unknown enums.
+    return false;
+}
+
 }  // namespace dawn::wire
diff --git a/src/dawn/wire/SupportedFeatures.h b/src/dawn/wire/SupportedFeatures.h
index 602d291..26a82d9 100644
--- a/src/dawn/wire/SupportedFeatures.h
+++ b/src/dawn/wire/SupportedFeatures.h
@@ -19,7 +19,7 @@
 
 namespace dawn::wire {
 
-    bool IsFeatureSupported(WGPUFeatureName feature);
+bool IsFeatureSupported(WGPUFeatureName feature);
 
 }  // namespace dawn::wire
 
diff --git a/src/dawn/wire/Wire.cpp b/src/dawn/wire/Wire.cpp
index af3e6be..d752851 100644
--- a/src/dawn/wire/Wire.cpp
+++ b/src/dawn/wire/Wire.cpp
@@ -16,13 +16,12 @@
 
 namespace dawn::wire {
 
-    CommandSerializer::CommandSerializer() = default;
-    CommandSerializer::~CommandSerializer() = default;
+CommandSerializer::CommandSerializer() = default;
+CommandSerializer::~CommandSerializer() = default;
 
-    void CommandSerializer::OnSerializeError() {
-    }
+void CommandSerializer::OnSerializeError() {}
 
-    CommandHandler::CommandHandler() = default;
-    CommandHandler::~CommandHandler() = default;
+CommandHandler::CommandHandler() = default;
+CommandHandler::~CommandHandler() = default;
 
 }  // namespace dawn::wire
diff --git a/src/dawn/wire/WireClient.cpp b/src/dawn/wire/WireClient.cpp
index 0446da8..624cc03 100644
--- a/src/dawn/wire/WireClient.cpp
+++ b/src/dawn/wire/WireClient.cpp
@@ -17,66 +17,65 @@
 
 namespace dawn::wire {
 
-    WireClient::WireClient(const WireClientDescriptor& descriptor)
-        : mImpl(new client::Client(descriptor.serializer, descriptor.memoryTransferService)) {
-    }
+WireClient::WireClient(const WireClientDescriptor& descriptor)
+    : mImpl(new client::Client(descriptor.serializer, descriptor.memoryTransferService)) {}
 
-    WireClient::~WireClient() {
-        mImpl.reset();
-    }
+WireClient::~WireClient() {
+    mImpl.reset();
+}
 
-    const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
-        return mImpl->HandleCommands(commands, size);
-    }
+const volatile char* WireClient::HandleCommands(const volatile char* commands, size_t size) {
+    return mImpl->HandleCommands(commands, size);
+}
 
-    ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
-        return mImpl->ReserveTexture(device);
-    }
+ReservedTexture WireClient::ReserveTexture(WGPUDevice device) {
+    return mImpl->ReserveTexture(device);
+}
 
-    ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
-        return mImpl->ReserveSwapChain(device);
-    }
+ReservedSwapChain WireClient::ReserveSwapChain(WGPUDevice device) {
+    return mImpl->ReserveSwapChain(device);
+}
 
-    ReservedDevice WireClient::ReserveDevice() {
-        return mImpl->ReserveDevice();
-    }
+ReservedDevice WireClient::ReserveDevice() {
+    return mImpl->ReserveDevice();
+}
 
-    ReservedInstance WireClient::ReserveInstance() {
-        return mImpl->ReserveInstance();
-    }
+ReservedInstance WireClient::ReserveInstance() {
+    return mImpl->ReserveInstance();
+}
 
-    void WireClient::ReclaimTextureReservation(const ReservedTexture& reservation) {
-        mImpl->ReclaimTextureReservation(reservation);
-    }
+void WireClient::ReclaimTextureReservation(const ReservedTexture& reservation) {
+    mImpl->ReclaimTextureReservation(reservation);
+}
 
-    void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
-        mImpl->ReclaimSwapChainReservation(reservation);
-    }
+void WireClient::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+    mImpl->ReclaimSwapChainReservation(reservation);
+}
 
-    void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
-        mImpl->ReclaimDeviceReservation(reservation);
-    }
+void WireClient::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+    mImpl->ReclaimDeviceReservation(reservation);
+}
 
-    void WireClient::ReclaimInstanceReservation(const ReservedInstance& reservation) {
-        mImpl->ReclaimInstanceReservation(reservation);
-    }
+void WireClient::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+    mImpl->ReclaimInstanceReservation(reservation);
+}
 
-    void WireClient::Disconnect() {
-        mImpl->Disconnect();
-    }
+void WireClient::Disconnect() {
+    mImpl->Disconnect();
+}
 
-    namespace client {
-        MemoryTransferService::MemoryTransferService() = default;
+namespace client {
+MemoryTransferService::MemoryTransferService() = default;
 
-        MemoryTransferService::~MemoryTransferService() = default;
+MemoryTransferService::~MemoryTransferService() = default;
 
-        MemoryTransferService::ReadHandle::ReadHandle() = default;
+MemoryTransferService::ReadHandle::ReadHandle() = default;
 
-        MemoryTransferService::ReadHandle::~ReadHandle() = default;
+MemoryTransferService::ReadHandle::~ReadHandle() = default;
 
-        MemoryTransferService::WriteHandle::WriteHandle() = default;
+MemoryTransferService::WriteHandle::WriteHandle() = default;
 
-        MemoryTransferService::WriteHandle::~WriteHandle() = default;
-    }  // namespace client
+MemoryTransferService::WriteHandle::~WriteHandle() = default;
+}  // namespace client
 
 }  // namespace dawn::wire
diff --git a/src/dawn/wire/WireDeserializeAllocator.cpp b/src/dawn/wire/WireDeserializeAllocator.cpp
index e0a3432..95fb5f0 100644
--- a/src/dawn/wire/WireDeserializeAllocator.cpp
+++ b/src/dawn/wire/WireDeserializeAllocator.cpp
@@ -17,44 +17,44 @@
 #include <algorithm>
 
 namespace dawn::wire {
-    WireDeserializeAllocator::WireDeserializeAllocator() {
-        Reset();
+WireDeserializeAllocator::WireDeserializeAllocator() {
+    Reset();
+}
+
+WireDeserializeAllocator::~WireDeserializeAllocator() {
+    Reset();
+}
+
+void* WireDeserializeAllocator::GetSpace(size_t size) {
+    // Return space in the current buffer if possible first.
+    if (mRemainingSize >= size) {
+        char* buffer = mCurrentBuffer;
+        mCurrentBuffer += size;
+        mRemainingSize -= size;
+        return buffer;
     }
 
-    WireDeserializeAllocator::~WireDeserializeAllocator() {
-        Reset();
+    // Otherwise allocate a new buffer and try again.
+    size_t allocationSize = std::max(size, size_t(2048));
+    char* allocation = static_cast<char*>(malloc(allocationSize));
+    if (allocation == nullptr) {
+        return nullptr;
     }
 
-    void* WireDeserializeAllocator::GetSpace(size_t size) {
-        // Return space in the current buffer if possible first.
-        if (mRemainingSize >= size) {
-            char* buffer = mCurrentBuffer;
-            mCurrentBuffer += size;
-            mRemainingSize -= size;
-            return buffer;
-        }
+    mAllocations.push_back(allocation);
+    mCurrentBuffer = allocation;
+    mRemainingSize = allocationSize;
+    return GetSpace(size);
+}
 
-        // Otherwise allocate a new buffer and try again.
-        size_t allocationSize = std::max(size, size_t(2048));
-        char* allocation = static_cast<char*>(malloc(allocationSize));
-        if (allocation == nullptr) {
-            return nullptr;
-        }
-
-        mAllocations.push_back(allocation);
-        mCurrentBuffer = allocation;
-        mRemainingSize = allocationSize;
-        return GetSpace(size);
+void WireDeserializeAllocator::Reset() {
+    for (auto allocation : mAllocations) {
+        free(allocation);
     }
+    mAllocations.clear();
 
-    void WireDeserializeAllocator::Reset() {
-        for (auto allocation : mAllocations) {
-            free(allocation);
-        }
-        mAllocations.clear();
-
-        // The initial buffer is the inline buffer so that some allocations can be skipped
-        mCurrentBuffer = mStaticBuffer;
-        mRemainingSize = sizeof(mStaticBuffer);
-    }
+    // The initial buffer is the inline buffer so that some allocations can be skipped
+    mCurrentBuffer = mStaticBuffer;
+    mRemainingSize = sizeof(mStaticBuffer);
+}
 }  // namespace dawn::wire
diff --git a/src/dawn/wire/WireDeserializeAllocator.h b/src/dawn/wire/WireDeserializeAllocator.h
index eb24fd3..6293624 100644
--- a/src/dawn/wire/WireDeserializeAllocator.h
+++ b/src/dawn/wire/WireDeserializeAllocator.h
@@ -20,24 +20,24 @@
 #include "dawn/wire/WireCmd_autogen.h"
 
 namespace dawn::wire {
-    // A really really simple implementation of the DeserializeAllocator. It's main feature
-    // is that it has some inline storage so as to avoid allocations for the majority of
-    // commands.
-    class WireDeserializeAllocator : public DeserializeAllocator {
-      public:
-        WireDeserializeAllocator();
-        virtual ~WireDeserializeAllocator();
+// A really really simple implementation of the DeserializeAllocator. Its main feature
+// is that it has some inline storage so as to avoid allocations for the majority of
+// commands.
+class WireDeserializeAllocator : public DeserializeAllocator {
+  public:
+    WireDeserializeAllocator();
+    virtual ~WireDeserializeAllocator();
 
-        void* GetSpace(size_t size) override;
+    void* GetSpace(size_t size) override;
 
-        void Reset();
+    void Reset();
 
-      private:
-        size_t mRemainingSize = 0;
-        char* mCurrentBuffer = nullptr;
-        char mStaticBuffer[2048];
-        std::vector<char*> mAllocations;
-    };
+  private:
+    size_t mRemainingSize = 0;
+    char* mCurrentBuffer = nullptr;
+    char mStaticBuffer[2048];
+    std::vector<char*> mAllocations;
+};
 }  // namespace dawn::wire
 
 #endif  // SRC_DAWN_WIRE_WIREDESERIALIZEALLOCATOR_H_
diff --git a/src/dawn/wire/WireResult.h b/src/dawn/wire/WireResult.h
index aeb1fdf..41f8a56 100644
--- a/src/dawn/wire/WireResult.h
+++ b/src/dawn/wire/WireResult.h
@@ -19,10 +19,10 @@
 
 namespace dawn::wire {
 
-    enum class [[nodiscard]] WireResult{
-        Success,
-        FatalError,
-    };
+enum class [[nodiscard]] WireResult{
+    Success,
+    FatalError,
+};
 
 // Macro to simplify error handling, similar to DAWN_TRY but for WireResult.
 #define WIRE_TRY(EXPR)                                          \
diff --git a/src/dawn/wire/WireServer.cpp b/src/dawn/wire/WireServer.cpp
index bf9b0a1..c806fe0 100644
--- a/src/dawn/wire/WireServer.cpp
+++ b/src/dawn/wire/WireServer.cpp
@@ -17,67 +17,66 @@
 
 namespace dawn::wire {
 
-    WireServer::WireServer(const WireServerDescriptor& descriptor)
-        : mImpl(new server::Server(*descriptor.procs,
-                                   descriptor.serializer,
-                                   descriptor.memoryTransferService)) {
-    }
+WireServer::WireServer(const WireServerDescriptor& descriptor)
+    : mImpl(new server::Server(*descriptor.procs,
+                               descriptor.serializer,
+                               descriptor.memoryTransferService)) {}
 
-    WireServer::~WireServer() {
-        mImpl.reset();
-    }
+WireServer::~WireServer() {
+    mImpl.reset();
+}
 
-    const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
-        return mImpl->HandleCommands(commands, size);
-    }
+const volatile char* WireServer::HandleCommands(const volatile char* commands, size_t size) {
+    return mImpl->HandleCommands(commands, size);
+}
 
-    bool WireServer::InjectTexture(WGPUTexture texture,
-                                   uint32_t id,
-                                   uint32_t generation,
-                                   uint32_t deviceId,
-                                   uint32_t deviceGeneration) {
-        return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
-    }
+bool WireServer::InjectTexture(WGPUTexture texture,
+                               uint32_t id,
+                               uint32_t generation,
+                               uint32_t deviceId,
+                               uint32_t deviceGeneration) {
+    return mImpl->InjectTexture(texture, id, generation, deviceId, deviceGeneration);
+}
 
-    bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
-                                     uint32_t id,
-                                     uint32_t generation,
-                                     uint32_t deviceId,
-                                     uint32_t deviceGeneration) {
-        return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
-    }
+bool WireServer::InjectSwapChain(WGPUSwapChain swapchain,
+                                 uint32_t id,
+                                 uint32_t generation,
+                                 uint32_t deviceId,
+                                 uint32_t deviceGeneration) {
+    return mImpl->InjectSwapChain(swapchain, id, generation, deviceId, deviceGeneration);
+}
 
-    bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
-        return mImpl->InjectDevice(device, id, generation);
-    }
+bool WireServer::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+    return mImpl->InjectDevice(device, id, generation);
+}
 
-    bool WireServer::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
-        return mImpl->InjectInstance(instance, id, generation);
-    }
+bool WireServer::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+    return mImpl->InjectInstance(instance, id, generation);
+}
 
-    WGPUDevice WireServer::GetDevice(uint32_t id, uint32_t generation) {
-        return mImpl->GetDevice(id, generation);
-    }
+WGPUDevice WireServer::GetDevice(uint32_t id, uint32_t generation) {
+    return mImpl->GetDevice(id, generation);
+}
 
-    namespace server {
-        MemoryTransferService::MemoryTransferService() = default;
+namespace server {
+MemoryTransferService::MemoryTransferService() = default;
 
-        MemoryTransferService::~MemoryTransferService() = default;
+MemoryTransferService::~MemoryTransferService() = default;
 
-        MemoryTransferService::ReadHandle::ReadHandle() = default;
+MemoryTransferService::ReadHandle::ReadHandle() = default;
 
-        MemoryTransferService::ReadHandle::~ReadHandle() = default;
+MemoryTransferService::ReadHandle::~ReadHandle() = default;
 
-        MemoryTransferService::WriteHandle::WriteHandle() = default;
+MemoryTransferService::WriteHandle::WriteHandle() = default;
 
-        MemoryTransferService::WriteHandle::~WriteHandle() = default;
+MemoryTransferService::WriteHandle::~WriteHandle() = default;
 
-        void MemoryTransferService::WriteHandle::SetTarget(void* data) {
-            mTargetData = data;
-        }
-        void MemoryTransferService::WriteHandle::SetDataLength(size_t dataLength) {
-            mDataLength = dataLength;
-        }
-    }  // namespace server
+void MemoryTransferService::WriteHandle::SetTarget(void* data) {
+    mTargetData = data;
+}
+void MemoryTransferService::WriteHandle::SetDataLength(size_t dataLength) {
+    mDataLength = dataLength;
+}
+}  // namespace server
 
 }  // namespace dawn::wire
diff --git a/src/dawn/wire/client/Adapter.cpp b/src/dawn/wire/client/Adapter.cpp
index b2dcc87..3f55044 100644
--- a/src/dawn/wire/client/Adapter.cpp
+++ b/src/dawn/wire/client/Adapter.cpp
@@ -19,115 +19,115 @@
 
 namespace dawn::wire::client {
 
-    Adapter::~Adapter() {
-        mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
-            request->callback(WGPURequestDeviceStatus_Unknown, nullptr,
-                              "Adapter destroyed before callback", request->userdata);
-        });
+Adapter::~Adapter() {
+    mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+        request->callback(WGPURequestDeviceStatus_Unknown, nullptr,
+                          "Adapter destroyed before callback", request->userdata);
+    });
+}
+
+void Adapter::CancelCallbacksForDisconnect() {
+    mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
+        request->callback(WGPURequestDeviceStatus_Unknown, nullptr, "GPU connection lost",
+                          request->userdata);
+    });
+}
+
+bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
+    return mLimitsAndFeatures.GetLimits(limits);
+}
+
+bool Adapter::HasFeature(WGPUFeatureName feature) const {
+    return mLimitsAndFeatures.HasFeature(feature);
+}
+
+size_t Adapter::EnumerateFeatures(WGPUFeatureName* features) const {
+    return mLimitsAndFeatures.EnumerateFeatures(features);
+}
+
+void Adapter::SetLimits(const WGPUSupportedLimits* limits) {
+    return mLimitsAndFeatures.SetLimits(limits);
+}
+
+void Adapter::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+    return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+}
+
+void Adapter::SetProperties(const WGPUAdapterProperties* properties) {
+    mProperties = *properties;
+    mProperties.nextInChain = nullptr;
+}
+
+void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
+    *properties = mProperties;
+}
+
+void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                            WGPURequestDeviceCallback callback,
+                            void* userdata) {
+    if (client->IsDisconnected()) {
+        callback(WGPURequestDeviceStatus_Error, nullptr, "GPU connection lost", userdata);
+        return;
     }
 
-    void Adapter::CancelCallbacksForDisconnect() {
-        mRequestDeviceRequests.CloseAll([](RequestDeviceData* request) {
-            request->callback(WGPURequestDeviceStatus_Unknown, nullptr, "GPU connection lost",
-                              request->userdata);
-        });
+    auto* allocation = client->DeviceAllocator().New(client);
+    uint64_t serial = mRequestDeviceRequests.Add({callback, allocation->object->id, userdata});
+
+    AdapterRequestDeviceCmd cmd;
+    cmd.adapterId = this->id;
+    cmd.requestSerial = serial;
+    cmd.deviceObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+    cmd.descriptor = descriptor;
+
+    client->SerializeCommand(cmd);
+}
+
+bool Client::DoAdapterRequestDeviceCallback(Adapter* adapter,
+                                            uint64_t requestSerial,
+                                            WGPURequestDeviceStatus status,
+                                            const char* message,
+                                            const WGPUSupportedLimits* limits,
+                                            uint32_t featuresCount,
+                                            const WGPUFeatureName* features) {
+    // May have been deleted or recreated so this isn't an error.
+    if (adapter == nullptr) {
+        return true;
+    }
+    return adapter->OnRequestDeviceCallback(requestSerial, status, message, limits, featuresCount,
+                                            features);
+}
+
+bool Adapter::OnRequestDeviceCallback(uint64_t requestSerial,
+                                      WGPURequestDeviceStatus status,
+                                      const char* message,
+                                      const WGPUSupportedLimits* limits,
+                                      uint32_t featuresCount,
+                                      const WGPUFeatureName* features) {
+    RequestDeviceData request;
+    if (!mRequestDeviceRequests.Acquire(requestSerial, &request)) {
+        return false;
     }
 
-    bool Adapter::GetLimits(WGPUSupportedLimits* limits) const {
-        return mLimitsAndFeatures.GetLimits(limits);
-    }
+    Device* device = client->DeviceAllocator().GetObject(request.deviceObjectId);
 
-    bool Adapter::HasFeature(WGPUFeatureName feature) const {
-        return mLimitsAndFeatures.HasFeature(feature);
-    }
-
-    size_t Adapter::EnumerateFeatures(WGPUFeatureName* features) const {
-        return mLimitsAndFeatures.EnumerateFeatures(features);
-    }
-
-    void Adapter::SetLimits(const WGPUSupportedLimits* limits) {
-        return mLimitsAndFeatures.SetLimits(limits);
-    }
-
-    void Adapter::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
-        return mLimitsAndFeatures.SetFeatures(features, featuresCount);
-    }
-
-    void Adapter::SetProperties(const WGPUAdapterProperties* properties) {
-        mProperties = *properties;
-        mProperties.nextInChain = nullptr;
-    }
-
-    void Adapter::GetProperties(WGPUAdapterProperties* properties) const {
-        *properties = mProperties;
-    }
-
-    void Adapter::RequestDevice(const WGPUDeviceDescriptor* descriptor,
-                                WGPURequestDeviceCallback callback,
-                                void* userdata) {
-        if (client->IsDisconnected()) {
-            callback(WGPURequestDeviceStatus_Error, nullptr, "GPU connection lost", userdata);
-            return;
-        }
-
-        auto* allocation = client->DeviceAllocator().New(client);
-        uint64_t serial = mRequestDeviceRequests.Add({callback, allocation->object->id, userdata});
-
-        AdapterRequestDeviceCmd cmd;
-        cmd.adapterId = this->id;
-        cmd.requestSerial = serial;
-        cmd.deviceObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
-        cmd.descriptor = descriptor;
-
-        client->SerializeCommand(cmd);
-    }
-
-    bool Client::DoAdapterRequestDeviceCallback(Adapter* adapter,
-                                                uint64_t requestSerial,
-                                                WGPURequestDeviceStatus status,
-                                                const char* message,
-                                                const WGPUSupportedLimits* limits,
-                                                uint32_t featuresCount,
-                                                const WGPUFeatureName* features) {
-        // May have been deleted or recreated so this isn't an error.
-        if (adapter == nullptr) {
-            return true;
-        }
-        return adapter->OnRequestDeviceCallback(requestSerial, status, message, limits,
-                                                featuresCount, features);
-    }
-
-    bool Adapter::OnRequestDeviceCallback(uint64_t requestSerial,
-                                          WGPURequestDeviceStatus status,
-                                          const char* message,
-                                          const WGPUSupportedLimits* limits,
-                                          uint32_t featuresCount,
-                                          const WGPUFeatureName* features) {
-        RequestDeviceData request;
-        if (!mRequestDeviceRequests.Acquire(requestSerial, &request)) {
-            return false;
-        }
-
-        Device* device = client->DeviceAllocator().GetObject(request.deviceObjectId);
-
-        // If the return status is a failure we should give a null device to the callback and
-        // free the allocation.
-        if (status != WGPURequestDeviceStatus_Success) {
-            client->DeviceAllocator().Free(device);
-            request.callback(status, nullptr, message, request.userdata);
-            return true;
-        }
-
-        device->SetLimits(limits);
-        device->SetFeatures(features, featuresCount);
-
-        request.callback(status, ToAPI(device), message, request.userdata);
+    // If the return status is a failure we should give a null device to the callback and
+    // free the allocation.
+    if (status != WGPURequestDeviceStatus_Success) {
+        client->DeviceAllocator().Free(device);
+        request.callback(status, nullptr, message, request.userdata);
         return true;
     }
 
-    WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor*) {
-        dawn::ErrorLog() << "adapter.CreateDevice not supported with dawn_wire.";
-        return nullptr;
-    }
+    device->SetLimits(limits);
+    device->SetFeatures(features, featuresCount);
+
+    request.callback(status, ToAPI(device), message, request.userdata);
+    return true;
+}
+
+WGPUDevice Adapter::CreateDevice(const WGPUDeviceDescriptor*) {
+    dawn::ErrorLog() << "adapter.CreateDevice not supported with dawn_wire.";
+    return nullptr;
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Adapter.h b/src/dawn/wire/client/Adapter.h
index 615e88a..d2ae706 100644
--- a/src/dawn/wire/client/Adapter.h
+++ b/src/dawn/wire/client/Adapter.h
@@ -25,45 +25,45 @@
 
 namespace dawn::wire::client {
 
-    class Adapter final : public ObjectBase {
-      public:
-        using ObjectBase::ObjectBase;
+class Adapter final : public ObjectBase {
+  public:
+    using ObjectBase::ObjectBase;
 
-        ~Adapter();
-        void CancelCallbacksForDisconnect() override;
+    ~Adapter();
+    void CancelCallbacksForDisconnect() override;
 
-        bool GetLimits(WGPUSupportedLimits* limits) const;
-        bool HasFeature(WGPUFeatureName feature) const;
-        size_t EnumerateFeatures(WGPUFeatureName* features) const;
-        void SetLimits(const WGPUSupportedLimits* limits);
-        void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
-        void SetProperties(const WGPUAdapterProperties* properties);
-        void GetProperties(WGPUAdapterProperties* properties) const;
-        void RequestDevice(const WGPUDeviceDescriptor* descriptor,
-                           WGPURequestDeviceCallback callback,
-                           void* userdata);
+    bool GetLimits(WGPUSupportedLimits* limits) const;
+    bool HasFeature(WGPUFeatureName feature) const;
+    size_t EnumerateFeatures(WGPUFeatureName* features) const;
+    void SetLimits(const WGPUSupportedLimits* limits);
+    void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+    void SetProperties(const WGPUAdapterProperties* properties);
+    void GetProperties(WGPUAdapterProperties* properties) const;
+    void RequestDevice(const WGPUDeviceDescriptor* descriptor,
+                       WGPURequestDeviceCallback callback,
+                       void* userdata);
 
-        bool OnRequestDeviceCallback(uint64_t requestSerial,
-                                     WGPURequestDeviceStatus status,
-                                     const char* message,
-                                     const WGPUSupportedLimits* limits,
-                                     uint32_t featuresCount,
-                                     const WGPUFeatureName* features);
+    bool OnRequestDeviceCallback(uint64_t requestSerial,
+                                 WGPURequestDeviceStatus status,
+                                 const char* message,
+                                 const WGPUSupportedLimits* limits,
+                                 uint32_t featuresCount,
+                                 const WGPUFeatureName* features);
 
-        // Unimplementable. Only availale in dawn_native.
-        WGPUDevice CreateDevice(const WGPUDeviceDescriptor*);
+    // Unimplementable. Only available in dawn_native.
+    WGPUDevice CreateDevice(const WGPUDeviceDescriptor*);
 
-      private:
-        LimitsAndFeatures mLimitsAndFeatures;
-        WGPUAdapterProperties mProperties;
+  private:
+    LimitsAndFeatures mLimitsAndFeatures;
+    WGPUAdapterProperties mProperties;
 
-        struct RequestDeviceData {
-            WGPURequestDeviceCallback callback = nullptr;
-            ObjectId deviceObjectId;
-            void* userdata = nullptr;
-        };
-        RequestTracker<RequestDeviceData> mRequestDeviceRequests;
+    struct RequestDeviceData {
+        WGPURequestDeviceCallback callback = nullptr;
+        ObjectId deviceObjectId;
+        void* userdata = nullptr;
     };
+    RequestTracker<RequestDeviceData> mRequestDeviceRequests;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/Buffer.cpp b/src/dawn/wire/client/Buffer.cpp
index e7033c1..9760e3d 100644
--- a/src/dawn/wire/client/Buffer.cpp
+++ b/src/dawn/wire/client/Buffer.cpp
@@ -24,388 +24,383 @@
 
 namespace dawn::wire::client {
 
-    // static
-    WGPUBuffer Buffer::Create(Device* device, const WGPUBufferDescriptor* descriptor) {
-        Client* wireClient = device->client;
+// static
+WGPUBuffer Buffer::Create(Device* device, const WGPUBufferDescriptor* descriptor) {
+    Client* wireClient = device->client;
 
-        bool mappable =
-            (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
-            descriptor->mappedAtCreation;
-        if (mappable && descriptor->size >= std::numeric_limits<size_t>::max()) {
-            device->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
-            return device->CreateErrorBuffer();
+    bool mappable =
+        (descriptor->usage & (WGPUBufferUsage_MapRead | WGPUBufferUsage_MapWrite)) != 0 ||
+        descriptor->mappedAtCreation;
+    if (mappable && descriptor->size >= std::numeric_limits<size_t>::max()) {
+        device->InjectError(WGPUErrorType_OutOfMemory, "Buffer is too large for map usage");
+        return device->CreateErrorBuffer();
+    }
+
+    std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
+    std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
+
+    DeviceCreateBufferCmd cmd;
+    cmd.deviceId = device->id;
+    cmd.descriptor = descriptor;
+    cmd.readHandleCreateInfoLength = 0;
+    cmd.readHandleCreateInfo = nullptr;
+    cmd.writeHandleCreateInfoLength = 0;
+    cmd.writeHandleCreateInfo = nullptr;
+
+    if (mappable) {
+        if ((descriptor->usage & WGPUBufferUsage_MapRead) != 0) {
+            // Create the read handle on buffer creation.
+            readHandle.reset(
+                wireClient->GetMemoryTransferService()->CreateReadHandle(descriptor->size));
+            if (readHandle == nullptr) {
+                device->InjectError(WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+                return device->CreateErrorBuffer();
+            }
+            cmd.readHandleCreateInfoLength = readHandle->SerializeCreateSize();
         }
 
-        std::unique_ptr<MemoryTransferService::ReadHandle> readHandle = nullptr;
-        std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle = nullptr;
+        if ((descriptor->usage & WGPUBufferUsage_MapWrite) != 0 || descriptor->mappedAtCreation) {
+            // Create the write handle on buffer creation.
+            writeHandle.reset(
+                wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
+            if (writeHandle == nullptr) {
+                device->InjectError(WGPUErrorType_OutOfMemory, "Failed to create buffer mapping");
+                return device->CreateErrorBuffer();
+            }
+            cmd.writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
+        }
+    }
 
-        DeviceCreateBufferCmd cmd;
-        cmd.deviceId = device->id;
-        cmd.descriptor = descriptor;
-        cmd.readHandleCreateInfoLength = 0;
-        cmd.readHandleCreateInfo = nullptr;
-        cmd.writeHandleCreateInfoLength = 0;
-        cmd.writeHandleCreateInfo = nullptr;
+    // Create the buffer and send the creation command.
+    // This must happen after any potential device->CreateErrorBuffer()
+    // as server expects allocating ids to be monotonically increasing
+    auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(wireClient);
+    Buffer* buffer = bufferObjectAndSerial->object.get();
+    buffer->mDevice = device;
+    buffer->mDeviceIsAlive = device->GetAliveWeakPtr();
+    buffer->mSize = descriptor->size;
+    buffer->mDestructWriteHandleOnUnmap = false;
 
-        if (mappable) {
-            if ((descriptor->usage & WGPUBufferUsage_MapRead) != 0) {
-                // Create the read handle on buffer creation.
-                readHandle.reset(
-                    wireClient->GetMemoryTransferService()->CreateReadHandle(descriptor->size));
-                if (readHandle == nullptr) {
-                    device->InjectError(WGPUErrorType_OutOfMemory,
-                                        "Failed to create buffer mapping");
-                    return device->CreateErrorBuffer();
-                }
-                cmd.readHandleCreateInfoLength = readHandle->SerializeCreateSize();
+    if (descriptor->mappedAtCreation) {
+        // If the buffer is mapped at creation, a write handle is created and will be
+        // destructed on unmap if the buffer doesn't have MapWrite usage
+        // The buffer is mapped right now.
+        buffer->mMapState = MapState::MappedAtCreation;
+
+        // This flag is for write handle created by mappedAtCreation
+        // instead of MapWrite usage. We don't have such a case for read handle
+        buffer->mDestructWriteHandleOnUnmap = (descriptor->usage & WGPUBufferUsage_MapWrite) == 0;
+
+        buffer->mMapOffset = 0;
+        buffer->mMapSize = buffer->mSize;
+        ASSERT(writeHandle != nullptr);
+        buffer->mMappedData = writeHandle->GetData();
+    }
+
+    cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
+
+    wireClient->SerializeCommand(
+        cmd, cmd.readHandleCreateInfoLength + cmd.writeHandleCreateInfoLength,
+        [&](SerializeBuffer* serializeBuffer) {
+            if (readHandle != nullptr) {
+                char* readHandleBuffer;
+                WIRE_TRY(serializeBuffer->NextN(cmd.readHandleCreateInfoLength, &readHandleBuffer));
+                // Serialize the ReadHandle into the space after the command.
+                readHandle->SerializeCreate(readHandleBuffer);
+                buffer->mReadHandle = std::move(readHandle);
+            }
+            if (writeHandle != nullptr) {
+                char* writeHandleBuffer;
+                WIRE_TRY(
+                    serializeBuffer->NextN(cmd.writeHandleCreateInfoLength, &writeHandleBuffer));
+                // Serialize the WriteHandle into the space after the command.
+                writeHandle->SerializeCreate(writeHandleBuffer);
+                buffer->mWriteHandle = std::move(writeHandle);
             }
 
-            if ((descriptor->usage & WGPUBufferUsage_MapWrite) != 0 ||
-                descriptor->mappedAtCreation) {
-                // Create the write handle on buffer creation.
-                writeHandle.reset(
-                    wireClient->GetMemoryTransferService()->CreateWriteHandle(descriptor->size));
-                if (writeHandle == nullptr) {
-                    device->InjectError(WGPUErrorType_OutOfMemory,
-                                        "Failed to create buffer mapping");
-                    return device->CreateErrorBuffer();
+            return WireResult::Success;
+        });
+    return ToAPI(buffer);
+}
+
+// static
+WGPUBuffer Buffer::CreateError(Device* device) {
+    auto* allocation = device->client->BufferAllocator().New(device->client);
+    allocation->object->mDevice = device;
+    allocation->object->mDeviceIsAlive = device->GetAliveWeakPtr();
+
+    DeviceCreateErrorBufferCmd cmd;
+    cmd.self = ToAPI(device);
+    cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
+    device->client->SerializeCommand(cmd);
+
+    return ToAPI(allocation->object.get());
+}
+
+Buffer::~Buffer() {
+    ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
+    FreeMappedData();
+}
+
+void Buffer::CancelCallbacksForDisconnect() {
+    ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
+}
+
+void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
+    mRequests.CloseAll([status](MapRequestData* request) {
+        if (request->callback != nullptr) {
+            request->callback(status, request->userdata);
+        }
+    });
+}
+
+void Buffer::MapAsync(WGPUMapModeFlags mode,
+                      size_t offset,
+                      size_t size,
+                      WGPUBufferMapCallback callback,
+                      void* userdata) {
+    if (client->IsDisconnected()) {
+        return callback(WGPUBufferMapAsyncStatus_DeviceLost, userdata);
+    }
+
+    // Handle the defaulting of size required by WebGPU.
+    if ((size == WGPU_WHOLE_MAP_SIZE) && (offset <= mSize)) {
+        size = mSize - offset;
+    }
+
+    // Create the request structure that will hold information while this mapping is
+    // in flight.
+    MapRequestData request = {};
+    request.callback = callback;
+    request.userdata = userdata;
+    request.offset = offset;
+    request.size = size;
+    if (mode & WGPUMapMode_Read) {
+        request.type = MapRequestType::Read;
+    } else if (mode & WGPUMapMode_Write) {
+        request.type = MapRequestType::Write;
+    }
+
+    uint64_t serial = mRequests.Add(std::move(request));
+
+    // Serialize the command to send to the server.
+    BufferMapAsyncCmd cmd;
+    cmd.bufferId = this->id;
+    cmd.requestSerial = serial;
+    cmd.mode = mode;
+    cmd.offset = offset;
+    cmd.size = size;
+
+    client->SerializeCommand(cmd);
+}
+
+bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
+                                uint32_t status,
+                                uint64_t readDataUpdateInfoLength,
+                                const uint8_t* readDataUpdateInfo) {
+    MapRequestData request;
+    if (!mRequests.Acquire(requestSerial, &request)) {
+        return false;
+    }
+
+    auto FailRequest = [&request]() -> bool {
+        if (request.callback != nullptr) {
+            request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
+        }
+        return false;
+    };
+
+    // Take into account the client-side status of the request if the server says it is a
+    // success.
+    if (status == WGPUBufferMapAsyncStatus_Success) {
+        status = request.clientStatus;
+    }
+
+    if (status == WGPUBufferMapAsyncStatus_Success) {
+        switch (request.type) {
+            case MapRequestType::Read: {
+                if (readDataUpdateInfoLength > std::numeric_limits<size_t>::max()) {
+                    // This is the size of data deserialized from the command stream, which must
+                    // be CPU-addressable.
+                    return FailRequest();
                 }
-                cmd.writeHandleCreateInfoLength = writeHandle->SerializeCreateSize();
+
+                // Validate to prevent bad map request; buffer destroyed during map request
+                if (mReadHandle == nullptr) {
+                    return FailRequest();
+                }
+                // Update user map data with server returned data
+                if (!mReadHandle->DeserializeDataUpdate(
+                        readDataUpdateInfo, static_cast<size_t>(readDataUpdateInfoLength),
+                        request.offset, request.size)) {
+                    return FailRequest();
+                }
+                mMapState = MapState::MappedForRead;
+                mMappedData = const_cast<void*>(mReadHandle->GetData());
+                break;
             }
+            case MapRequestType::Write: {
+                if (mWriteHandle == nullptr) {
+                    return FailRequest();
+                }
+                mMapState = MapState::MappedForWrite;
+                mMappedData = mWriteHandle->GetData();
+                break;
+            }
+            default:
+                UNREACHABLE();
         }
 
-        // Create the buffer and send the creation command.
-        // This must happen after any potential device->CreateErrorBuffer()
-        // as server expects allocating ids to be monotonically increasing
-        auto* bufferObjectAndSerial = wireClient->BufferAllocator().New(wireClient);
-        Buffer* buffer = bufferObjectAndSerial->object.get();
-        buffer->mDevice = device;
-        buffer->mDeviceIsAlive = device->GetAliveWeakPtr();
-        buffer->mSize = descriptor->size;
-        buffer->mDestructWriteHandleOnUnmap = false;
+        mMapOffset = request.offset;
+        mMapSize = request.size;
+    }
 
-        if (descriptor->mappedAtCreation) {
-            // If the buffer is mapped at creation, a write handle is created and will be
-            // destructed on unmap if the buffer doesn't have MapWrite usage
-            // The buffer is mapped right now.
-            buffer->mMapState = MapState::MappedAtCreation;
+    if (request.callback) {
+        request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
+    }
 
-            // This flag is for write handle created by mappedAtCreation
-            // instead of MapWrite usage. We don't have such a case for read handle
-            buffer->mDestructWriteHandleOnUnmap =
-                (descriptor->usage & WGPUBufferUsage_MapWrite) == 0;
+    return true;
+}
 
-            buffer->mMapOffset = 0;
-            buffer->mMapSize = buffer->mSize;
-            ASSERT(writeHandle != nullptr);
-            buffer->mMappedData = writeHandle->GetData();
-        }
+void* Buffer::GetMappedRange(size_t offset, size_t size) {
+    if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
+        return nullptr;
+    }
+    return static_cast<uint8_t*>(mMappedData) + offset;
+}
 
-        cmd.result = ObjectHandle{buffer->id, bufferObjectAndSerial->generation};
+const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
+    if (!(IsMappedForWriting() || IsMappedForReading()) ||
+        !CheckGetMappedRangeOffsetSize(offset, size)) {
+        return nullptr;
+    }
+    return static_cast<uint8_t*>(mMappedData) + offset;
+}
 
-        wireClient->SerializeCommand(
-            cmd, cmd.readHandleCreateInfoLength + cmd.writeHandleCreateInfoLength,
-            [&](SerializeBuffer* serializeBuffer) {
-                if (readHandle != nullptr) {
-                    char* readHandleBuffer;
-                    WIRE_TRY(
-                        serializeBuffer->NextN(cmd.readHandleCreateInfoLength, &readHandleBuffer));
-                    // Serialize the ReadHandle into the space after the command.
-                    readHandle->SerializeCreate(readHandleBuffer);
-                    buffer->mReadHandle = std::move(readHandle);
-                }
-                if (writeHandle != nullptr) {
-                    char* writeHandleBuffer;
-                    WIRE_TRY(serializeBuffer->NextN(cmd.writeHandleCreateInfoLength,
-                                                    &writeHandleBuffer));
-                    // Serialize the WriteHandle into the space after the command.
-                    writeHandle->SerializeCreate(writeHandleBuffer);
-                    buffer->mWriteHandle = std::move(writeHandle);
-                }
+void Buffer::Unmap() {
+    // Invalidate the local pointer, and cancel all other in-flight requests that would
+    // turn into errors anyway (you can't double map). This prevents a race when the following
+    // happens, where the application code would have unmapped a buffer but still receive a
+    // callback:
+    //   - Client -> Server: MapRequest1, Unmap, MapRequest2
+    //   - Server -> Client: Result of MapRequest1
+    //   - Unmap locally on the client
+    //   - Server -> Client: Result of MapRequest2
+
+    // mWriteHandle can still be nullptr if buffer has been destroyed before unmap
+    if ((mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation) &&
+        mWriteHandle != nullptr) {
+        // Writes need to be flushed before Unmap is sent. Unmap calls all associated
+        // in-flight callbacks which may read the updated data.
+
+        // Get the serialization size of data update writes.
+        size_t writeDataUpdateInfoLength =
+            mWriteHandle->SizeOfSerializeDataUpdate(mMapOffset, mMapSize);
+
+        BufferUpdateMappedDataCmd cmd;
+        cmd.bufferId = id;
+        cmd.writeDataUpdateInfoLength = writeDataUpdateInfoLength;
+        cmd.writeDataUpdateInfo = nullptr;
+        cmd.offset = mMapOffset;
+        cmd.size = mMapSize;
+
+        client->SerializeCommand(
+            cmd, writeDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+                char* writeHandleBuffer;
+                WIRE_TRY(serializeBuffer->NextN(writeDataUpdateInfoLength, &writeHandleBuffer));
+
+                // Serialize flush metadata into the space after the command.
+                // This closes the handle for writing.
+                mWriteHandle->SerializeDataUpdate(writeHandleBuffer, cmd.offset, cmd.size);
 
                 return WireResult::Success;
             });
-        return ToAPI(buffer);
-    }
 
-    // static
-    WGPUBuffer Buffer::CreateError(Device* device) {
-        auto* allocation = device->client->BufferAllocator().New(device->client);
-        allocation->object->mDevice = device;
-        allocation->object->mDeviceIsAlive = device->GetAliveWeakPtr();
-
-        DeviceCreateErrorBufferCmd cmd;
-        cmd.self = ToAPI(device);
-        cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
-        device->client->SerializeCommand(cmd);
-
-        return ToAPI(allocation->object.get());
-    }
-
-    Buffer::~Buffer() {
-        ClearAllCallbacks(WGPUBufferMapAsyncStatus_DestroyedBeforeCallback);
-        FreeMappedData();
-    }
-
-    void Buffer::CancelCallbacksForDisconnect() {
-        ClearAllCallbacks(WGPUBufferMapAsyncStatus_DeviceLost);
-    }
-
-    void Buffer::ClearAllCallbacks(WGPUBufferMapAsyncStatus status) {
-        mRequests.CloseAll([status](MapRequestData* request) {
-            if (request->callback != nullptr) {
-                request->callback(status, request->userdata);
-            }
-        });
-    }
-
-    void Buffer::MapAsync(WGPUMapModeFlags mode,
-                          size_t offset,
-                          size_t size,
-                          WGPUBufferMapCallback callback,
-                          void* userdata) {
-        if (client->IsDisconnected()) {
-            return callback(WGPUBufferMapAsyncStatus_DeviceLost, userdata);
-        }
-
-        // Handle the defaulting of size required by WebGPU.
-        if ((size == WGPU_WHOLE_MAP_SIZE) && (offset <= mSize)) {
-            size = mSize - offset;
-        }
-
-        // Create the request structure that will hold information while this mapping is
-        // in flight.
-        MapRequestData request = {};
-        request.callback = callback;
-        request.userdata = userdata;
-        request.offset = offset;
-        request.size = size;
-        if (mode & WGPUMapMode_Read) {
-            request.type = MapRequestType::Read;
-        } else if (mode & WGPUMapMode_Write) {
-            request.type = MapRequestType::Write;
-        }
-
-        uint64_t serial = mRequests.Add(std::move(request));
-
-        // Serialize the command to send to the server.
-        BufferMapAsyncCmd cmd;
-        cmd.bufferId = this->id;
-        cmd.requestSerial = serial;
-        cmd.mode = mode;
-        cmd.offset = offset;
-        cmd.size = size;
-
-        client->SerializeCommand(cmd);
-    }
-
-    bool Buffer::OnMapAsyncCallback(uint64_t requestSerial,
-                                    uint32_t status,
-                                    uint64_t readDataUpdateInfoLength,
-                                    const uint8_t* readDataUpdateInfo) {
-        MapRequestData request;
-        if (!mRequests.Acquire(requestSerial, &request)) {
-            return false;
-        }
-
-        auto FailRequest = [&request]() -> bool {
-            if (request.callback != nullptr) {
-                request.callback(WGPUBufferMapAsyncStatus_DeviceLost, request.userdata);
-            }
-            return false;
-        };
-
-        // Take into account the client-side status of the request if the server says it is a
-        // success.
-        if (status == WGPUBufferMapAsyncStatus_Success) {
-            status = request.clientStatus;
-        }
-
-        if (status == WGPUBufferMapAsyncStatus_Success) {
-            switch (request.type) {
-                case MapRequestType::Read: {
-                    if (readDataUpdateInfoLength > std::numeric_limits<size_t>::max()) {
-                        // This is the size of data deserialized from the command stream, which must
-                        // be CPU-addressable.
-                        return FailRequest();
-                    }
-
-                    // Validate to prevent bad map request; buffer destroyed during map request
-                    if (mReadHandle == nullptr) {
-                        return FailRequest();
-                    }
-                    // Update user map data with server returned data
-                    if (!mReadHandle->DeserializeDataUpdate(
-                            readDataUpdateInfo, static_cast<size_t>(readDataUpdateInfoLength),
-                            request.offset, request.size)) {
-                        return FailRequest();
-                    }
-                    mMapState = MapState::MappedForRead;
-                    mMappedData = const_cast<void*>(mReadHandle->GetData());
-                    break;
-                }
-                case MapRequestType::Write: {
-                    if (mWriteHandle == nullptr) {
-                        return FailRequest();
-                    }
-                    mMapState = MapState::MappedForWrite;
-                    mMappedData = mWriteHandle->GetData();
-                    break;
-                }
-                default:
-                    UNREACHABLE();
-            }
-
-            mMapOffset = request.offset;
-            mMapSize = request.size;
-        }
-
-        if (request.callback) {
-            request.callback(static_cast<WGPUBufferMapAsyncStatus>(status), request.userdata);
-        }
-
-        return true;
-    }
-
-    void* Buffer::GetMappedRange(size_t offset, size_t size) {
-        if (!IsMappedForWriting() || !CheckGetMappedRangeOffsetSize(offset, size)) {
-            return nullptr;
-        }
-        return static_cast<uint8_t*>(mMappedData) + offset;
-    }
-
-    const void* Buffer::GetConstMappedRange(size_t offset, size_t size) {
-        if (!(IsMappedForWriting() || IsMappedForReading()) ||
-            !CheckGetMappedRangeOffsetSize(offset, size)) {
-            return nullptr;
-        }
-        return static_cast<uint8_t*>(mMappedData) + offset;
-    }
-
-    void Buffer::Unmap() {
-        // Invalidate the local pointer, and cancel all other in-flight requests that would
-        // turn into errors anyway (you can't double map). This prevents race when the following
-        // happens, where the application code would have unmapped a buffer but still receive a
-        // callback:
-        //   - Client -> Server: MapRequest1, Unmap, MapRequest2
-        //   - Server -> Client: Result of MapRequest1
-        //   - Unmap locally on the client
-        //   - Server -> Client: Result of MapRequest2
-
-        // mWriteHandle can still be nullptr if buffer has been destroyed before unmap
-        if ((mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation) &&
-            mWriteHandle != nullptr) {
-            // Writes need to be flushed before Unmap is sent. Unmap calls all associated
-            // in-flight callbacks which may read the updated data.
-
-            // Get the serialization size of data update writes.
-            size_t writeDataUpdateInfoLength =
-                mWriteHandle->SizeOfSerializeDataUpdate(mMapOffset, mMapSize);
-
-            BufferUpdateMappedDataCmd cmd;
-            cmd.bufferId = id;
-            cmd.writeDataUpdateInfoLength = writeDataUpdateInfoLength;
-            cmd.writeDataUpdateInfo = nullptr;
-            cmd.offset = mMapOffset;
-            cmd.size = mMapSize;
-
-            client->SerializeCommand(
-                cmd, writeDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
-                    char* writeHandleBuffer;
-                    WIRE_TRY(serializeBuffer->NextN(writeDataUpdateInfoLength, &writeHandleBuffer));
-
-                    // Serialize flush metadata into the space after the command.
-                    // This closes the handle for writing.
-                    mWriteHandle->SerializeDataUpdate(writeHandleBuffer, cmd.offset, cmd.size);
-
-                    return WireResult::Success;
-                });
-
-            // If mDestructWriteHandleOnUnmap is true, that means the write handle is merely
-            // for mappedAtCreation usage. It is destroyed on unmap after flush to server
-            // instead of at buffer destruction.
-            if (mMapState == MapState::MappedAtCreation && mDestructWriteHandleOnUnmap) {
-                mWriteHandle = nullptr;
-                if (mReadHandle) {
-                    // If it's both mappedAtCreation and MapRead we need to reset
-                    // mMappedData to readHandle's GetData(). This could be changed to
-                    // merging read/write handle in future
-                    mMappedData = const_cast<void*>(mReadHandle->GetData());
-                }
+        // If mDestructWriteHandleOnUnmap is true, that means the write handle is merely
+        // for mappedAtCreation usage. It is destroyed on unmap after flush to server
+        // instead of at buffer destruction.
+        if (mMapState == MapState::MappedAtCreation && mDestructWriteHandleOnUnmap) {
+            mWriteHandle = nullptr;
+            if (mReadHandle) {
+                // If it's both mappedAtCreation and MapRead we need to reset
+                // mMappedData to readHandle's GetData(). This could be changed to
+                // merging read/write handle in future
+                mMappedData = const_cast<void*>(mReadHandle->GetData());
             }
         }
-
-        // Free map access tokens
-        mMapState = MapState::Unmapped;
-        mMapOffset = 0;
-        mMapSize = 0;
-
-        // Tag all mapping requests still in flight as unmapped before callback.
-        mRequests.ForAll([](MapRequestData* request) {
-            if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
-                request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
-            }
-        });
-
-        BufferUnmapCmd cmd;
-        cmd.self = ToAPI(this);
-        client->SerializeCommand(cmd);
     }
 
-    void Buffer::Destroy() {
-        // Remove the current mapping and destroy Read/WriteHandles.
-        FreeMappedData();
+    // Free map access tokens
+    mMapState = MapState::Unmapped;
+    mMapOffset = 0;
+    mMapSize = 0;
 
-        // Tag all mapping requests still in flight as destroyed before callback.
-        mRequests.ForAll([](MapRequestData* request) {
-            if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
-                request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
-            }
-        });
-
-        BufferDestroyCmd cmd;
-        cmd.self = ToAPI(this);
-        client->SerializeCommand(cmd);
-    }
-
-    bool Buffer::IsMappedForReading() const {
-        return mMapState == MapState::MappedForRead;
-    }
-
-    bool Buffer::IsMappedForWriting() const {
-        return mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation;
-    }
-
-    bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
-        if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
-            return false;
+    // Tag all mapping requests still in flight as unmapped before callback.
+    mRequests.ForAll([](MapRequestData* request) {
+        if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+            request->clientStatus = WGPUBufferMapAsyncStatus_UnmappedBeforeCallback;
         }
+    });
 
-        size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
+    BufferUnmapCmd cmd;
+    cmd.self = ToAPI(this);
+    client->SerializeCommand(cmd);
+}
 
-        if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
-            return false;
+void Buffer::Destroy() {
+    // Remove the current mapping and destroy Read/WriteHandles.
+    FreeMappedData();
+
+    // Tag all mapping requests still in flight as destroyed before callback.
+    mRequests.ForAll([](MapRequestData* request) {
+        if (request->clientStatus == WGPUBufferMapAsyncStatus_Success) {
+            request->clientStatus = WGPUBufferMapAsyncStatus_DestroyedBeforeCallback;
         }
+    });
 
-        size_t offsetInMappedRange = offset - mMapOffset;
-        return offsetInMappedRange <= mMapSize - rangeSize;
+    BufferDestroyCmd cmd;
+    cmd.self = ToAPI(this);
+    client->SerializeCommand(cmd);
+}
+
+bool Buffer::IsMappedForReading() const {
+    return mMapState == MapState::MappedForRead;
+}
+
+bool Buffer::IsMappedForWriting() const {
+    return mMapState == MapState::MappedForWrite || mMapState == MapState::MappedAtCreation;
+}
+
+bool Buffer::CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const {
+    if (offset % 8 != 0 || offset < mMapOffset || offset > mSize) {
+        return false;
     }
 
-    void Buffer::FreeMappedData() {
+    size_t rangeSize = size == WGPU_WHOLE_MAP_SIZE ? mSize - offset : size;
+
+    if (rangeSize % 4 != 0 || rangeSize > mMapSize) {
+        return false;
+    }
+
+    size_t offsetInMappedRange = offset - mMapOffset;
+    return offsetInMappedRange <= mMapSize - rangeSize;
+}
+
+void Buffer::FreeMappedData() {
 #if defined(DAWN_ENABLE_ASSERTS)
-        // When in "debug" mode, 0xCA-out the mapped data when we free it so that in we can detect
-        // use-after-free of the mapped data. This is particularly useful for WebGPU test about the
-        // interaction of mapping and GC.
-        if (mMappedData) {
-            memset(static_cast<uint8_t*>(mMappedData) + mMapOffset, 0xCA, mMapSize);
-        }
+    // When in "debug" mode, 0xCA-out the mapped data when we free it so that we can detect
+    // use-after-free of the mapped data. This is particularly useful for WebGPU tests about the
+    // interaction of mapping and GC.
+    if (mMappedData) {
+        memset(static_cast<uint8_t*>(mMappedData) + mMapOffset, 0xCA, mMapSize);
+    }
 #endif  // defined(DAWN_ENABLE_ASSERTS)
 
-        mMapOffset = 0;
-        mMapSize = 0;
-        mReadHandle = nullptr;
-        mWriteHandle = nullptr;
-        mMappedData = nullptr;
-    }
+    mMapOffset = 0;
+    mMapSize = 0;
+    mReadHandle = nullptr;
+    mWriteHandle = nullptr;
+    mMappedData = nullptr;
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Buffer.h b/src/dawn/wire/client/Buffer.h
index 17ff4ee..be11598 100644
--- a/src/dawn/wire/client/Buffer.h
+++ b/src/dawn/wire/client/Buffer.h
@@ -24,87 +24,87 @@
 
 namespace dawn::wire::client {
 
-    class Device;
+class Device;
 
-    class Buffer final : public ObjectBase {
-      public:
-        using ObjectBase::ObjectBase;
+class Buffer final : public ObjectBase {
+  public:
+    using ObjectBase::ObjectBase;
 
-        static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
-        static WGPUBuffer CreateError(Device* device);
+    static WGPUBuffer Create(Device* device, const WGPUBufferDescriptor* descriptor);
+    static WGPUBuffer CreateError(Device* device);
 
-        ~Buffer();
+    ~Buffer();
 
-        bool OnMapAsyncCallback(uint64_t requestSerial,
-                                uint32_t status,
-                                uint64_t readDataUpdateInfoLength,
-                                const uint8_t* readDataUpdateInfo);
-        void MapAsync(WGPUMapModeFlags mode,
-                      size_t offset,
-                      size_t size,
-                      WGPUBufferMapCallback callback,
-                      void* userdata);
-        void* GetMappedRange(size_t offset, size_t size);
-        const void* GetConstMappedRange(size_t offset, size_t size);
-        void Unmap();
+    bool OnMapAsyncCallback(uint64_t requestSerial,
+                            uint32_t status,
+                            uint64_t readDataUpdateInfoLength,
+                            const uint8_t* readDataUpdateInfo);
+    void MapAsync(WGPUMapModeFlags mode,
+                  size_t offset,
+                  size_t size,
+                  WGPUBufferMapCallback callback,
+                  void* userdata);
+    void* GetMappedRange(size_t offset, size_t size);
+    const void* GetConstMappedRange(size_t offset, size_t size);
+    void Unmap();
 
-        void Destroy();
+    void Destroy();
 
-      private:
-        void CancelCallbacksForDisconnect() override;
-        void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
+  private:
+    void CancelCallbacksForDisconnect() override;
+    void ClearAllCallbacks(WGPUBufferMapAsyncStatus status);
 
-        bool IsMappedForReading() const;
-        bool IsMappedForWriting() const;
-        bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
+    bool IsMappedForReading() const;
+    bool IsMappedForWriting() const;
+    bool CheckGetMappedRangeOffsetSize(size_t offset, size_t size) const;
 
-        void FreeMappedData();
+    void FreeMappedData();
 
-        Device* mDevice;
+    Device* mDevice;
 
-        enum class MapRequestType { None, Read, Write };
+    enum class MapRequestType { None, Read, Write };
 
-        enum class MapState {
-            Unmapped,
-            MappedForRead,
-            MappedForWrite,
-            MappedAtCreation,
-        };
-
-        // We want to defer all the validation to the server, which means we could have multiple
-        // map request in flight at a single time and need to track them separately.
-        // On well-behaved applications, only one request should exist at a single time.
-        struct MapRequestData {
-            WGPUBufferMapCallback callback = nullptr;
-            void* userdata = nullptr;
-            size_t offset = 0;
-            size_t size = 0;
-
-            // When the buffer is destroyed or unmapped too early, the unmappedBeforeX status takes
-            // precedence over the success value returned from the server. However Error statuses
-            // from the server take precedence over the client-side status.
-            WGPUBufferMapAsyncStatus clientStatus = WGPUBufferMapAsyncStatus_Success;
-
-            MapRequestType type = MapRequestType::None;
-        };
-        RequestTracker<MapRequestData> mRequests;
-        uint64_t mSize = 0;
-
-        // Only one mapped pointer can be active at a time because Unmap clears all the in-flight
-        // requests.
-        // TODO(enga): Use a tagged pointer to save space.
-        std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
-        std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
-        MapState mMapState = MapState::Unmapped;
-        bool mDestructWriteHandleOnUnmap = false;
-
-        void* mMappedData = nullptr;
-        size_t mMapOffset = 0;
-        size_t mMapSize = 0;
-
-        std::weak_ptr<bool> mDeviceIsAlive;
+    enum class MapState {
+        Unmapped,
+        MappedForRead,
+        MappedForWrite,
+        MappedAtCreation,
     };
 
+    // We want to defer all the validation to the server, which means we could have multiple
+    // map requests in flight at a single time and need to track them separately.
+    // In well-behaved applications, only one request should exist at a single time.
+    struct MapRequestData {
+        WGPUBufferMapCallback callback = nullptr;
+        void* userdata = nullptr;
+        size_t offset = 0;
+        size_t size = 0;
+
+        // When the buffer is destroyed or unmapped too early, the unmappedBeforeX status takes
+        // precedence over the success value returned from the server. However Error statuses
+        // from the server take precedence over the client-side status.
+        WGPUBufferMapAsyncStatus clientStatus = WGPUBufferMapAsyncStatus_Success;
+
+        MapRequestType type = MapRequestType::None;
+    };
+    RequestTracker<MapRequestData> mRequests;
+    uint64_t mSize = 0;
+
+    // Only one mapped pointer can be active at a time because Unmap clears all the in-flight
+    // requests.
+    // TODO(enga): Use a tagged pointer to save space.
+    std::unique_ptr<MemoryTransferService::ReadHandle> mReadHandle = nullptr;
+    std::unique_ptr<MemoryTransferService::WriteHandle> mWriteHandle = nullptr;
+    MapState mMapState = MapState::Unmapped;
+    bool mDestructWriteHandleOnUnmap = false;
+
+    void* mMappedData = nullptr;
+    size_t mMapOffset = 0;
+    size_t mMapSize = 0;
+
+    std::weak_ptr<bool> mDeviceIsAlive;
+};
+
 }  // namespace dawn::wire::client
 
 #endif  // SRC_DAWN_WIRE_CLIENT_BUFFER_H_
diff --git a/src/dawn/wire/client/Client.cpp b/src/dawn/wire/client/Client.cpp
index 5db8444..6b15628 100644
--- a/src/dawn/wire/client/Client.cpp
+++ b/src/dawn/wire/client/Client.cpp
@@ -19,153 +19,147 @@
 
 namespace dawn::wire::client {
 
-    namespace {
+namespace {
 
-        class NoopCommandSerializer final : public CommandSerializer {
-          public:
-            static NoopCommandSerializer* GetInstance() {
-                static NoopCommandSerializer gNoopCommandSerializer;
-                return &gNoopCommandSerializer;
-            }
-
-            ~NoopCommandSerializer() = default;
-
-            size_t GetMaximumAllocationSize() const final {
-                return 0;
-            }
-            void* GetCmdSpace(size_t size) final {
-                return nullptr;
-            }
-            bool Flush() final {
-                return false;
-            }
-        };
-
-    }  // anonymous namespace
-
-    Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
-        : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
-        if (mMemoryTransferService == nullptr) {
-            // If a MemoryTransferService is not provided, fall back to inline memory.
-            mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
-            mMemoryTransferService = mOwnedMemoryTransferService.get();
-        }
+class NoopCommandSerializer final : public CommandSerializer {
+  public:
+    static NoopCommandSerializer* GetInstance() {
+        static NoopCommandSerializer gNoopCommandSerializer;
+        return &gNoopCommandSerializer;
     }
 
-    Client::~Client() {
-        DestroyAllObjects();
+    ~NoopCommandSerializer() = default;
+
+    size_t GetMaximumAllocationSize() const final { return 0; }
+    void* GetCmdSpace(size_t size) final { return nullptr; }
+    bool Flush() final { return false; }
+};
+
+}  // anonymous namespace
+
+Client::Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService)
+    : ClientBase(), mSerializer(serializer), mMemoryTransferService(memoryTransferService) {
+    if (mMemoryTransferService == nullptr) {
+        // If a MemoryTransferService is not provided, fall back to inline memory.
+        mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+        mMemoryTransferService = mOwnedMemoryTransferService.get();
     }
+}
 
-    void Client::DestroyAllObjects() {
-        for (auto& objectList : mObjects) {
-            ObjectType objectType = static_cast<ObjectType>(&objectList - mObjects.data());
-            if (objectType == ObjectType::Device) {
-                continue;
-            }
-            while (!objectList.empty()) {
-                ObjectBase* object = objectList.head()->value();
+Client::~Client() {
+    DestroyAllObjects();
+}
 
-                DestroyObjectCmd cmd;
-                cmd.objectType = objectType;
-                cmd.objectId = object->id;
-                SerializeCommand(cmd);
-                FreeObject(objectType, object);
-            }
+void Client::DestroyAllObjects() {
+    for (auto& objectList : mObjects) {
+        ObjectType objectType = static_cast<ObjectType>(&objectList - mObjects.data());
+        if (objectType == ObjectType::Device) {
+            continue;
         }
-
-        while (!mObjects[ObjectType::Device].empty()) {
-            ObjectBase* object = mObjects[ObjectType::Device].head()->value();
+        while (!objectList.empty()) {
+            ObjectBase* object = objectList.head()->value();
 
             DestroyObjectCmd cmd;
-            cmd.objectType = ObjectType::Device;
+            cmd.objectType = objectType;
             cmd.objectId = object->id;
             SerializeCommand(cmd);
-            FreeObject(ObjectType::Device, object);
+            FreeObject(objectType, object);
         }
     }
 
-    ReservedTexture Client::ReserveTexture(WGPUDevice device) {
-        auto* allocation = TextureAllocator().New(this);
+    while (!mObjects[ObjectType::Device].empty()) {
+        ObjectBase* object = mObjects[ObjectType::Device].head()->value();
 
-        ReservedTexture result;
-        result.texture = ToAPI(allocation->object.get());
-        result.id = allocation->object->id;
-        result.generation = allocation->generation;
-        result.deviceId = FromAPI(device)->id;
-        result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
-        return result;
+        DestroyObjectCmd cmd;
+        cmd.objectType = ObjectType::Device;
+        cmd.objectId = object->id;
+        SerializeCommand(cmd);
+        FreeObject(ObjectType::Device, object);
     }
+}
 
-    ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
-        auto* allocation = SwapChainAllocator().New(this);
+ReservedTexture Client::ReserveTexture(WGPUDevice device) {
+    auto* allocation = TextureAllocator().New(this);
 
-        ReservedSwapChain result;
-        result.swapchain = ToAPI(allocation->object.get());
-        result.id = allocation->object->id;
-        result.generation = allocation->generation;
-        result.deviceId = FromAPI(device)->id;
-        result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
-        return result;
-    }
+    ReservedTexture result;
+    result.texture = ToAPI(allocation->object.get());
+    result.id = allocation->object->id;
+    result.generation = allocation->generation;
+    result.deviceId = FromAPI(device)->id;
+    result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+    return result;
+}
 
-    ReservedDevice Client::ReserveDevice() {
-        auto* allocation = DeviceAllocator().New(this);
+ReservedSwapChain Client::ReserveSwapChain(WGPUDevice device) {
+    auto* allocation = SwapChainAllocator().New(this);
 
-        ReservedDevice result;
-        result.device = ToAPI(allocation->object.get());
-        result.id = allocation->object->id;
-        result.generation = allocation->generation;
-        return result;
-    }
+    ReservedSwapChain result;
+    result.swapchain = ToAPI(allocation->object.get());
+    result.id = allocation->object->id;
+    result.generation = allocation->generation;
+    result.deviceId = FromAPI(device)->id;
+    result.deviceGeneration = DeviceAllocator().GetGeneration(FromAPI(device)->id);
+    return result;
+}
 
-    ReservedInstance Client::ReserveInstance() {
-        auto* allocation = InstanceAllocator().New(this);
+ReservedDevice Client::ReserveDevice() {
+    auto* allocation = DeviceAllocator().New(this);
 
-        ReservedInstance result;
-        result.instance = ToAPI(allocation->object.get());
-        result.id = allocation->object->id;
-        result.generation = allocation->generation;
-        return result;
-    }
+    ReservedDevice result;
+    result.device = ToAPI(allocation->object.get());
+    result.id = allocation->object->id;
+    result.generation = allocation->generation;
+    return result;
+}
 
-    void Client::ReclaimTextureReservation(const ReservedTexture& reservation) {
-        TextureAllocator().Free(FromAPI(reservation.texture));
-    }
+ReservedInstance Client::ReserveInstance() {
+    auto* allocation = InstanceAllocator().New(this);
 
-    void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
-        SwapChainAllocator().Free(FromAPI(reservation.swapchain));
-    }
+    ReservedInstance result;
+    result.instance = ToAPI(allocation->object.get());
+    result.id = allocation->object->id;
+    result.generation = allocation->generation;
+    return result;
+}
 
-    void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
-        DeviceAllocator().Free(FromAPI(reservation.device));
-    }
+void Client::ReclaimTextureReservation(const ReservedTexture& reservation) {
+    TextureAllocator().Free(FromAPI(reservation.texture));
+}
 
-    void Client::ReclaimInstanceReservation(const ReservedInstance& reservation) {
-        InstanceAllocator().Free(FromAPI(reservation.instance));
-    }
+void Client::ReclaimSwapChainReservation(const ReservedSwapChain& reservation) {
+    SwapChainAllocator().Free(FromAPI(reservation.swapchain));
+}
 
-    void Client::Disconnect() {
-        mDisconnected = true;
-        mSerializer = ChunkedCommandSerializer(NoopCommandSerializer::GetInstance());
+void Client::ReclaimDeviceReservation(const ReservedDevice& reservation) {
+    DeviceAllocator().Free(FromAPI(reservation.device));
+}
 
-        auto& deviceList = mObjects[ObjectType::Device];
-        {
-            for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
-                 device = device->next()) {
-                static_cast<Device*>(device->value())
-                    ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
-            }
-        }
-        for (auto& objectList : mObjects) {
-            for (LinkNode<ObjectBase>* object = objectList.head(); object != objectList.end();
-                 object = object->next()) {
-                object->value()->CancelCallbacksForDisconnect();
-            }
+void Client::ReclaimInstanceReservation(const ReservedInstance& reservation) {
+    InstanceAllocator().Free(FromAPI(reservation.instance));
+}
+
+void Client::Disconnect() {
+    mDisconnected = true;
+    mSerializer = ChunkedCommandSerializer(NoopCommandSerializer::GetInstance());
+
+    auto& deviceList = mObjects[ObjectType::Device];
+    {
+        for (LinkNode<ObjectBase>* device = deviceList.head(); device != deviceList.end();
+             device = device->next()) {
+            static_cast<Device*>(device->value())
+                ->HandleDeviceLost(WGPUDeviceLostReason_Undefined, "GPU connection lost");
         }
     }
-
-    bool Client::IsDisconnected() const {
-        return mDisconnected;
+    for (auto& objectList : mObjects) {
+        for (LinkNode<ObjectBase>* object = objectList.head(); object != objectList.end();
+             object = object->next()) {
+            object->value()->CancelCallbacksForDisconnect();
+        }
     }
+}
+
+bool Client::IsDisconnected() const {
+    return mDisconnected;
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Client.h b/src/dawn/wire/client/Client.h
index d9039b5..d045f0c 100644
--- a/src/dawn/wire/client/Client.h
+++ b/src/dawn/wire/client/Client.h
@@ -17,11 +17,11 @@
 
 #include <memory>
 
-#include "dawn/webgpu.h"
-#include "dawn/wire/Wire.h"
 #include "dawn/common/LinkedList.h"
 #include "dawn/common/NonCopyable.h"
+#include "dawn/webgpu.h"
 #include "dawn/wire/ChunkedCommandSerializer.h"
+#include "dawn/wire/Wire.h"
 #include "dawn/wire/WireClient.h"
 #include "dawn/wire/WireCmd_autogen.h"
 #include "dawn/wire/WireDeserializeAllocator.h"
@@ -29,67 +29,64 @@
 
 namespace dawn::wire::client {
 
-    class Device;
-    class MemoryTransferService;
+class Device;
+class MemoryTransferService;
 
-    class Client : public ClientBase {
-      public:
-        Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
-        ~Client() override;
+class Client : public ClientBase {
+  public:
+    Client(CommandSerializer* serializer, MemoryTransferService* memoryTransferService);
+    ~Client() override;
 
-        // ChunkedCommandHandler implementation
-        const volatile char* HandleCommandsImpl(const volatile char* commands,
-                                                size_t size) override;
+    // ChunkedCommandHandler implementation
+    const volatile char* HandleCommandsImpl(const volatile char* commands, size_t size) override;
 
-        MemoryTransferService* GetMemoryTransferService() const {
-            return mMemoryTransferService;
-        }
+    MemoryTransferService* GetMemoryTransferService() const { return mMemoryTransferService; }
 
-        ReservedTexture ReserveTexture(WGPUDevice device);
-        ReservedSwapChain ReserveSwapChain(WGPUDevice device);
-        ReservedDevice ReserveDevice();
-        ReservedInstance ReserveInstance();
+    ReservedTexture ReserveTexture(WGPUDevice device);
+    ReservedSwapChain ReserveSwapChain(WGPUDevice device);
+    ReservedDevice ReserveDevice();
+    ReservedInstance ReserveInstance();
 
-        void ReclaimTextureReservation(const ReservedTexture& reservation);
-        void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
-        void ReclaimDeviceReservation(const ReservedDevice& reservation);
-        void ReclaimInstanceReservation(const ReservedInstance& reservation);
+    void ReclaimTextureReservation(const ReservedTexture& reservation);
+    void ReclaimSwapChainReservation(const ReservedSwapChain& reservation);
+    void ReclaimDeviceReservation(const ReservedDevice& reservation);
+    void ReclaimInstanceReservation(const ReservedInstance& reservation);
 
-        template <typename Cmd>
-        void SerializeCommand(const Cmd& cmd) {
-            mSerializer.SerializeCommand(cmd, *this);
-        }
+    template <typename Cmd>
+    void SerializeCommand(const Cmd& cmd) {
+        mSerializer.SerializeCommand(cmd, *this);
+    }
 
-        template <typename Cmd, typename ExtraSizeSerializeFn>
-        void SerializeCommand(const Cmd& cmd,
-                              size_t extraSize,
-                              ExtraSizeSerializeFn&& SerializeExtraSize) {
-            mSerializer.SerializeCommand(cmd, *this, extraSize, SerializeExtraSize);
-        }
+    template <typename Cmd, typename ExtraSizeSerializeFn>
+    void SerializeCommand(const Cmd& cmd,
+                          size_t extraSize,
+                          ExtraSizeSerializeFn&& SerializeExtraSize) {
+        mSerializer.SerializeCommand(cmd, *this, extraSize, SerializeExtraSize);
+    }
 
-        void Disconnect();
-        bool IsDisconnected() const;
+    void Disconnect();
+    bool IsDisconnected() const;
 
-        template <typename T>
-        void TrackObject(T* object) {
-            mObjects[ObjectTypeToTypeEnum<T>::value].Append(object);
-        }
+    template <typename T>
+    void TrackObject(T* object) {
+        mObjects[ObjectTypeToTypeEnum<T>::value].Append(object);
+    }
 
-      private:
-        void DestroyAllObjects();
+  private:
+    void DestroyAllObjects();
 
 #include "dawn/wire/client/ClientPrototypes_autogen.inc"
 
-        ChunkedCommandSerializer mSerializer;
-        WireDeserializeAllocator mAllocator;
-        MemoryTransferService* mMemoryTransferService = nullptr;
-        std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+    ChunkedCommandSerializer mSerializer;
+    WireDeserializeAllocator mAllocator;
+    MemoryTransferService* mMemoryTransferService = nullptr;
+    std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
 
-        PerObjectType<LinkedList<ObjectBase>> mObjects;
-        bool mDisconnected = false;
-    };
+    PerObjectType<LinkedList<ObjectBase>> mObjects;
+    bool mDisconnected = false;
+};
 
-    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/ClientDoers.cpp b/src/dawn/wire/client/ClientDoers.cpp
index 862ad3e..9103854 100644
--- a/src/dawn/wire/client/ClientDoers.cpp
+++ b/src/dawn/wire/client/ClientDoers.cpp
@@ -20,114 +20,114 @@
 
 namespace dawn::wire::client {
 
-    bool Client::DoDeviceUncapturedErrorCallback(Device* device,
-                                                 WGPUErrorType errorType,
-                                                 const char* message) {
-        switch (errorType) {
-            case WGPUErrorType_NoError:
-            case WGPUErrorType_Validation:
-            case WGPUErrorType_OutOfMemory:
-            case WGPUErrorType_Unknown:
-            case WGPUErrorType_DeviceLost:
-                break;
-            default:
-                return false;
-        }
-        if (device == nullptr) {
-            // The device might have been deleted or recreated so this isn't an error.
-            return true;
-        }
-        device->HandleError(errorType, message);
-        return true;
+bool Client::DoDeviceUncapturedErrorCallback(Device* device,
+                                             WGPUErrorType errorType,
+                                             const char* message) {
+    switch (errorType) {
+        case WGPUErrorType_NoError:
+        case WGPUErrorType_Validation:
+        case WGPUErrorType_OutOfMemory:
+        case WGPUErrorType_Unknown:
+        case WGPUErrorType_DeviceLost:
+            break;
+        default:
+            return false;
     }
-
-    bool Client::DoDeviceLoggingCallback(Device* device,
-                                         WGPULoggingType loggingType,
-                                         const char* message) {
-        if (device == nullptr) {
-            // The device might have been deleted or recreated so this isn't an error.
-            return true;
-        }
-        device->HandleLogging(loggingType, message);
-        return true;
-    }
-
-    bool Client::DoDeviceLostCallback(Device* device,
-                                      WGPUDeviceLostReason reason,
-                                      char const* message) {
-        if (device == nullptr) {
-            // The device might have been deleted or recreated so this isn't an error.
-            return true;
-        }
-        device->HandleDeviceLost(reason, message);
-        return true;
-    }
-
-    bool Client::DoDevicePopErrorScopeCallback(Device* device,
-                                               uint64_t requestSerial,
-                                               WGPUErrorType errorType,
-                                               const char* message) {
-        if (device == nullptr) {
-            // The device might have been deleted or recreated so this isn't an error.
-            return true;
-        }
-        return device->OnPopErrorScopeCallback(requestSerial, errorType, message);
-    }
-
-    bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
-                                          uint64_t requestSerial,
-                                          uint32_t status,
-                                          uint64_t readDataUpdateInfoLength,
-                                          const uint8_t* readDataUpdateInfo) {
-        // The buffer might have been deleted or recreated so this isn't an error.
-        if (buffer == nullptr) {
-            return true;
-        }
-        return buffer->OnMapAsyncCallback(requestSerial, status, readDataUpdateInfoLength,
-                                          readDataUpdateInfo);
-    }
-
-    bool Client::DoQueueWorkDoneCallback(Queue* queue,
-                                         uint64_t requestSerial,
-                                         WGPUQueueWorkDoneStatus status) {
-        // The queue might have been deleted or recreated so this isn't an error.
-        if (queue == nullptr) {
-            return true;
-        }
-        return queue->OnWorkDoneCallback(requestSerial, status);
-    }
-
-    bool Client::DoDeviceCreateComputePipelineAsyncCallback(Device* device,
-                                                            uint64_t requestSerial,
-                                                            WGPUCreatePipelineAsyncStatus status,
-                                                            const char* message) {
+    if (device == nullptr) {
         // The device might have been deleted or recreated so this isn't an error.
-        if (device == nullptr) {
-            return true;
-        }
-        return device->OnCreateComputePipelineAsyncCallback(requestSerial, status, message);
+        return true;
     }
+    device->HandleError(errorType, message);
+    return true;
+}
 
-    bool Client::DoDeviceCreateRenderPipelineAsyncCallback(Device* device,
-                                                           uint64_t requestSerial,
-                                                           WGPUCreatePipelineAsyncStatus status,
-                                                           const char* message) {
+bool Client::DoDeviceLoggingCallback(Device* device,
+                                     WGPULoggingType loggingType,
+                                     const char* message) {
+    if (device == nullptr) {
         // The device might have been deleted or recreated so this isn't an error.
-        if (device == nullptr) {
-            return true;
-        }
-        return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
+        return true;
     }
+    device->HandleLogging(loggingType, message);
+    return true;
+}
 
-    bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
-                                                          uint64_t requestSerial,
-                                                          WGPUCompilationInfoRequestStatus status,
-                                                          const WGPUCompilationInfo* info) {
-        // The shader module might have been deleted or recreated so this isn't an error.
-        if (shaderModule == nullptr) {
-            return true;
-        }
-        return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
+bool Client::DoDeviceLostCallback(Device* device,
+                                  WGPUDeviceLostReason reason,
+                                  char const* message) {
+    if (device == nullptr) {
+        // The device might have been deleted or recreated so this isn't an error.
+        return true;
     }
+    device->HandleDeviceLost(reason, message);
+    return true;
+}
+
+bool Client::DoDevicePopErrorScopeCallback(Device* device,
+                                           uint64_t requestSerial,
+                                           WGPUErrorType errorType,
+                                           const char* message) {
+    if (device == nullptr) {
+        // The device might have been deleted or recreated so this isn't an error.
+        return true;
+    }
+    return device->OnPopErrorScopeCallback(requestSerial, errorType, message);
+}
+
+bool Client::DoBufferMapAsyncCallback(Buffer* buffer,
+                                      uint64_t requestSerial,
+                                      uint32_t status,
+                                      uint64_t readDataUpdateInfoLength,
+                                      const uint8_t* readDataUpdateInfo) {
+    // The buffer might have been deleted or recreated so this isn't an error.
+    if (buffer == nullptr) {
+        return true;
+    }
+    return buffer->OnMapAsyncCallback(requestSerial, status, readDataUpdateInfoLength,
+                                      readDataUpdateInfo);
+}
+
+bool Client::DoQueueWorkDoneCallback(Queue* queue,
+                                     uint64_t requestSerial,
+                                     WGPUQueueWorkDoneStatus status) {
+    // The queue might have been deleted or recreated so this isn't an error.
+    if (queue == nullptr) {
+        return true;
+    }
+    return queue->OnWorkDoneCallback(requestSerial, status);
+}
+
+bool Client::DoDeviceCreateComputePipelineAsyncCallback(Device* device,
+                                                        uint64_t requestSerial,
+                                                        WGPUCreatePipelineAsyncStatus status,
+                                                        const char* message) {
+    // The device might have been deleted or recreated so this isn't an error.
+    if (device == nullptr) {
+        return true;
+    }
+    return device->OnCreateComputePipelineAsyncCallback(requestSerial, status, message);
+}
+
+bool Client::DoDeviceCreateRenderPipelineAsyncCallback(Device* device,
+                                                       uint64_t requestSerial,
+                                                       WGPUCreatePipelineAsyncStatus status,
+                                                       const char* message) {
+    // The device might have been deleted or recreated so this isn't an error.
+    if (device == nullptr) {
+        return true;
+    }
+    return device->OnCreateRenderPipelineAsyncCallback(requestSerial, status, message);
+}
+
+bool Client::DoShaderModuleGetCompilationInfoCallback(ShaderModule* shaderModule,
+                                                      uint64_t requestSerial,
+                                                      WGPUCompilationInfoRequestStatus status,
+                                                      const WGPUCompilationInfo* info) {
+    // The shader module might have been deleted or recreated so this isn't an error.
+    if (shaderModule == nullptr) {
+        return true;
+    }
+    return shaderModule->GetCompilationInfoCallback(requestSerial, status, info);
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp b/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
index c9882b3..0174d01 100644
--- a/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
+++ b/src/dawn/wire/client/ClientInlineMemoryTransferService.cpp
@@ -23,111 +23,98 @@
 
 namespace dawn::wire::client {
 
-    class InlineMemoryTransferService : public MemoryTransferService {
-        class ReadHandleImpl : public ReadHandle {
-          public:
-            explicit ReadHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
-                : mStagingData(std::move(stagingData)), mSize(size) {
-            }
-
-            ~ReadHandleImpl() override = default;
-
-            size_t SerializeCreateSize() override {
-                return 0;
-            }
-
-            void SerializeCreate(void*) override {
-            }
-
-            const void* GetData() override {
-                return mStagingData.get();
-            }
-
-            bool DeserializeDataUpdate(const void* deserializePointer,
-                                       size_t deserializeSize,
-                                       size_t offset,
-                                       size_t size) override {
-                if (deserializeSize != size || deserializePointer == nullptr) {
-                    return false;
-                }
-
-                if (offset > mSize || size > mSize - offset) {
-                    return false;
-                }
-
-                void* start = static_cast<uint8_t*>(mStagingData.get()) + offset;
-                memcpy(start, deserializePointer, size);
-                return true;
-            }
-
-          private:
-            std::unique_ptr<uint8_t[]> mStagingData;
-            size_t mSize;
-        };
-
-        class WriteHandleImpl : public WriteHandle {
-          public:
-            explicit WriteHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
-                : mStagingData(std::move(stagingData)), mSize(size) {
-            }
-
-            ~WriteHandleImpl() override = default;
-
-            size_t SerializeCreateSize() override {
-                return 0;
-            }
-
-            void SerializeCreate(void*) override {
-            }
-
-            void* GetData() override {
-                return mStagingData.get();
-            }
-
-            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
-                ASSERT(offset <= mSize);
-                ASSERT(size <= mSize - offset);
-                return size;
-            }
-
-            void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override {
-                ASSERT(mStagingData != nullptr);
-                ASSERT(serializePointer != nullptr);
-                ASSERT(offset <= mSize);
-                ASSERT(size <= mSize - offset);
-                memcpy(serializePointer, static_cast<uint8_t*>(mStagingData.get()) + offset, size);
-            }
-
-          private:
-            std::unique_ptr<uint8_t[]> mStagingData;
-            size_t mSize;
-        };
-
+class InlineMemoryTransferService : public MemoryTransferService {
+    class ReadHandleImpl : public ReadHandle {
       public:
-        InlineMemoryTransferService() {
-        }
-        ~InlineMemoryTransferService() override = default;
+        explicit ReadHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+            : mStagingData(std::move(stagingData)), mSize(size) {}
 
-        ReadHandle* CreateReadHandle(size_t size) override {
-            auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
-            if (stagingData) {
-                return new ReadHandleImpl(std::move(stagingData), size);
+        ~ReadHandleImpl() override = default;
+
+        size_t SerializeCreateSize() override { return 0; }
+
+        void SerializeCreate(void*) override {}
+
+        const void* GetData() override { return mStagingData.get(); }
+
+        bool DeserializeDataUpdate(const void* deserializePointer,
+                                   size_t deserializeSize,
+                                   size_t offset,
+                                   size_t size) override {
+            if (deserializeSize != size || deserializePointer == nullptr) {
+                return false;
             }
-            return nullptr;
+
+            if (offset > mSize || size > mSize - offset) {
+                return false;
+            }
+
+            void* start = static_cast<uint8_t*>(mStagingData.get()) + offset;
+            memcpy(start, deserializePointer, size);
+            return true;
         }
 
-        WriteHandle* CreateWriteHandle(size_t size) override {
-            auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
-            if (stagingData) {
-                memset(stagingData.get(), 0, size);
-                return new WriteHandleImpl(std::move(stagingData), size);
-            }
-            return nullptr;
-        }
+      private:
+        std::unique_ptr<uint8_t[]> mStagingData;
+        size_t mSize;
     };
 
-    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
-        return std::make_unique<InlineMemoryTransferService>();
+    class WriteHandleImpl : public WriteHandle {
+      public:
+        explicit WriteHandleImpl(std::unique_ptr<uint8_t[]> stagingData, size_t size)
+            : mStagingData(std::move(stagingData)), mSize(size) {}
+
+        ~WriteHandleImpl() override = default;
+
+        size_t SerializeCreateSize() override { return 0; }
+
+        void SerializeCreate(void*) override {}
+
+        void* GetData() override { return mStagingData.get(); }
+
+        size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
+            ASSERT(offset <= mSize);
+            ASSERT(size <= mSize - offset);
+            return size;
+        }
+
+        void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override {
+            ASSERT(mStagingData != nullptr);
+            ASSERT(serializePointer != nullptr);
+            ASSERT(offset <= mSize);
+            ASSERT(size <= mSize - offset);
+            memcpy(serializePointer, static_cast<uint8_t*>(mStagingData.get()) + offset, size);
+        }
+
+      private:
+        std::unique_ptr<uint8_t[]> mStagingData;
+        size_t mSize;
+    };
+
+  public:
+    InlineMemoryTransferService() {}
+    ~InlineMemoryTransferService() override = default;
+
+    ReadHandle* CreateReadHandle(size_t size) override {
+        auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+        if (stagingData) {
+            return new ReadHandleImpl(std::move(stagingData), size);
+        }
+        return nullptr;
     }
 
+    WriteHandle* CreateWriteHandle(size_t size) override {
+        auto stagingData = std::unique_ptr<uint8_t[]>(AllocNoThrow<uint8_t>(size));
+        if (stagingData) {
+            memset(stagingData.get(), 0, size);
+            return new WriteHandleImpl(std::move(stagingData), size);
+        }
+        return nullptr;
+    }
+};
+
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+    return std::make_unique<InlineMemoryTransferService>();
+}
+
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp b/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
index 44ca3ed..46016db 100644
--- a/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
+++ b/src/dawn/wire/client/ClientMemoryTransferService_mock.cpp
@@ -19,87 +19,82 @@
 
 namespace dawn::wire::client {
 
-    MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
-        : ReadHandle(), mService(service) {
-    }
+MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+    : ReadHandle(), mService(service) {}
 
-    MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
-        mService->OnReadHandleDestroy(this);
-    }
+MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+    mService->OnReadHandleDestroy(this);
+}
 
-    size_t MockMemoryTransferService::MockReadHandle::SerializeCreateSize() {
-        return mService->OnReadHandleSerializeCreateSize(this);
-    }
+size_t MockMemoryTransferService::MockReadHandle::SerializeCreateSize() {
+    return mService->OnReadHandleSerializeCreateSize(this);
+}
 
-    void MockMemoryTransferService::MockReadHandle::SerializeCreate(void* serializePointer) {
-        mService->OnReadHandleSerializeCreate(this, serializePointer);
-    }
+void MockMemoryTransferService::MockReadHandle::SerializeCreate(void* serializePointer) {
+    mService->OnReadHandleSerializeCreate(this, serializePointer);
+}
 
-    const void* MockMemoryTransferService::MockReadHandle::GetData() {
-        return mService->OnReadHandleGetData(this);
-    }
+const void* MockMemoryTransferService::MockReadHandle::GetData() {
+    return mService->OnReadHandleGetData(this);
+}
 
-    bool MockMemoryTransferService::MockReadHandle::DeserializeDataUpdate(
-        const void* deserializePointer,
-        size_t deserializeSize,
-        size_t offset,
-        size_t size) {
-        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
-        return mService->OnReadHandleDeserializeDataUpdate(
-            this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
-            size);
-    }
+bool MockMemoryTransferService::MockReadHandle::DeserializeDataUpdate(
+    const void* deserializePointer,
+    size_t deserializeSize,
+    size_t offset,
+    size_t size) {
+    ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+    return mService->OnReadHandleDeserializeDataUpdate(
+        this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset, size);
+}
 
-    MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
-        : WriteHandle(), mService(service) {
-    }
+MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+    : WriteHandle(), mService(service) {}
 
-    MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
-        mService->OnWriteHandleDestroy(this);
-    }
+MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+    mService->OnWriteHandleDestroy(this);
+}
 
-    size_t MockMemoryTransferService::MockWriteHandle::SerializeCreateSize() {
-        return mService->OnWriteHandleSerializeCreateSize(this);
-    }
+size_t MockMemoryTransferService::MockWriteHandle::SerializeCreateSize() {
+    return mService->OnWriteHandleSerializeCreateSize(this);
+}
 
-    void MockMemoryTransferService::MockWriteHandle::SerializeCreate(void* serializePointer) {
-        mService->OnWriteHandleSerializeCreate(this, serializePointer);
-    }
+void MockMemoryTransferService::MockWriteHandle::SerializeCreate(void* serializePointer) {
+    mService->OnWriteHandleSerializeCreate(this, serializePointer);
+}
 
-    void* MockMemoryTransferService::MockWriteHandle::GetData() {
-        return mService->OnWriteHandleGetData(this);
-    }
+void* MockMemoryTransferService::MockWriteHandle::GetData() {
+    return mService->OnWriteHandleGetData(this);
+}
 
-    size_t MockMemoryTransferService::MockWriteHandle::SizeOfSerializeDataUpdate(size_t offset,
-                                                                                 size_t size) {
-        return mService->OnWriteHandleSizeOfSerializeDataUpdate(this, offset, size);
-    }
+size_t MockMemoryTransferService::MockWriteHandle::SizeOfSerializeDataUpdate(size_t offset,
+                                                                             size_t size) {
+    return mService->OnWriteHandleSizeOfSerializeDataUpdate(this, offset, size);
+}
 
-    void MockMemoryTransferService::MockWriteHandle::SerializeDataUpdate(void* serializePointer,
-                                                                         size_t offset,
-                                                                         size_t size) {
-        mService->OnWriteHandleSerializeDataUpdate(this, serializePointer, offset, size);
-    }
+void MockMemoryTransferService::MockWriteHandle::SerializeDataUpdate(void* serializePointer,
+                                                                     size_t offset,
+                                                                     size_t size) {
+    mService->OnWriteHandleSerializeDataUpdate(this, serializePointer, offset, size);
+}
 
-    MockMemoryTransferService::MockMemoryTransferService() = default;
-    MockMemoryTransferService::~MockMemoryTransferService() = default;
+MockMemoryTransferService::MockMemoryTransferService() = default;
+MockMemoryTransferService::~MockMemoryTransferService() = default;
 
-    MockMemoryTransferService::ReadHandle* MockMemoryTransferService::CreateReadHandle(
-        size_t size) {
-        return OnCreateReadHandle(size);
-    }
+MockMemoryTransferService::ReadHandle* MockMemoryTransferService::CreateReadHandle(size_t size) {
+    return OnCreateReadHandle(size);
+}
 
-    MockMemoryTransferService::WriteHandle* MockMemoryTransferService::CreateWriteHandle(
-        size_t size) {
-        return OnCreateWriteHandle(size);
-    }
+MockMemoryTransferService::WriteHandle* MockMemoryTransferService::CreateWriteHandle(size_t size) {
+    return OnCreateWriteHandle(size);
+}
 
-    MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
-        return new MockReadHandle(this);
-    }
+MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+    return new MockReadHandle(this);
+}
 
-    MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
-        return new MockWriteHandle(this);
-    }
+MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+    return new MockWriteHandle(this);
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ClientMemoryTransferService_mock.h b/src/dawn/wire/client/ClientMemoryTransferService_mock.h
index 7e33afa..1e67d0b 100644
--- a/src/dawn/wire/client/ClientMemoryTransferService_mock.h
+++ b/src/dawn/wire/client/ClientMemoryTransferService_mock.h
@@ -22,78 +22,78 @@
 
 namespace dawn::wire::client {
 
-    class MockMemoryTransferService : public MemoryTransferService {
+class MockMemoryTransferService : public MemoryTransferService {
+  public:
+    class MockReadHandle : public ReadHandle {
       public:
-        class MockReadHandle : public ReadHandle {
-          public:
-            explicit MockReadHandle(MockMemoryTransferService* service);
-            ~MockReadHandle() override;
+        explicit MockReadHandle(MockMemoryTransferService* service);
+        ~MockReadHandle() override;
 
-            size_t SerializeCreateSize() override;
-            void SerializeCreate(void* serializePointer) override;
-            const void* GetData() override;
-            bool DeserializeDataUpdate(const void* deserializePointer,
-                                       size_t deserializeSize,
-                                       size_t offset,
-                                       size_t size) override;
+        size_t SerializeCreateSize() override;
+        void SerializeCreate(void* serializePointer) override;
+        const void* GetData() override;
+        bool DeserializeDataUpdate(const void* deserializePointer,
+                                   size_t deserializeSize,
+                                   size_t offset,
+                                   size_t size) override;
 
-          private:
-            MockMemoryTransferService* mService;
-        };
-
-        class MockWriteHandle : public WriteHandle {
-          public:
-            explicit MockWriteHandle(MockMemoryTransferService* service);
-            ~MockWriteHandle() override;
-
-            size_t SerializeCreateSize() override;
-            void SerializeCreate(void* serializePointer) override;
-            void* GetData() override;
-            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
-            void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override;
-
-          private:
-            MockMemoryTransferService* mService;
-        };
-
-        MockMemoryTransferService();
-        ~MockMemoryTransferService() override;
-
-        ReadHandle* CreateReadHandle(size_t) override;
-        WriteHandle* CreateWriteHandle(size_t) override;
-
-        MockReadHandle* NewReadHandle();
-        MockWriteHandle* NewWriteHandle();
-
-        MOCK_METHOD(ReadHandle*, OnCreateReadHandle, (size_t));
-        MOCK_METHOD(WriteHandle*, OnCreateWriteHandle, (size_t));
-
-        MOCK_METHOD(size_t, OnReadHandleSerializeCreateSize, (const ReadHandle*));
-        MOCK_METHOD(void, OnReadHandleSerializeCreate, (const ReadHandle*, void* serializePointer));
-        MOCK_METHOD((const void*), OnReadHandleGetData, (const ReadHandle*));
-        MOCK_METHOD(bool,
-                    OnReadHandleDeserializeDataUpdate,
-                    (const ReadHandle*,
-                     const uint32_t* deserializePointer,
-                     size_t deserializeSize,
-                     size_t offset,
-                     size_t size));
-        MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle*));
-
-        MOCK_METHOD(size_t, OnWriteHandleSerializeCreateSize, (const void* WriteHandle));
-        MOCK_METHOD(void,
-                    OnWriteHandleSerializeCreate,
-                    (const void* WriteHandle, void* serializePointer));
-        MOCK_METHOD((void*), OnWriteHandleGetData, (const void* WriteHandle));
-        MOCK_METHOD(size_t,
-                    OnWriteHandleSizeOfSerializeDataUpdate,
-                    (const void* WriteHandle, size_t offset, size_t size));
-        MOCK_METHOD(size_t,
-                    OnWriteHandleSerializeDataUpdate,
-                    (const void* WriteHandle, void* serializePointer, size_t offset, size_t size));
-        MOCK_METHOD(void, OnWriteHandleDestroy, (const void* WriteHandle));
+      private:
+        MockMemoryTransferService* mService;
     };
 
+    class MockWriteHandle : public WriteHandle {
+      public:
+        explicit MockWriteHandle(MockMemoryTransferService* service);
+        ~MockWriteHandle() override;
+
+        size_t SerializeCreateSize() override;
+        void SerializeCreate(void* serializePointer) override;
+        void* GetData() override;
+        size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+        void SerializeDataUpdate(void* serializePointer, size_t offset, size_t size) override;
+
+      private:
+        MockMemoryTransferService* mService;
+    };
+
+    MockMemoryTransferService();
+    ~MockMemoryTransferService() override;
+
+    ReadHandle* CreateReadHandle(size_t) override;
+    WriteHandle* CreateWriteHandle(size_t) override;
+
+    MockReadHandle* NewReadHandle();
+    MockWriteHandle* NewWriteHandle();
+
+    MOCK_METHOD(ReadHandle*, OnCreateReadHandle, (size_t));
+    MOCK_METHOD(WriteHandle*, OnCreateWriteHandle, (size_t));
+
+    MOCK_METHOD(size_t, OnReadHandleSerializeCreateSize, (const ReadHandle*));
+    MOCK_METHOD(void, OnReadHandleSerializeCreate, (const ReadHandle*, void* serializePointer));
+    MOCK_METHOD((const void*), OnReadHandleGetData, (const ReadHandle*));
+    MOCK_METHOD(bool,
+                OnReadHandleDeserializeDataUpdate,
+                (const ReadHandle*,
+                 const uint32_t* deserializePointer,
+                 size_t deserializeSize,
+                 size_t offset,
+                 size_t size));
+    MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle*));
+
+    MOCK_METHOD(size_t, OnWriteHandleSerializeCreateSize, (const void* WriteHandle));
+    MOCK_METHOD(void,
+                OnWriteHandleSerializeCreate,
+                (const void* WriteHandle, void* serializePointer));
+    MOCK_METHOD((void*), OnWriteHandleGetData, (const void* WriteHandle));
+    MOCK_METHOD(size_t,
+                OnWriteHandleSizeOfSerializeDataUpdate,
+                (const void* WriteHandle, size_t offset, size_t size));
+    MOCK_METHOD(size_t,
+                OnWriteHandleSerializeDataUpdate,
+                (const void* WriteHandle, void* serializePointer, size_t offset, size_t size));
+    MOCK_METHOD(void, OnWriteHandleDestroy, (const void* WriteHandle));
+};
+
 }  // namespace dawn::wire::client
 
 #endif  // SRC_DAWN_WIRE_CLIENT_CLIENTMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/src/dawn/wire/client/Device.cpp b/src/dawn/wire/client/Device.cpp
index ddcb280..b672d39 100644
--- a/src/dawn/wire/client/Device.cpp
+++ b/src/dawn/wire/client/Device.cpp
@@ -24,304 +24,301 @@
 
 namespace dawn::wire::client {
 
-    Device::Device(Client* clientIn, uint32_t initialRefcount, uint32_t initialId)
-        : ObjectBase(clientIn, initialRefcount, initialId), mIsAlive(std::make_shared<bool>()) {
+Device::Device(Client* clientIn, uint32_t initialRefcount, uint32_t initialId)
+    : ObjectBase(clientIn, initialRefcount, initialId), mIsAlive(std::make_shared<bool>()) {
 #if defined(DAWN_ENABLE_ASSERTS)
-        mErrorCallback = [](WGPUErrorType, char const*, void*) {
-            static bool calledOnce = false;
-            if (!calledOnce) {
-                calledOnce = true;
-                dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
-                                      "probably not intended. If you really want to ignore errors "
-                                      "and suppress this message, set the callback to null.";
-            }
-        };
+    mErrorCallback = [](WGPUErrorType, char const*, void*) {
+        static bool calledOnce = false;
+        if (!calledOnce) {
+            calledOnce = true;
+            dawn::WarningLog() << "No Dawn device uncaptured error callback was set. This is "
+                                  "probably not intended. If you really want to ignore errors "
+                                  "and suppress this message, set the callback to null.";
+        }
+    };
 
-        mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
-            static bool calledOnce = false;
-            if (!calledOnce) {
-                calledOnce = true;
-                dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
-                                      "intended. If you really want to ignore device lost "
-                                      "and suppress this message, set the callback to null.";
-            }
-        };
+    mDeviceLostCallback = [](WGPUDeviceLostReason, char const*, void*) {
+        static bool calledOnce = false;
+        if (!calledOnce) {
+            calledOnce = true;
+            dawn::WarningLog() << "No Dawn device lost callback was set. This is probably not "
+                                  "intended. If you really want to ignore device lost "
+                                  "and suppress this message, set the callback to null.";
+        }
+    };
 #endif  // DAWN_ENABLE_ASSERTS
-    }
+}
 
-    Device::~Device() {
-        mErrorScopes.CloseAll([](ErrorScopeData* request) {
-            request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
-                              request->userdata);
-        });
+Device::~Device() {
+    mErrorScopes.CloseAll([](ErrorScopeData* request) {
+        request->callback(WGPUErrorType_Unknown, "Device destroyed before callback",
+                          request->userdata);
+    });
 
-        mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
-            if (request->createComputePipelineAsyncCallback != nullptr) {
-                request->createComputePipelineAsyncCallback(
-                    WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
-                    "Device destroyed before callback", request->userdata);
-            } else {
-                ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
-                request->createRenderPipelineAsyncCallback(
-                    WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
-                    "Device destroyed before callback", request->userdata);
-            }
-        });
-    }
-
-    bool Device::GetLimits(WGPUSupportedLimits* limits) const {
-        return mLimitsAndFeatures.GetLimits(limits);
-    }
-
-    bool Device::HasFeature(WGPUFeatureName feature) const {
-        return mLimitsAndFeatures.HasFeature(feature);
-    }
-
-    size_t Device::EnumerateFeatures(WGPUFeatureName* features) const {
-        return mLimitsAndFeatures.EnumerateFeatures(features);
-    }
-
-    void Device::SetLimits(const WGPUSupportedLimits* limits) {
-        return mLimitsAndFeatures.SetLimits(limits);
-    }
-
-    void Device::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
-        return mLimitsAndFeatures.SetFeatures(features, featuresCount);
-    }
-
-    void Device::HandleError(WGPUErrorType errorType, const char* message) {
-        if (mErrorCallback) {
-            mErrorCallback(errorType, message, mErrorUserdata);
+    mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+        if (request->createComputePipelineAsyncCallback != nullptr) {
+            request->createComputePipelineAsyncCallback(
+                WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                "Device destroyed before callback", request->userdata);
+        } else {
+            ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+            request->createRenderPipelineAsyncCallback(
+                WGPUCreatePipelineAsyncStatus_DeviceDestroyed, nullptr,
+                "Device destroyed before callback", request->userdata);
         }
-    }
+    });
+}
 
-    void Device::HandleLogging(WGPULoggingType loggingType, const char* message) {
-        if (mLoggingCallback) {
-            // Since client always run in single thread, calling the callback directly is safe.
-            mLoggingCallback(loggingType, message, mLoggingUserdata);
+bool Device::GetLimits(WGPUSupportedLimits* limits) const {
+    return mLimitsAndFeatures.GetLimits(limits);
+}
+
+bool Device::HasFeature(WGPUFeatureName feature) const {
+    return mLimitsAndFeatures.HasFeature(feature);
+}
+
+size_t Device::EnumerateFeatures(WGPUFeatureName* features) const {
+    return mLimitsAndFeatures.EnumerateFeatures(features);
+}
+
+void Device::SetLimits(const WGPUSupportedLimits* limits) {
+    return mLimitsAndFeatures.SetLimits(limits);
+}
+
+void Device::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+    return mLimitsAndFeatures.SetFeatures(features, featuresCount);
+}
+
+void Device::HandleError(WGPUErrorType errorType, const char* message) {
+    if (mErrorCallback) {
+        mErrorCallback(errorType, message, mErrorUserdata);
+    }
+}
+
+void Device::HandleLogging(WGPULoggingType loggingType, const char* message) {
+    if (mLoggingCallback) {
+        // Since client always run in single thread, calling the callback directly is safe.
+        mLoggingCallback(loggingType, message, mLoggingUserdata);
+    }
+}
+
+void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
+    if (mDeviceLostCallback && !mDidRunLostCallback) {
+        mDidRunLostCallback = true;
+        mDeviceLostCallback(reason, message, mDeviceLostUserdata);
+    }
+}
+
+void Device::CancelCallbacksForDisconnect() {
+    mErrorScopes.CloseAll([](ErrorScopeData* request) {
+        request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
+    });
+
+    mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
+        if (request->createComputePipelineAsyncCallback != nullptr) {
+            request->createComputePipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
+                                                        nullptr, "Device lost", request->userdata);
+        } else {
+            ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
+            request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
+                                                       nullptr, "Device lost", request->userdata);
         }
-    }
+    });
+}
 
-    void Device::HandleDeviceLost(WGPUDeviceLostReason reason, const char* message) {
-        if (mDeviceLostCallback && !mDidRunLostCallback) {
-            mDidRunLostCallback = true;
-            mDeviceLostCallback(reason, message, mDeviceLostUserdata);
-        }
-    }
+std::weak_ptr<bool> Device::GetAliveWeakPtr() {
+    return mIsAlive;
+}
 
-    void Device::CancelCallbacksForDisconnect() {
-        mErrorScopes.CloseAll([](ErrorScopeData* request) {
-            request->callback(WGPUErrorType_DeviceLost, "Device lost", request->userdata);
-        });
+void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
+    mErrorCallback = errorCallback;
+    mErrorUserdata = errorUserdata;
+}
 
-        mCreatePipelineAsyncRequests.CloseAll([](CreatePipelineAsyncRequest* request) {
-            if (request->createComputePipelineAsyncCallback != nullptr) {
-                request->createComputePipelineAsyncCallback(
-                    WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr, "Device lost",
-                    request->userdata);
-            } else {
-                ASSERT(request->createRenderPipelineAsyncCallback != nullptr);
-                request->createRenderPipelineAsyncCallback(WGPUCreatePipelineAsyncStatus_DeviceLost,
-                                                           nullptr, "Device lost",
-                                                           request->userdata);
-            }
-        });
-    }
+void Device::SetLoggingCallback(WGPULoggingCallback callback, void* userdata) {
+    mLoggingCallback = callback;
+    mLoggingUserdata = userdata;
+}
 
-    std::weak_ptr<bool> Device::GetAliveWeakPtr() {
-        return mIsAlive;
-    }
+void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
+    mDeviceLostCallback = callback;
+    mDeviceLostUserdata = userdata;
+}
 
-    void Device::SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata) {
-        mErrorCallback = errorCallback;
-        mErrorUserdata = errorUserdata;
-    }
-
-    void Device::SetLoggingCallback(WGPULoggingCallback callback, void* userdata) {
-        mLoggingCallback = callback;
-        mLoggingUserdata = userdata;
-    }
-
-    void Device::SetDeviceLostCallback(WGPUDeviceLostCallback callback, void* userdata) {
-        mDeviceLostCallback = callback;
-        mDeviceLostUserdata = userdata;
-    }
-
-    bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
-        // TODO(crbug.com/dawn/1324) Replace bool return with void when users are updated.
-        if (client->IsDisconnected()) {
-            callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
-            return true;
-        }
-
-        uint64_t serial = mErrorScopes.Add({callback, userdata});
-        DevicePopErrorScopeCmd cmd;
-        cmd.deviceId = this->id;
-        cmd.requestSerial = serial;
-        client->SerializeCommand(cmd);
+bool Device::PopErrorScope(WGPUErrorCallback callback, void* userdata) {
+    // TODO(crbug.com/dawn/1324) Replace bool return with void when users are updated.
+    if (client->IsDisconnected()) {
+        callback(WGPUErrorType_DeviceLost, "GPU device disconnected", userdata);
         return true;
     }
 
-    bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
-                                         WGPUErrorType type,
-                                         const char* message) {
-        switch (type) {
-            case WGPUErrorType_NoError:
-            case WGPUErrorType_Validation:
-            case WGPUErrorType_OutOfMemory:
-            case WGPUErrorType_Unknown:
-            case WGPUErrorType_DeviceLost:
-                break;
-            default:
-                return false;
-        }
+    uint64_t serial = mErrorScopes.Add({callback, userdata});
+    DevicePopErrorScopeCmd cmd;
+    cmd.deviceId = this->id;
+    cmd.requestSerial = serial;
+    client->SerializeCommand(cmd);
+    return true;
+}
 
-        ErrorScopeData request;
-        if (!mErrorScopes.Acquire(requestSerial, &request)) {
+bool Device::OnPopErrorScopeCallback(uint64_t requestSerial,
+                                     WGPUErrorType type,
+                                     const char* message) {
+    switch (type) {
+        case WGPUErrorType_NoError:
+        case WGPUErrorType_Validation:
+        case WGPUErrorType_OutOfMemory:
+        case WGPUErrorType_Unknown:
+        case WGPUErrorType_DeviceLost:
+            break;
+        default:
             return false;
-        }
-
-        request.callback(type, message, request.userdata);
-        return true;
     }
 
-    void Device::InjectError(WGPUErrorType type, const char* message) {
-        DeviceInjectErrorCmd cmd;
+    ErrorScopeData request;
+    if (!mErrorScopes.Acquire(requestSerial, &request)) {
+        return false;
+    }
+
+    request.callback(type, message, request.userdata);
+    return true;
+}
+
+void Device::InjectError(WGPUErrorType type, const char* message) {
+    DeviceInjectErrorCmd cmd;
+    cmd.self = ToAPI(this);
+    cmd.type = type;
+    cmd.message = message;
+    client->SerializeCommand(cmd);
+}
+
+WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
+    return Buffer::Create(this, descriptor);
+}
+
+WGPUBuffer Device::CreateErrorBuffer() {
+    return Buffer::CreateError(this);
+}
+
+WGPUQueue Device::GetQueue() {
+    // The queue is lazily created because if a Device is created by
+    // Reserve/Inject, we cannot send the GetQueue message until
+    // it has been injected on the Server. It cannot happen immediately
+    // on construction.
+    if (mQueue == nullptr) {
+        // Get the primary queue for this device.
+        auto* allocation = client->QueueAllocator().New(client);
+        mQueue = allocation->object.get();
+
+        DeviceGetQueueCmd cmd;
         cmd.self = ToAPI(this);
-        cmd.type = type;
-        cmd.message = message;
-        client->SerializeCommand(cmd);
-    }
-
-    WGPUBuffer Device::CreateBuffer(const WGPUBufferDescriptor* descriptor) {
-        return Buffer::Create(this, descriptor);
-    }
-
-    WGPUBuffer Device::CreateErrorBuffer() {
-        return Buffer::CreateError(this);
-    }
-
-    WGPUQueue Device::GetQueue() {
-        // The queue is lazily created because if a Device is created by
-        // Reserve/Inject, we cannot send the GetQueue message until
-        // it has been injected on the Server. It cannot happen immediately
-        // on construction.
-        if (mQueue == nullptr) {
-            // Get the primary queue for this device.
-            auto* allocation = client->QueueAllocator().New(client);
-            mQueue = allocation->object.get();
-
-            DeviceGetQueueCmd cmd;
-            cmd.self = ToAPI(this);
-            cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
-
-            client->SerializeCommand(cmd);
-        }
-
-        mQueue->refcount++;
-        return ToAPI(mQueue);
-    }
-
-    void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
-                                            WGPUCreateComputePipelineAsyncCallback callback,
-                                            void* userdata) {
-        if (client->IsDisconnected()) {
-            return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
-                            "GPU device disconnected", userdata);
-        }
-
-        auto* allocation = client->ComputePipelineAllocator().New(client);
-
-        CreatePipelineAsyncRequest request = {};
-        request.createComputePipelineAsyncCallback = callback;
-        request.userdata = userdata;
-        request.pipelineObjectID = allocation->object->id;
-
-        uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
-
-        DeviceCreateComputePipelineAsyncCmd cmd;
-        cmd.deviceId = this->id;
-        cmd.descriptor = descriptor;
-        cmd.requestSerial = serial;
-        cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
+        cmd.result = ObjectHandle{allocation->object->id, allocation->generation};
 
         client->SerializeCommand(cmd);
     }
 
-    bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
-                                                      WGPUCreatePipelineAsyncStatus status,
-                                                      const char* message) {
-        CreatePipelineAsyncRequest request;
-        if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
-            return false;
-        }
+    mQueue->refcount++;
+    return ToAPI(mQueue);
+}
 
-        auto pipelineAllocation =
-            client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
+void Device::CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+                                        WGPUCreateComputePipelineAsyncCallback callback,
+                                        void* userdata) {
+    if (client->IsDisconnected()) {
+        return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                        "GPU device disconnected", userdata);
+    }
 
-        // If the return status is a failure we should give a null pipeline to the callback and
-        // free the allocation.
-        if (status != WGPUCreatePipelineAsyncStatus_Success) {
-            client->ComputePipelineAllocator().Free(pipelineAllocation);
-            request.createComputePipelineAsyncCallback(status, nullptr, message, request.userdata);
-            return true;
-        }
+    auto* allocation = client->ComputePipelineAllocator().New(client);
 
-        WGPUComputePipeline pipeline = reinterpret_cast<WGPUComputePipeline>(pipelineAllocation);
-        request.createComputePipelineAsyncCallback(status, pipeline, message, request.userdata);
+    CreatePipelineAsyncRequest request = {};
+    request.createComputePipelineAsyncCallback = callback;
+    request.userdata = userdata;
+    request.pipelineObjectID = allocation->object->id;
 
+    uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
+
+    DeviceCreateComputePipelineAsyncCmd cmd;
+    cmd.deviceId = this->id;
+    cmd.descriptor = descriptor;
+    cmd.requestSerial = serial;
+    cmd.pipelineObjectHandle = ObjectHandle{allocation->object->id, allocation->generation};
+
+    client->SerializeCommand(cmd);
+}
+
+bool Device::OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+                                                  WGPUCreatePipelineAsyncStatus status,
+                                                  const char* message) {
+    CreatePipelineAsyncRequest request;
+    if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+        return false;
+    }
+
+    auto pipelineAllocation =
+        client->ComputePipelineAllocator().GetObject(request.pipelineObjectID);
+
+    // If the return status is a failure we should give a null pipeline to the callback and
+    // free the allocation.
+    if (status != WGPUCreatePipelineAsyncStatus_Success) {
+        client->ComputePipelineAllocator().Free(pipelineAllocation);
+        request.createComputePipelineAsyncCallback(status, nullptr, message, request.userdata);
         return true;
     }
 
-    void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
-                                           WGPUCreateRenderPipelineAsyncCallback callback,
-                                           void* userdata) {
-        if (client->IsDisconnected()) {
-            return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
-                            "GPU device disconnected", userdata);
-        }
+    WGPUComputePipeline pipeline = reinterpret_cast<WGPUComputePipeline>(pipelineAllocation);
+    request.createComputePipelineAsyncCallback(status, pipeline, message, request.userdata);
 
-        auto* allocation = client->RenderPipelineAllocator().New(client);
+    return true;
+}
 
-        CreatePipelineAsyncRequest request = {};
-        request.createRenderPipelineAsyncCallback = callback;
-        request.userdata = userdata;
-        request.pipelineObjectID = allocation->object->id;
-
-        uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
-
-        DeviceCreateRenderPipelineAsyncCmd cmd;
-        cmd.deviceId = this->id;
-        cmd.descriptor = descriptor;
-        cmd.requestSerial = serial;
-        cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
-
-        client->SerializeCommand(cmd);
+void Device::CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+                                       WGPUCreateRenderPipelineAsyncCallback callback,
+                                       void* userdata) {
+    if (client->IsDisconnected()) {
+        return callback(WGPUCreatePipelineAsyncStatus_DeviceLost, nullptr,
+                        "GPU device disconnected", userdata);
     }
 
-    bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
-                                                     WGPUCreatePipelineAsyncStatus status,
-                                                     const char* message) {
-        CreatePipelineAsyncRequest request;
-        if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
-            return false;
-        }
+    auto* allocation = client->RenderPipelineAllocator().New(client);
 
-        auto pipelineAllocation =
-            client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
+    CreatePipelineAsyncRequest request = {};
+    request.createRenderPipelineAsyncCallback = callback;
+    request.userdata = userdata;
+    request.pipelineObjectID = allocation->object->id;
 
-        // If the return status is a failure we should give a null pipeline to the callback and
-        // free the allocation.
-        if (status != WGPUCreatePipelineAsyncStatus_Success) {
-            client->RenderPipelineAllocator().Free(pipelineAllocation);
-            request.createRenderPipelineAsyncCallback(status, nullptr, message, request.userdata);
-            return true;
-        }
+    uint64_t serial = mCreatePipelineAsyncRequests.Add(std::move(request));
 
-        WGPURenderPipeline pipeline = reinterpret_cast<WGPURenderPipeline>(pipelineAllocation);
-        request.createRenderPipelineAsyncCallback(status, pipeline, message, request.userdata);
+    DeviceCreateRenderPipelineAsyncCmd cmd;
+    cmd.deviceId = this->id;
+    cmd.descriptor = descriptor;
+    cmd.requestSerial = serial;
+    cmd.pipelineObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
 
+    client->SerializeCommand(cmd);
+}
+
+bool Device::OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+                                                 WGPUCreatePipelineAsyncStatus status,
+                                                 const char* message) {
+    CreatePipelineAsyncRequest request;
+    if (!mCreatePipelineAsyncRequests.Acquire(requestSerial, &request)) {
+        return false;
+    }
+
+    auto pipelineAllocation = client->RenderPipelineAllocator().GetObject(request.pipelineObjectID);
+
+    // If the return status is a failure we should give a null pipeline to the callback and
+    // free the allocation.
+    if (status != WGPUCreatePipelineAsyncStatus_Success) {
+        client->RenderPipelineAllocator().Free(pipelineAllocation);
+        request.createRenderPipelineAsyncCallback(status, nullptr, message, request.userdata);
         return true;
     }
 
+    WGPURenderPipeline pipeline = reinterpret_cast<WGPURenderPipeline>(pipelineAllocation);
+    request.createRenderPipelineAsyncCallback(status, pipeline, message, request.userdata);
+
+    return true;
+}
+
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Device.h b/src/dawn/wire/client/Device.h
index 10cd96f..f613e6a 100644
--- a/src/dawn/wire/client/Device.h
+++ b/src/dawn/wire/client/Device.h
@@ -27,83 +27,81 @@
 
 namespace dawn::wire::client {
 
-    class Client;
-    class Queue;
+class Client;
+class Queue;
 
-    class Device final : public ObjectBase {
-      public:
-        Device(Client* client, uint32_t refcount, uint32_t id);
-        ~Device();
+class Device final : public ObjectBase {
+  public:
+    Device(Client* client, uint32_t refcount, uint32_t id);
+    ~Device();
 
-        void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
-        void SetLoggingCallback(WGPULoggingCallback errorCallback, void* errorUserdata);
-        void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
-        void InjectError(WGPUErrorType type, const char* message);
-        void PushErrorScope(WGPUErrorFilter filter);
-        bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
-        WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
-        WGPUBuffer CreateErrorBuffer();
-        WGPUComputePipeline CreateComputePipeline(WGPUComputePipelineDescriptor const* descriptor);
-        void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
-                                        WGPUCreateComputePipelineAsyncCallback callback,
-                                        void* userdata);
-        void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
-                                       WGPUCreateRenderPipelineAsyncCallback callback,
-                                       void* userdata);
+    void SetUncapturedErrorCallback(WGPUErrorCallback errorCallback, void* errorUserdata);
+    void SetLoggingCallback(WGPULoggingCallback errorCallback, void* errorUserdata);
+    void SetDeviceLostCallback(WGPUDeviceLostCallback errorCallback, void* errorUserdata);
+    void InjectError(WGPUErrorType type, const char* message);
+    void PushErrorScope(WGPUErrorFilter filter);
+    bool PopErrorScope(WGPUErrorCallback callback, void* userdata);
+    WGPUBuffer CreateBuffer(const WGPUBufferDescriptor* descriptor);
+    WGPUBuffer CreateErrorBuffer();
+    WGPUComputePipeline CreateComputePipeline(WGPUComputePipelineDescriptor const* descriptor);
+    void CreateComputePipelineAsync(WGPUComputePipelineDescriptor const* descriptor,
+                                    WGPUCreateComputePipelineAsyncCallback callback,
+                                    void* userdata);
+    void CreateRenderPipelineAsync(WGPURenderPipelineDescriptor const* descriptor,
+                                   WGPUCreateRenderPipelineAsyncCallback callback,
+                                   void* userdata);
 
-        void HandleError(WGPUErrorType errorType, const char* message);
-        void HandleLogging(WGPULoggingType loggingType, const char* message);
-        void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
-        bool OnPopErrorScopeCallback(uint64_t requestSerial,
-                                     WGPUErrorType type,
-                                     const char* message);
-        bool OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
-                                                  WGPUCreatePipelineAsyncStatus status,
-                                                  const char* message);
-        bool OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
-                                                 WGPUCreatePipelineAsyncStatus status,
-                                                 const char* message);
+    void HandleError(WGPUErrorType errorType, const char* message);
+    void HandleLogging(WGPULoggingType loggingType, const char* message);
+    void HandleDeviceLost(WGPUDeviceLostReason reason, const char* message);
+    bool OnPopErrorScopeCallback(uint64_t requestSerial, WGPUErrorType type, const char* message);
+    bool OnCreateComputePipelineAsyncCallback(uint64_t requestSerial,
+                                              WGPUCreatePipelineAsyncStatus status,
+                                              const char* message);
+    bool OnCreateRenderPipelineAsyncCallback(uint64_t requestSerial,
+                                             WGPUCreatePipelineAsyncStatus status,
+                                             const char* message);
 
-        bool GetLimits(WGPUSupportedLimits* limits) const;
-        bool HasFeature(WGPUFeatureName feature) const;
-        size_t EnumerateFeatures(WGPUFeatureName* features) const;
-        void SetLimits(const WGPUSupportedLimits* limits);
-        void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+    bool GetLimits(WGPUSupportedLimits* limits) const;
+    bool HasFeature(WGPUFeatureName feature) const;
+    size_t EnumerateFeatures(WGPUFeatureName* features) const;
+    void SetLimits(const WGPUSupportedLimits* limits);
+    void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
 
-        WGPUQueue GetQueue();
+    WGPUQueue GetQueue();
 
-        void CancelCallbacksForDisconnect() override;
+    void CancelCallbacksForDisconnect() override;
 
-        std::weak_ptr<bool> GetAliveWeakPtr();
+    std::weak_ptr<bool> GetAliveWeakPtr();
 
-      private:
-        LimitsAndFeatures mLimitsAndFeatures;
-        struct ErrorScopeData {
-            WGPUErrorCallback callback = nullptr;
-            void* userdata = nullptr;
-        };
-        RequestTracker<ErrorScopeData> mErrorScopes;
-
-        struct CreatePipelineAsyncRequest {
-            WGPUCreateComputePipelineAsyncCallback createComputePipelineAsyncCallback = nullptr;
-            WGPUCreateRenderPipelineAsyncCallback createRenderPipelineAsyncCallback = nullptr;
-            void* userdata = nullptr;
-            ObjectId pipelineObjectID;
-        };
-        RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
-
-        WGPUErrorCallback mErrorCallback = nullptr;
-        WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
-        WGPULoggingCallback mLoggingCallback = nullptr;
-        bool mDidRunLostCallback = false;
-        void* mErrorUserdata = nullptr;
-        void* mDeviceLostUserdata = nullptr;
-        void* mLoggingUserdata = nullptr;
-
-        Queue* mQueue = nullptr;
-
-        std::shared_ptr<bool> mIsAlive;
+  private:
+    LimitsAndFeatures mLimitsAndFeatures;
+    struct ErrorScopeData {
+        WGPUErrorCallback callback = nullptr;
+        void* userdata = nullptr;
     };
+    RequestTracker<ErrorScopeData> mErrorScopes;
+
+    struct CreatePipelineAsyncRequest {
+        WGPUCreateComputePipelineAsyncCallback createComputePipelineAsyncCallback = nullptr;
+        WGPUCreateRenderPipelineAsyncCallback createRenderPipelineAsyncCallback = nullptr;
+        void* userdata = nullptr;
+        ObjectId pipelineObjectID;
+    };
+    RequestTracker<CreatePipelineAsyncRequest> mCreatePipelineAsyncRequests;
+
+    WGPUErrorCallback mErrorCallback = nullptr;
+    WGPUDeviceLostCallback mDeviceLostCallback = nullptr;
+    WGPULoggingCallback mLoggingCallback = nullptr;
+    bool mDidRunLostCallback = false;
+    void* mErrorUserdata = nullptr;
+    void* mDeviceLostUserdata = nullptr;
+    void* mLoggingUserdata = nullptr;
+
+    Queue* mQueue = nullptr;
+
+    std::shared_ptr<bool> mIsAlive;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/Instance.cpp b/src/dawn/wire/client/Instance.cpp
index de27d47..b3a996f 100644
--- a/src/dawn/wire/client/Instance.cpp
+++ b/src/dawn/wire/client/Instance.cpp
@@ -18,84 +18,84 @@
 
 namespace dawn::wire::client {
 
-    Instance::~Instance() {
-        mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
-            request->callback(WGPURequestAdapterStatus_Unknown, nullptr,
-                              "Instance destroyed before callback", request->userdata);
-        });
+Instance::~Instance() {
+    mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+        request->callback(WGPURequestAdapterStatus_Unknown, nullptr,
+                          "Instance destroyed before callback", request->userdata);
+    });
+}
+
+void Instance::CancelCallbacksForDisconnect() {
+    mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
+        request->callback(WGPURequestAdapterStatus_Unknown, nullptr, "GPU connection lost",
+                          request->userdata);
+    });
+}
+
+void Instance::RequestAdapter(const WGPURequestAdapterOptions* options,
+                              WGPURequestAdapterCallback callback,
+                              void* userdata) {
+    if (client->IsDisconnected()) {
+        callback(WGPURequestAdapterStatus_Error, nullptr, "GPU connection lost", userdata);
+        return;
     }
 
-    void Instance::CancelCallbacksForDisconnect() {
-        mRequestAdapterRequests.CloseAll([](RequestAdapterData* request) {
-            request->callback(WGPURequestAdapterStatus_Unknown, nullptr, "GPU connection lost",
-                              request->userdata);
-        });
-    }
+    auto* allocation = client->AdapterAllocator().New(client);
+    uint64_t serial = mRequestAdapterRequests.Add({callback, allocation->object->id, userdata});
 
-    void Instance::RequestAdapter(const WGPURequestAdapterOptions* options,
-                                  WGPURequestAdapterCallback callback,
-                                  void* userdata) {
-        if (client->IsDisconnected()) {
-            callback(WGPURequestAdapterStatus_Error, nullptr, "GPU connection lost", userdata);
-            return;
-        }
+    InstanceRequestAdapterCmd cmd;
+    cmd.instanceId = this->id;
+    cmd.requestSerial = serial;
+    cmd.adapterObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
+    cmd.options = options;
 
-        auto* allocation = client->AdapterAllocator().New(client);
-        uint64_t serial = mRequestAdapterRequests.Add({callback, allocation->object->id, userdata});
+    client->SerializeCommand(cmd);
+}
 
-        InstanceRequestAdapterCmd cmd;
-        cmd.instanceId = this->id;
-        cmd.requestSerial = serial;
-        cmd.adapterObjectHandle = ObjectHandle(allocation->object->id, allocation->generation);
-        cmd.options = options;
-
-        client->SerializeCommand(cmd);
-    }
-
-    bool Client::DoInstanceRequestAdapterCallback(Instance* instance,
-                                                  uint64_t requestSerial,
-                                                  WGPURequestAdapterStatus status,
-                                                  const char* message,
-                                                  const WGPUAdapterProperties* properties,
-                                                  const WGPUSupportedLimits* limits,
-                                                  uint32_t featuresCount,
-                                                  const WGPUFeatureName* features) {
-        // May have been deleted or recreated so this isn't an error.
-        if (instance == nullptr) {
-            return true;
-        }
-        return instance->OnRequestAdapterCallback(requestSerial, status, message, properties,
-                                                  limits, featuresCount, features);
-    }
-
-    bool Instance::OnRequestAdapterCallback(uint64_t requestSerial,
-                                            WGPURequestAdapterStatus status,
-                                            const char* message,
-                                            const WGPUAdapterProperties* properties,
-                                            const WGPUSupportedLimits* limits,
-                                            uint32_t featuresCount,
-                                            const WGPUFeatureName* features) {
-        RequestAdapterData request;
-        if (!mRequestAdapterRequests.Acquire(requestSerial, &request)) {
-            return false;
-        }
-
-        Adapter* adapter = client->AdapterAllocator().GetObject(request.adapterObjectId);
-
-        // If the return status is a failure we should give a null adapter to the callback and
-        // free the allocation.
-        if (status != WGPURequestAdapterStatus_Success) {
-            client->AdapterAllocator().Free(adapter);
-            request.callback(status, nullptr, message, request.userdata);
-            return true;
-        }
-
-        adapter->SetProperties(properties);
-        adapter->SetLimits(limits);
-        adapter->SetFeatures(features, featuresCount);
-
-        request.callback(status, ToAPI(adapter), message, request.userdata);
+bool Client::DoInstanceRequestAdapterCallback(Instance* instance,
+                                              uint64_t requestSerial,
+                                              WGPURequestAdapterStatus status,
+                                              const char* message,
+                                              const WGPUAdapterProperties* properties,
+                                              const WGPUSupportedLimits* limits,
+                                              uint32_t featuresCount,
+                                              const WGPUFeatureName* features) {
+    // May have been deleted or recreated so this isn't an error.
+    if (instance == nullptr) {
         return true;
     }
+    return instance->OnRequestAdapterCallback(requestSerial, status, message, properties, limits,
+                                              featuresCount, features);
+}
+
+bool Instance::OnRequestAdapterCallback(uint64_t requestSerial,
+                                        WGPURequestAdapterStatus status,
+                                        const char* message,
+                                        const WGPUAdapterProperties* properties,
+                                        const WGPUSupportedLimits* limits,
+                                        uint32_t featuresCount,
+                                        const WGPUFeatureName* features) {
+    RequestAdapterData request;
+    if (!mRequestAdapterRequests.Acquire(requestSerial, &request)) {
+        return false;
+    }
+
+    Adapter* adapter = client->AdapterAllocator().GetObject(request.adapterObjectId);
+
+    // If the return status is a failure we should give a null adapter to the callback and
+    // free the allocation.
+    if (status != WGPURequestAdapterStatus_Success) {
+        client->AdapterAllocator().Free(adapter);
+        request.callback(status, nullptr, message, request.userdata);
+        return true;
+    }
+
+    adapter->SetProperties(properties);
+    adapter->SetLimits(limits);
+    adapter->SetFeatures(features, featuresCount);
+
+    request.callback(status, ToAPI(adapter), message, request.userdata);
+    return true;
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Instance.h b/src/dawn/wire/client/Instance.h
index 8f11e72..a4c74dc 100644
--- a/src/dawn/wire/client/Instance.h
+++ b/src/dawn/wire/client/Instance.h
@@ -24,32 +24,32 @@
 
 namespace dawn::wire::client {
 
-    class Instance final : public ObjectBase {
-      public:
-        using ObjectBase::ObjectBase;
+class Instance final : public ObjectBase {
+  public:
+    using ObjectBase::ObjectBase;
 
-        ~Instance();
-        void CancelCallbacksForDisconnect() override;
+    ~Instance();
+    void CancelCallbacksForDisconnect() override;
 
-        void RequestAdapter(const WGPURequestAdapterOptions* options,
-                            WGPURequestAdapterCallback callback,
-                            void* userdata);
-        bool OnRequestAdapterCallback(uint64_t requestSerial,
-                                      WGPURequestAdapterStatus status,
-                                      const char* message,
-                                      const WGPUAdapterProperties* properties,
-                                      const WGPUSupportedLimits* limits,
-                                      uint32_t featuresCount,
-                                      const WGPUFeatureName* features);
+    void RequestAdapter(const WGPURequestAdapterOptions* options,
+                        WGPURequestAdapterCallback callback,
+                        void* userdata);
+    bool OnRequestAdapterCallback(uint64_t requestSerial,
+                                  WGPURequestAdapterStatus status,
+                                  const char* message,
+                                  const WGPUAdapterProperties* properties,
+                                  const WGPUSupportedLimits* limits,
+                                  uint32_t featuresCount,
+                                  const WGPUFeatureName* features);
 
-      private:
-        struct RequestAdapterData {
-            WGPURequestAdapterCallback callback = nullptr;
-            ObjectId adapterObjectId;
-            void* userdata = nullptr;
-        };
-        RequestTracker<RequestAdapterData> mRequestAdapterRequests;
+  private:
+    struct RequestAdapterData {
+        WGPURequestAdapterCallback callback = nullptr;
+        ObjectId adapterObjectId;
+        void* userdata = nullptr;
     };
+    RequestTracker<RequestAdapterData> mRequestAdapterRequests;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/LimitsAndFeatures.cpp b/src/dawn/wire/client/LimitsAndFeatures.cpp
index a2c753c..95460cd 100644
--- a/src/dawn/wire/client/LimitsAndFeatures.cpp
+++ b/src/dawn/wire/client/LimitsAndFeatures.cpp
@@ -19,45 +19,45 @@
 
 namespace dawn::wire::client {
 
-    bool LimitsAndFeatures::GetLimits(WGPUSupportedLimits* limits) const {
-        ASSERT(limits != nullptr);
-        if (limits->nextInChain != nullptr) {
-            return false;
-        }
-        *limits = mLimits;
-        return true;
+bool LimitsAndFeatures::GetLimits(WGPUSupportedLimits* limits) const {
+    ASSERT(limits != nullptr);
+    if (limits->nextInChain != nullptr) {
+        return false;
     }
+    *limits = mLimits;
+    return true;
+}
 
-    bool LimitsAndFeatures::HasFeature(WGPUFeatureName feature) const {
-        return mFeatures.count(feature) != 0;
-    }
+bool LimitsAndFeatures::HasFeature(WGPUFeatureName feature) const {
+    return mFeatures.count(feature) != 0;
+}
 
-    size_t LimitsAndFeatures::EnumerateFeatures(WGPUFeatureName* features) const {
-        if (features != nullptr) {
-            for (WGPUFeatureName f : mFeatures) {
-                *features = f;
-                ++features;
-            }
-        }
-        return mFeatures.size();
-    }
-
-    void LimitsAndFeatures::SetLimits(const WGPUSupportedLimits* limits) {
-        ASSERT(limits != nullptr);
-        mLimits = *limits;
-        mLimits.nextInChain = nullptr;
-    }
-
-    void LimitsAndFeatures::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
-        ASSERT(features != nullptr || featuresCount == 0);
-        for (uint32_t i = 0; i < featuresCount; ++i) {
-            // Filter out features that the server supports, but the client does not.
-            // (Could be different versions)
-            if (!IsFeatureSupported(features[i])) {
-                continue;
-            }
-            mFeatures.insert(features[i]);
+size_t LimitsAndFeatures::EnumerateFeatures(WGPUFeatureName* features) const {
+    if (features != nullptr) {
+        for (WGPUFeatureName f : mFeatures) {
+            *features = f;
+            ++features;
         }
     }
+    return mFeatures.size();
+}
+
+void LimitsAndFeatures::SetLimits(const WGPUSupportedLimits* limits) {
+    ASSERT(limits != nullptr);
+    mLimits = *limits;
+    mLimits.nextInChain = nullptr;
+}
+
+void LimitsAndFeatures::SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount) {
+    ASSERT(features != nullptr || featuresCount == 0);
+    for (uint32_t i = 0; i < featuresCount; ++i) {
+        // Filter out features that the server supports, but the client does not.
+        // (Could be different versions)
+        if (!IsFeatureSupported(features[i])) {
+            continue;
+        }
+        mFeatures.insert(features[i]);
+    }
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/LimitsAndFeatures.h b/src/dawn/wire/client/LimitsAndFeatures.h
index 567d938..d97b833 100644
--- a/src/dawn/wire/client/LimitsAndFeatures.h
+++ b/src/dawn/wire/client/LimitsAndFeatures.h
@@ -21,19 +21,19 @@
 
 namespace dawn::wire::client {
 
-    class LimitsAndFeatures {
-      public:
-        bool GetLimits(WGPUSupportedLimits* limits) const;
-        bool HasFeature(WGPUFeatureName feature) const;
-        size_t EnumerateFeatures(WGPUFeatureName* features) const;
+class LimitsAndFeatures {
+  public:
+    bool GetLimits(WGPUSupportedLimits* limits) const;
+    bool HasFeature(WGPUFeatureName feature) const;
+    size_t EnumerateFeatures(WGPUFeatureName* features) const;
 
-        void SetLimits(const WGPUSupportedLimits* limits);
-        void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
+    void SetLimits(const WGPUSupportedLimits* limits);
+    void SetFeatures(const WGPUFeatureName* features, uint32_t featuresCount);
 
-      private:
-        WGPUSupportedLimits mLimits;
-        std::unordered_set<WGPUFeatureName> mFeatures;
-    };
+  private:
+    WGPUSupportedLimits mLimits;
+    std::unordered_set<WGPUFeatureName> mFeatures;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/ObjectAllocator.h b/src/dawn/wire/client/ObjectAllocator.h
index 407b997..60b8fa8 100644
--- a/src/dawn/wire/client/ObjectAllocator.h
+++ b/src/dawn/wire/client/ObjectAllocator.h
@@ -26,86 +26,83 @@
 
 namespace dawn::wire::client {
 
-    template <typename T>
-    class ObjectAllocator {
-      public:
-        struct ObjectAndSerial {
-            ObjectAndSerial(std::unique_ptr<T> object, uint32_t generation)
-                : object(std::move(object)), generation(generation) {
-            }
-            std::unique_ptr<T> object;
-            uint32_t generation;
-        };
-
-        ObjectAllocator() {
-            // ID 0 is nullptr
-            mObjects.emplace_back(nullptr, 0);
-        }
-
-        template <typename Client>
-        ObjectAndSerial* New(Client* client) {
-            uint32_t id = GetNewId();
-            auto object = std::make_unique<T>(client, 1, id);
-            client->TrackObject(object.get());
-
-            if (id >= mObjects.size()) {
-                ASSERT(id == mObjects.size());
-                mObjects.emplace_back(std::move(object), 0);
-            } else {
-                ASSERT(mObjects[id].object == nullptr);
-
-                mObjects[id].generation++;
-                // The generation should never overflow. We don't recycle ObjectIds that would
-                // overflow their next generation.
-                ASSERT(mObjects[id].generation != 0);
-
-                mObjects[id].object = std::move(object);
-            }
-
-            return &mObjects[id];
-        }
-        void Free(T* obj) {
-            ASSERT(obj->IsInList());
-            if (DAWN_LIKELY(mObjects[obj->id].generation != std::numeric_limits<uint32_t>::max())) {
-                // Only recycle this ObjectId if the generation won't overflow on the next
-                // allocation.
-                FreeId(obj->id);
-            }
-            mObjects[obj->id].object = nullptr;
-        }
-
-        T* GetObject(uint32_t id) {
-            if (id >= mObjects.size()) {
-                return nullptr;
-            }
-            return mObjects[id].object.get();
-        }
-
-        uint32_t GetGeneration(uint32_t id) {
-            if (id >= mObjects.size()) {
-                return 0;
-            }
-            return mObjects[id].generation;
-        }
-
-      private:
-        uint32_t GetNewId() {
-            if (mFreeIds.empty()) {
-                return mCurrentId++;
-            }
-            uint32_t id = mFreeIds.back();
-            mFreeIds.pop_back();
-            return id;
-        }
-        void FreeId(uint32_t id) {
-            mFreeIds.push_back(id);
-        }
-
-        // 0 is an ID reserved to represent nullptr
-        uint32_t mCurrentId = 1;
-        std::vector<uint32_t> mFreeIds;
-        std::vector<ObjectAndSerial> mObjects;
+template <typename T>
+class ObjectAllocator {
+  public:
+    struct ObjectAndSerial {
+        ObjectAndSerial(std::unique_ptr<T> object, uint32_t generation)
+            : object(std::move(object)), generation(generation) {}
+        std::unique_ptr<T> object;
+        uint32_t generation;
     };
+
+    ObjectAllocator() {
+        // ID 0 is nullptr
+        mObjects.emplace_back(nullptr, 0);
+    }
+
+    template <typename Client>
+    ObjectAndSerial* New(Client* client) {
+        uint32_t id = GetNewId();
+        auto object = std::make_unique<T>(client, 1, id);
+        client->TrackObject(object.get());
+
+        if (id >= mObjects.size()) {
+            ASSERT(id == mObjects.size());
+            mObjects.emplace_back(std::move(object), 0);
+        } else {
+            ASSERT(mObjects[id].object == nullptr);
+
+            mObjects[id].generation++;
+            // The generation should never overflow. We don't recycle ObjectIds that would
+            // overflow their next generation.
+            ASSERT(mObjects[id].generation != 0);
+
+            mObjects[id].object = std::move(object);
+        }
+
+        return &mObjects[id];
+    }
+    void Free(T* obj) {
+        ASSERT(obj->IsInList());
+        if (DAWN_LIKELY(mObjects[obj->id].generation != std::numeric_limits<uint32_t>::max())) {
+            // Only recycle this ObjectId if the generation won't overflow on the next
+            // allocation.
+            FreeId(obj->id);
+        }
+        mObjects[obj->id].object = nullptr;
+    }
+
+    T* GetObject(uint32_t id) {
+        if (id >= mObjects.size()) {
+            return nullptr;
+        }
+        return mObjects[id].object.get();
+    }
+
+    uint32_t GetGeneration(uint32_t id) {
+        if (id >= mObjects.size()) {
+            return 0;
+        }
+        return mObjects[id].generation;
+    }
+
+  private:
+    uint32_t GetNewId() {
+        if (mFreeIds.empty()) {
+            return mCurrentId++;
+        }
+        uint32_t id = mFreeIds.back();
+        mFreeIds.pop_back();
+        return id;
+    }
+    void FreeId(uint32_t id) { mFreeIds.push_back(id); }
+
+    // 0 is an ID reserved to represent nullptr
+    uint32_t mCurrentId = 1;
+    std::vector<uint32_t> mFreeIds;
+    std::vector<ObjectAndSerial> mObjects;
+};
 }  // namespace dawn::wire::client
 
 #endif  // SRC_DAWN_WIRE_CLIENT_OBJECTALLOCATOR_H_
diff --git a/src/dawn/wire/client/ObjectBase.h b/src/dawn/wire/client/ObjectBase.h
index 0d6484e..a404444 100644
--- a/src/dawn/wire/client/ObjectBase.h
+++ b/src/dawn/wire/client/ObjectBase.h
@@ -22,29 +22,25 @@
 
 namespace dawn::wire::client {
 
-    class Client;
+class Client;
 
-    // All objects on the client side have:
-    //  - A pointer to the Client to get where to serialize commands
-    //  - The external reference count
-    //  - An ID that is used to refer to this object when talking with the server side
-    //  - A next/prev pointer. They are part of a linked list of objects of the same type.
-    struct ObjectBase : public LinkNode<ObjectBase> {
-        ObjectBase(Client* client, uint32_t refcount, uint32_t id)
-            : client(client), refcount(refcount), id(id) {
-        }
+// All objects on the client side have:
+//  - A pointer to the Client to get where to serialize commands
+//  - The external reference count
+//  - An ID that is used to refer to this object when talking with the server side
+//  - A next/prev pointer. They are part of a linked list of objects of the same type.
+struct ObjectBase : public LinkNode<ObjectBase> {
+    ObjectBase(Client* client, uint32_t refcount, uint32_t id)
+        : client(client), refcount(refcount), id(id) {}
 
-        ~ObjectBase() {
-            RemoveFromList();
-        }
+    ~ObjectBase() { RemoveFromList(); }
 
-        virtual void CancelCallbacksForDisconnect() {
-        }
+    virtual void CancelCallbacksForDisconnect() {}
 
-        Client* const client;
-        uint32_t refcount;
-        const uint32_t id;
-    };
+    Client* const client;
+    uint32_t refcount;
+    const uint32_t id;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/Queue.cpp b/src/dawn/wire/client/Queue.cpp
index 37d97d7..79f5632 100644
--- a/src/dawn/wire/client/Queue.cpp
+++ b/src/dawn/wire/client/Queue.cpp
@@ -19,80 +19,77 @@
 
 namespace dawn::wire::client {
 
-    Queue::~Queue() {
-        ClearAllCallbacks(WGPUQueueWorkDoneStatus_Unknown);
+Queue::~Queue() {
+    ClearAllCallbacks(WGPUQueueWorkDoneStatus_Unknown);
+}
+
+bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
+    OnWorkDoneData request;
+    if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
+        return false;
     }
 
-    bool Queue::OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status) {
-        OnWorkDoneData request;
-        if (!mOnWorkDoneRequests.Acquire(requestSerial, &request)) {
-            return false;
+    request.callback(status, request.userdata);
+    return true;
+}
+
+void Queue::OnSubmittedWorkDone(uint64_t signalValue,
+                                WGPUQueueWorkDoneCallback callback,
+                                void* userdata) {
+    if (client->IsDisconnected()) {
+        callback(WGPUQueueWorkDoneStatus_DeviceLost, userdata);
+        return;
+    }
+
+    uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
+
+    QueueOnSubmittedWorkDoneCmd cmd;
+    cmd.queueId = this->id;
+    cmd.signalValue = signalValue;
+    cmd.requestSerial = serial;
+
+    client->SerializeCommand(cmd);
+}
+
+void Queue::WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size) {
+    Buffer* buffer = FromAPI(cBuffer);
+
+    QueueWriteBufferCmd cmd;
+    cmd.queueId = id;
+    cmd.bufferId = buffer->id;
+    cmd.bufferOffset = bufferOffset;
+    cmd.data = static_cast<const uint8_t*>(data);
+    cmd.size = size;
+
+    client->SerializeCommand(cmd);
+}
+
+void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
+                         const void* data,
+                         size_t dataSize,
+                         const WGPUTextureDataLayout* dataLayout,
+                         const WGPUExtent3D* writeSize) {
+    QueueWriteTextureCmd cmd;
+    cmd.queueId = id;
+    cmd.destination = destination;
+    cmd.data = static_cast<const uint8_t*>(data);
+    cmd.dataSize = dataSize;
+    cmd.dataLayout = dataLayout;
+    cmd.writeSize = writeSize;
+
+    client->SerializeCommand(cmd);
+}
+
+void Queue::CancelCallbacksForDisconnect() {
+    ClearAllCallbacks(WGPUQueueWorkDoneStatus_DeviceLost);
+}
+
+void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
+    mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
+        if (request->callback != nullptr) {
+            request->callback(status, request->userdata);
         }
-
-        request.callback(status, request.userdata);
-        return true;
-    }
-
-    void Queue::OnSubmittedWorkDone(uint64_t signalValue,
-                                    WGPUQueueWorkDoneCallback callback,
-                                    void* userdata) {
-        if (client->IsDisconnected()) {
-            callback(WGPUQueueWorkDoneStatus_DeviceLost, userdata);
-            return;
-        }
-
-        uint64_t serial = mOnWorkDoneRequests.Add({callback, userdata});
-
-        QueueOnSubmittedWorkDoneCmd cmd;
-        cmd.queueId = this->id;
-        cmd.signalValue = signalValue;
-        cmd.requestSerial = serial;
-
-        client->SerializeCommand(cmd);
-    }
-
-    void Queue::WriteBuffer(WGPUBuffer cBuffer,
-                            uint64_t bufferOffset,
-                            const void* data,
-                            size_t size) {
-        Buffer* buffer = FromAPI(cBuffer);
-
-        QueueWriteBufferCmd cmd;
-        cmd.queueId = id;
-        cmd.bufferId = buffer->id;
-        cmd.bufferOffset = bufferOffset;
-        cmd.data = static_cast<const uint8_t*>(data);
-        cmd.size = size;
-
-        client->SerializeCommand(cmd);
-    }
-
-    void Queue::WriteTexture(const WGPUImageCopyTexture* destination,
-                             const void* data,
-                             size_t dataSize,
-                             const WGPUTextureDataLayout* dataLayout,
-                             const WGPUExtent3D* writeSize) {
-        QueueWriteTextureCmd cmd;
-        cmd.queueId = id;
-        cmd.destination = destination;
-        cmd.data = static_cast<const uint8_t*>(data);
-        cmd.dataSize = dataSize;
-        cmd.dataLayout = dataLayout;
-        cmd.writeSize = writeSize;
-
-        client->SerializeCommand(cmd);
-    }
-
-    void Queue::CancelCallbacksForDisconnect() {
-        ClearAllCallbacks(WGPUQueueWorkDoneStatus_DeviceLost);
-    }
-
-    void Queue::ClearAllCallbacks(WGPUQueueWorkDoneStatus status) {
-        mOnWorkDoneRequests.CloseAll([status](OnWorkDoneData* request) {
-            if (request->callback != nullptr) {
-                request->callback(status, request->userdata);
-            }
-        });
-    }
+    });
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/Queue.h b/src/dawn/wire/client/Queue.h
index f95ca40..6b59d96 100644
--- a/src/dawn/wire/client/Queue.h
+++ b/src/dawn/wire/client/Queue.h
@@ -23,34 +23,34 @@
 
 namespace dawn::wire::client {
 
-    class Queue final : public ObjectBase {
-      public:
-        using ObjectBase::ObjectBase;
-        ~Queue();
+class Queue final : public ObjectBase {
+  public:
+    using ObjectBase::ObjectBase;
+    ~Queue();
 
-        bool OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status);
+    bool OnWorkDoneCallback(uint64_t requestSerial, WGPUQueueWorkDoneStatus status);
 
-        // Dawn API
-        void OnSubmittedWorkDone(uint64_t signalValue,
-                                 WGPUQueueWorkDoneCallback callback,
-                                 void* userdata);
-        void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
-        void WriteTexture(const WGPUImageCopyTexture* destination,
-                          const void* data,
-                          size_t dataSize,
-                          const WGPUTextureDataLayout* dataLayout,
-                          const WGPUExtent3D* writeSize);
+    // Dawn API
+    void OnSubmittedWorkDone(uint64_t signalValue,
+                             WGPUQueueWorkDoneCallback callback,
+                             void* userdata);
+    void WriteBuffer(WGPUBuffer cBuffer, uint64_t bufferOffset, const void* data, size_t size);
+    void WriteTexture(const WGPUImageCopyTexture* destination,
+                      const void* data,
+                      size_t dataSize,
+                      const WGPUTextureDataLayout* dataLayout,
+                      const WGPUExtent3D* writeSize);
 
-      private:
-        void CancelCallbacksForDisconnect() override;
-        void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
+  private:
+    void CancelCallbacksForDisconnect() override;
+    void ClearAllCallbacks(WGPUQueueWorkDoneStatus status);
 
-        struct OnWorkDoneData {
-            WGPUQueueWorkDoneCallback callback = nullptr;
-            void* userdata = nullptr;
-        };
-        RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
+    struct OnWorkDoneData {
+        WGPUQueueWorkDoneCallback callback = nullptr;
+        void* userdata = nullptr;
     };
+    RequestTracker<OnWorkDoneData> mOnWorkDoneRequests;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/RequestTracker.h b/src/dawn/wire/client/RequestTracker.h
index a39175a..d1d6cc0 100644
--- a/src/dawn/wire/client/RequestTracker.h
+++ b/src/dawn/wire/client/RequestTracker.h
@@ -24,59 +24,57 @@
 
 namespace dawn::wire::client {
 
-    class Device;
-    class MemoryTransferService;
+class Device;
+class MemoryTransferService;
 
-    template <typename Request>
-    class RequestTracker : NonCopyable {
-      public:
-        ~RequestTracker() {
-            ASSERT(mRequests.empty());
+template <typename Request>
+class RequestTracker : NonCopyable {
+  public:
+    ~RequestTracker() { ASSERT(mRequests.empty()); }
+
+    uint64_t Add(Request&& request) {
+        mSerial++;
+        mRequests.emplace(mSerial, request);
+        return mSerial;
+    }
+
+    bool Acquire(uint64_t serial, Request* request) {
+        auto it = mRequests.find(serial);
+        if (it == mRequests.end()) {
+            return false;
         }
+        *request = std::move(it->second);
+        mRequests.erase(it);
+        return true;
+    }
 
-        uint64_t Add(Request&& request) {
-            mSerial++;
-            mRequests.emplace(mSerial, request);
-            return mSerial;
-        }
-
-        bool Acquire(uint64_t serial, Request* request) {
-            auto it = mRequests.find(serial);
-            if (it == mRequests.end()) {
-                return false;
-            }
-            *request = std::move(it->second);
-            mRequests.erase(it);
-            return true;
-        }
-
-        template <typename CloseFunc>
-        void CloseAll(CloseFunc&& closeFunc) {
-            // Call closeFunc on all requests while handling reentrancy where the callback of some
-            // requests may add some additional requests. We guarantee all callbacks for requests
-            // are called exactly onces, so keep closing new requests if the first batch added more.
-            // It is fine to loop infinitely here if that's what the application makes use do.
-            while (!mRequests.empty()) {
-                // Move mRequests to a local variable so that further reentrant modifications of
-                // mRequests don't invalidate the iterators.
-                auto allRequests = std::move(mRequests);
-                for (auto& [_, request] : allRequests) {
-                    closeFunc(&request);
-                }
+    template <typename CloseFunc>
+    void CloseAll(CloseFunc&& closeFunc) {
+        // Call closeFunc on all requests while handling reentrancy where the callback of some
+        // requests may add some additional requests. We guarantee all callbacks for requests
+    are called exactly once, so keep closing new requests if the first batch added more.
+    It is fine to loop infinitely here if that's what the application makes us do.
+        while (!mRequests.empty()) {
+            // Move mRequests to a local variable so that further reentrant modifications of
+            // mRequests don't invalidate the iterators.
+            auto allRequests = std::move(mRequests);
+            for (auto& [_, request] : allRequests) {
+                closeFunc(&request);
             }
         }
+    }
 
-        template <typename F>
-        void ForAll(F&& f) {
-            for (auto& [_, request] : mRequests) {
-                f(&request);
-            }
+    template <typename F>
+    void ForAll(F&& f) {
+        for (auto& [_, request] : mRequests) {
+            f(&request);
         }
+    }
 
-      private:
-        uint64_t mSerial;
-        std::map<uint64_t, Request> mRequests;
-    };
+  private:
+    uint64_t mSerial;
+    std::map<uint64_t, Request> mRequests;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/client/ShaderModule.cpp b/src/dawn/wire/client/ShaderModule.cpp
index ce25ef7..e7cbb4e 100644
--- a/src/dawn/wire/client/ShaderModule.cpp
+++ b/src/dawn/wire/client/ShaderModule.cpp
@@ -18,47 +18,47 @@
 
 namespace dawn::wire::client {
 
-    ShaderModule::~ShaderModule() {
-        ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
+ShaderModule::~ShaderModule() {
+    ClearAllCallbacks(WGPUCompilationInfoRequestStatus_Unknown);
+}
+
+void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
+    if (client->IsDisconnected()) {
+        callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
+        return;
     }
 
-    void ShaderModule::GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata) {
-        if (client->IsDisconnected()) {
-            callback(WGPUCompilationInfoRequestStatus_DeviceLost, nullptr, userdata);
-            return;
+    uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
+
+    ShaderModuleGetCompilationInfoCmd cmd;
+    cmd.shaderModuleId = this->id;
+    cmd.requestSerial = serial;
+
+    client->SerializeCommand(cmd);
+}
+
+bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
+                                              WGPUCompilationInfoRequestStatus status,
+                                              const WGPUCompilationInfo* info) {
+    CompilationInfoRequest request;
+    if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
+        return false;
+    }
+
+    request.callback(status, info, request.userdata);
+    return true;
+}
+
+void ShaderModule::CancelCallbacksForDisconnect() {
+    ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
+}
+
+void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
+    mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
+        if (request->callback != nullptr) {
+            request->callback(status, nullptr, request->userdata);
         }
-
-        uint64_t serial = mCompilationInfoRequests.Add({callback, userdata});
-
-        ShaderModuleGetCompilationInfoCmd cmd;
-        cmd.shaderModuleId = this->id;
-        cmd.requestSerial = serial;
-
-        client->SerializeCommand(cmd);
-    }
-
-    bool ShaderModule::GetCompilationInfoCallback(uint64_t requestSerial,
-                                                  WGPUCompilationInfoRequestStatus status,
-                                                  const WGPUCompilationInfo* info) {
-        CompilationInfoRequest request;
-        if (!mCompilationInfoRequests.Acquire(requestSerial, &request)) {
-            return false;
-        }
-
-        request.callback(status, info, request.userdata);
-        return true;
-    }
-
-    void ShaderModule::CancelCallbacksForDisconnect() {
-        ClearAllCallbacks(WGPUCompilationInfoRequestStatus_DeviceLost);
-    }
-
-    void ShaderModule::ClearAllCallbacks(WGPUCompilationInfoRequestStatus status) {
-        mCompilationInfoRequests.CloseAll([status](CompilationInfoRequest* request) {
-            if (request->callback != nullptr) {
-                request->callback(status, nullptr, request->userdata);
-            }
-        });
-    }
+    });
+}
 
 }  // namespace dawn::wire::client
diff --git a/src/dawn/wire/client/ShaderModule.h b/src/dawn/wire/client/ShaderModule.h
index 3646611..d8869ba 100644
--- a/src/dawn/wire/client/ShaderModule.h
+++ b/src/dawn/wire/client/ShaderModule.h
@@ -22,26 +22,26 @@
 
 namespace dawn::wire::client {
 
-    class ShaderModule final : public ObjectBase {
-      public:
-        using ObjectBase::ObjectBase;
-        ~ShaderModule();
+class ShaderModule final : public ObjectBase {
+  public:
+    using ObjectBase::ObjectBase;
+    ~ShaderModule();
 
-        void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
-        bool GetCompilationInfoCallback(uint64_t requestSerial,
-                                        WGPUCompilationInfoRequestStatus status,
-                                        const WGPUCompilationInfo* info);
+    void GetCompilationInfo(WGPUCompilationInfoCallback callback, void* userdata);
+    bool GetCompilationInfoCallback(uint64_t requestSerial,
+                                    WGPUCompilationInfoRequestStatus status,
+                                    const WGPUCompilationInfo* info);
 
-      private:
-        void CancelCallbacksForDisconnect() override;
-        void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
+  private:
+    void CancelCallbacksForDisconnect() override;
+    void ClearAllCallbacks(WGPUCompilationInfoRequestStatus status);
 
-        struct CompilationInfoRequest {
-            WGPUCompilationInfoCallback callback = nullptr;
-            void* userdata = nullptr;
-        };
-        RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
+    struct CompilationInfoRequest {
+        WGPUCompilationInfoCallback callback = nullptr;
+        void* userdata = nullptr;
     };
+    RequestTracker<CompilationInfoRequest> mCompilationInfoRequests;
+};
 
 }  // namespace dawn::wire::client
 
diff --git a/src/dawn/wire/server/ObjectStorage.h b/src/dawn/wire/server/ObjectStorage.h
index 0c2d4d8..06fbc04 100644
--- a/src/dawn/wire/server/ObjectStorage.h
+++ b/src/dawn/wire/server/ObjectStorage.h
@@ -27,204 +27,202 @@
 
 namespace dawn::wire::server {
 
-    struct DeviceInfo {
-        std::unordered_set<uint64_t> childObjectTypesAndIds;
-        Server* server;
-        ObjectHandle self;
-    };
+struct DeviceInfo {
+    std::unordered_set<uint64_t> childObjectTypesAndIds;
+    Server* server;
+    ObjectHandle self;
+};
 
-    // Whether this object has been allocated, or reserved for async object creation.
-    // Used by the KnownObjects queries
-    enum class AllocationState : uint32_t {
-        Free,
-        Reserved,
-        Allocated,
-    };
+// Whether this object has been allocated, or reserved for async object creation.
+// Used by the KnownObjects queries
+enum class AllocationState : uint32_t {
+    Free,
+    Reserved,
+    Allocated,
+};
 
-    template <typename T>
-    struct ObjectDataBase {
-        // The backend-provided handle and generation to this object.
-        T handle;
-        uint32_t generation = 0;
+template <typename T>
+struct ObjectDataBase {
+    // The backend-provided handle and generation to this object.
+    T handle;
+    uint32_t generation = 0;
 
-        AllocationState state;
+    AllocationState state;
 
-        // This points to an allocation that is owned by the device.
-        DeviceInfo* deviceInfo = nullptr;
-    };
+    // This points to an allocation that is owned by the device.
+    DeviceInfo* deviceInfo = nullptr;
+};
 
-    // Stores what the backend knows about the type.
-    template <typename T>
-    struct ObjectData : public ObjectDataBase<T> {};
+// Stores what the backend knows about the type.
+template <typename T>
+struct ObjectData : public ObjectDataBase<T> {};
 
-    enum class BufferMapWriteState { Unmapped, Mapped, MapError };
+enum class BufferMapWriteState { Unmapped, Mapped, MapError };
 
-    template <>
-    struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
-        // TODO(enga): Use a tagged pointer to save space.
-        std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
-        std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
-        BufferMapWriteState mapWriteState = BufferMapWriteState::Unmapped;
-        WGPUBufferUsageFlags usage = WGPUBufferUsage_None;
-        // Indicate if writeHandle needs to be destroyed on unmap
-        bool mappedAtCreation = false;
-    };
+template <>
+struct ObjectData<WGPUBuffer> : public ObjectDataBase<WGPUBuffer> {
+    // TODO(enga): Use a tagged pointer to save space.
+    std::unique_ptr<MemoryTransferService::ReadHandle> readHandle;
+    std::unique_ptr<MemoryTransferService::WriteHandle> writeHandle;
+    BufferMapWriteState mapWriteState = BufferMapWriteState::Unmapped;
+    WGPUBufferUsageFlags usage = WGPUBufferUsage_None;
+    // Indicate if writeHandle needs to be destroyed on unmap
+    bool mappedAtCreation = false;
+};
 
-    // Pack the ObjectType and ObjectId as a single value for storage in
-    // an std::unordered_set. This lets us avoid providing our own hash and
-    // equality comparison operators.
-    inline uint64_t PackObjectTypeAndId(ObjectType type, ObjectId id) {
-        static_assert(sizeof(ObjectType) * 8 <= 32);
-        static_assert(sizeof(ObjectId) * 8 <= 32);
-        return (static_cast<uint64_t>(type) << 32) + id;
+// Pack the ObjectType and ObjectId as a single value for storage in
+// an std::unordered_set. This lets us avoid providing our own hash and
+// equality comparison operators.
+inline uint64_t PackObjectTypeAndId(ObjectType type, ObjectId id) {
+    static_assert(sizeof(ObjectType) * 8 <= 32);
+    static_assert(sizeof(ObjectId) * 8 <= 32);
+    return (static_cast<uint64_t>(type) << 32) + id;
+}
+
+inline std::pair<ObjectType, ObjectId> UnpackObjectTypeAndId(uint64_t payload) {
+    ObjectType type = static_cast<ObjectType>(payload >> 32);
+    ObjectId id = payload & 0xFFFFFFFF;
+    return std::make_pair(type, id);
+}
+
+template <>
+struct ObjectData<WGPUDevice> : public ObjectDataBase<WGPUDevice> {
+    // Store |info| as a separate allocation so that its address does not move.
+    // The pointer to |info| is stored in device child objects.
+    std::unique_ptr<DeviceInfo> info = std::make_unique<DeviceInfo>();
+};
+
+// Keeps track of the mapping between client IDs and backend objects.
+template <typename T>
+class KnownObjects {
+  public:
+    using Data = ObjectData<T>;
+
+    KnownObjects() {
+        // Reserve ID 0 so that it can be used to represent nullptr for optional object values
+        // in the wire format. However don't tag it as allocated so that it is an error to ask
+        // KnownObjects for ID 0.
+        Data reservation;
+        reservation.handle = nullptr;
+        reservation.state = AllocationState::Free;
+        mKnown.push_back(std::move(reservation));
     }
 
-    inline std::pair<ObjectType, ObjectId> UnpackObjectTypeAndId(uint64_t payload) {
-        ObjectType type = static_cast<ObjectType>(payload >> 32);
-        ObjectId id = payload & 0xFFFFFFFF;
-        return std::make_pair(type, id);
+    // Get a backend object for a given client ID.
+    // Returns nullptr if the ID hasn't previously been allocated.
+    const Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) const {
+        if (id >= mKnown.size()) {
+            return nullptr;
+        }
+
+        const Data* data = &mKnown[id];
+
+        if (data->state != expected) {
+            return nullptr;
+        }
+
+        return data;
+    }
+    Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) {
+        if (id >= mKnown.size()) {
+            return nullptr;
+        }
+
+        Data* data = &mKnown[id];
+
+        if (data->state != expected) {
+            return nullptr;
+        }
+
+        return data;
     }
 
-    template <>
-    struct ObjectData<WGPUDevice> : public ObjectDataBase<WGPUDevice> {
-        // Store |info| as a separate allocation so that its address does not move.
-        // The pointer to |info| is stored in device child objects.
-        std::unique_ptr<DeviceInfo> info = std::make_unique<DeviceInfo>();
-    };
-
-    // Keeps track of the mapping between client IDs and backend objects.
-    template <typename T>
-    class KnownObjects {
-      public:
-        using Data = ObjectData<T>;
-
-        KnownObjects() {
-            // Reserve ID 0 so that it can be used to represent nullptr for optional object values
-            // in the wire format. However don't tag it as allocated so that it is an error to ask
-            // KnownObjects for ID 0.
-            Data reservation;
-            reservation.handle = nullptr;
-            reservation.state = AllocationState::Free;
-            mKnown.push_back(std::move(reservation));
+    // Allocates the data for a given ID and returns it.
+    // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
+    // reserved for nullptr). Invalidates all the Data*
+    Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
+        if (id == 0 || id > mKnown.size()) {
+            return nullptr;
         }
 
-        // Get a backend objects for a given client ID.
-        // Returns nullptr if the ID hasn't previously been allocated.
-        const Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) const {
-            if (id >= mKnown.size()) {
-                return nullptr;
-            }
+        Data data;
+        data.state = state;
+        data.handle = nullptr;
 
-            const Data* data = &mKnown[id];
-
-            if (data->state != expected) {
-                return nullptr;
-            }
-
-            return data;
-        }
-        Data* Get(uint32_t id, AllocationState expected = AllocationState::Allocated) {
-            if (id >= mKnown.size()) {
-                return nullptr;
-            }
-
-            Data* data = &mKnown[id];
-
-            if (data->state != expected) {
-                return nullptr;
-            }
-
-            return data;
+        if (id >= mKnown.size()) {
+            mKnown.push_back(std::move(data));
+            return &mKnown.back();
         }
 
-        // Allocates the data for a given ID and returns it.
-        // Returns nullptr if the ID is already allocated, or too far ahead, or if ID is 0 (ID 0 is
-        // reserved for nullptr). Invalidates all the Data*
-        Data* Allocate(uint32_t id, AllocationState state = AllocationState::Allocated) {
-            if (id == 0 || id > mKnown.size()) {
-                return nullptr;
-            }
-
-            Data data;
-            data.state = state;
-            data.handle = nullptr;
-
-            if (id >= mKnown.size()) {
-                mKnown.push_back(std::move(data));
-                return &mKnown.back();
-            }
-
-            if (mKnown[id].state != AllocationState::Free) {
-                return nullptr;
-            }
-
-            mKnown[id] = std::move(data);
-            return &mKnown[id];
+        if (mKnown[id].state != AllocationState::Free) {
+            return nullptr;
         }
 
-        // Marks an ID as deallocated
-        void Free(uint32_t id) {
-            ASSERT(id < mKnown.size());
-            mKnown[id].state = AllocationState::Free;
-        }
+        mKnown[id] = std::move(data);
+        return &mKnown[id];
+    }
 
-        std::vector<T> AcquireAllHandles() {
-            std::vector<T> objects;
-            for (Data& data : mKnown) {
-                if (data.state == AllocationState::Allocated && data.handle != nullptr) {
-                    objects.push_back(data.handle);
-                    data.state = AllocationState::Free;
-                    data.handle = nullptr;
-                }
-            }
+    // Marks an ID as deallocated
+    void Free(uint32_t id) {
+        ASSERT(id < mKnown.size());
+        mKnown[id].state = AllocationState::Free;
+    }
 
-            return objects;
-        }
-
-        std::vector<T> GetAllHandles() {
-            std::vector<T> objects;
-            for (Data& data : mKnown) {
-                if (data.state == AllocationState::Allocated && data.handle != nullptr) {
-                    objects.push_back(data.handle);
-                }
-            }
-
-            return objects;
-        }
-
-      private:
-        std::vector<Data> mKnown;
-    };
-
-    // ObjectIds are lost in deserialization. Store the ids of deserialized
-    // objects here so they can be used in command handlers. This is useful
-    // for creating ReturnWireCmds which contain client ids
-    template <typename T>
-    class ObjectIdLookupTable {
-      public:
-        void Store(T key, ObjectId id) {
-            mTable[key] = id;
-        }
-
-        // Return the cached ObjectId, or 0 (null handle)
-        ObjectId Get(T key) const {
-            const auto it = mTable.find(key);
-            if (it != mTable.end()) {
-                return it->second;
-            }
-            return 0;
-        }
-
-        void Remove(T key) {
-            auto it = mTable.find(key);
-            if (it != mTable.end()) {
-                mTable.erase(it);
+    std::vector<T> AcquireAllHandles() {
+        std::vector<T> objects;
+        for (Data& data : mKnown) {
+            if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+                objects.push_back(data.handle);
+                data.state = AllocationState::Free;
+                data.handle = nullptr;
             }
         }
 
-      private:
-        std::map<T, ObjectId> mTable;
-    };
+        return objects;
+    }
+
+    std::vector<T> GetAllHandles() {
+        std::vector<T> objects;
+        for (Data& data : mKnown) {
+            if (data.state == AllocationState::Allocated && data.handle != nullptr) {
+                objects.push_back(data.handle);
+            }
+        }
+
+        return objects;
+    }
+
+  private:
+    std::vector<Data> mKnown;
+};
+
+// ObjectIds are lost in deserialization. Store the ids of deserialized
+// objects here so they can be used in command handlers. This is useful
+// for creating ReturnWireCmds which contain client ids
+template <typename T>
+class ObjectIdLookupTable {
+  public:
+    void Store(T key, ObjectId id) { mTable[key] = id; }
+
+    // Return the cached ObjectId, or 0 (null handle)
+    ObjectId Get(T key) const {
+        const auto it = mTable.find(key);
+        if (it != mTable.end()) {
+            return it->second;
+        }
+        return 0;
+    }
+
+    void Remove(T key) {
+        auto it = mTable.find(key);
+        if (it != mTable.end()) {
+            mTable.erase(it);
+        }
+    }
+
+  private:
+    std::map<T, ObjectId> mTable;
+};
 
 }  // namespace dawn::wire::server
 
diff --git a/src/dawn/wire/server/Server.cpp b/src/dawn/wire/server/Server.cpp
index b0d4ba2..565154e 100644
--- a/src/dawn/wire/server/Server.cpp
+++ b/src/dawn/wire/server/Server.cpp
@@ -17,197 +17,197 @@
 
 namespace dawn::wire::server {
 
-    Server::Server(const DawnProcTable& procs,
-                   CommandSerializer* serializer,
-                   MemoryTransferService* memoryTransferService)
-        : mSerializer(serializer),
-          mProcs(procs),
-          mMemoryTransferService(memoryTransferService),
-          mIsAlive(std::make_shared<bool>(true)) {
-        if (mMemoryTransferService == nullptr) {
-            // If a MemoryTransferService is not provided, fallback to inline memory.
-            mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
-            mMemoryTransferService = mOwnedMemoryTransferService.get();
-        }
+Server::Server(const DawnProcTable& procs,
+               CommandSerializer* serializer,
+               MemoryTransferService* memoryTransferService)
+    : mSerializer(serializer),
+      mProcs(procs),
+      mMemoryTransferService(memoryTransferService),
+      mIsAlive(std::make_shared<bool>(true)) {
+    if (mMemoryTransferService == nullptr) {
+        // If a MemoryTransferService is not provided, fallback to inline memory.
+        mOwnedMemoryTransferService = CreateInlineMemoryTransferService();
+        mMemoryTransferService = mOwnedMemoryTransferService.get();
+    }
+}
+
+Server::~Server() {
+    // Un-set the error and lost callbacks since we cannot forward them
+    // after the server has been destroyed.
+    for (WGPUDevice device : DeviceObjects().GetAllHandles()) {
+        ClearDeviceCallbacks(device);
+    }
+    DestroyAllObjects(mProcs);
+}
+
+bool Server::InjectTexture(WGPUTexture texture,
+                           uint32_t id,
+                           uint32_t generation,
+                           uint32_t deviceId,
+                           uint32_t deviceGeneration) {
+    ASSERT(texture != nullptr);
+    ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+    if (device == nullptr || device->generation != deviceGeneration) {
+        return false;
     }
 
-    Server::~Server() {
-        // Un-set the error and lost callbacks since we cannot forward them
-        // after the server has been destroyed.
-        for (WGPUDevice device : DeviceObjects().GetAllHandles()) {
-            ClearDeviceCallbacks(device);
-        }
-        DestroyAllObjects(mProcs);
+    ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
+    if (data == nullptr) {
+        return false;
     }
 
-    bool Server::InjectTexture(WGPUTexture texture,
-                               uint32_t id,
-                               uint32_t generation,
-                               uint32_t deviceId,
-                               uint32_t deviceGeneration) {
-        ASSERT(texture != nullptr);
-        ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
-        if (device == nullptr || device->generation != deviceGeneration) {
-            return false;
-        }
+    data->handle = texture;
+    data->generation = generation;
+    data->state = AllocationState::Allocated;
+    data->deviceInfo = device->info.get();
 
-        ObjectData<WGPUTexture>* data = TextureObjects().Allocate(id);
-        if (data == nullptr) {
-            return false;
-        }
-
-        data->handle = texture;
-        data->generation = generation;
-        data->state = AllocationState::Allocated;
-        data->deviceInfo = device->info.get();
-
-        if (!TrackDeviceChild(data->deviceInfo, ObjectType::Texture, id)) {
-            return false;
-        }
-
-        // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
-        // message from the client. Add a reference to counterbalance the eventual release.
-        mProcs.textureReference(texture);
-
-        return true;
+    if (!TrackDeviceChild(data->deviceInfo, ObjectType::Texture, id)) {
+        return false;
     }
 
-    bool Server::InjectSwapChain(WGPUSwapChain swapchain,
-                                 uint32_t id,
-                                 uint32_t generation,
-                                 uint32_t deviceId,
-                                 uint32_t deviceGeneration) {
-        ASSERT(swapchain != nullptr);
-        ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
-        if (device == nullptr || device->generation != deviceGeneration) {
-            return false;
-        }
+    // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
+    // message from the client. Add a reference to counterbalance the eventual release.
+    mProcs.textureReference(texture);
 
-        ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
-        if (data == nullptr) {
-            return false;
-        }
+    return true;
+}
 
-        data->handle = swapchain;
-        data->generation = generation;
-        data->state = AllocationState::Allocated;
-        data->deviceInfo = device->info.get();
-
-        if (!TrackDeviceChild(data->deviceInfo, ObjectType::SwapChain, id)) {
-            return false;
-        }
-
-        // The texture is externally owned so it shouldn't be destroyed when we receive a destroy
-        // message from the client. Add a reference to counterbalance the eventual release.
-        mProcs.swapChainReference(swapchain);
-
-        return true;
+bool Server::InjectSwapChain(WGPUSwapChain swapchain,
+                             uint32_t id,
+                             uint32_t generation,
+                             uint32_t deviceId,
+                             uint32_t deviceGeneration) {
+    ASSERT(swapchain != nullptr);
+    ObjectData<WGPUDevice>* device = DeviceObjects().Get(deviceId);
+    if (device == nullptr || device->generation != deviceGeneration) {
+        return false;
     }
 
-    bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
-        ASSERT(device != nullptr);
-        ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
-        if (data == nullptr) {
-            return false;
-        }
-
-        data->handle = device;
-        data->generation = generation;
-        data->state = AllocationState::Allocated;
-        data->info->server = this;
-        data->info->self = ObjectHandle{id, generation};
-
-        // The device is externally owned so it shouldn't be destroyed when we receive a destroy
-        // message from the client. Add a reference to counterbalance the eventual release.
-        mProcs.deviceReference(device);
-
-        // Set callbacks to forward errors to the client.
-        SetForwardingDeviceCallbacks(data);
-        return true;
+    ObjectData<WGPUSwapChain>* data = SwapChainObjects().Allocate(id);
+    if (data == nullptr) {
+        return false;
     }
 
-    bool Server::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
-        ASSERT(instance != nullptr);
-        ObjectData<WGPUInstance>* data = InstanceObjects().Allocate(id);
-        if (data == nullptr) {
-            return false;
-        }
+    data->handle = swapchain;
+    data->generation = generation;
+    data->state = AllocationState::Allocated;
+    data->deviceInfo = device->info.get();
 
-        data->handle = instance;
-        data->generation = generation;
-        data->state = AllocationState::Allocated;
-
-        // The instance is externally owned so it shouldn't be destroyed when we receive a destroy
-        // message from the client. Add a reference to counterbalance the eventual release.
-        mProcs.instanceReference(instance);
-
-        return true;
+    if (!TrackDeviceChild(data->deviceInfo, ObjectType::SwapChain, id)) {
+        return false;
     }
 
-    WGPUDevice Server::GetDevice(uint32_t id, uint32_t generation) {
-        ObjectData<WGPUDevice>* data = DeviceObjects().Get(id);
-        if (data == nullptr || data->generation != generation) {
-            return nullptr;
-        }
-        return data->handle;
+    // The swap chain is externally owned so it shouldn't be destroyed when we receive a destroy
+    // message from the client. Add a reference to counterbalance the eventual release.
+    mProcs.swapChainReference(swapchain);
+
+    return true;
+}
+
+bool Server::InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation) {
+    ASSERT(device != nullptr);
+    ObjectData<WGPUDevice>* data = DeviceObjects().Allocate(id);
+    if (data == nullptr) {
+        return false;
     }
 
-    void Server::SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject) {
-        // Note: these callbacks are manually inlined here since they do not acquire and
-        // free their userdata. Also unlike other callbacks, these are cleared and unset when
-        // the server is destroyed, so we don't need to check if the server is still alive
-        // inside them.
-        mProcs.deviceSetUncapturedErrorCallback(
-            deviceObject->handle,
-            [](WGPUErrorType type, const char* message, void* userdata) {
-                DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
-                info->server->OnUncapturedError(info->self, type, message);
-            },
-            deviceObject->info.get());
-        // Set callback to post warning and other infomation to client.
-        // Almost the same with UncapturedError.
-        mProcs.deviceSetLoggingCallback(
-            deviceObject->handle,
-            [](WGPULoggingType type, const char* message, void* userdata) {
-                DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
-                info->server->OnLogging(info->self, type, message);
-            },
-            deviceObject->info.get());
-        mProcs.deviceSetDeviceLostCallback(
-            deviceObject->handle,
-            [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
-                DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
-                info->server->OnDeviceLost(info->self, reason, message);
-            },
-            deviceObject->info.get());
+    data->handle = device;
+    data->generation = generation;
+    data->state = AllocationState::Allocated;
+    data->info->server = this;
+    data->info->self = ObjectHandle{id, generation};
+
+    // The device is externally owned so it shouldn't be destroyed when we receive a destroy
+    // message from the client. Add a reference to counterbalance the eventual release.
+    mProcs.deviceReference(device);
+
+    // Set callbacks to forward errors to the client.
+    SetForwardingDeviceCallbacks(data);
+    return true;
+}
+
+bool Server::InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation) {
+    ASSERT(instance != nullptr);
+    ObjectData<WGPUInstance>* data = InstanceObjects().Allocate(id);
+    if (data == nullptr) {
+        return false;
     }
 
-    void Server::ClearDeviceCallbacks(WGPUDevice device) {
-        // Un-set the error and lost callbacks since we cannot forward them
-        // after the server has been destroyed.
-        mProcs.deviceSetUncapturedErrorCallback(device, nullptr, nullptr);
-        mProcs.deviceSetLoggingCallback(device, nullptr, nullptr);
-        mProcs.deviceSetDeviceLostCallback(device, nullptr, nullptr);
-    }
+    data->handle = instance;
+    data->generation = generation;
+    data->state = AllocationState::Allocated;
 
-    bool TrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
-        auto [_, inserted] = info->childObjectTypesAndIds.insert(PackObjectTypeAndId(type, id));
-        if (!inserted) {
-            // An object of this type and id already exists.
-            return false;
-        }
-        return true;
-    }
+    // The instance is externally owned so it shouldn't be destroyed when we receive a destroy
+    // message from the client. Add a reference to counterbalance the eventual release.
+    mProcs.instanceReference(instance);
 
-    bool UntrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
-        auto& children = info->childObjectTypesAndIds;
-        auto it = children.find(PackObjectTypeAndId(type, id));
-        if (it == children.end()) {
-            // An object of this type and id was already deleted.
-            return false;
-        }
-        children.erase(it);
-        return true;
+    return true;
+}
+
+WGPUDevice Server::GetDevice(uint32_t id, uint32_t generation) {
+    ObjectData<WGPUDevice>* data = DeviceObjects().Get(id);
+    if (data == nullptr || data->generation != generation) {
+        return nullptr;
     }
+    return data->handle;
+}
+
+void Server::SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject) {
+    // Note: these callbacks are manually inlined here since they do not acquire and
+    // free their userdata. Also unlike other callbacks, these are cleared and unset when
+    // the server is destroyed, so we don't need to check if the server is still alive
+    // inside them.
+    mProcs.deviceSetUncapturedErrorCallback(
+        deviceObject->handle,
+        [](WGPUErrorType type, const char* message, void* userdata) {
+            DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+            info->server->OnUncapturedError(info->self, type, message);
+        },
+        deviceObject->info.get());
+    // Set callback to post warning and other information to client.
+    // Almost the same as UncapturedError.
+    mProcs.deviceSetLoggingCallback(
+        deviceObject->handle,
+        [](WGPULoggingType type, const char* message, void* userdata) {
+            DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+            info->server->OnLogging(info->self, type, message);
+        },
+        deviceObject->info.get());
+    mProcs.deviceSetDeviceLostCallback(
+        deviceObject->handle,
+        [](WGPUDeviceLostReason reason, const char* message, void* userdata) {
+            DeviceInfo* info = static_cast<DeviceInfo*>(userdata);
+            info->server->OnDeviceLost(info->self, reason, message);
+        },
+        deviceObject->info.get());
+}
+
+void Server::ClearDeviceCallbacks(WGPUDevice device) {
+    // Un-set the error and lost callbacks since we cannot forward them
+    // after the server has been destroyed.
+    mProcs.deviceSetUncapturedErrorCallback(device, nullptr, nullptr);
+    mProcs.deviceSetLoggingCallback(device, nullptr, nullptr);
+    mProcs.deviceSetDeviceLostCallback(device, nullptr, nullptr);
+}
+
+bool TrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
+    auto [_, inserted] = info->childObjectTypesAndIds.insert(PackObjectTypeAndId(type, id));
+    if (!inserted) {
+        // An object of this type and id already exists.
+        return false;
+    }
+    return true;
+}
+
+bool UntrackDeviceChild(DeviceInfo* info, ObjectType type, ObjectId id) {
+    auto& children = info->childObjectTypesAndIds;
+    auto it = children.find(PackObjectTypeAndId(type, id));
+    if (it == children.end()) {
+        // An object of this type and id was already deleted.
+        return false;
+    }
+    children.erase(it);
+    return true;
+}
 
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/Server.h b/src/dawn/wire/server/Server.h
index de0dab4..52901b0 100644
--- a/src/dawn/wire/server/Server.h
+++ b/src/dawn/wire/server/Server.h
@@ -23,223 +23,221 @@
 
 namespace dawn::wire::server {
 
-    class Server;
-    class MemoryTransferService;
+class Server;
+class MemoryTransferService;
 
-    // CallbackUserdata and its derived classes are intended to be created by
-    // Server::MakeUserdata<T> and then passed as the userdata argument for Dawn
-    // callbacks.
-    // It contains a pointer back to the Server so that the callback can call the
-    // Server to perform operations like serialization, and it contains a weak pointer
-    // |serverIsAlive|. If the weak pointer has expired, it means the server has
-    // been destroyed and the callback must not use the Server pointer.
-    // To assist with checking |serverIsAlive| and lifetime management of the userdata,
-    // |ForwardToServer| (defined later in this file) can be used to acquire the userdata,
-    // return early if |serverIsAlive| has expired, and then forward the arguments
-    // to userdata->server->MyCallbackHandler.
-    //
-    // Example Usage:
-    //
-    // struct MyUserdata : CallbackUserdata { uint32_t foo; };
-    //
-    // auto userdata = MakeUserdata<MyUserdata>();
-    // userdata->foo = 2;
-    //
-    // callMyCallbackHandler(
-    //      ForwardToServer<&Server::MyCallbackHandler>,
-    //      userdata.release());
-    //
-    // void Server::MyCallbackHandler(MyUserdata* userdata, Other args) { }
-    struct CallbackUserdata {
-        Server* const server;
-        std::weak_ptr<bool> const serverIsAlive;
+// CallbackUserdata and its derived classes are intended to be created by
+// Server::MakeUserdata<T> and then passed as the userdata argument for Dawn
+// callbacks.
+// It contains a pointer back to the Server so that the callback can call the
+// Server to perform operations like serialization, and it contains a weak pointer
+// |serverIsAlive|. If the weak pointer has expired, it means the server has
+// been destroyed and the callback must not use the Server pointer.
+// To assist with checking |serverIsAlive| and lifetime management of the userdata,
+// |ForwardToServer| (defined later in this file) can be used to acquire the userdata,
+// return early if |serverIsAlive| has expired, and then forward the arguments
+// to userdata->server->MyCallbackHandler.
+//
+// Example Usage:
+//
+// struct MyUserdata : CallbackUserdata { uint32_t foo; };
+//
+// auto userdata = MakeUserdata<MyUserdata>();
+// userdata->foo = 2;
+//
+// callMyCallbackHandler(
+//      ForwardToServer<&Server::MyCallbackHandler>,
+//      userdata.release());
+//
+// void Server::MyCallbackHandler(MyUserdata* userdata, Other args) { }
+struct CallbackUserdata {
+    Server* const server;
+    std::weak_ptr<bool> const serverIsAlive;
 
-        CallbackUserdata() = delete;
-        CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive)
-            : server(server), serverIsAlive(serverIsAlive) {
-        }
-    };
+    CallbackUserdata() = delete;
+    CallbackUserdata(Server* server, const std::shared_ptr<bool>& serverIsAlive)
+        : server(server), serverIsAlive(serverIsAlive) {}
+};
 
-    template <auto F>
-    struct ForwardToServerHelper {
-        template <typename _>
-        struct ExtractedTypes;
+template <auto F>
+struct ForwardToServerHelper {
+    template <typename _>
+    struct ExtractedTypes;
 
-        // An internal structure used to unpack the various types that compose the type of F
-        template <typename Return, typename Class, typename Userdata, typename... Args>
-        struct ExtractedTypes<Return (Class::*)(Userdata*, Args...)> {
-            using UntypedCallback = Return (*)(Args..., void*);
-            static Return Callback(Args... args, void* userdata) {
-                // Acquire the userdata, and cast it to UserdataT.
-                std::unique_ptr<Userdata> data(static_cast<Userdata*>(userdata));
-                if (data->serverIsAlive.expired()) {
-                    // Do nothing if the server has already been destroyed.
-                    return;
-                }
-                // Forward the arguments and the typed userdata to the Server:: member function.
-                (data->server->*F)(data.get(), std::forward<decltype(args)>(args)...);
+    // An internal structure used to unpack the various types that compose the type of F
+    template <typename Return, typename Class, typename Userdata, typename... Args>
+    struct ExtractedTypes<Return (Class::*)(Userdata*, Args...)> {
+        using UntypedCallback = Return (*)(Args..., void*);
+        static Return Callback(Args... args, void* userdata) {
+            // Acquire the userdata, and cast it to UserdataT.
+            std::unique_ptr<Userdata> data(static_cast<Userdata*>(userdata));
+            if (data->serverIsAlive.expired()) {
+                // Do nothing if the server has already been destroyed.
+                return;
             }
-        };
-
-        static constexpr typename ExtractedTypes<decltype(F)>::UntypedCallback Create() {
-            return ExtractedTypes<decltype(F)>::Callback;
+            // Forward the arguments and the typed userdata to the Server:: member function.
+            (data->server->*F)(data.get(), std::forward<decltype(args)>(args)...);
         }
     };
 
-    template <auto F>
-    constexpr auto ForwardToServer = ForwardToServerHelper<F>::Create();
+    static constexpr typename ExtractedTypes<decltype(F)>::UntypedCallback Create() {
+        return ExtractedTypes<decltype(F)>::Callback;
+    }
+};
 
-    struct MapUserdata : CallbackUserdata {
-        using CallbackUserdata::CallbackUserdata;
+template <auto F>
+constexpr auto ForwardToServer = ForwardToServerHelper<F>::Create();
 
-        ObjectHandle buffer;
-        WGPUBuffer bufferObj;
-        uint64_t requestSerial;
-        uint64_t offset;
-        uint64_t size;
-        WGPUMapModeFlags mode;
-    };
+struct MapUserdata : CallbackUserdata {
+    using CallbackUserdata::CallbackUserdata;
 
-    struct ErrorScopeUserdata : CallbackUserdata {
-        using CallbackUserdata::CallbackUserdata;
+    ObjectHandle buffer;
+    WGPUBuffer bufferObj;
+    uint64_t requestSerial;
+    uint64_t offset;
+    uint64_t size;
+    WGPUMapModeFlags mode;
+};
 
-        ObjectHandle device;
-        uint64_t requestSerial;
-    };
+struct ErrorScopeUserdata : CallbackUserdata {
+    using CallbackUserdata::CallbackUserdata;
 
-    struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
-        using CallbackUserdata::CallbackUserdata;
+    ObjectHandle device;
+    uint64_t requestSerial;
+};
 
-        ObjectHandle shaderModule;
-        uint64_t requestSerial;
-    };
+struct ShaderModuleGetCompilationInfoUserdata : CallbackUserdata {
+    using CallbackUserdata::CallbackUserdata;
 
-    struct QueueWorkDoneUserdata : CallbackUserdata {
-        using CallbackUserdata::CallbackUserdata;
+    ObjectHandle shaderModule;
+    uint64_t requestSerial;
+};
 
-        ObjectHandle queue;
-        uint64_t requestSerial;
-    };
+struct QueueWorkDoneUserdata : CallbackUserdata {
+    using CallbackUserdata::CallbackUserdata;
 
-    struct CreatePipelineAsyncUserData : CallbackUserdata {
-        using CallbackUserdata::CallbackUserdata;
+    ObjectHandle queue;
+    uint64_t requestSerial;
+};
 
-        ObjectHandle device;
-        uint64_t requestSerial;
-        ObjectId pipelineObjectID;
-    };
+struct CreatePipelineAsyncUserData : CallbackUserdata {
+    using CallbackUserdata::CallbackUserdata;
 
-    struct RequestAdapterUserdata : CallbackUserdata {
-        using CallbackUserdata::CallbackUserdata;
+    ObjectHandle device;
+    uint64_t requestSerial;
+    ObjectId pipelineObjectID;
+};
 
-        ObjectHandle instance;
-        uint64_t requestSerial;
-        ObjectId adapterObjectId;
-    };
+struct RequestAdapterUserdata : CallbackUserdata {
+    using CallbackUserdata::CallbackUserdata;
 
-    struct RequestDeviceUserdata : CallbackUserdata {
-        using CallbackUserdata::CallbackUserdata;
+    ObjectHandle instance;
+    uint64_t requestSerial;
+    ObjectId adapterObjectId;
+};
 
-        ObjectHandle adapter;
-        uint64_t requestSerial;
-        ObjectId deviceObjectId;
-    };
+struct RequestDeviceUserdata : CallbackUserdata {
+    using CallbackUserdata::CallbackUserdata;
 
-    class Server : public ServerBase {
-      public:
-        Server(const DawnProcTable& procs,
-               CommandSerializer* serializer,
-               MemoryTransferService* memoryTransferService);
-        ~Server() override;
+    ObjectHandle adapter;
+    uint64_t requestSerial;
+    ObjectId deviceObjectId;
+};
 
-        // ChunkedCommandHandler implementation
-        const volatile char* HandleCommandsImpl(const volatile char* commands,
-                                                size_t size) override;
+class Server : public ServerBase {
+  public:
+    Server(const DawnProcTable& procs,
+           CommandSerializer* serializer,
+           MemoryTransferService* memoryTransferService);
+    ~Server() override;
 
-        bool InjectTexture(WGPUTexture texture,
-                           uint32_t id,
-                           uint32_t generation,
-                           uint32_t deviceId,
-                           uint32_t deviceGeneration);
+    // ChunkedCommandHandler implementation
+    const volatile char* HandleCommandsImpl(const volatile char* commands, size_t size) override;
 
-        bool InjectSwapChain(WGPUSwapChain swapchain,
-                             uint32_t id,
-                             uint32_t generation,
-                             uint32_t deviceId,
-                             uint32_t deviceGeneration);
+    bool InjectTexture(WGPUTexture texture,
+                       uint32_t id,
+                       uint32_t generation,
+                       uint32_t deviceId,
+                       uint32_t deviceGeneration);
 
-        bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
+    bool InjectSwapChain(WGPUSwapChain swapchain,
+                         uint32_t id,
+                         uint32_t generation,
+                         uint32_t deviceId,
+                         uint32_t deviceGeneration);
 
-        bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
+    bool InjectDevice(WGPUDevice device, uint32_t id, uint32_t generation);
 
-        WGPUDevice GetDevice(uint32_t id, uint32_t generation);
+    bool InjectInstance(WGPUInstance instance, uint32_t id, uint32_t generation);
 
-        template <typename T,
-                  typename Enable = std::enable_if<std::is_base_of<CallbackUserdata, T>::value>>
-        std::unique_ptr<T> MakeUserdata() {
-            return std::unique_ptr<T>(new T(this, mIsAlive));
-        }
+    WGPUDevice GetDevice(uint32_t id, uint32_t generation);
 
-      private:
-        template <typename Cmd>
-        void SerializeCommand(const Cmd& cmd) {
-            mSerializer.SerializeCommand(cmd);
-        }
+    template <typename T,
+              typename Enable = std::enable_if<std::is_base_of<CallbackUserdata, T>::value>>
+    std::unique_ptr<T> MakeUserdata() {
+        return std::unique_ptr<T>(new T(this, mIsAlive));
+    }
 
-        template <typename Cmd, typename ExtraSizeSerializeFn>
-        void SerializeCommand(const Cmd& cmd,
-                              size_t extraSize,
-                              ExtraSizeSerializeFn&& SerializeExtraSize) {
-            mSerializer.SerializeCommand(cmd, extraSize, SerializeExtraSize);
-        }
+  private:
+    template <typename Cmd>
+    void SerializeCommand(const Cmd& cmd) {
+        mSerializer.SerializeCommand(cmd);
+    }
 
-        void SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject);
-        void ClearDeviceCallbacks(WGPUDevice device);
+    template <typename Cmd, typename ExtraSizeSerializeFn>
+    void SerializeCommand(const Cmd& cmd,
+                          size_t extraSize,
+                          ExtraSizeSerializeFn&& SerializeExtraSize) {
+        mSerializer.SerializeCommand(cmd, extraSize, SerializeExtraSize);
+    }
 
-        // Error callbacks
-        void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
-        void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
-        void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
-        void OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
-                                   WGPUErrorType type,
-                                   const char* message);
-        void OnBufferMapAsyncCallback(MapUserdata* userdata, WGPUBufferMapAsyncStatus status);
-        void OnQueueWorkDone(QueueWorkDoneUserdata* userdata, WGPUQueueWorkDoneStatus status);
-        void OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
-                                                  WGPUCreatePipelineAsyncStatus status,
-                                                  WGPUComputePipeline pipeline,
-                                                  const char* message);
-        void OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
-                                                 WGPUCreatePipelineAsyncStatus status,
-                                                 WGPURenderPipeline pipeline,
-                                                 const char* message);
-        void OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* userdata,
-                                              WGPUCompilationInfoRequestStatus status,
-                                              const WGPUCompilationInfo* info);
-        void OnRequestAdapterCallback(RequestAdapterUserdata* userdata,
-                                      WGPURequestAdapterStatus status,
-                                      WGPUAdapter adapter,
-                                      const char* message);
-        void OnRequestDeviceCallback(RequestDeviceUserdata* userdata,
-                                     WGPURequestDeviceStatus status,
-                                     WGPUDevice device,
-                                     const char* message);
+    void SetForwardingDeviceCallbacks(ObjectData<WGPUDevice>* deviceObject);
+    void ClearDeviceCallbacks(WGPUDevice device);
+
+    // Error callbacks
+    void OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message);
+    void OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message);
+    void OnLogging(ObjectHandle device, WGPULoggingType type, const char* message);
+    void OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+                               WGPUErrorType type,
+                               const char* message);
+    void OnBufferMapAsyncCallback(MapUserdata* userdata, WGPUBufferMapAsyncStatus status);
+    void OnQueueWorkDone(QueueWorkDoneUserdata* userdata, WGPUQueueWorkDoneStatus status);
+    void OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+                                              WGPUCreatePipelineAsyncStatus status,
+                                              WGPUComputePipeline pipeline,
+                                              const char* message);
+    void OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* userdata,
+                                             WGPUCreatePipelineAsyncStatus status,
+                                             WGPURenderPipeline pipeline,
+                                             const char* message);
+    void OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* userdata,
+                                          WGPUCompilationInfoRequestStatus status,
+                                          const WGPUCompilationInfo* info);
+    void OnRequestAdapterCallback(RequestAdapterUserdata* userdata,
+                                  WGPURequestAdapterStatus status,
+                                  WGPUAdapter adapter,
+                                  const char* message);
+    void OnRequestDeviceCallback(RequestDeviceUserdata* userdata,
+                                 WGPURequestDeviceStatus status,
+                                 WGPUDevice device,
+                                 const char* message);
 
 #include "dawn/wire/server/ServerPrototypes_autogen.inc"
 
-        WireDeserializeAllocator mAllocator;
-        ChunkedCommandSerializer mSerializer;
-        DawnProcTable mProcs;
-        std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
-        MemoryTransferService* mMemoryTransferService = nullptr;
+    WireDeserializeAllocator mAllocator;
+    ChunkedCommandSerializer mSerializer;
+    DawnProcTable mProcs;
+    std::unique_ptr<MemoryTransferService> mOwnedMemoryTransferService = nullptr;
+    MemoryTransferService* mMemoryTransferService = nullptr;
 
-        std::shared_ptr<bool> mIsAlive;
-    };
+    std::shared_ptr<bool> mIsAlive;
+};
 
-    bool TrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
-    bool UntrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
+bool TrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
+bool UntrackDeviceChild(DeviceInfo* device, ObjectType type, ObjectId id);
 
-    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService();
 
 }  // namespace dawn::wire::server
 
diff --git a/src/dawn/wire/server/ServerAdapter.cpp b/src/dawn/wire/server/ServerAdapter.cpp
index 8a6901e..67e5d46 100644
--- a/src/dawn/wire/server/ServerAdapter.cpp
+++ b/src/dawn/wire/server/ServerAdapter.cpp
@@ -19,93 +19,93 @@
 
 namespace dawn::wire::server {
 
-    bool Server::DoAdapterRequestDevice(ObjectId adapterId,
-                                        uint64_t requestSerial,
-                                        ObjectHandle deviceHandle,
-                                        const WGPUDeviceDescriptor* descriptor) {
-        auto* adapter = AdapterObjects().Get(adapterId);
-        if (adapter == nullptr) {
-            return false;
-        }
-
-        auto* resultData = DeviceObjects().Allocate(deviceHandle.id, AllocationState::Reserved);
-        if (resultData == nullptr) {
-            return false;
-        }
-
-        resultData->generation = deviceHandle.generation;
-
-        auto userdata = MakeUserdata<RequestDeviceUserdata>();
-        userdata->adapter = ObjectHandle{adapterId, adapter->generation};
-        userdata->requestSerial = requestSerial;
-        userdata->deviceObjectId = deviceHandle.id;
-
-        mProcs.adapterRequestDevice(adapter->handle, descriptor,
-                                    ForwardToServer<&Server::OnRequestDeviceCallback>,
-                                    userdata.release());
-        return true;
+bool Server::DoAdapterRequestDevice(ObjectId adapterId,
+                                    uint64_t requestSerial,
+                                    ObjectHandle deviceHandle,
+                                    const WGPUDeviceDescriptor* descriptor) {
+    auto* adapter = AdapterObjects().Get(adapterId);
+    if (adapter == nullptr) {
+        return false;
     }
 
-    void Server::OnRequestDeviceCallback(RequestDeviceUserdata* data,
-                                         WGPURequestDeviceStatus status,
-                                         WGPUDevice device,
-                                         const char* message) {
-        auto* deviceObject = DeviceObjects().Get(data->deviceObjectId, AllocationState::Reserved);
-        // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
-        // they move from Reserved to Allocated, or if they are destroyed here.
-        ASSERT(deviceObject != nullptr);
+    auto* resultData = DeviceObjects().Allocate(deviceHandle.id, AllocationState::Reserved);
+    if (resultData == nullptr) {
+        return false;
+    }
 
-        ReturnAdapterRequestDeviceCallbackCmd cmd = {};
-        cmd.adapter = data->adapter;
-        cmd.requestSerial = data->requestSerial;
-        cmd.status = status;
-        cmd.message = message;
+    resultData->generation = deviceHandle.generation;
 
-        if (status != WGPURequestDeviceStatus_Success) {
+    auto userdata = MakeUserdata<RequestDeviceUserdata>();
+    userdata->adapter = ObjectHandle{adapterId, adapter->generation};
+    userdata->requestSerial = requestSerial;
+    userdata->deviceObjectId = deviceHandle.id;
+
+    mProcs.adapterRequestDevice(adapter->handle, descriptor,
+                                ForwardToServer<&Server::OnRequestDeviceCallback>,
+                                userdata.release());
+    return true;
+}
+
+void Server::OnRequestDeviceCallback(RequestDeviceUserdata* data,
+                                     WGPURequestDeviceStatus status,
+                                     WGPUDevice device,
+                                     const char* message) {
+    auto* deviceObject = DeviceObjects().Get(data->deviceObjectId, AllocationState::Reserved);
+    // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+    // they move from Reserved to Allocated, or if they are destroyed here.
+    ASSERT(deviceObject != nullptr);
+
+    ReturnAdapterRequestDeviceCallbackCmd cmd = {};
+    cmd.adapter = data->adapter;
+    cmd.requestSerial = data->requestSerial;
+    cmd.status = status;
+    cmd.message = message;
+
+    if (status != WGPURequestDeviceStatus_Success) {
+        // Free the ObjectId which will make it unusable.
+        DeviceObjects().Free(data->deviceObjectId);
+        ASSERT(device == nullptr);
+        SerializeCommand(cmd);
+        return;
+    }
+
+    std::vector<WGPUFeatureName> features;
+
+    size_t featuresCount = mProcs.deviceEnumerateFeatures(device, nullptr);
+    features.resize(featuresCount);
+    mProcs.deviceEnumerateFeatures(device, features.data());
+
+    // The client should only be able to request supported features, so all enumerated
+    // features that were enabled must also be supported by the wire.
+    // Note: We fail the callback here, instead of immediately upon receiving
+    // the request to preserve callback ordering.
+    for (WGPUFeatureName f : features) {
+        if (!IsFeatureSupported(f)) {
+            // Release the device.
+            mProcs.deviceRelease(device);
             // Free the ObjectId which will make it unusable.
             DeviceObjects().Free(data->deviceObjectId);
-            ASSERT(device == nullptr);
+
+            cmd.status = WGPURequestDeviceStatus_Error;
+            cmd.message = "Requested feature not supported.";
             SerializeCommand(cmd);
             return;
         }
-
-        std::vector<WGPUFeatureName> features;
-
-        size_t featuresCount = mProcs.deviceEnumerateFeatures(device, nullptr);
-        features.resize(featuresCount);
-        mProcs.deviceEnumerateFeatures(device, features.data());
-
-        // The client should only be able to request supported features, so all enumerated
-        // features that were enabled must also be supported by the wire.
-        // Note: We fail the callback here, instead of immediately upon receiving
-        // the request to preserve callback ordering.
-        for (WGPUFeatureName f : features) {
-            if (!IsFeatureSupported(f)) {
-                // Release the device.
-                mProcs.deviceRelease(device);
-                // Free the ObjectId which will make it unusable.
-                DeviceObjects().Free(data->deviceObjectId);
-
-                cmd.status = WGPURequestDeviceStatus_Error;
-                cmd.message = "Requested feature not supported.";
-                SerializeCommand(cmd);
-                return;
-            }
-        }
-
-        cmd.featuresCount = static_cast<uint32_t>(features.size());
-        cmd.features = features.data();
-
-        WGPUSupportedLimits limits = {};
-        mProcs.deviceGetLimits(device, &limits);
-        cmd.limits = &limits;
-
-        // Assign the handle and allocated status if the device is created successfully.
-        deviceObject->state = AllocationState::Allocated;
-        deviceObject->handle = device;
-        SetForwardingDeviceCallbacks(deviceObject);
-
-        SerializeCommand(cmd);
     }
 
+    cmd.featuresCount = static_cast<uint32_t>(features.size());
+    cmd.features = features.data();
+
+    WGPUSupportedLimits limits = {};
+    mProcs.deviceGetLimits(device, &limits);
+    cmd.limits = &limits;
+
+    // Assign the handle and allocated status if the device is created successfully.
+    deviceObject->state = AllocationState::Allocated;
+    deviceObject->handle = device;
+    SetForwardingDeviceCallbacks(deviceObject);
+
+    SerializeCommand(cmd);
+}
+
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerBuffer.cpp b/src/dawn/wire/server/ServerBuffer.cpp
index 8979093..ff17809 100644
--- a/src/dawn/wire/server/ServerBuffer.cpp
+++ b/src/dawn/wire/server/ServerBuffer.cpp
@@ -22,262 +22,257 @@
 
 namespace dawn::wire::server {
 
-    bool Server::PreHandleBufferUnmap(const BufferUnmapCmd& cmd) {
-        auto* buffer = BufferObjects().Get(cmd.selfId);
-        DAWN_ASSERT(buffer != nullptr);
+bool Server::PreHandleBufferUnmap(const BufferUnmapCmd& cmd) {
+    auto* buffer = BufferObjects().Get(cmd.selfId);
+    DAWN_ASSERT(buffer != nullptr);
 
-        if (buffer->mappedAtCreation && !(buffer->usage & WGPUMapMode_Write)) {
-            // This indicates the writeHandle is for mappedAtCreation only. Destroy on unmap
-            // writeHandle could have possibly been deleted if buffer is already destroyed so we
-            // don't assert it's non-null
-            buffer->writeHandle = nullptr;
-        }
-
-        buffer->mapWriteState = BufferMapWriteState::Unmapped;
-
-        return true;
-    }
-
-    bool Server::PreHandleBufferDestroy(const BufferDestroyCmd& cmd) {
-        // Destroying a buffer does an implicit unmapping.
-        auto* buffer = BufferObjects().Get(cmd.selfId);
-        DAWN_ASSERT(buffer != nullptr);
-
-        // The buffer was destroyed. Clear the Read/WriteHandle.
-        buffer->readHandle = nullptr;
+    if (buffer->mappedAtCreation && !(buffer->usage & WGPUMapMode_Write)) {
+        // This indicates the writeHandle is for mappedAtCreation only. Destroy on unmap
+        // writeHandle could have possibly been deleted if buffer is already destroyed so we
+        // don't assert it's non-null
         buffer->writeHandle = nullptr;
-        buffer->mapWriteState = BufferMapWriteState::Unmapped;
+    }
 
+    buffer->mapWriteState = BufferMapWriteState::Unmapped;
+
+    return true;
+}
+
+bool Server::PreHandleBufferDestroy(const BufferDestroyCmd& cmd) {
+    // Destroying a buffer does an implicit unmapping.
+    auto* buffer = BufferObjects().Get(cmd.selfId);
+    DAWN_ASSERT(buffer != nullptr);
+
+    // The buffer was destroyed. Clear the Read/WriteHandle.
+    buffer->readHandle = nullptr;
+    buffer->writeHandle = nullptr;
+    buffer->mapWriteState = BufferMapWriteState::Unmapped;
+
+    return true;
+}
+
+bool Server::DoBufferMapAsync(ObjectId bufferId,
+                              uint64_t requestSerial,
+                              WGPUMapModeFlags mode,
+                              uint64_t offset64,
+                              uint64_t size64) {
+    // These requests are just forwarded to the buffer, with userdata containing what the
+    // client will require in the return command.
+
+    // The null object isn't valid as `self`
+    if (bufferId == 0) {
+        return false;
+    }
+
+    auto* buffer = BufferObjects().Get(bufferId);
+    if (buffer == nullptr) {
+        return false;
+    }
+
+    std::unique_ptr<MapUserdata> userdata = MakeUserdata<MapUserdata>();
+    userdata->buffer = ObjectHandle{bufferId, buffer->generation};
+    userdata->bufferObj = buffer->handle;
+    userdata->requestSerial = requestSerial;
+    userdata->mode = mode;
+
+    // Make sure that the deserialized offset and size are no larger than
+    // std::numeric_limits<size_t>::max() so that they are CPU-addressable, and size is not
+    // WGPU_WHOLE_MAP_SIZE, which is by definition std::numeric_limits<size_t>::max(). Since
+    // client does the default size computation, we should always have a valid actual size here
+    // in server. All other invalid actual size can be caught by dawn native side validation.
+    if (offset64 > std::numeric_limits<size_t>::max() || size64 >= WGPU_WHOLE_MAP_SIZE) {
+        OnBufferMapAsyncCallback(userdata.get(), WGPUBufferMapAsyncStatus_Error);
         return true;
     }
 
-    bool Server::DoBufferMapAsync(ObjectId bufferId,
-                                  uint64_t requestSerial,
-                                  WGPUMapModeFlags mode,
-                                  uint64_t offset64,
-                                  uint64_t size64) {
-        // These requests are just forwarded to the buffer, with userdata containing what the
-        // client will require in the return command.
+    size_t offset = static_cast<size_t>(offset64);
+    size_t size = static_cast<size_t>(size64);
 
-        // The null object isn't valid as `self`
-        if (bufferId == 0) {
-            return false;
-        }
+    userdata->offset = offset;
+    userdata->size = size;
 
-        auto* buffer = BufferObjects().Get(bufferId);
-        if (buffer == nullptr) {
-            return false;
-        }
+    mProcs.bufferMapAsync(buffer->handle, mode, offset, size,
+                          ForwardToServer<&Server::OnBufferMapAsyncCallback>, userdata.release());
 
-        std::unique_ptr<MapUserdata> userdata = MakeUserdata<MapUserdata>();
-        userdata->buffer = ObjectHandle{bufferId, buffer->generation};
-        userdata->bufferObj = buffer->handle;
-        userdata->requestSerial = requestSerial;
-        userdata->mode = mode;
+    return true;
+}
 
-        // Make sure that the deserialized offset and size are no larger than
-        // std::numeric_limits<size_t>::max() so that they are CPU-addressable, and size is not
-        // WGPU_WHOLE_MAP_SIZE, which is by definition std::numeric_limits<size_t>::max(). Since
-        // client does the default size computation, we should always have a valid actual size here
-        // in server. All other invalid actual size can be caught by dawn native side validation.
-        if (offset64 > std::numeric_limits<size_t>::max() || size64 >= WGPU_WHOLE_MAP_SIZE) {
-            OnBufferMapAsyncCallback(userdata.get(), WGPUBufferMapAsyncStatus_Error);
-            return true;
-        }
-
-        size_t offset = static_cast<size_t>(offset64);
-        size_t size = static_cast<size_t>(size64);
-
-        userdata->offset = offset;
-        userdata->size = size;
-
-        mProcs.bufferMapAsync(buffer->handle, mode, offset, size,
-                              ForwardToServer<&Server::OnBufferMapAsyncCallback>,
-                              userdata.release());
-
-        return true;
+bool Server::DoDeviceCreateBuffer(ObjectId deviceId,
+                                  const WGPUBufferDescriptor* descriptor,
+                                  ObjectHandle bufferResult,
+                                  uint64_t readHandleCreateInfoLength,
+                                  const uint8_t* readHandleCreateInfo,
+                                  uint64_t writeHandleCreateInfoLength,
+                                  const uint8_t* writeHandleCreateInfo) {
+    auto* device = DeviceObjects().Get(deviceId);
+    if (device == nullptr) {
+        return false;
     }
 
-    bool Server::DoDeviceCreateBuffer(ObjectId deviceId,
-                                      const WGPUBufferDescriptor* descriptor,
-                                      ObjectHandle bufferResult,
-                                      uint64_t readHandleCreateInfoLength,
-                                      const uint8_t* readHandleCreateInfo,
-                                      uint64_t writeHandleCreateInfoLength,
-                                      const uint8_t* writeHandleCreateInfo) {
-        auto* device = DeviceObjects().Get(deviceId);
-        if (device == nullptr) {
-            return false;
-        }
-
-        // Create and register the buffer object.
-        auto* resultData = BufferObjects().Allocate(bufferResult.id);
-        if (resultData == nullptr) {
-            return false;
-        }
-        resultData->generation = bufferResult.generation;
-        resultData->handle = mProcs.deviceCreateBuffer(device->handle, descriptor);
-        resultData->deviceInfo = device->info.get();
-        resultData->usage = descriptor->usage;
-        resultData->mappedAtCreation = descriptor->mappedAtCreation;
-        if (!TrackDeviceChild(resultData->deviceInfo, ObjectType::Buffer, bufferResult.id)) {
-            return false;
-        }
-
-        // isReadMode and isWriteMode could be true at the same time if usage contains
-        // WGPUMapMode_Read and buffer is mappedAtCreation
-        bool isReadMode = descriptor->usage & WGPUMapMode_Read;
-        bool isWriteMode = descriptor->usage & WGPUMapMode_Write || descriptor->mappedAtCreation;
-
-        // This is the size of data deserialized from the command stream to create the read/write
-        // handle, which must be CPU-addressable.
-        if (readHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
-            writeHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
-            readHandleCreateInfoLength >
-                std::numeric_limits<size_t>::max() - writeHandleCreateInfoLength) {
-            return false;
-        }
-
-        if (isWriteMode) {
-            MemoryTransferService::WriteHandle* writeHandle = nullptr;
-            // Deserialize metadata produced from the client to create a companion server handle.
-            if (!mMemoryTransferService->DeserializeWriteHandle(
-                    writeHandleCreateInfo, static_cast<size_t>(writeHandleCreateInfoLength),
-                    &writeHandle)) {
-                return false;
-            }
-            ASSERT(writeHandle != nullptr);
-            resultData->writeHandle.reset(writeHandle);
-            writeHandle->SetDataLength(descriptor->size);
-
-            if (descriptor->mappedAtCreation) {
-                void* mapping =
-                    mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
-                if (mapping == nullptr) {
-                    // A zero mapping is used to indicate an allocation error of an error buffer.
-                    // This is a valid case and isn't fatal. Remember the buffer is an error so as
-                    // to skip subsequent mapping operations.
-                    resultData->mapWriteState = BufferMapWriteState::MapError;
-                    return true;
-                }
-                ASSERT(mapping != nullptr);
-                writeHandle->SetTarget(mapping);
-
-                resultData->mapWriteState = BufferMapWriteState::Mapped;
-            }
-        }
-
-        if (isReadMode) {
-            MemoryTransferService::ReadHandle* readHandle = nullptr;
-            // Deserialize metadata produced from the client to create a companion server handle.
-            if (!mMemoryTransferService->DeserializeReadHandle(
-                    readHandleCreateInfo, static_cast<size_t>(readHandleCreateInfoLength),
-                    &readHandle)) {
-                return false;
-            }
-            ASSERT(readHandle != nullptr);
-
-            resultData->readHandle.reset(readHandle);
-        }
-
-        return true;
+    // Create and register the buffer object.
+    auto* resultData = BufferObjects().Allocate(bufferResult.id);
+    if (resultData == nullptr) {
+        return false;
+    }
+    resultData->generation = bufferResult.generation;
+    resultData->handle = mProcs.deviceCreateBuffer(device->handle, descriptor);
+    resultData->deviceInfo = device->info.get();
+    resultData->usage = descriptor->usage;
+    resultData->mappedAtCreation = descriptor->mappedAtCreation;
+    if (!TrackDeviceChild(resultData->deviceInfo, ObjectType::Buffer, bufferResult.id)) {
+        return false;
     }
 
-    bool Server::DoBufferUpdateMappedData(ObjectId bufferId,
-                                          uint64_t writeDataUpdateInfoLength,
-                                          const uint8_t* writeDataUpdateInfo,
-                                          uint64_t offset,
-                                          uint64_t size) {
-        // The null object isn't valid as `self`
-        if (bufferId == 0) {
-            return false;
-        }
+    // isReadMode and isWriteMode could be true at the same time if usage contains
+    // WGPUMapMode_Read and buffer is mappedAtCreation
+    bool isReadMode = descriptor->usage & WGPUMapMode_Read;
+    bool isWriteMode = descriptor->usage & WGPUMapMode_Write || descriptor->mappedAtCreation;
 
-        if (writeDataUpdateInfoLength > std::numeric_limits<size_t>::max() ||
-            offset > std::numeric_limits<size_t>::max() ||
-            size > std::numeric_limits<size_t>::max()) {
-            return false;
-        }
+    // This is the size of data deserialized from the command stream to create the read/write
+    // handle, which must be CPU-addressable.
+    if (readHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+        writeHandleCreateInfoLength > std::numeric_limits<size_t>::max() ||
+        readHandleCreateInfoLength >
+            std::numeric_limits<size_t>::max() - writeHandleCreateInfoLength) {
+        return false;
+    }
 
-        auto* buffer = BufferObjects().Get(bufferId);
-        if (buffer == nullptr) {
+    if (isWriteMode) {
+        MemoryTransferService::WriteHandle* writeHandle = nullptr;
+        // Deserialize metadata produced from the client to create a companion server handle.
+        if (!mMemoryTransferService->DeserializeWriteHandle(
+                writeHandleCreateInfo, static_cast<size_t>(writeHandleCreateInfoLength),
+                &writeHandle)) {
             return false;
         }
-        switch (buffer->mapWriteState) {
-            case BufferMapWriteState::Unmapped:
-                return false;
-            case BufferMapWriteState::MapError:
-                // The buffer is mapped but there was an error allocating mapped data.
-                // Do not perform the memcpy.
+        ASSERT(writeHandle != nullptr);
+        resultData->writeHandle.reset(writeHandle);
+        writeHandle->SetDataLength(descriptor->size);
+
+        if (descriptor->mappedAtCreation) {
+            void* mapping = mProcs.bufferGetMappedRange(resultData->handle, 0, descriptor->size);
+            if (mapping == nullptr) {
+                // A zero mapping is used to indicate an allocation error of an error buffer.
+                // This is a valid case and isn't fatal. Remember the buffer is an error so as
+                // to skip subsequent mapping operations.
+                resultData->mapWriteState = BufferMapWriteState::MapError;
                 return true;
-            case BufferMapWriteState::Mapped:
-                break;
+            }
+            ASSERT(mapping != nullptr);
+            writeHandle->SetTarget(mapping);
+
+            resultData->mapWriteState = BufferMapWriteState::Mapped;
         }
-        if (!buffer->writeHandle) {
-            // This check is performed after the check for the MapError state. It is permissible
-            // to Unmap and attempt to update mapped data of an error buffer.
+    }
+
+    if (isReadMode) {
+        MemoryTransferService::ReadHandle* readHandle = nullptr;
+        // Deserialize metadata produced from the client to create a companion server handle.
+        if (!mMemoryTransferService->DeserializeReadHandle(
+                readHandleCreateInfo, static_cast<size_t>(readHandleCreateInfoLength),
+                &readHandle)) {
             return false;
         }
+        ASSERT(readHandle != nullptr);
 
-        // Deserialize the flush info and flush updated data from the handle into the target
-        // of the handle. The target is set via WriteHandle::SetTarget.
-        return buffer->writeHandle->DeserializeDataUpdate(
-            writeDataUpdateInfo, static_cast<size_t>(writeDataUpdateInfoLength),
-            static_cast<size_t>(offset), static_cast<size_t>(size));
+        resultData->readHandle.reset(readHandle);
     }
 
-    void Server::OnBufferMapAsyncCallback(MapUserdata* data, WGPUBufferMapAsyncStatus status) {
-        // Skip sending the callback if the buffer has already been destroyed.
-        auto* bufferData = BufferObjects().Get(data->buffer.id);
-        if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
-            return;
-        }
+    return true;
+}
 
-        bool isRead = data->mode & WGPUMapMode_Read;
-        bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
-
-        ReturnBufferMapAsyncCallbackCmd cmd;
-        cmd.buffer = data->buffer;
-        cmd.requestSerial = data->requestSerial;
-        cmd.status = status;
-        cmd.readDataUpdateInfoLength = 0;
-        cmd.readDataUpdateInfo = nullptr;
-
-        const void* readData = nullptr;
-        if (isSuccess) {
-            if (isRead) {
-                // Get the serialization size of the message to initialize ReadHandle data.
-                readData =
-                    mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
-                cmd.readDataUpdateInfoLength =
-                    bufferData->readHandle->SizeOfSerializeDataUpdate(data->offset, data->size);
-            } else {
-                ASSERT(data->mode & WGPUMapMode_Write);
-                // The in-flight map request returned successfully.
-                bufferData->mapWriteState = BufferMapWriteState::Mapped;
-                // Set the target of the WriteHandle to the mapped buffer data.
-                // writeHandle Target always refers to the buffer base address.
-                // but we call getMappedRange exactly with the range of data that is potentially
-                // modified (i.e. we don't want getMappedRange(0, wholeBufferSize) if only a
-                // subset of the buffer is actually mapped) in case the implementation does some
-                // range tracking.
-                bufferData->writeHandle->SetTarget(
-                    static_cast<uint8_t*>(
-                        mProcs.bufferGetMappedRange(data->bufferObj, data->offset, data->size)) -
-                    data->offset);
-            }
-        }
-
-        SerializeCommand(cmd, cmd.readDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
-            if (isSuccess && isRead) {
-                char* readHandleBuffer;
-                WIRE_TRY(serializeBuffer->NextN(cmd.readDataUpdateInfoLength, &readHandleBuffer));
-                // The in-flight map request returned successfully.
-                bufferData->readHandle->SerializeDataUpdate(readData, data->offset, data->size,
-                                                            readHandleBuffer);
-            }
-            return WireResult::Success;
-        });
+bool Server::DoBufferUpdateMappedData(ObjectId bufferId,
+                                      uint64_t writeDataUpdateInfoLength,
+                                      const uint8_t* writeDataUpdateInfo,
+                                      uint64_t offset,
+                                      uint64_t size) {
+    // The null object isn't valid as `self`
+    if (bufferId == 0) {
+        return false;
     }
 
+    if (writeDataUpdateInfoLength > std::numeric_limits<size_t>::max() ||
+        offset > std::numeric_limits<size_t>::max() || size > std::numeric_limits<size_t>::max()) {
+        return false;
+    }
+
+    auto* buffer = BufferObjects().Get(bufferId);
+    if (buffer == nullptr) {
+        return false;
+    }
+    switch (buffer->mapWriteState) {
+        case BufferMapWriteState::Unmapped:
+            return false;
+        case BufferMapWriteState::MapError:
+            // The buffer is mapped but there was an error allocating mapped data.
+            // Do not perform the memcpy.
+            return true;
+        case BufferMapWriteState::Mapped:
+            break;
+    }
+    if (!buffer->writeHandle) {
+        // This check is performed after the check for the MapError state. It is permissible
+        // to Unmap and attempt to update mapped data of an error buffer.
+        return false;
+    }
+
+    // Deserialize the flush info and flush updated data from the handle into the target
+    // of the handle. The target is set via WriteHandle::SetTarget.
+    return buffer->writeHandle->DeserializeDataUpdate(
+        writeDataUpdateInfo, static_cast<size_t>(writeDataUpdateInfoLength),
+        static_cast<size_t>(offset), static_cast<size_t>(size));
+}
+
+void Server::OnBufferMapAsyncCallback(MapUserdata* data, WGPUBufferMapAsyncStatus status) {
+    // Skip sending the callback if the buffer has already been destroyed.
+    auto* bufferData = BufferObjects().Get(data->buffer.id);
+    if (bufferData == nullptr || bufferData->generation != data->buffer.generation) {
+        return;
+    }
+
+    bool isRead = data->mode & WGPUMapMode_Read;
+    bool isSuccess = status == WGPUBufferMapAsyncStatus_Success;
+
+    ReturnBufferMapAsyncCallbackCmd cmd;
+    cmd.buffer = data->buffer;
+    cmd.requestSerial = data->requestSerial;
+    cmd.status = status;
+    cmd.readDataUpdateInfoLength = 0;
+    cmd.readDataUpdateInfo = nullptr;
+
+    const void* readData = nullptr;
+    if (isSuccess) {
+        if (isRead) {
+            // Get the serialization size of the message to initialize ReadHandle data.
+            readData = mProcs.bufferGetConstMappedRange(data->bufferObj, data->offset, data->size);
+            cmd.readDataUpdateInfoLength =
+                bufferData->readHandle->SizeOfSerializeDataUpdate(data->offset, data->size);
+        } else {
+            ASSERT(data->mode & WGPUMapMode_Write);
+            // The in-flight map request returned successfully.
+            bufferData->mapWriteState = BufferMapWriteState::Mapped;
+            // Set the target of the WriteHandle to the mapped buffer data.
+            // writeHandle Target always refers to the buffer base address.
+            // but we call getMappedRange exactly with the range of data that is potentially
+            // modified (i.e. we don't want getMappedRange(0, wholeBufferSize) if only a
+            // subset of the buffer is actually mapped) in case the implementation does some
+            // range tracking.
+            bufferData->writeHandle->SetTarget(static_cast<uint8_t*>(mProcs.bufferGetMappedRange(
+                                                   data->bufferObj, data->offset, data->size)) -
+                                               data->offset);
+        }
+    }
+
+    SerializeCommand(cmd, cmd.readDataUpdateInfoLength, [&](SerializeBuffer* serializeBuffer) {
+        if (isSuccess && isRead) {
+            char* readHandleBuffer;
+            WIRE_TRY(serializeBuffer->NextN(cmd.readDataUpdateInfoLength, &readHandleBuffer));
+            // The in-flight map request returned successfully.
+            bufferData->readHandle->SerializeDataUpdate(readData, data->offset, data->size,
+                                                        readHandleBuffer);
+        }
+        return WireResult::Success;
+    });
+}
+
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerDevice.cpp b/src/dawn/wire/server/ServerDevice.cpp
index 45fb6b8..0a1fff5 100644
--- a/src/dawn/wire/server/ServerDevice.cpp
+++ b/src/dawn/wire/server/ServerDevice.cpp
@@ -16,185 +16,181 @@
 
 namespace dawn::wire::server {
 
-    namespace {
+namespace {
 
-        template <ObjectType objectType, typename Pipeline>
-        void HandleCreateRenderPipelineAsyncCallbackResult(KnownObjects<Pipeline>* knownObjects,
-                                                           WGPUCreatePipelineAsyncStatus status,
-                                                           Pipeline pipeline,
-                                                           CreatePipelineAsyncUserData* data) {
-            // May be null if the device was destroyed. Device destruction destroys child
-            // objects on the wire.
-            auto* pipelineObject =
-                knownObjects->Get(data->pipelineObjectID, AllocationState::Reserved);
-            // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
-            // they move from Reserved to Allocated, or if they are destroyed here.
-            ASSERT(pipelineObject != nullptr);
+template <ObjectType objectType, typename Pipeline>
+void HandleCreateRenderPipelineAsyncCallbackResult(KnownObjects<Pipeline>* knownObjects,
+                                                   WGPUCreatePipelineAsyncStatus status,
+                                                   Pipeline pipeline,
+                                                   CreatePipelineAsyncUserData* data) {
+    // May be null if the device was destroyed. Device destruction destroys child
+    // objects on the wire.
+    auto* pipelineObject = knownObjects->Get(data->pipelineObjectID, AllocationState::Reserved);
+    // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+    // they move from Reserved to Allocated, or if they are destroyed here.
+    ASSERT(pipelineObject != nullptr);
 
-            if (status == WGPUCreatePipelineAsyncStatus_Success) {
-                // Assign the handle and allocated status if the pipeline is created successfully.
-                pipelineObject->state = AllocationState::Allocated;
-                pipelineObject->handle = pipeline;
+    if (status == WGPUCreatePipelineAsyncStatus_Success) {
+        // Assign the handle and allocated status if the pipeline is created successfully.
+        pipelineObject->state = AllocationState::Allocated;
+        pipelineObject->handle = pipeline;
 
-                // This should be impossible to fail. It would require a command to be sent that
-                // creates a duplicate ObjectId, which would fail validation.
-                bool success = TrackDeviceChild(pipelineObject->deviceInfo, objectType,
-                                                data->pipelineObjectID);
-                ASSERT(success);
-            } else {
-                // Otherwise, free the ObjectId which will make it unusable.
-                knownObjects->Free(data->pipelineObjectID);
-                ASSERT(pipeline == nullptr);
-            }
-        }
+        // This should be impossible to fail. It would require a command to be sent that
+        // creates a duplicate ObjectId, which would fail validation.
+        bool success =
+            TrackDeviceChild(pipelineObject->deviceInfo, objectType, data->pipelineObjectID);
+        ASSERT(success);
+    } else {
+        // Otherwise, free the ObjectId which will make it unusable.
+        knownObjects->Free(data->pipelineObjectID);
+        ASSERT(pipeline == nullptr);
+    }
+}
 
-    }  // anonymous namespace
+}  // anonymous namespace
 
-    void Server::OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message) {
-        ReturnDeviceUncapturedErrorCallbackCmd cmd;
-        cmd.device = device;
-        cmd.type = type;
-        cmd.message = message;
+void Server::OnUncapturedError(ObjectHandle device, WGPUErrorType type, const char* message) {
+    ReturnDeviceUncapturedErrorCallbackCmd cmd;
+    cmd.device = device;
+    cmd.type = type;
+    cmd.message = message;
 
-        SerializeCommand(cmd);
+    SerializeCommand(cmd);
+}
+
+void Server::OnDeviceLost(ObjectHandle device, WGPUDeviceLostReason reason, const char* message) {
+    ReturnDeviceLostCallbackCmd cmd;
+    cmd.device = device;
+    cmd.reason = reason;
+    cmd.message = message;
+
+    SerializeCommand(cmd);
+}
+
+void Server::OnLogging(ObjectHandle device, WGPULoggingType type, const char* message) {
+    ReturnDeviceLoggingCallbackCmd cmd;
+    cmd.device = device;
+    cmd.type = type;
+    cmd.message = message;
+
+    SerializeCommand(cmd);
+}
+
+bool Server::DoDevicePopErrorScope(ObjectId deviceId, uint64_t requestSerial) {
+    auto* device = DeviceObjects().Get(deviceId);
+    if (device == nullptr) {
+        return false;
     }
 
-    void Server::OnDeviceLost(ObjectHandle device,
-                              WGPUDeviceLostReason reason,
-                              const char* message) {
-        ReturnDeviceLostCallbackCmd cmd;
-        cmd.device = device;
-        cmd.reason = reason;
-        cmd.message = message;
+    auto userdata = MakeUserdata<ErrorScopeUserdata>();
+    userdata->requestSerial = requestSerial;
+    userdata->device = ObjectHandle{deviceId, device->generation};
 
-        SerializeCommand(cmd);
+    mProcs.devicePopErrorScope(device->handle, ForwardToServer<&Server::OnDevicePopErrorScope>,
+                               userdata.release());
+    return true;
+}
+
+void Server::OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
+                                   WGPUErrorType type,
+                                   const char* message) {
+    ReturnDevicePopErrorScopeCallbackCmd cmd;
+    cmd.device = userdata->device;
+    cmd.requestSerial = userdata->requestSerial;
+    cmd.type = type;
+    cmd.message = message;
+
+    SerializeCommand(cmd);
+}
+
+bool Server::DoDeviceCreateComputePipelineAsync(ObjectId deviceId,
+                                                uint64_t requestSerial,
+                                                ObjectHandle pipelineObjectHandle,
+                                                const WGPUComputePipelineDescriptor* descriptor) {
+    auto* device = DeviceObjects().Get(deviceId);
+    if (device == nullptr) {
+        return false;
     }
 
-    void Server::OnLogging(ObjectHandle device, WGPULoggingType type, const char* message) {
-        ReturnDeviceLoggingCallbackCmd cmd;
-        cmd.device = device;
-        cmd.type = type;
-        cmd.message = message;
-
-        SerializeCommand(cmd);
+    auto* resultData =
+        ComputePipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+    if (resultData == nullptr) {
+        return false;
     }
 
-    bool Server::DoDevicePopErrorScope(ObjectId deviceId, uint64_t requestSerial) {
-        auto* device = DeviceObjects().Get(deviceId);
-        if (device == nullptr) {
-            return false;
-        }
+    resultData->generation = pipelineObjectHandle.generation;
+    resultData->deviceInfo = device->info.get();
 
-        auto userdata = MakeUserdata<ErrorScopeUserdata>();
-        userdata->requestSerial = requestSerial;
-        userdata->device = ObjectHandle{deviceId, device->generation};
+    auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+    userdata->device = ObjectHandle{deviceId, device->generation};
+    userdata->requestSerial = requestSerial;
+    userdata->pipelineObjectID = pipelineObjectHandle.id;
 
-        mProcs.devicePopErrorScope(device->handle, ForwardToServer<&Server::OnDevicePopErrorScope>,
-                                   userdata.release());
-        return true;
+    mProcs.deviceCreateComputePipelineAsync(
+        device->handle, descriptor, ForwardToServer<&Server::OnCreateComputePipelineAsyncCallback>,
+        userdata.release());
+    return true;
+}
+
+void Server::OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+                                                  WGPUCreatePipelineAsyncStatus status,
+                                                  WGPUComputePipeline pipeline,
+                                                  const char* message) {
+    HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::ComputePipeline>(
+        &ComputePipelineObjects(), status, pipeline, data);
+
+    ReturnDeviceCreateComputePipelineAsyncCallbackCmd cmd;
+    cmd.device = data->device;
+    cmd.status = status;
+    cmd.requestSerial = data->requestSerial;
+    cmd.message = message;
+
+    SerializeCommand(cmd);
+}
+
+bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
+                                               uint64_t requestSerial,
+                                               ObjectHandle pipelineObjectHandle,
+                                               const WGPURenderPipelineDescriptor* descriptor) {
+    auto* device = DeviceObjects().Get(deviceId);
+    if (device == nullptr) {
+        return false;
     }
 
-    void Server::OnDevicePopErrorScope(ErrorScopeUserdata* userdata,
-                                       WGPUErrorType type,
-                                       const char* message) {
-        ReturnDevicePopErrorScopeCallbackCmd cmd;
-        cmd.device = userdata->device;
-        cmd.requestSerial = userdata->requestSerial;
-        cmd.type = type;
-        cmd.message = message;
-
-        SerializeCommand(cmd);
+    auto* resultData =
+        RenderPipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
+    if (resultData == nullptr) {
+        return false;
     }
 
-    bool Server::DoDeviceCreateComputePipelineAsync(
-        ObjectId deviceId,
-        uint64_t requestSerial,
-        ObjectHandle pipelineObjectHandle,
-        const WGPUComputePipelineDescriptor* descriptor) {
-        auto* device = DeviceObjects().Get(deviceId);
-        if (device == nullptr) {
-            return false;
-        }
+    resultData->generation = pipelineObjectHandle.generation;
+    resultData->deviceInfo = device->info.get();
 
-        auto* resultData =
-            ComputePipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
-        if (resultData == nullptr) {
-            return false;
-        }
+    auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
+    userdata->device = ObjectHandle{deviceId, device->generation};
+    userdata->requestSerial = requestSerial;
+    userdata->pipelineObjectID = pipelineObjectHandle.id;
 
-        resultData->generation = pipelineObjectHandle.generation;
-        resultData->deviceInfo = device->info.get();
+    mProcs.deviceCreateRenderPipelineAsync(
+        device->handle, descriptor, ForwardToServer<&Server::OnCreateRenderPipelineAsyncCallback>,
+        userdata.release());
+    return true;
+}
 
-        auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
-        userdata->device = ObjectHandle{deviceId, device->generation};
-        userdata->requestSerial = requestSerial;
-        userdata->pipelineObjectID = pipelineObjectHandle.id;
+void Server::OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* data,
+                                                 WGPUCreatePipelineAsyncStatus status,
+                                                 WGPURenderPipeline pipeline,
+                                                 const char* message) {
+    HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::RenderPipeline>(
+        &RenderPipelineObjects(), status, pipeline, data);
 
-        mProcs.deviceCreateComputePipelineAsync(
-            device->handle, descriptor,
-            ForwardToServer<&Server::OnCreateComputePipelineAsyncCallback>, userdata.release());
-        return true;
-    }
+    ReturnDeviceCreateRenderPipelineAsyncCallbackCmd cmd;
+    cmd.device = data->device;
+    cmd.status = status;
+    cmd.requestSerial = data->requestSerial;
+    cmd.message = message;
 
-    void Server::OnCreateComputePipelineAsyncCallback(CreatePipelineAsyncUserData* data,
-                                                      WGPUCreatePipelineAsyncStatus status,
-                                                      WGPUComputePipeline pipeline,
-                                                      const char* message) {
-        HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::ComputePipeline>(
-            &ComputePipelineObjects(), status, pipeline, data);
-
-        ReturnDeviceCreateComputePipelineAsyncCallbackCmd cmd;
-        cmd.device = data->device;
-        cmd.status = status;
-        cmd.requestSerial = data->requestSerial;
-        cmd.message = message;
-
-        SerializeCommand(cmd);
-    }
-
-    bool Server::DoDeviceCreateRenderPipelineAsync(ObjectId deviceId,
-                                                   uint64_t requestSerial,
-                                                   ObjectHandle pipelineObjectHandle,
-                                                   const WGPURenderPipelineDescriptor* descriptor) {
-        auto* device = DeviceObjects().Get(deviceId);
-        if (device == nullptr) {
-            return false;
-        }
-
-        auto* resultData =
-            RenderPipelineObjects().Allocate(pipelineObjectHandle.id, AllocationState::Reserved);
-        if (resultData == nullptr) {
-            return false;
-        }
-
-        resultData->generation = pipelineObjectHandle.generation;
-        resultData->deviceInfo = device->info.get();
-
-        auto userdata = MakeUserdata<CreatePipelineAsyncUserData>();
-        userdata->device = ObjectHandle{deviceId, device->generation};
-        userdata->requestSerial = requestSerial;
-        userdata->pipelineObjectID = pipelineObjectHandle.id;
-
-        mProcs.deviceCreateRenderPipelineAsync(
-            device->handle, descriptor,
-            ForwardToServer<&Server::OnCreateRenderPipelineAsyncCallback>, userdata.release());
-        return true;
-    }
-
-    void Server::OnCreateRenderPipelineAsyncCallback(CreatePipelineAsyncUserData* data,
-                                                     WGPUCreatePipelineAsyncStatus status,
-                                                     WGPURenderPipeline pipeline,
-                                                     const char* message) {
-        HandleCreateRenderPipelineAsyncCallbackResult<ObjectType::RenderPipeline>(
-            &RenderPipelineObjects(), status, pipeline, data);
-
-        ReturnDeviceCreateRenderPipelineAsyncCallbackCmd cmd;
-        cmd.device = data->device;
-        cmd.status = status;
-        cmd.requestSerial = data->requestSerial;
-        cmd.message = message;
-
-        SerializeCommand(cmd);
-    }
+    SerializeCommand(cmd);
+}
 
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp b/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
index e89a8bc..6f5884a 100644
--- a/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
+++ b/src/dawn/wire/server/ServerInlineMemoryTransferService.cpp
@@ -21,75 +21,70 @@
 
 namespace dawn::wire::server {
 
-    class InlineMemoryTransferService : public MemoryTransferService {
+class InlineMemoryTransferService : public MemoryTransferService {
+  public:
+    class ReadHandleImpl : public ReadHandle {
       public:
-        class ReadHandleImpl : public ReadHandle {
-          public:
-            ReadHandleImpl() {
-            }
-            ~ReadHandleImpl() override = default;
+        ReadHandleImpl() {}
+        ~ReadHandleImpl() override = default;
 
-            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override {
-                return size;
-            }
+        size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override { return size; }
 
-            void SerializeDataUpdate(const void* data,
-                                     size_t offset,
-                                     size_t size,
-                                     void* serializePointer) override {
-                if (size > 0) {
-                    ASSERT(data != nullptr);
-                    ASSERT(serializePointer != nullptr);
-                    memcpy(serializePointer, data, size);
-                }
+        void SerializeDataUpdate(const void* data,
+                                 size_t offset,
+                                 size_t size,
+                                 void* serializePointer) override {
+            if (size > 0) {
+                ASSERT(data != nullptr);
+                ASSERT(serializePointer != nullptr);
+                memcpy(serializePointer, data, size);
             }
-        };
-
-        class WriteHandleImpl : public WriteHandle {
-          public:
-            WriteHandleImpl() {
-            }
-            ~WriteHandleImpl() override = default;
-
-            bool DeserializeDataUpdate(const void* deserializePointer,
-                                       size_t deserializeSize,
-                                       size_t offset,
-                                       size_t size) override {
-                if (deserializeSize != size || mTargetData == nullptr ||
-                    deserializePointer == nullptr) {
-                    return false;
-                }
-                if ((offset >= mDataLength && offset > 0) || size > mDataLength - offset) {
-                    return false;
-                }
-                memcpy(static_cast<uint8_t*>(mTargetData) + offset, deserializePointer, size);
-                return true;
-            }
-        };
-
-        InlineMemoryTransferService() {
         }
-        ~InlineMemoryTransferService() override = default;
+    };
 
-        bool DeserializeReadHandle(const void* deserializePointer,
+    class WriteHandleImpl : public WriteHandle {
+      public:
+        WriteHandleImpl() {}
+        ~WriteHandleImpl() override = default;
+
+        bool DeserializeDataUpdate(const void* deserializePointer,
                                    size_t deserializeSize,
-                                   ReadHandle** readHandle) override {
-            ASSERT(readHandle != nullptr);
-            *readHandle = new ReadHandleImpl();
-            return true;
-        }
-
-        bool DeserializeWriteHandle(const void* deserializePointer,
-                                    size_t deserializeSize,
-                                    WriteHandle** writeHandle) override {
-            ASSERT(writeHandle != nullptr);
-            *writeHandle = new WriteHandleImpl();
+                                   size_t offset,
+                                   size_t size) override {
+            if (deserializeSize != size || mTargetData == nullptr ||
+                deserializePointer == nullptr) {
+                return false;
+            }
+            if ((offset >= mDataLength && offset > 0) || size > mDataLength - offset) {
+                return false;
+            }
+            memcpy(static_cast<uint8_t*>(mTargetData) + offset, deserializePointer, size);
             return true;
         }
     };
 
-    std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
-        return std::make_unique<InlineMemoryTransferService>();
+    InlineMemoryTransferService() {}
+    ~InlineMemoryTransferService() override = default;
+
+    bool DeserializeReadHandle(const void* deserializePointer,
+                               size_t deserializeSize,
+                               ReadHandle** readHandle) override {
+        ASSERT(readHandle != nullptr);
+        *readHandle = new ReadHandleImpl();
+        return true;
     }
 
+    bool DeserializeWriteHandle(const void* deserializePointer,
+                                size_t deserializeSize,
+                                WriteHandle** writeHandle) override {
+        ASSERT(writeHandle != nullptr);
+        *writeHandle = new WriteHandleImpl();
+        return true;
+    }
+};
+
+std::unique_ptr<MemoryTransferService> CreateInlineMemoryTransferService() {
+    return std::make_unique<InlineMemoryTransferService>();
+}
+
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerInstance.cpp b/src/dawn/wire/server/ServerInstance.cpp
index 1099800..d3998a3 100644
--- a/src/dawn/wire/server/ServerInstance.cpp
+++ b/src/dawn/wire/server/ServerInstance.cpp
@@ -15,86 +15,85 @@
 #include <algorithm>
 #include <vector>
 
-#include "dawn/wire/server/Server.h"
 #include "dawn/wire/SupportedFeatures.h"
+#include "dawn/wire/server/Server.h"
 
 namespace dawn::wire::server {
 
-    bool Server::DoInstanceRequestAdapter(ObjectId instanceId,
-                                          uint64_t requestSerial,
-                                          ObjectHandle adapterHandle,
-                                          const WGPURequestAdapterOptions* options) {
-        auto* instance = InstanceObjects().Get(instanceId);
-        if (instance == nullptr) {
-            return false;
-        }
-
-        auto* resultData = AdapterObjects().Allocate(adapterHandle.id, AllocationState::Reserved);
-        if (resultData == nullptr) {
-            return false;
-        }
-
-        resultData->generation = adapterHandle.generation;
-
-        auto userdata = MakeUserdata<RequestAdapterUserdata>();
-        userdata->instance = ObjectHandle{instanceId, instance->generation};
-        userdata->requestSerial = requestSerial;
-        userdata->adapterObjectId = adapterHandle.id;
-
-        mProcs.instanceRequestAdapter(instance->handle, options,
-                                      ForwardToServer<&Server::OnRequestAdapterCallback>,
-                                      userdata.release());
-        return true;
+bool Server::DoInstanceRequestAdapter(ObjectId instanceId,
+                                      uint64_t requestSerial,
+                                      ObjectHandle adapterHandle,
+                                      const WGPURequestAdapterOptions* options) {
+    auto* instance = InstanceObjects().Get(instanceId);
+    if (instance == nullptr) {
+        return false;
     }
 
-    void Server::OnRequestAdapterCallback(RequestAdapterUserdata* data,
-                                          WGPURequestAdapterStatus status,
-                                          WGPUAdapter adapter,
-                                          const char* message) {
-        auto* adapterObject =
-            AdapterObjects().Get(data->adapterObjectId, AllocationState::Reserved);
-        // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
-        // they move from Reserved to Allocated, or if they are destroyed here.
-        ASSERT(adapterObject != nullptr);
+    auto* resultData = AdapterObjects().Allocate(adapterHandle.id, AllocationState::Reserved);
+    if (resultData == nullptr) {
+        return false;
+    }
 
-        ReturnInstanceRequestAdapterCallbackCmd cmd = {};
-        cmd.instance = data->instance;
-        cmd.requestSerial = data->requestSerial;
-        cmd.status = status;
-        cmd.message = message;
+    resultData->generation = adapterHandle.generation;
 
-        if (status != WGPURequestAdapterStatus_Success) {
-            // Free the ObjectId which will make it unusable.
-            AdapterObjects().Free(data->adapterObjectId);
-            ASSERT(adapter == nullptr);
-            SerializeCommand(cmd);
-            return;
-        }
+    auto userdata = MakeUserdata<RequestAdapterUserdata>();
+    userdata->instance = ObjectHandle{instanceId, instance->generation};
+    userdata->requestSerial = requestSerial;
+    userdata->adapterObjectId = adapterHandle.id;
 
-        WGPUAdapterProperties properties = {};
-        WGPUSupportedLimits limits = {};
-        std::vector<WGPUFeatureName> features;
+    mProcs.instanceRequestAdapter(instance->handle, options,
+                                  ForwardToServer<&Server::OnRequestAdapterCallback>,
+                                  userdata.release());
+    return true;
+}
 
-        // Assign the handle and allocated status if the adapter is created successfully.
-        adapterObject->state = AllocationState::Allocated;
-        adapterObject->handle = adapter;
+void Server::OnRequestAdapterCallback(RequestAdapterUserdata* data,
+                                      WGPURequestAdapterStatus status,
+                                      WGPUAdapter adapter,
+                                      const char* message) {
+    auto* adapterObject = AdapterObjects().Get(data->adapterObjectId, AllocationState::Reserved);
+    // Should be impossible to fail. ObjectIds can't be freed by a destroy command until
+    // they move from Reserved to Allocated, or if they are destroyed here.
+    ASSERT(adapterObject != nullptr);
 
-        size_t featuresCount = mProcs.adapterEnumerateFeatures(adapter, nullptr);
-        features.resize(featuresCount);
-        mProcs.adapterEnumerateFeatures(adapter, features.data());
+    ReturnInstanceRequestAdapterCallbackCmd cmd = {};
+    cmd.instance = data->instance;
+    cmd.requestSerial = data->requestSerial;
+    cmd.status = status;
+    cmd.message = message;
 
-        // Hide features the wire cannot support.
-        auto it = std::partition(features.begin(), features.end(), IsFeatureSupported);
-
-        cmd.featuresCount = static_cast<uint32_t>(std::distance(features.begin(), it));
-        cmd.features = features.data();
-
-        mProcs.adapterGetProperties(adapter, &properties);
-        mProcs.adapterGetLimits(adapter, &limits);
-        cmd.properties = &properties;
-        cmd.limits = &limits;
-
+    if (status != WGPURequestAdapterStatus_Success) {
+        // Free the ObjectId which will make it unusable.
+        AdapterObjects().Free(data->adapterObjectId);
+        ASSERT(adapter == nullptr);
         SerializeCommand(cmd);
+        return;
     }
 
+    WGPUAdapterProperties properties = {};
+    WGPUSupportedLimits limits = {};
+    std::vector<WGPUFeatureName> features;
+
+    // Assign the handle and allocated status if the adapter is created successfully.
+    adapterObject->state = AllocationState::Allocated;
+    adapterObject->handle = adapter;
+
+    size_t featuresCount = mProcs.adapterEnumerateFeatures(adapter, nullptr);
+    features.resize(featuresCount);
+    mProcs.adapterEnumerateFeatures(adapter, features.data());
+
+    // Hide features the wire cannot support.
+    auto it = std::partition(features.begin(), features.end(), IsFeatureSupported);
+
+    cmd.featuresCount = static_cast<uint32_t>(std::distance(features.begin(), it));
+    cmd.features = features.data();
+
+    mProcs.adapterGetProperties(adapter, &properties);
+    mProcs.adapterGetLimits(adapter, &limits);
+    cmd.properties = &properties;
+    cmd.limits = &limits;
+
+    SerializeCommand(cmd);
+}
+
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp b/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
index 758c344..20fd8b6 100644
--- a/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
+++ b/src/dawn/wire/server/ServerMemoryTransferService_mock.cpp
@@ -18,74 +18,71 @@
 
 namespace dawn::wire::server {
 
-    MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
-        : ReadHandle(), mService(service) {
-    }
+MockMemoryTransferService::MockReadHandle::MockReadHandle(MockMemoryTransferService* service)
+    : ReadHandle(), mService(service) {}
 
-    MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
-        mService->OnReadHandleDestroy(this);
-    }
+MockMemoryTransferService::MockReadHandle::~MockReadHandle() {
+    mService->OnReadHandleDestroy(this);
+}
 
-    size_t MockMemoryTransferService::MockReadHandle::SizeOfSerializeDataUpdate(size_t offset,
-                                                                                size_t size) {
-        return mService->OnReadHandleSizeOfSerializeDataUpdate(this, offset, size);
-    }
+size_t MockMemoryTransferService::MockReadHandle::SizeOfSerializeDataUpdate(size_t offset,
+                                                                            size_t size) {
+    return mService->OnReadHandleSizeOfSerializeDataUpdate(this, offset, size);
+}
 
-    void MockMemoryTransferService::MockReadHandle::SerializeDataUpdate(const void* data,
-                                                                        size_t offset,
-                                                                        size_t size,
-                                                                        void* serializePointer) {
-        mService->OnReadHandleSerializeDataUpdate(this, data, offset, size, serializePointer);
-    }
+void MockMemoryTransferService::MockReadHandle::SerializeDataUpdate(const void* data,
+                                                                    size_t offset,
+                                                                    size_t size,
+                                                                    void* serializePointer) {
+    mService->OnReadHandleSerializeDataUpdate(this, data, offset, size, serializePointer);
+}
 
-    MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
-        : WriteHandle(), mService(service) {
-    }
+MockMemoryTransferService::MockWriteHandle::MockWriteHandle(MockMemoryTransferService* service)
+    : WriteHandle(), mService(service) {}
 
-    MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
-        mService->OnWriteHandleDestroy(this);
-    }
+MockMemoryTransferService::MockWriteHandle::~MockWriteHandle() {
+    mService->OnWriteHandleDestroy(this);
+}
 
-    const uint32_t* MockMemoryTransferService::MockWriteHandle::GetData() const {
-        return reinterpret_cast<const uint32_t*>(mTargetData);
-    }
+const uint32_t* MockMemoryTransferService::MockWriteHandle::GetData() const {
+    return reinterpret_cast<const uint32_t*>(mTargetData);
+}
 
-    bool MockMemoryTransferService::MockWriteHandle::DeserializeDataUpdate(
-        const void* deserializePointer,
-        size_t deserializeSize,
-        size_t offset,
-        size_t size) {
-        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
-        return mService->OnWriteHandleDeserializeDataUpdate(
-            this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset,
-            size);
-    }
+bool MockMemoryTransferService::MockWriteHandle::DeserializeDataUpdate(
+    const void* deserializePointer,
+    size_t deserializeSize,
+    size_t offset,
+    size_t size) {
+    ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+    return mService->OnWriteHandleDeserializeDataUpdate(
+        this, reinterpret_cast<const uint32_t*>(deserializePointer), deserializeSize, offset, size);
+}
 
-    MockMemoryTransferService::MockMemoryTransferService() = default;
-    MockMemoryTransferService::~MockMemoryTransferService() = default;
+MockMemoryTransferService::MockMemoryTransferService() = default;
+MockMemoryTransferService::~MockMemoryTransferService() = default;
 
-    bool MockMemoryTransferService::DeserializeReadHandle(const void* deserializePointer,
-                                                          size_t deserializeSize,
-                                                          ReadHandle** readHandle) {
-        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
-        return OnDeserializeReadHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
-                                       deserializeSize, readHandle);
-    }
+bool MockMemoryTransferService::DeserializeReadHandle(const void* deserializePointer,
+                                                      size_t deserializeSize,
+                                                      ReadHandle** readHandle) {
+    ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+    return OnDeserializeReadHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+                                   deserializeSize, readHandle);
+}
 
-    bool MockMemoryTransferService::DeserializeWriteHandle(const void* deserializePointer,
-                                                           size_t deserializeSize,
-                                                           WriteHandle** writeHandle) {
-        ASSERT(deserializeSize % sizeof(uint32_t) == 0);
-        return OnDeserializeWriteHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
-                                        deserializeSize, writeHandle);
-    }
+bool MockMemoryTransferService::DeserializeWriteHandle(const void* deserializePointer,
+                                                       size_t deserializeSize,
+                                                       WriteHandle** writeHandle) {
+    ASSERT(deserializeSize % sizeof(uint32_t) == 0);
+    return OnDeserializeWriteHandle(reinterpret_cast<const uint32_t*>(deserializePointer),
+                                    deserializeSize, writeHandle);
+}
 
-    MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
-        return new MockReadHandle(this);
-    }
+MockMemoryTransferService::MockReadHandle* MockMemoryTransferService::NewReadHandle() {
+    return new MockReadHandle(this);
+}
 
-    MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
-        return new MockWriteHandle(this);
-    }
+MockMemoryTransferService::MockWriteHandle* MockMemoryTransferService::NewWriteHandle() {
+    return new MockWriteHandle(this);
+}
 
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerMemoryTransferService_mock.h b/src/dawn/wire/server/ServerMemoryTransferService_mock.h
index 6cc72a6..1d02ccb 100644
--- a/src/dawn/wire/server/ServerMemoryTransferService_mock.h
+++ b/src/dawn/wire/server/ServerMemoryTransferService_mock.h
@@ -22,87 +22,87 @@
 
 namespace dawn::wire::server {
 
-    class MockMemoryTransferService : public MemoryTransferService {
+class MockMemoryTransferService : public MemoryTransferService {
+  public:
+    class MockReadHandle : public ReadHandle {
       public:
-        class MockReadHandle : public ReadHandle {
-          public:
-            explicit MockReadHandle(MockMemoryTransferService* service);
-            ~MockReadHandle() override;
+        explicit MockReadHandle(MockMemoryTransferService* service);
+        ~MockReadHandle() override;
 
-            size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
-            void SerializeDataUpdate(const void* data,
-                                     size_t offset,
-                                     size_t size,
-                                     void* serializePointer) override;
+        size_t SizeOfSerializeDataUpdate(size_t offset, size_t size) override;
+        void SerializeDataUpdate(const void* data,
+                                 size_t offset,
+                                 size_t size,
+                                 void* serializePointer) override;
 
-          private:
-            MockMemoryTransferService* mService;
-        };
-
-        class MockWriteHandle : public WriteHandle {
-          public:
-            explicit MockWriteHandle(MockMemoryTransferService* service);
-            ~MockWriteHandle() override;
-
-            bool DeserializeDataUpdate(const void* deserializePointer,
-                                       size_t deserializeSize,
-                                       size_t offset,
-                                       size_t size) override;
-
-            const uint32_t* GetData() const;
-
-          private:
-            MockMemoryTransferService* mService;
-        };
-
-        MockMemoryTransferService();
-        ~MockMemoryTransferService() override;
-
-        bool DeserializeReadHandle(const void* deserializePointer,
-                                   size_t deserializeSize,
-                                   ReadHandle** readHandle) override;
-
-        bool DeserializeWriteHandle(const void* deserializePointer,
-                                    size_t deserializeSize,
-                                    WriteHandle** writeHandle) override;
-
-        MockReadHandle* NewReadHandle();
-        MockWriteHandle* NewWriteHandle();
-
-        MOCK_METHOD(bool,
-                    OnDeserializeReadHandle,
-                    (const uint32_t* deserializePointer,
-                     size_t deserializeSize,
-                     ReadHandle** readHandle));
-
-        MOCK_METHOD(bool,
-                    OnDeserializeWriteHandle,
-                    (const uint32_t* deserializePointer,
-                     size_t deserializeSize,
-                     WriteHandle** writeHandle));
-
-        MOCK_METHOD(size_t,
-                    OnReadHandleSizeOfSerializeDataUpdate,
-                    (const ReadHandle* readHandle, size_t offset, size_t size));
-        MOCK_METHOD(void,
-                    OnReadHandleSerializeDataUpdate,
-                    (const ReadHandle* readHandle,
-                     const void* data,
-                     size_t offset,
-                     size_t size,
-                     void* serializePointer));
-        MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle* readHandle));
-
-        MOCK_METHOD(bool,
-                    OnWriteHandleDeserializeDataUpdate,
-                    (const WriteHandle* writeHandle,
-                     const uint32_t* deserializePointer,
-                     size_t deserializeSize,
-                     size_t offset,
-                     size_t size));
-        MOCK_METHOD(void, OnWriteHandleDestroy, (const WriteHandle* writeHandle));
+      private:
+        MockMemoryTransferService* mService;
     };
 
+    class MockWriteHandle : public WriteHandle {
+      public:
+        explicit MockWriteHandle(MockMemoryTransferService* service);
+        ~MockWriteHandle() override;
+
+        bool DeserializeDataUpdate(const void* deserializePointer,
+                                   size_t deserializeSize,
+                                   size_t offset,
+                                   size_t size) override;
+
+        const uint32_t* GetData() const;
+
+      private:
+        MockMemoryTransferService* mService;
+    };
+
+    MockMemoryTransferService();
+    ~MockMemoryTransferService() override;
+
+    bool DeserializeReadHandle(const void* deserializePointer,
+                               size_t deserializeSize,
+                               ReadHandle** readHandle) override;
+
+    bool DeserializeWriteHandle(const void* deserializePointer,
+                                size_t deserializeSize,
+                                WriteHandle** writeHandle) override;
+
+    MockReadHandle* NewReadHandle();
+    MockWriteHandle* NewWriteHandle();
+
+    MOCK_METHOD(bool,
+                OnDeserializeReadHandle,
+                (const uint32_t* deserializePointer,
+                 size_t deserializeSize,
+                 ReadHandle** readHandle));
+
+    MOCK_METHOD(bool,
+                OnDeserializeWriteHandle,
+                (const uint32_t* deserializePointer,
+                 size_t deserializeSize,
+                 WriteHandle** writeHandle));
+
+    MOCK_METHOD(size_t,
+                OnReadHandleSizeOfSerializeDataUpdate,
+                (const ReadHandle* readHandle, size_t offset, size_t size));
+    MOCK_METHOD(void,
+                OnReadHandleSerializeDataUpdate,
+                (const ReadHandle* readHandle,
+                 const void* data,
+                 size_t offset,
+                 size_t size,
+                 void* serializePointer));
+    MOCK_METHOD(void, OnReadHandleDestroy, (const ReadHandle* readHandle));
+
+    MOCK_METHOD(bool,
+                OnWriteHandleDeserializeDataUpdate,
+                (const WriteHandle* writeHandle,
+                 const uint32_t* deserializePointer,
+                 size_t deserializeSize,
+                 size_t offset,
+                 size_t size));
+    MOCK_METHOD(void, OnWriteHandleDestroy, (const WriteHandle* writeHandle));
+};
+
 }  // namespace dawn::wire::server
 
 #endif  // SRC_DAWN_WIRE_SERVER_SERVERMEMORYTRANSFERSERVICE_MOCK_H_
diff --git a/src/dawn/wire/server/ServerQueue.cpp b/src/dawn/wire/server/ServerQueue.cpp
index 80528e7..3ea8dab 100644
--- a/src/dawn/wire/server/ServerQueue.cpp
+++ b/src/dawn/wire/server/ServerQueue.cpp
@@ -19,87 +19,84 @@
 
 namespace dawn::wire::server {
 
-    void Server::OnQueueWorkDone(QueueWorkDoneUserdata* data, WGPUQueueWorkDoneStatus status) {
-        ReturnQueueWorkDoneCallbackCmd cmd;
-        cmd.queue = data->queue;
-        cmd.requestSerial = data->requestSerial;
-        cmd.status = status;
+void Server::OnQueueWorkDone(QueueWorkDoneUserdata* data, WGPUQueueWorkDoneStatus status) {
+    ReturnQueueWorkDoneCallbackCmd cmd;
+    cmd.queue = data->queue;
+    cmd.requestSerial = data->requestSerial;
+    cmd.status = status;
 
-        SerializeCommand(cmd);
+    SerializeCommand(cmd);
+}
+
+bool Server::DoQueueOnSubmittedWorkDone(ObjectId queueId,
+                                        uint64_t signalValue,
+                                        uint64_t requestSerial) {
+    auto* queue = QueueObjects().Get(queueId);
+    if (queue == nullptr) {
+        return false;
     }
 
-    bool Server::DoQueueOnSubmittedWorkDone(ObjectId queueId,
-                                            uint64_t signalValue,
-                                            uint64_t requestSerial) {
-        auto* queue = QueueObjects().Get(queueId);
-        if (queue == nullptr) {
+    auto userdata = MakeUserdata<QueueWorkDoneUserdata>();
+    userdata->queue = ObjectHandle{queueId, queue->generation};
+    userdata->requestSerial = requestSerial;
+
+    mProcs.queueOnSubmittedWorkDone(queue->handle, signalValue,
+                                    ForwardToServer<&Server::OnQueueWorkDone>, userdata.release());
+    return true;
+}
+
+bool Server::DoQueueWriteBuffer(ObjectId queueId,
+                                ObjectId bufferId,
+                                uint64_t bufferOffset,
+                                const uint8_t* data,
+                                uint64_t size) {
+    // The null object isn't valid as `self` or `buffer` so we can combine the check with the
+    // check that the ID is valid.
+    auto* queue = QueueObjects().Get(queueId);
+    auto* buffer = BufferObjects().Get(bufferId);
+    if (queue == nullptr || buffer == nullptr) {
+        return false;
+    }
+
+    if (size > std::numeric_limits<size_t>::max()) {
+        auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
+        if (device == nullptr) {
             return false;
         }
-
-        auto userdata = MakeUserdata<QueueWorkDoneUserdata>();
-        userdata->queue = ObjectHandle{queueId, queue->generation};
-        userdata->requestSerial = requestSerial;
-
-        mProcs.queueOnSubmittedWorkDone(queue->handle, signalValue,
-                                        ForwardToServer<&Server::OnQueueWorkDone>,
-                                        userdata.release());
-        return true;
+        return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device), WGPUErrorType_OutOfMemory,
+                                   "Data size too large for write buffer.");
     }
 
-    bool Server::DoQueueWriteBuffer(ObjectId queueId,
-                                    ObjectId bufferId,
-                                    uint64_t bufferOffset,
-                                    const uint8_t* data,
-                                    uint64_t size) {
-        // The null object isn't valid as `self` or `buffer` so we can combine the check with the
-        // check that the ID is valid.
-        auto* queue = QueueObjects().Get(queueId);
-        auto* buffer = BufferObjects().Get(bufferId);
-        if (queue == nullptr || buffer == nullptr) {
+    mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data,
+                            static_cast<size_t>(size));
+    return true;
+}
+
+bool Server::DoQueueWriteTexture(ObjectId queueId,
+                                 const WGPUImageCopyTexture* destination,
+                                 const uint8_t* data,
+                                 uint64_t dataSize,
+                                 const WGPUTextureDataLayout* dataLayout,
+                                 const WGPUExtent3D* writeSize) {
+    // The null object isn't valid as `self` so we can combine the check with the
+    // check that the ID is valid.
+    auto* queue = QueueObjects().Get(queueId);
+    if (queue == nullptr) {
+        return false;
+    }
+
+    if (dataSize > std::numeric_limits<size_t>::max()) {
+        auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
+        if (device == nullptr) {
             return false;
         }
-
-        if (size > std::numeric_limits<size_t>::max()) {
-            auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
-            if (device == nullptr) {
-                return false;
-            }
-            return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
-                                       WGPUErrorType_OutOfMemory,
-                                       "Data size too large for write texture.");
-        }
-
-        mProcs.queueWriteBuffer(queue->handle, buffer->handle, bufferOffset, data,
-                                static_cast<size_t>(size));
-        return true;
+        return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device), WGPUErrorType_OutOfMemory,
+                                   "Data size too large for write texture.");
     }
 
-    bool Server::DoQueueWriteTexture(ObjectId queueId,
-                                     const WGPUImageCopyTexture* destination,
-                                     const uint8_t* data,
-                                     uint64_t dataSize,
-                                     const WGPUTextureDataLayout* dataLayout,
-                                     const WGPUExtent3D* writeSize) {
-        // The null object isn't valid as `self` so we can combine the check with the
-        // check that the ID is valid.
-        auto* queue = QueueObjects().Get(queueId);
-        if (queue == nullptr) {
-            return false;
-        }
-
-        if (dataSize > std::numeric_limits<size_t>::max()) {
-            auto* device = DeviceObjects().Get(queue->deviceInfo->self.id);
-            if (device == nullptr) {
-                return false;
-            }
-            return DoDeviceInjectError(reinterpret_cast<WGPUDevice>(device),
-                                       WGPUErrorType_OutOfMemory,
-                                       "Data size too large for write texture.");
-        }
-
-        mProcs.queueWriteTexture(queue->handle, destination, data, static_cast<size_t>(dataSize),
-                                 dataLayout, writeSize);
-        return true;
-    }
+    mProcs.queueWriteTexture(queue->handle, destination, data, static_cast<size_t>(dataSize),
+                             dataLayout, writeSize);
+    return true;
+}
 
 }  // namespace dawn::wire::server
diff --git a/src/dawn/wire/server/ServerShaderModule.cpp b/src/dawn/wire/server/ServerShaderModule.cpp
index 82b0461..9a3f3da 100644
--- a/src/dawn/wire/server/ServerShaderModule.cpp
+++ b/src/dawn/wire/server/ServerShaderModule.cpp
@@ -18,32 +18,32 @@
 
 namespace dawn::wire::server {
 
-    bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
-        auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
-        if (shaderModule == nullptr) {
-            return false;
-        }
-
-        auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
-        userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
-        userdata->requestSerial = requestSerial;
-
-        mProcs.shaderModuleGetCompilationInfo(
-            shaderModule->handle, ForwardToServer<&Server::OnShaderModuleGetCompilationInfo>,
-            userdata.release());
-        return true;
+bool Server::DoShaderModuleGetCompilationInfo(ObjectId shaderModuleId, uint64_t requestSerial) {
+    auto* shaderModule = ShaderModuleObjects().Get(shaderModuleId);
+    if (shaderModule == nullptr) {
+        return false;
     }
 
-    void Server::OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* data,
-                                                  WGPUCompilationInfoRequestStatus status,
-                                                  const WGPUCompilationInfo* info) {
-        ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
-        cmd.shaderModule = data->shaderModule;
-        cmd.requestSerial = data->requestSerial;
-        cmd.status = status;
-        cmd.info = info;
+    auto userdata = MakeUserdata<ShaderModuleGetCompilationInfoUserdata>();
+    userdata->shaderModule = ObjectHandle{shaderModuleId, shaderModule->generation};
+    userdata->requestSerial = requestSerial;
 
-        SerializeCommand(cmd);
-    }
+    mProcs.shaderModuleGetCompilationInfo(
+        shaderModule->handle, ForwardToServer<&Server::OnShaderModuleGetCompilationInfo>,
+        userdata.release());
+    return true;
+}
+
+void Server::OnShaderModuleGetCompilationInfo(ShaderModuleGetCompilationInfoUserdata* data,
+                                              WGPUCompilationInfoRequestStatus status,
+                                              const WGPUCompilationInfo* info) {
+    ReturnShaderModuleGetCompilationInfoCallbackCmd cmd;
+    cmd.shaderModule = data->shaderModule;
+    cmd.requestSerial = data->requestSerial;
+    cmd.status = status;
+    cmd.info = info;
+
+    SerializeCommand(cmd);
+}
 
 }  // namespace dawn::wire::server